Diffstat (limited to 'extraconfig/tasks')
26 files changed, 216 insertions, 1157 deletions
diff --git a/extraconfig/tasks/aodh_data_migration.sh b/extraconfig/tasks/aodh_data_migration.sh deleted file mode 100644 index d4c29673..00000000 --- a/extraconfig/tasks/aodh_data_migration.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# -# This delivers the aodh data migration script to be invoked as part of the tripleo -# major upgrade workflow to migrate all the alarm data from mongodb to mysql. -# This needs to run post controller node upgrades so new aodh mysql db configured and -# running. -# -set -eu - -#Get existing mongodb connection -MONGO_DB_CONNECTION="$(crudini --get /etc/ceilometer/ceilometer.conf database connection)" - -# Get the aodh database string from hiera data -MYSQL_DB_CONNECTION="$(crudini --get /etc/aodh/aodh.conf database connection)" - -#Run migration -/usr/bin/aodh-data-migration --nosql-conn $MONGO_DB_CONNECTION --sql-conn $MYSQL_DB_CONNECTION - - diff --git a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml deleted file mode 100644 index cf5d7a84..00000000 --- a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml +++ /dev/null @@ -1,62 +0,0 @@ -heat_template_version: ocata - -description: > - Software-config for ceilometer configuration under httpd during upgrades - -parameters: - servers: - type: json - input_values: - type: json - description: input values for the software deployments -resources: - CeilometerWsgiMitakaNewtonPreUpgradeConfig: - type: OS::Heat::SoftwareConfig - properties: - group: puppet - config: - get_file: mitaka_to_newton_ceilometer_wsgi_upgrade.pp - - CeilometerWsgiMitakaNewtonUpgradeConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - "#!/bin/bash\n\nset -e\n\n" - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - "disable_standalone_ceilometer_api\n\n" - - CeilometerWsgiMitakaNewtonPostUpgradeConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: | - #!/bin/bash - set -e - /usr/bin/systemctl reload httpd - - CeilometerWsgiMitakaNewtonPreUpgradeDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - name: CeilometerWsgiMitakaNewtonPreUpgradeDeployment - servers: {get_param: [servers, Controller]} - config: {get_resource: CeilometerWsgiMitakaNewtonPreUpgradeConfig} - - CeilometerWsgiMitakaNewtonUpgradeConfigDeployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: CeilometerWsgiMitakaNewtonPreUpgradeDeployment - properties: - name: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment - servers: {get_param: [servers, Controller]} - config: {get_resource: CeilometerWsgiMitakaNewtonUpgradeConfig} - - CeilometerWsgiMitakaNewtonPostUpgradeDeployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment - properties: - name: CeilometerWsgiMitakaNewtonPostUpgradeDeployment - servers: {get_param: [servers, Controller]} - config: {get_resource: CeilometerWsgiMitakaNewtonPostUpgradeConfig} diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh deleted file mode 100755 index 8bdff5e7..00000000 --- a/extraconfig/tasks/major_upgrade_check.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/bin/bash - -set -eu - -check_cluster() -{ - if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then - echo_error "ERROR: upgrade cannot start with some cluster nodes being offline" - exit 1 - fi -} - 
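Editor's note: the guard functions in this deleted script (check_cluster above, check_pcsd below) test grep inside an "if" pipeline; under "set -eu" this is safe because a command used as an if-condition does not trigger -e when it fails. A minimal sketch of that shell behaviour (strings are illustrative):

    set -e
    if echo "all nodes online" | grep -q 'OFFLINE:'; then exit 1; fi   # grep exits 1 (no match); the if-test absorbs it
    echo "still running"   # reached: set -e did not abort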
-check_pcsd() -{ - if pcs status 2>&1 | grep -E 'Offline'; then - echo_error "ERROR: upgrade cannot start with some pcsd daemon offline" - exit 1 - fi -} - -mysql_need_update() -{ - # Shall we upgrade mysql data directory during the stack upgrade? - if [ "$mariadb_do_major_upgrade" = "auto" ]; then - ret=$(is_mysql_upgrade_needed) - if [ $ret = "1" ]; then - DO_MYSQL_UPGRADE=1 - else - DO_MYSQL_UPGRADE=0 - fi - echo "mysql upgrade required: $DO_MYSQL_UPGRADE" - elif [ "$mariadb_do_major_upgrade" = "no" ]; then - DO_MYSQL_UPGRADE=0 - else - DO_MYSQL_UPGRADE=1 - fi -} - -check_disk_for_mysql_dump() -{ - # Where to backup current database if mysql need to be upgraded - MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp - MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup - # Spare disk ratio for extra safety - MYSQL_BACKUP_SIZE_RATIO=1.2 - - mysql_need_update - - if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then - if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - - if [ -d "$MYSQL_BACKUP_DIR" ]; then - echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously" - exit 1 - fi - mkdir "$MYSQL_BACKUP_DIR" - if [ $? -ne 0 ]; then - echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR" - exit 1 - fi - - # the /root/.my.cnf is needed because we set the mysql root - # password from liberty onwards - backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction" - # While not ideal, this step allows us to calculate exactly how much space the dump - # will need. Our main goal here is avoiding any chance of corruption due to disk space - # exhaustion - backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c) - database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }') - free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1) - - # we need at least space for a new mysql database + dump of the existing one, - # times a small factor for additional safety room - # note: bash doesn't do floating point math or floats in if statements, - # so use python to apply the ratio and cast it back to integer - required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))") - if [ $required_space -ge $free_space ]; then - echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)" - exit 1 - fi - fi - fi -} - -check_python_rpm() -{ - # If for some reason rpm-python are missing we want to error out early enough - if ! rpm -q rpm-python &> /dev/null; then - echo_error "ERROR: upgrade cannot start without rpm-python installed" - exit 1 - fi -} - -check_clean_cluster() -{ - if pcs status | grep -q Stopped:; then - echo_error "ERROR: upgrade cannot start with stopped resources on the cluster. Make sure that all the resources are up and running." - exit 1 - fi -} - -check_galera_root_password() -{ - # BZ: 1357112 - if [ ! -e /root/.my.cnf ]; then - echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update." 
- exit 1 - fi -} diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh deleted file mode 100755 index 080831ab..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -set -eu - -check_cluster -check_pcsd -if [[ -n $(is_bootstrap_node) ]]; then - check_clean_cluster -fi -check_python_rpm -check_galera_root_password -check_disk_for_mysql_dump - -# We want to disable fencing during the cluster --stop as it might fence -# nodes where a service fails to stop, which could be fatal during an upgrade -# procedure. So we remember the stonith state. If it was enabled we reenable it -# at the end of this script -if [[ -n $(is_bootstrap_node) ]]; then - STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }') - # We create this empty file if stonith was set to true so we can reenable stonith in step2 - rm -f /var/tmp/stonith-true - if [ $STONITH_STATE == "true" ]; then - touch /var/tmp/stonith-true - fi - pcs property set stonith-enabled=false -fi - -# Migrate to HA NG and fix up rabbitmq queues -# We fix up the rabbitmq ha queues after the migration because it will -# restart the rabbitmq resource. Doing it after the migration means no other -# services will be restart as there are no other constraints -if [[ -n $(is_bootstrap_node) ]]; then - migrate_full_to_ng_ha - rabbitmq_newton_ocata_upgrade -fi - diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh deleted file mode 100755 index 8b900842..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh +++ /dev/null @@ -1,177 +0,0 @@ -#!/bin/bash - -set -eu - -cluster_sync_timeout=1800 - -# After migrating the cluster to HA-NG the services not under pacemaker's control -# are still up and running. We need to stop them explicitely otherwise during the yum -# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which -# is going to take a long time because rabbit is down. By having the service stopped -# systemctl try-restart is a noop - -for service in $(services_to_migrate); do - manage_systemd_service stop "${service%%-clone}" - # So the reason for not reusing check_resource_systemd is that - # I have observed systemctl is-active returning unknown with at least - # one service that was stopped (See LP 1627254) - timeout=600 - tstart=$(date +%s) - tend=$(( $tstart + $timeout )) - check_interval=3 - while (( $(date +%s) < $tend )); do - if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then - echo "$service still active, sleeping $check_interval seconds." - sleep $check_interval - else - # we do not care if it is inactive, unknown or failed as long as it is - # not running - break - fi - - done -done - -# In case the mysql package is updated, the database on disk must be -# upgraded as well. This typically needs to happen during major -# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...) -# -# Because in-place upgrades are not supported across 2+ major versions -# (e.g. 
5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle -# https://bugzilla.redhat.com/show_bug.cgi?id=1341968 -# -# The default is to determine automatically if upgrade is needed based -# on mysql package versionning, but this can be overriden manually -# to support specific upgrade scenario - -# Calling this function will set the DO_MYSQL_UPGRADE variable which is used -# later -mysql_need_update - -if [[ -n $(is_bootstrap_node) ]]; then - if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction" - mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql" - cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR" - fi - - pcs resource disable redis - check_resource redis stopped 600 - pcs resource disable rabbitmq - check_resource rabbitmq stopped 600 - pcs resource disable galera - check_resource galera stopped 600 - pcs resource disable openstack-cinder-volume - check_resource openstack-cinder-volume stopped 600 - # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address: - # https://bugzilla.redhat.com/show_bug.cgi?id=1330688 - for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do - pcs resource disable $vip - check_resource $vip stopped 60 - done - pcs cluster stop --all -fi - - -# Swift isn't controlled by pacemaker -systemctl_swift stop - -tstart=$(date +%s) -while systemctl is-active pacemaker; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > cluster_sync_timeout )) ; then - echo_error "ERROR: cluster shutdown timed out" - exit 1 - fi -done - -# The reason we do an sql dump *and* we move the old dir out of -# the way is because it gives us an extra level of safety in case -# something goes wrong during the upgrade. Once the restore is -# successful we go ahead and remove it. If the directory exists -# we bail out as it means the upgrade process had issues in the last -# run. -if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then - echo_error "ERROR: mysql backup dir already exist" - exit 1 - fi - mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR -fi - -# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 -special_case_ovs_upgrade_if_needed - -yum -y install python-zaqarclient # needed for os-collect-config -yum -y -q update - -# We need to ensure at least those two configuration settings, otherwise -# mariadb 10.1+ won't activate galera replication. -# wsrep_cluster_address must only be set though, its value does not -# matter because it's overriden by the galera resource agent. -cat >> /etc/my.cnf.d/galera.cnf <<EOF -[mysqld] -wsrep_on = ON -wsrep_cluster_address = gcomm://localhost -EOF - -if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - # Scripts run via heat have no HOME variable set and this confuses - # mysqladmin - export HOME=/root - - mkdir /var/lib/mysql || /bin/true - chown mysql:mysql /var/lib/mysql - chmod 0755 /var/lib/mysql - restorecon -R /var/lib/mysql/ - mysql_install_db --datadir=/var/lib/mysql --user=mysql - chown -R mysql:mysql /var/lib/mysql/ - - if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then - mysqld_safe --wsrep-new-cluster & - # We have a populated /root/.my.cnf with root/password here so - # we need to temporarily rename it because the newly created - # db is empty and no root password is set - mv /root/.my.cnf /root/.my.cnf.temporary - timeout 60 sh -c 'while ! 
mysql -e "" &> /dev/null; do sleep 1; done' - mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql" - mv /root/.my.cnf.temporary /root/.my.cnf - mysqladmin -u root shutdown - # The import was successful so we may remove the folder - rm -r "$MYSQL_BACKUP_DIR" - fi -fi - -# If we reached here without error we can safely blow away the origin -# mysql dir from every controller - -# TODO: What if the upgrade fails on the bootstrap node, but not on -# this controller. Data may be lost. -if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR -fi - -# Let's reset the stonith back to true if it was true, before starting the cluster -if [[ -n $(is_bootstrap_node) ]]; then - if [ -f /var/tmp/stonith-true ]; then - pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true - fi - rm -f /var/tmp/stonith-true -fi - -# Pin messages sent to compute nodes to kilo, these will be upgraded later -crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute" -# https://bugzilla.redhat.com/show_bug.cgi?id=1284047 -# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435 -crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit -# https://bugzilla.redhat.com/show_bug.cgi?id=1284058 -# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists -crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server" -# LP: 1615035, required only for M/N upgrade. -crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager -# LP: 1627450, required only for M/N upgrade -crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler - -crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm - diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh deleted file mode 100755 index a3cbd945..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash - -set -eu - -cluster_form_timeout=600 -cluster_settle_timeout=1800 -galera_sync_timeout=600 - -if [[ -n $(is_bootstrap_node) ]]; then - pcs cluster start --all - - tstart=$(date +%s) - while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > cluster_form_timeout )) ; then - echo_error "ERROR: timed out forming the cluster" - exit 1 - fi - done - - if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then - echo_error "ERROR: timed out waiting for cluster to finish transition" - exit 1 - fi - - for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do - pcs resource enable $vip - check_resource_pacemaker $vip started 60 - done -fi - -start_or_enable_service galera -check_resource galera started 600 -start_or_enable_service redis -check_resource redis started 600 -# We need mongod which is now a systemd service up and running before calling -# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes -# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely -# we should be good. 
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm -systemctl start mongod -check_resource mongod started 600 - -if [[ -n $(is_bootstrap_node) ]]; then - tstart=$(date +%s) - while ! clustercheck; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > galera_sync_timeout )) ; then - echo_error "ERROR galera sync timed out" - exit 1 - fi - done - - # Run all the db syncs - # TODO: check if this can be triggered in puppet and removed from here - ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types - cinder-manage db sync - glance-manage db_sync - heat-manage --config-file /etc/heat/heat.conf db_sync - keystone-manage db_sync - neutron-db-manage upgrade heads - nova-manage db sync - nova-manage api_db sync - nova-manage db online_data_migrations - sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head -fi diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh deleted file mode 100755 index d2cb9553..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -eu - -start_or_enable_service rabbitmq -check_resource rabbitmq started 600 -start_or_enable_service redis -check_resource redis started 600 -start_or_enable_service openstack-cinder-volume -check_resource openstack-cinder-volume started 600 - -# start httpd so keystone is available for gnocchi -# upgrade to run. -systemctl start httpd - -# Swift isn't controled by pacemaker -systemctl_swift start diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh deleted file mode 100755 index fa95f1f8..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -eu - -if [[ -n $(is_bootstrap_node) ]]; then - # run gnocchi upgrade - gnocchi-upgrade -fi diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh deleted file mode 100755 index d569084d..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -set -eu - -# We need to start the systemd services we explicitely stopped at step _1.sh -# FIXME: Should we let puppet during the convergence step do the service enabling or -# should we add it here? 
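Editor's note: the step-6 loop below leans on bash suffix removal: "${service%%-clone}" strips the "-clone" suffix pacemaker appends to cloned resources, and "${services%%openstack-sahara*}" truncates the list at the first sahara entry, which works only because services_to_migrate lists the sahara services last. With illustrative values:

    service="openstack-cinder-api-clone"
    echo "${service%%-clone}"                # openstack-cinder-api
    services="httpd-clone memcached-clone openstack-sahara-api-clone openstack-sahara-engine-clone"
    echo "${services%%openstack-sahara*}"    # httpd-clone memcached-clone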
-services=$(services_to_migrate) -if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then - services=${services%%openstack-sahara*} -fi -for service in $services; do - manage_systemd_service start "${service%%-clone}" - check_resource_systemd "${service%%-clone}" started 600 -done diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml deleted file mode 100644 index 74d3be71..00000000 --- a/extraconfig/tasks/major_upgrade_pacemaker.yaml +++ /dev/null @@ -1,175 +0,0 @@ -heat_template_version: ocata -description: 'Upgrade for Pacemaker deployments' - -parameters: - servers: - type: json - input_values: - type: json - description: input values for the software deployments - - UpgradeLevelNovaCompute: - type: string - description: Nova Compute upgrade level - default: '' - MySqlMajorUpgrade: - type: string - description: Can be auto,yes,no and influences if the major upgrade should do or detect an automatic mysql upgrade - constraints: - - allowed_values: ['auto', 'yes', 'no'] - default: 'auto' - KeepSaharaServicesOnUpgrade: - type: boolean - default: true - description: Whether to keep Sahara services when upgrading controller nodes from mitaka to newton - - -resources: - # TODO(jistr): for Mitaka->Newton upgrades and further we can use - # map_merge with input_values instead of feeding params into scripts - # via str_replace on bash snippets - - ControllerPacemakerUpgradeConfig_Step1: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' - params: - UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} - - str_replace: - template: | - #!/bin/bash - mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE' - params: - MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade} - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_check.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_1.sh - - ControllerPacemakerUpgradeDeployment_Step1: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step1} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step2: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' - params: - UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} - - str_replace: - template: | - #!/bin/bash - mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE' - params: - MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade} - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_check.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_2.sh - - ControllerPacemakerUpgradeDeployment_Step2: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step1 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step2} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step3: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: 
major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_3.sh - - ControllerPacemakerUpgradeDeployment_Step3: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step2 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step3} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step4: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_4.sh - - ControllerPacemakerUpgradeDeployment_Step4: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step3 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step4} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step5: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_5.sh - - ControllerPacemakerUpgradeDeployment_Step5: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step4 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step5} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step6: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - keep_sahara_services_on_upgrade='KEEP_SAHARA_SERVICES_ON_UPGRADE' - params: - KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade} - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_6.sh - - ControllerPacemakerUpgradeDeployment_Step6: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step5 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step6} - input_values: {get_param: input_values} diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh deleted file mode 100644 index ae22a1e7..00000000 --- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh +++ /dev/null @@ -1,200 +0,0 @@ -#!/bin/bash - -# Special pieces of upgrade migration logic go into this -# file. E.g. Pacemaker cluster transitions for existing deployments, -# matching changes to overcloud_controller_pacemaker.pp (Puppet -# handles deployment, this file handles migrations). -# -# This file shouldn't execute any action on its own, all logic should -# be wrapped into bash functions. Upgrade scripts will source this -# file and call the functions defined in this file where appropriate. -# -# The migration functions should be idempotent. If the migration has -# been already applied, it should be possible to call the function -# again without damaging the deployment or failing the upgrade. 
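Editor's note: the version check in is_mysql_upgrade_needed below hinges on two conventions: "yum check-update" exits 100 when an update is available, and rpm.labelCompare((epoch, version, release), ...) returns -1/0/1 like a comparator; the function prints "1" (dump/restore required) only when the installed version compares as older. A quick Python 2 sanity check matching the 5.5 -> 10.1 example in the comments (versions are illustrative):

    python -c "import rpm; print rpm.labelCompare(('0', '5.5', None), ('0', '10.1', None))"   # -1: candidate is newer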
- -# If the major version of mysql is going to change after the major -# upgrade, the database must be upgraded on disk to avoid failures -# due to internal incompatibilities between major mysql versions -# https://bugs.launchpad.net/tripleo/+bug/1587449 -# This function detects whether a database upgrade is required -# after a mysql package upgrade. It returns 0 when no major upgrade -# has to take place, 1 otherwise. -function is_mysql_upgrade_needed { - # The name of the package which provides mysql might differ - # after the upgrade. Consider the generic package name, which - # should capture the major version change (e.g. 5.5 -> 10.1) - local name="mariadb" - local output - local ret - set +e - output=$(yum -q check-update $name) - ret=$? - set -e - if [ $ret -ne 100 ]; then - # no updates so we exit - echo "0" - return - fi - - local currentepoch=$(rpm -q --qf "%{epoch}" $name) - local currentversion=$(rpm -q --qf "%{version}" $name | cut -d. -f-2) - local currentrelease=$(rpm -q --qf "%{release}" $name) - local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name) - local newepoch=$(echo "$newoutput" | awk '{ print $1 }') - local newversion=$(echo "$newoutput" | awk '{ print $2 }' | cut -d. -f-2) - local newrelease=$(echo "$newoutput" | awk '{ print $3 }') - - # With this we trigger the dump restore/path if we change either epoch or - # version in the package If only the release tag changes we do not do it - # FIXME: we could refine this by trying to parse the mariadb version - # into X.Y.Z and trigger the update only if X and/or Y change. - output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc") - if [ "$output" != "-1" ]; then - echo "0" - return - fi - echo "1" -} - -# This function returns the list of services to be migrated away from pacemaker -# and to systemd. The reason to have these services in a separate function is because -# this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2} -# and in the function to migrate the cluster from full HA to HA NG -function services_to_migrate { - # The following PCMK resources the ones the we are going to delete - PCMK_RESOURCE_TODELETE=" - httpd-clone - memcached-clone - mongod-clone - neutron-dhcp-agent-clone - neutron-l3-agent-clone - neutron-metadata-agent-clone - neutron-netns-cleanup-clone - neutron-openvswitch-agent-clone - neutron-ovs-cleanup-clone - neutron-server-clone - openstack-aodh-evaluator-clone - openstack-aodh-listener-clone - openstack-aodh-notifier-clone - openstack-ceilometer-central-clone - openstack-ceilometer-collector-clone - openstack-ceilometer-notification-clone - openstack-cinder-api-clone - openstack-cinder-scheduler-clone - openstack-glance-api-clone - openstack-gnocchi-metricd-clone - openstack-gnocchi-statsd-clone - openstack-heat-api-cfn-clone - openstack-heat-api-clone - openstack-heat-api-cloudwatch-clone - openstack-heat-engine-clone - openstack-nova-api-clone - openstack-nova-conductor-clone - openstack-nova-consoleauth-clone - openstack-nova-novncproxy-clone - openstack-nova-scheduler-clone - openstack-sahara-api-clone - openstack-sahara-engine-clone - " - echo $PCMK_RESOURCE_TODELETE -} - -# This function will migrate a mitaka system where all the resources are managed -# via pacemaker to a newton setup where only a few services will be managed by pacemaker -# On a high-level it will operate as follows: -# 1. 
Set the cluster in maintenance-mode so no start/stop action will actually take place -# during the conversion -# 2. Remove all the colocation constraints and then the ordering constraints, except the -# ones related to haproxy/VIPs which exist in Newton as well -# 3. Take the cluster out of maintenance-mode -# 4. Remove all the resources that won't be managed by pacemaker in newton. The -# outcome will be -# that they are stopped and removed from pacemakers control -# 5. Do a resource cleanup to make sure the cluster is in a clean state -function migrate_full_to_ng_ha { - if [[ -n $(pcmk_running) ]]; then - pcs property set maintenance-mode=true - - # First we go through all the colocation constraints (except the ones - # we want to keep, i.e. the haproxy/ip ones) and we remove those - COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\)) - for constraint in $COL_CONSTRAINTS; do - log_debug "Deleting colocation constraint $constraint from CIB" - pcs constraint remove "$constraint" - done - - # Now we kill all the ordering constraints (except the haproxy/ip ones) - ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\)) - for constraint in $ORD_CONSTRAINTS; do - log_debug "Deleting ordering constraint $constraint from CIB" - pcs constraint remove "$constraint" - done - # At this stage all the pacemaker resources are removed from the CIB. - # Once we remove the maintenance-mode those systemd resources will keep - # on running. They shall be systemd enabled via the puppet converge - # step later on - pcs property set maintenance-mode=false - - # At this stage there are no constraints whatsoever except the haproxy/ip ones - # which we want to keep. We now disable and then delete each resource - # that will move to systemd. - # We want the systemd resources be stopped before doing "yum update", - # that way "systemctl try-restart <service>" is no-op because the - # service was down already - PCS_STATUS_OUTPUT="$(pcs status)" - for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do - if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then - log_debug "Deleting $resource from the CIB" - if ! pcs resource disable "$resource" --wait=600; then - echo_error "ERROR: resource $resource failed to be disabled" - exit 1 - fi - pcs resource delete --force "$resource" - else - log_debug "Service $resource not found as a pacemaker resource, not trying to delete." - fi - done - - # We need to do a pcs resource cleanup here + crm_resource --wait to - # make sure the cluster is in a clean state before we stop everything, - # upgrade and restart everything - pcs resource cleanup - # We are making sure here that the cluster is stable before proceeding - if ! timeout -k 10 600 crm_resource --wait; then - echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting." 
- exit 1 - fi - fi -} - -function disable_standalone_ceilometer_api { - if [[ -n $(is_bootstrap_node) ]]; then - if [[ -n $(is_pacemaker_managed openstack-ceilometer-api) ]]; then - # Disable pacemaker resources for ceilometer-api - manage_pacemaker_service disable openstack-ceilometer-api - check_resource_pacemaker openstack-ceilometer-api stopped 600 - pcs resource delete openstack-ceilometer-api --wait=600 - fi - fi -} - - -# This function will make sure that the rabbitmq ha policies are converted from mitaka to newton -# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}" -# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}" -# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances) -# Note that changing an attribute like this makes the rabbitmq resource restart -function rabbitmq_newton_ocata_upgrade { - if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then - # Number of controller is obtained by counting how many hostnames we - # have in controller_node_names hiera key - nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1)) - nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2))) - if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then - echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues" - exit 1 - fi - pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600 - fi -} diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml deleted file mode 100644 index 45933fb7..00000000 --- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml +++ /dev/null @@ -1,25 +0,0 @@ -heat_template_version: ocata - -description: > - Software-config for performing aodh data migration - -parameters: - servers: - type: json - input_values: - type: json - description: input values for the software deployments -resources: - - AodhMysqlMigrationScriptConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: aodh_data_migration.sh} - - AodhMysqlMigrationScriptDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: AodhMysqlMigrationScriptConfig} - input_values: {get_param: input_values} diff --git a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp deleted file mode 100644 index a8d43663..00000000 --- a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
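Editor's note on rabbitmq_newton_ocata_upgrade above: CEIL(N/2) is computed with shell integer arithmetic as N/2 + N%2, since bash has no ceiling operator. For example:

    for n in 1 2 3 4 5; do echo "$n controllers -> $(( n/2 + n%2 )) HA queues"; done
    # 1 -> 1, 2 -> 1, 3 -> 2, 4 -> 2, 5 -> 3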
- -# This puppet manifest is to be used only during a Mitaka->Newton upgrade -# It configures ceilometer to be run under httpd but it makes sure to not -# restart any services. This snippet needs to be called before init as a -# pre upgrade migration. - -Service <| - tag == 'ceilometer-service' -|> { - hasrestart => true, - restart => '/bin/true', - start => '/bin/true', - stop => '/bin/true', -} - -if $::hostname == downcase(hiera('bootstrap_nodeid')) { - $pacemaker_master = true - $sync_db = true -} else { - $pacemaker_master = false - $sync_db = false -} - -include ::tripleo::packages - - -if str2bool(hiera('mongodb::server::ipv6', false)) { - $mongo_node_ips_with_port_prefixed = prefix(hiera('mongodb_node_ips'), '[') - $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017') -} else { - $mongo_node_ips_with_port = suffix(hiera('mongodb_node_ips'), ':27017') -} -$mongodb_replset = hiera('mongodb::server::replset') -$mongo_node_string = join($mongo_node_ips_with_port, ',') -$database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" - -$rabbit_hosts = hiera('rabbitmq_node_ips', undef) -$rabbit_port = hiera('ceilometer::rabbit_port', 5672) -$rabbit_endpoints = suffix(any2array(normalize_ip_for_uri($rabbit_hosts)), ":${rabbit_port}") - -class { '::ceilometer' : - rabbit_hosts => $rabbit_endpoints, -} - -class {'::ceilometer::db': - database_connection => $database_connection, -} - -if $sync_db { - include ::ceilometer::db::sync -} - -include ::ceilometer::config - -class { '::ceilometer::api': - enabled => true, - service_name => 'httpd', - keystone_password => hiera('ceilometer::keystone::auth::password'), - identity_uri => hiera('ceilometer::keystone::authtoken::auth_url'), - auth_uri => hiera('ceilometer::keystone::authtoken::auth_uri'), - keystone_tenant => hiera('ceilometer::keystone::authtoken::project_name'), -} - -class { '::apache' : - service_enable => false, - service_manage => true, - service_restart => '/bin/true', - purge_configs => false, - purge_vhost_dir => false, -} - -# To ensure existing ports are not overridden -class { '::aodh::wsgi::apache': - servername => $::hostname, - ssl => false, -} -class { '::gnocchi::wsgi::apache': - servername => $::hostname, - ssl => false, -} - -class { '::keystone::wsgi::apache': - servername => $::hostname, - ssl => false, -} -class { '::ceilometer::wsgi::apache': - servername => $::hostname, - ssl => false, -} diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh index aae4a2de..f17a073a 100755 --- a/extraconfig/tasks/pacemaker_common_functions.sh +++ b/extraconfig/tasks/pacemaker_common_functions.sh @@ -299,9 +299,10 @@ function systemctl_swift { } # Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 +# Update condition and add --notriggerun for +bug/1669714 function special_case_ovs_upgrade_if_needed { - if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then - echo "Manual upgrade of openvswitch - restart in postun detected" + if rpm -qa | grep "^openvswitch-2.5.0-14" || rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart" ; then + echo "Manual upgrade of openvswitch - ovs-2.5.0-14 or restart in postun detected" rm -rf OVS_UPGRADE mkdir OVS_UPGRADE && pushd OVS_UPGRADE echo "Attempting to downloading latest openvswitch with yumdownloader" @@ -310,8 +311,8 @@ function special_case_ovs_upgrade_if_needed { if rpm 
-U --test $pkg 2>&1 | grep "already installed" ; then echo "Looks like newer version of $pkg is already installed, skipping" else - echo "Updating $pkg with nopostun option" - rpm -U --replacepkgs --nopostun $pkg + echo "Updating $pkg with --nopostun --notriggerun" + rpm -U --replacepkgs --nopostun --notriggerun $pkg fi done popd @@ -321,3 +322,52 @@ function special_case_ovs_upgrade_if_needed { } +# This code is meant to fix https://bugs.launchpad.net/tripleo/+bug/1686357 on +# existing setups via a minor update workflow and be idempotent. We need to +# run this before the yum update because we fix this up even when there are no +# packages to update on the system (in which case the script exits). +# This code must be called with set +eu (due to the ocf scripts being sourced) +function fixup_wrong_ipv6_vip { + # This XPath query identifies of all the VIPs in pacemaker with netmask /64. Those are IPv6 only resources that have the wrong netmask + # This gives the address of the resource in the CIB, one address per line. For example: + # /cib/configuration/resources/primitive[@id='ip-2001.db8.ca2.4..10']/instance_attributes[@id='ip-2001.db8.ca2.4..10-instance_attributes']\ + # /nvpair[@id='ip-2001.db8.ca2.4..10-instance_attributes-cidr_netmask'] + vip_xpath_query="//resources/primitive[@type='IPaddr2']/instance_attributes/nvpair[@name='cidr_netmask' and @value='64']" + vip_xpath_xml_addresses=$(cibadmin --query --xpath "$vip_xpath_query" -e 2>/dev/null) + # The following extracts the @id value of the resource + vip_resources_to_fix=$(echo -e "$vip_xpath_xml_addresses" | sed -n "s/.*primitive\[@id='\([^']*\)'.*/\1/p") + # Runnning this in a subshell so that sourcing files cannot possibly affect the running script + ( + OCF_PATH="/usr/lib/ocf/lib/heartbeat" + if [ -n "$vip_resources_to_fix" -a -f $OCF_PATH/ocf-shellfuncs -a -f $OCF_PATH/findif.sh ]; then + source $OCF_PATH/ocf-shellfuncs + source $OCF_PATH/findif.sh + for resource in $vip_resources_to_fix; do + echo "Updating IPv6 VIP $resource with a /128 and a correct addrlabel" + # The following will give us something like: + # <nvpair id="ip-2001.db8.ca2.4..10-instance_attributes-ip" name="ip" value="2001:db8:ca2:4::10"/> + ip_cib_nvpair=$(cibadmin --query --xpath "//resources/primitive[@type='IPaddr2' and @id='$resource']/instance_attributes/nvpair[@name='ip']") + # Let's filter out the value of the nvpair to get the ip address + ip_address=$(echo $ip_cib_nvpair | xmllint --xpath 'string(//nvpair/@value)' -) + OCF_RESKEY_cidr_netmask="64" + OCF_RESKEY_ip="$ip_address" + # Unfortunately due to https://bugzilla.redhat.com/show_bug.cgi?id=1445628 + # we need to find out the appropiate nic given the ip address. + nic=$(findif $ip_address | awk '{ print $1 }') + ret=$? + if [ -z "$nic" -o $ret -ne 0 ]; then + echo "NIC autodetection failed for VIP $ip_address, not updating VIPs" + # Only exits the subshell + exit 1 + fi + ocf_run -info pcs resource update --wait "$resource" ip="$ip_address" cidr_netmask=128 nic="$nic" lvs_ipv6_addrlabel=true lvs_ipv6_addrlabel_value=99 + ret=$? 
+ if [ $ret -ne 0 ]; then + echo "pcs resource update for VIP $resource failed, not updating VIPs" + # Only exits the subshell + exit 1 + fi + done + fi + ) +} diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.j2.yaml index a63868c9..7fc258d6 100644 --- a/extraconfig/tasks/post_puppet_pacemaker.yaml +++ b/extraconfig/tasks/post_puppet_pacemaker.j2.yaml @@ -1,4 +1,4 @@ -heat_template_version: ocata +heat_template_version: pike description: 'Post-Puppet Config for Pacemaker deployments' parameters: @@ -10,7 +10,9 @@ parameters: resources: - ControllerPostPuppetMaintenanceModeConfig: +{%- for role in roles -%} +{% if "controller" in role.tags %} + {{role.name}}PostPuppetMaintenanceModeConfig: type: OS::Heat::SoftwareConfig properties: group: script @@ -22,16 +24,19 @@ resources: pcs property set maintenance-mode=false fi - ControllerPostPuppetMaintenanceModeDeployment: + {{role.name}}PostPuppetMaintenanceModeDeployment: type: OS::Heat::SoftwareDeployments properties: - servers: {get_param: servers} - config: {get_resource: ControllerPostPuppetMaintenanceModeConfig} + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}PostPuppetMaintenanceModeConfig} input_values: {get_param: input_values} - ControllerPostPuppetRestart: - type: OS::TripleO::Tasks::ControllerPostPuppetRestart - depends_on: ControllerPostPuppetMaintenanceModeDeployment + {{role.name}}PostPuppetRestart: + type: OS::TripleO::Tasks::{{role.name}}PostPuppetRestart + depends_on: {{role.name}}PostPuppetMaintenanceModeDeployment properties: - servers: {get_param: servers} + servers: {get_param: [servers, {{role.name}}]} input_values: {get_param: input_values} +{%- endif -%} +{% endfor %} + diff --git a/extraconfig/tasks/post_puppet_pacemaker_restart.yaml b/extraconfig/tasks/post_puppet_pacemaker_restart.yaml index 475a6688..203ca1f8 100644 --- a/extraconfig/tasks/post_puppet_pacemaker_restart.yaml +++ b/extraconfig/tasks/post_puppet_pacemaker_restart.yaml @@ -1,4 +1,4 @@ -heat_template_version: ocata +heat_template_version: pike description: 'Post-Puppet restart config for Pacemaker deployments' parameters: @@ -23,6 +23,6 @@ resources: ControllerPostPuppetRestartDeployment: type: OS::Heat::SoftwareDeployments properties: - servers: {get_param: servers} + servers: {get_param: servers} config: {get_resource: ControllerPostPuppetRestartConfig} input_values: {get_param: input_values} diff --git a/extraconfig/tasks/pre_puppet_pacemaker.yaml b/extraconfig/tasks/pre_puppet_pacemaker.yaml index aa7514f9..98b37be7 100644 --- a/extraconfig/tasks/pre_puppet_pacemaker.yaml +++ b/extraconfig/tasks/pre_puppet_pacemaker.yaml @@ -1,4 +1,4 @@ -heat_template_version: ocata +heat_template_version: pike description: 'Pre-Puppet Config for Pacemaker deployments' parameters: @@ -20,6 +20,6 @@ resources: ControllerPrePuppetMaintenanceModeDeployment: type: OS::Heat::SoftwareDeployments properties: - servers: {get_param: servers} + servers: {get_param: servers} config: {get_resource: ControllerPrePuppetMaintenanceModeConfig} input_values: {get_param: input_values} diff --git a/extraconfig/tasks/run_puppet.sh b/extraconfig/tasks/run_puppet.sh index b7771e33..e3f6c493 100755 --- a/extraconfig/tasks/run_puppet.sh +++ b/extraconfig/tasks/run_puppet.sh @@ -10,7 +10,10 @@ function run_puppet { export FACTER_deploy_config_name="${role}Deployment_Step${step}" if [ -e "/etc/puppet/hieradata/heat_config_${FACTER_deploy_config_name}.json" ]; then set +e - puppet apply --detailed-exitcodes 
"${manifest}" + puppet apply --detailed-exitcodes \ + --modulepath \ + /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \ + "${manifest}" rc=$? echo "puppet apply exited with exit code $rc" else diff --git a/extraconfig/tasks/ssh/host_public_key.yaml b/extraconfig/tasks/ssh/host_public_key.yaml new file mode 100644 index 00000000..e4ba0cc4 --- /dev/null +++ b/extraconfig/tasks/ssh/host_public_key.yaml @@ -0,0 +1,42 @@ +heat_template_version: pike + +description: > + This is a template which will fetch the ssh host public key. + +parameters: + server: + description: ID of the node to apply this config to + type: string + +resources: + SshHostPubKeyConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + outputs: + - name: rsa + - name: ecdsa + - name: ed25519 + config: | + #!/bin/sh -x + test -e '/etc/ssh/ssh_host_rsa_key.pub' && cat /etc/ssh/ssh_host_rsa_key.pub > $heat_outputs_path.rsa + test -e '/etc/ssh/ssh_host_ecdsa_key.pub' && cat /etc/ssh/ssh_host_ecdsa_key.pub > $heat_outputs_path.ecdsa + test -e '/etc/ssh/ssh_host_ed25519_key.pub' && cat /etc/ssh/ssh_host_ed25519_key.pub > $heat_outputs_path.ed25519 + + SshHostPubKeyDeployment: + type: OS::Heat::SoftwareDeployment + properties: + config: {get_resource: SshHostPubKeyConfig} + server: {get_param: server} + + +outputs: + ecdsa: + description: Host ssh public key (ecdsa) + value: {get_attr: [SshHostPubKeyDeployment, ecdsa]} + rsa: + description: Host ssh public key (rsa) + value: {get_attr: [SshHostPubKeyDeployment, rsa]} + ed25519: + description: Host ssh public key (ed25519) + value: {get_attr: [SshHostPubKeyDeployment, ed25519]} diff --git a/extraconfig/tasks/ssh/known_hosts_config.yaml b/extraconfig/tasks/ssh/known_hosts_config.yaml new file mode 100644 index 00000000..50bde653 --- /dev/null +++ b/extraconfig/tasks/ssh/known_hosts_config.yaml @@ -0,0 +1,36 @@ +heat_template_version: pike +description: 'SSH Known Hosts Config' + +parameters: + known_hosts: + type: string + +resources: + + SSHKnownHostsConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: known_hosts + default: {get_param: known_hosts} + config: | + #!/bin/bash + set -eux + set -o pipefail + + echo "Creating ssh known hosts file" + + if [ ! -z "${known_hosts}" ]; then + echo "${known_hosts}" + echo -ne "${known_hosts}" > /etc/ssh/ssh_known_hosts + chmod 0644 /etc/ssh/ssh_known_hosts + else + rm -f /etc/ssh/ssh_known_hosts + echo "No ssh known hosts" + fi + +outputs: + OS::stack_id: + description: The SSHKnownHostsConfig resource. + value: {get_resource: SSHKnownHostsConfig}
\ No newline at end of file diff --git a/extraconfig/tasks/swift-ring-deploy.yaml b/extraconfig/tasks/swift-ring-deploy.yaml deleted file mode 100644 index d17f78ae..00000000 --- a/extraconfig/tasks/swift-ring-deploy.yaml +++ /dev/null @@ -1,31 +0,0 @@ -heat_template_version: ocata - -parameters: - servers: - type: json - SwiftRingGetTempurl: - default: '' - description: A temporary Swift URL to download rings from. - type: string - -resources: - SwiftRingDeployConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - inputs: - - name: swift_ring_get_tempurl - config: | - #!/bin/sh - pushd / - curl --insecure --silent "${swift_ring_get_tempurl}" | tar xz || true - popd - - SwiftRingDeploy: - type: OS::Heat::SoftwareDeployments - properties: - name: SwiftRingDeploy - config: {get_resource: SwiftRingDeployConfig} - servers: {get_param: servers} - input_values: - swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl} diff --git a/extraconfig/tasks/swift-ring-update.yaml b/extraconfig/tasks/swift-ring-update.yaml deleted file mode 100644 index 440c6883..00000000 --- a/extraconfig/tasks/swift-ring-update.yaml +++ /dev/null @@ -1,42 +0,0 @@ -heat_template_version: ocata - -parameters: - servers: - type: json - SwiftRingPutTempurl: - default: '' - description: A temporary Swift URL to upload rings to. - type: string - -resources: - SwiftRingUpdateConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - inputs: - - name: swift_ring_put_tempurl - config: | - #!/bin/sh - TMP_DATA=$(mktemp -d) - function cleanup { - rm -Rf "$TMP_DATA" - } - trap cleanup EXIT - # sanity check in case rings are not consistent within cluster - swift-recon --md5 | grep -q "doesn't match" && exit 1 - pushd ${TMP_DATA} - tar -cvzf swift-rings.tar.gz /etc/swift/*.builder /etc/swift/*.ring.gz /etc/swift/backups/* - resp=`curl --insecure --silent -X PUT "${swift_ring_put_tempurl}" --write-out "%{http_code}" --data-binary @swift-rings.tar.gz` - popd - if [ "$resp" != "201" ]; then - exit 1 - fi - - SwiftRingUpdate: - type: OS::Heat::SoftwareDeployments - properties: - name: SwiftRingUpdate - config: {get_resource: SwiftRingUpdateConfig} - servers: {get_param: servers} - input_values: - swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl} diff --git a/extraconfig/tasks/tripleo_upgrade_node.sh b/extraconfig/tasks/tripleo_upgrade_node.sh index c2565410..1114897f 100644 --- a/extraconfig/tasks/tripleo_upgrade_node.sh +++ b/extraconfig/tasks/tripleo_upgrade_node.sh @@ -28,37 +28,44 @@ SCRIPT_NAME=$(basename $0) $(declare -f log_debug) $(declare -f manage_systemd_service) $(declare -f systemctl_swift) +$(declare -f special_case_ovs_upgrade_if_needed) # pin nova messaging +-1 for the nova-compute service if [[ -n \$NOVA_COMPUTE ]]; then crudini --set /etc/nova/nova.conf upgrade_levels compute auto fi -$(declare -f special_case_ovs_upgrade_if_needed) special_case_ovs_upgrade_if_needed -yum -y install python-zaqarclient # needed for os-collect-config if [[ -n \$SWIFT_STORAGE ]]; then systemctl_swift stop fi + yum -y update + if [[ -n \$SWIFT_STORAGE ]]; then systemctl_swift start fi # Due to bug#1640177 we need to restart compute agent if [[ -n \$NOVA_COMPUTE ]]; then - echo "Restarting openstack ceilometer agent compute" + log_debug "Restarting openstack ceilometer agent compute" systemctl restart openstack-ceilometer-compute + yum install -y openstack-nova-migration fi # Apply puppet manifest to converge just right after the ${ROLE} upgrade $(declare -f run_puppet) for step in 1 2 3 4 5 6; do + 
log_debug "Running puppet step \$step for ${ROLE}" if ! run_puppet /root/${ROLE}_puppet_config.pp ${ROLE} \${step}; then - echo "Puppet failure at step \${step}" + log_debug "Puppet failure at step \${step}" exit 1 fi + log_debug "Completed puppet step \$step" done + +log_debug "TripleO upgrade run completed." + ENDOFCAT # ensure the permissions are OK diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh index 6bf415b2..cb9cc5b1 100755 --- a/extraconfig/tasks/yum_update.sh +++ b/extraconfig/tasks/yum_update.sh @@ -38,39 +38,48 @@ if [[ -a "$timestamp_file" ]]; then fi touch "$timestamp_file" -command_arguments=${command_arguments:-} - -list_updates=$(yum list updates) - -if [[ "$list_updates" == "" ]]; then - echo "No packages require updating" - exit 0 -fi - pacemaker_status="" -if hiera -c /etc/puppet/hiera.yaml service_names | grep -q pacemaker; then +# We include word boundaries in order to not match pacemaker_remote +if hiera -c /etc/puppet/hiera.yaml service_names | grep -q '\bpacemaker\b'; then pacemaker_status=$(systemctl is-active pacemaker) fi -# Fix the redis/rabbit resource start/stop timeouts. See https://bugs.launchpad.net/tripleo/+bug/1633455 -# and https://bugs.launchpad.net/tripleo/+bug/1634851 +# (NB: when backporting this s/pacemaker_short_bootstrap_node_name/bootstrap_nodeid) +# This runs before the yum_update so we are guaranteed to run it even in the absence +# of packages to update (the check for -z "$update_identifier" guarantees that this +# is run only on overcloud stack update -i) if [[ "$pacemaker_status" == "active" && \ - "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]] ; then - if pcs resource show rabbitmq | grep -E "start.*timeout=100"; then - pcs resource update rabbitmq op start timeout=200s - fi - if pcs resource show rabbitmq | grep -E "stop.*timeout=90"; then - pcs resource update rabbitmq op stop timeout=200s - fi - if pcs resource show redis | grep -E "start.*timeout=120"; then - pcs resource update redis op start timeout=200s - fi - if pcs resource show redis | grep -E "stop.*timeout=120"; then - pcs resource update redis op stop timeout=200s + "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name)" == "$(facter hostname)" ]] ; then \ + # OCF scripts don't cope with -eu + echo "Verifying if we need to fix up any IPv6 VIPs" + set +eu + fixup_wrong_ipv6_vip + ret=$? + set -eu + if [ $ret -ne 0 ]; then + echo "Fixing up IPv6 VIPs failed. Stopping here. (See https://bugs.launchpad.net/tripleo/+bug/1686357 for more info)" + exit 1 fi fi -# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 +command_arguments=${command_arguments:-} + +# yum check-update exits 100 if updates are available +set +e +check_update=$(yum check-update 2>&1) +check_update_exit=$? +set -e + +if [[ "$check_update_exit" == "1" ]]; then + echo "Failed to check for package updates" + echo "$check_update" + exit 1 +elif [[ "$check_update_exit" != "100" ]]; then + echo "No packages require updating" + exit 0 +fi + +# special case https://bugs.launchpad.net/tripleo/+bug/1635205 +bug/1669714 special_case_ovs_upgrade_if_needed if [[ "$pacemaker_status" == "active" ]] ; then @@ -100,17 +109,6 @@ return_code=$? echo "$result" echo "yum return code: $return_code" -# Writes any changes caused by alterations to os-net-config and bounces the -# interfaces *before* restarting the cluster. -os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes -RETVAL=$? 
-if [[ $RETVAL == 2 ]]; then - echo "os-net-config: interface configuration files updated successfully" -elif [[ $RETVAL != 0 ]]; then - echo "ERROR: os-net-config configuration failed" - exit $RETVAL -fi - if [[ "$pacemaker_status" == "active" ]] ; then echo "Starting cluster node" pcs cluster start @@ -127,15 +125,19 @@ if [[ "$pacemaker_status" == "active" ]] ; then fi done - tstart=$(date +%s) - while ! clustercheck; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > galera_sync_timeout )) ; then - echo "ERROR galera sync timed out" - exit 1 - fi - done + RETVAL=$( pcs resource show galera-master | grep wsrep_cluster_address | grep -q `crm_node -n` ; echo $? ) + + if [[ $RETVAL -eq 0 && -e /etc/sysconfig/clustercheck ]]; then + tstart=$(date +%s) + while ! clustercheck; do + sleep 5 + tnow=$(date +%s) + if (( tnow-tstart > galera_sync_timeout )) ; then + echo "ERROR galera sync timed out" + exit 1 + fi + done + fi echo "Waiting for pacemaker cluster to settle" if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then @@ -146,6 +148,7 @@ if [[ "$pacemaker_status" == "active" ]] ; then pcs status fi -echo "Finished yum_update.sh on server $deploy_server_id at `date`" + +echo "Finished yum_update.sh on server $deploy_server_id at `date` with return code: $return_code" exit $return_code diff --git a/extraconfig/tasks/yum_update.yaml b/extraconfig/tasks/yum_update.yaml index 8cff838e..9daa8353 100644 --- a/extraconfig/tasks/yum_update.yaml +++ b/extraconfig/tasks/yum_update.yaml @@ -1,4 +1,4 @@ -heat_template_version: ocata +heat_template_version: pike description: > Software-config for performing package updates using yum diff --git a/extraconfig/tasks/yum_update_noop.yaml b/extraconfig/tasks/yum_update_noop.yaml index 9400c1d2..2ede5bea 100644 --- a/extraconfig/tasks/yum_update_noop.yaml +++ b/extraconfig/tasks/yum_update_noop.yaml @@ -1,4 +1,4 @@ -heat_template_version: ocata +heat_template_version: pike description: 'No-op yum update task' resources: |
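Editor's closing note: the rewritten yum_update.sh above replaces the old "yum list updates" text check with the exit-code contract of "yum check-update" (0: nothing to do, 100: updates available, anything else: an error). The same pattern in isolation:

    set +e
    yum -q check-update >/dev/null 2>&1
    rc=$?
    set -e
    case $rc in
        0)   echo "No packages require updating" ;;
        100) echo "Updates available, proceeding" ;;
        *)   echo "Failed to check for package updates (rc=$rc)"; exit 1 ;;
    esac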