Diffstat (limited to 'extraconfig'): 27 files changed, 310 insertions, 1468 deletions
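Note: the first hunks below add four RHEL registration parameters so that subscription-manager and yum traffic can be routed through an HTTP proxy; the remaining hunks delete the Mitaka-to-Newton major-upgrade scripts and add the Swift ring deploy/update tasks. A minimal usage sketch for the new proxy parameters follows, assuming an operator sets them via parameter_defaults in the registration environment file; the parameter names come from this patch, while the host, port, and credential values are purely illustrative.

  # Illustrative only: parameter names are from this patch, values are placeholders.
  parameter_defaults:
    rhel_reg_http_proxy_host: "proxy.example.com"
    rhel_reg_http_proxy_port: "3128"
    rhel_reg_http_proxy_username: "proxyuser"
    rhel_reg_http_proxy_password: "CHANGEME"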
diff --git a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml index c388358a..24557517 100644 --- a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml +++ b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml @@ -21,3 +21,7 @@ parameter_defaults: rhel_reg_type: "" rhel_reg_method: "" rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.1-rpms" + rhel_reg_http_proxy_host: "" + rhel_reg_http_proxy_port: "" + rhel_reg_http_proxy_username: "" + rhel_reg_http_proxy_password: "" diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml index fdf2e957..e8316c53 100644 --- a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml +++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml @@ -45,6 +45,14 @@ parameters: type: string rhel_reg_sat_repo: type: string + rhel_reg_http_proxy_host: + type: string + rhel_reg_http_proxy_port: + type: string + rhel_reg_http_proxy_username: + type: string + rhel_reg_http_proxy_password: + type: string resources: @@ -71,6 +79,10 @@ resources: - name: REG_TYPE - name: REG_METHOD - name: REG_SAT_REPO + - name: REG_HTTP_PROXY_HOST + - name: REG_HTTP_PROXY_PORT + - name: REG_HTTP_PROXY_USERNAME + - name: REG_HTTP_PROXY_PASSWORD config: {get_file: scripts/rhel-registration} RHELRegistrationDeployment: @@ -99,6 +111,10 @@ resources: REG_TYPE: {get_param: rhel_reg_type} REG_METHOD: {get_param: rhel_reg_method} REG_SAT_REPO: {get_param: rhel_reg_sat_repo} + REG_HTTP_PROXY_HOST: {get_param: rhel_reg_http_proxy_host} + REG_HTTP_PROXY_PORT: {get_param: rhel_reg_http_proxy_port} + REG_HTTP_PROXY_USERNAME: {get_param: rhel_reg_http_proxy_username} + REG_HTTP_PROXY_PASSWORD: {get_param: rhel_reg_http_proxy_password} RHELUnregistration: type: OS::Heat::SoftwareConfig diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration index 2650a967..0d0fa3f1 100644 --- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration +++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration @@ -11,12 +11,20 @@ if [ -e $OK ] ; then exit 0 fi -retryCount=0 +retry_max_count=10 opts= +config_opts= attach_opts= sat5_opts= repos="repos --enable rhel-7-server-rpms" satellite_repo=${REG_SAT_REPO} +proxy_host= +proxy_port= +proxy_url= +proxy_username= +proxy_password= + +# process variables.. if [ -n "${REG_AUTO_ATTACH:-}" ]; then opts="$opts --auto-attach" @@ -97,28 +105,93 @@ if [ -n "${REG_TYPE:-}" ]; then opts="$opts --type=$REG_TYPE" fi -function retry() { - if [[ $retryCount < 3 ]]; then - $@ - if ! [[ $? == 0 ]]; then - retryCount=$(echo $retryCount + 1 | bc) - echo "WARN: Failed to connect when running '$@', retrying..." 
- retry $@ +# Proxy settings (host and port) +if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then + proxy_host="${REG_HTTP_PROXY_HOST}" +fi + +if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then + proxy_port="${REG_HTTP_PROXY_PORT}" +fi + +# Proxy settings (user and password) +if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then + proxy_username="${REG_HTTP_PROXY_USERNAME}" +fi + +if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then + proxy_password="${REG_HTTP_PROXY_PASSWORD}" +fi + +# Sanity Checks for proxy host/port/user/password +if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then + if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then + # Good both values are not empty + proxy_url="http://${proxy_host}:${proxy_port}" + config_opts="--server.proxy_hostname=${proxy_host} --server.proxy_port=${proxy_port}" + sat5_opts="${sat5_opts} --proxy_hostname=${proxy_url}" + echo "RHSM Proxy set to: ${proxy_url}" + if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then + if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then + config_opts="${config_opts} --server.proxy_user=${proxy_username} --server.proxy_password=${proxy_password}" + sat5_opts="${sat5_opts} --proxyUser=${proxy_username} --proxyPassword=${proxy_password}" + else + echo "Warning: REG_HTTP_PROXY_PASSWORD cannot be null with non-empty REG_HTTP_PROXY_USERNAME! Skipping..." + proxy_username= ; proxy_password= + fi + else + if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then + echo "Warning: REG_HTTP_PROXY_USERNAME cannot be null with non-empty REG_HTTP_PROXY_PASSWORD! Skipping..." + proxy_username= ; proxy_password= + fi + fi else - retryCount=0 + echo "Warning: REG_HTTP_PROXY_PORT cannot be null with non-empty REG_HTTP_PROXY_HOST! Skipping..." + proxy_host= ; proxy_port= ; proxy_url= ; proxy_username= ; proxy_password= fi - else - echo "ERROR: Failed to connect after 3 attempts when running '$@'" - exit 1 - fi +else + if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then + echo "Warning: REG_HTTP_PROXY_HOST cannot be null with non-empty REG_HTTP_PROXY_PORT! Skipping..." + proxy_host= ; proxy_port= ; proxy_url= ; proxy_username= ; proxy_password= + fi +fi + +function retry() { + # Inhibit -e since we want to retry without exiting.. + set +e + # Retry delay (seconds) + retry_delay=2.0 + retry_count=0 + mycli="$@" + while [ $retry_count -lt ${retry_max_count} ] + do + echo "INFO: Sleeping ${retry_delay} ..." + sleep ${retry_delay} + echo "INFO: Executing '${mycli}' ..." + ${mycli} + if [ $? -eq 0 ]; then + echo "INFO: Ran '${mycli}' successfully, not retrying..." + break + else + echo "WARN: Failed to connect when running '${mycli}', retrying (attempt #$retry_count )..." 
+ retry_count=$(echo $retry_count + 1 | bc) + fi + done + + if [ $retry_count -ge ${retry_max_count} ]; then + echo "ERROR: Failed to connect after ${retry_max_count} attempts when running '${mycli}'" + exit 1 + fi + # Re-enable -e when exiting retry() + set -e } function detect_satellite_version { ping_api=$REG_SAT_URL/katello/api/ping - if curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then + if curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then echo Satellite 6 detected at $REG_SAT_URL satellite_version=6 - elif curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then + elif curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then echo Satellite 5 detected at $REG_SAT_URL satellite_version=5 else @@ -127,20 +200,41 @@ function detect_satellite_version { fi } +if [ "x${proxy_url}" != "x" ];then + # Config subscription-manager for proxy + subscription-manager config ${config_opts} + + # Config yum for proxy.. + sed -i -e '/^proxy=/d' /etc/yum.conf + echo "proxy=${proxy_url}" >> /etc/yum.conf + + # Handle optional username/password + if [ -n "${proxy_username}" ]; then + sed -i -e '/^proxy_username=/d' /etc/yum.conf + echo "proxy_username=${proxy_username}" >> /etc/yum.conf + fi + + if [ -n "${proxy_password}" ]; then + sed -i -e '/^proxy_password=/d' /etc/yum.conf + echo "proxy_password=${proxy_password}" >> /etc/yum.conf + fi + +fi + case "${REG_METHOD:-}" in portal) retry subscription-manager register $opts if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then retry subscription-manager attach $attach_opts fi - retry subscription-manager repos --disable '*' + retry subscription-manager repos --disable='*' retry subscription-manager $repos ;; satellite) detect_satellite_version if [ "$satellite_version" = "6" ]; then repos="$repos --enable ${satellite_repo}" - curl --retry 3 --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm" + curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm" rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true retry subscription-manager register $opts retry subscription-manager $repos @@ -149,7 +243,7 @@ case "${REG_METHOD:-}" in retry subscription-manager repos --disable ${satellite_repo} else pushd /usr/share/rhn/ - curl --retry 3 --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT + curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT popd retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts fi diff --git a/extraconfig/tasks/aodh_data_migration.sh b/extraconfig/tasks/aodh_data_migration.sh deleted file mode 100644 index d4c29673..00000000 --- a/extraconfig/tasks/aodh_data_migration.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# -# This delivers the aodh data migration script to be invoked as part of the tripleo -# major upgrade workflow to migrate all the alarm data from mongodb to mysql. -# This needs to run post controller node upgrades so new aodh mysql db configured and -# running. 
-# -set -eu - -#Get existing mongodb connection -MONGO_DB_CONNECTION="$(crudini --get /etc/ceilometer/ceilometer.conf database connection)" - -# Get the aodh database string from hiera data -MYSQL_DB_CONNECTION="$(crudini --get /etc/aodh/aodh.conf database connection)" - -#Run migration -/usr/bin/aodh-data-migration --nosql-conn $MONGO_DB_CONNECTION --sql-conn $MYSQL_DB_CONNECTION - - diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh deleted file mode 100644 index 64c4457e..00000000 --- a/extraconfig/tasks/major_upgrade_block_storage.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# -# This runs an upgrade of Cinder Block Storage nodes. -# -set -eu - -# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 -special_case_ovs_upgrade_if_needed - -yum -y install python-zaqarclient # needed for os-collect-config -yum -y -q update diff --git a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml deleted file mode 100644 index cf5d7a84..00000000 --- a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml +++ /dev/null @@ -1,62 +0,0 @@ -heat_template_version: ocata - -description: > - Software-config for ceilometer configuration under httpd during upgrades - -parameters: - servers: - type: json - input_values: - type: json - description: input values for the software deployments -resources: - CeilometerWsgiMitakaNewtonPreUpgradeConfig: - type: OS::Heat::SoftwareConfig - properties: - group: puppet - config: - get_file: mitaka_to_newton_ceilometer_wsgi_upgrade.pp - - CeilometerWsgiMitakaNewtonUpgradeConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - "#!/bin/bash\n\nset -e\n\n" - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - "disable_standalone_ceilometer_api\n\n" - - CeilometerWsgiMitakaNewtonPostUpgradeConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: | - #!/bin/bash - set -e - /usr/bin/systemctl reload httpd - - CeilometerWsgiMitakaNewtonPreUpgradeDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - name: CeilometerWsgiMitakaNewtonPreUpgradeDeployment - servers: {get_param: [servers, Controller]} - config: {get_resource: CeilometerWsgiMitakaNewtonPreUpgradeConfig} - - CeilometerWsgiMitakaNewtonUpgradeConfigDeployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: CeilometerWsgiMitakaNewtonPreUpgradeDeployment - properties: - name: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment - servers: {get_param: [servers, Controller]} - config: {get_resource: CeilometerWsgiMitakaNewtonUpgradeConfig} - - CeilometerWsgiMitakaNewtonPostUpgradeDeployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment - properties: - name: CeilometerWsgiMitakaNewtonPostUpgradeDeployment - servers: {get_param: [servers, Controller]} - config: {get_resource: CeilometerWsgiMitakaNewtonPostUpgradeConfig} diff --git a/extraconfig/tasks/major_upgrade_ceph_mon.sh b/extraconfig/tasks/major_upgrade_ceph_mon.sh deleted file mode 100755 index e0d160f1..00000000 --- a/extraconfig/tasks/major_upgrade_ceph_mon.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/bash -set -eu -set -o pipefail - -echo INFO: starting $(basename "$0") - -# Exit if not running -if ! 
pidof ceph-mon &> /dev/null; then - echo INFO: ceph-mon is not running, skipping - exit 0 -fi - -# Exit if not Hammer -INSTALLED_VERSION=$(ceph --version | awk '{print $3}') -if ! [[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then - echo INFO: version of Ceph installed is not 0.94, skipping - exit 0 -fi - -CEPH_STATUS=$(ceph health | awk '{print $1}') -if [ ${CEPH_STATUS} = HEALTH_ERR ]; then - echo ERROR: Ceph cluster status is HEALTH_ERR, cannot be upgraded - exit 1 -fi - -# Useful when upgrading with OSDs num < replica size -if [[ ${ignore_ceph_upgrade_warnings:-False} != [Tt]rue ]]; then - timeout 300 bash -c "while [ ${CEPH_STATUS} != HEALTH_OK ]; do - echo WARNING: Waiting for Ceph cluster status to go HEALTH_OK; - sleep 30; - CEPH_STATUS=$(ceph health | awk '{print $1}') - done" -fi - -MON_PID=$(pidof ceph-mon) -MON_ID=$(hostname -s) - -# Stop daemon using Hammer sysvinit script -service ceph stop mon.${MON_ID} - -# Ensure it's stopped -timeout 60 bash -c "while kill -0 ${MON_PID} 2> /dev/null; do - sleep 2; -done" - -# Update to Jewel -yum -y -q update ceph-mon ceph - -# Restart/Exit if not on Jewel, only in that case we need the changes -UPDATED_VERSION=$(ceph --version | awk '{print $3}') -if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then - echo WARNING: Ceph was not upgraded, restarting daemons - service ceph start mon.${MON_ID} -elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then - # RPM could own some of these but we can't take risks on the pre-existing files - for d in /var/lib/ceph/mon /var/log/ceph /var/run/ceph /etc/ceph; do - chown -L -R ceph:ceph $d || echo WARNING: chown of $d failed - done - - # Replay udev events with newer rules - udevadm trigger - - # Enable systemd unit - systemctl enable ceph-mon.target - systemctl enable ceph-mon@${MON_ID} - systemctl start ceph-mon@${MON_ID} - - # Wait for daemon to be back in the quorum - timeout 300 bash -c "until (ceph quorum_status | jq .quorum_names | grep -sq ${MON_ID}); do - echo WARNING: Waiting for mon.${MON_ID} to re-join quorum; - sleep 10; - done" - - # if tunables become legacy, cluster status will be HEALTH_WARN causing - # upgrade to fail on following node - ceph osd crush tunables default - - echo INFO: Ceph was upgraded to Jewel -else - echo ERROR: Ceph was upgraded to an unknown release, daemon is stopped, need manual intervention - exit 1 -fi diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh deleted file mode 100644 index a745e723..00000000 --- a/extraconfig/tasks/major_upgrade_ceph_storage.sh +++ /dev/null @@ -1,106 +0,0 @@ -#!/bin/bash -# -# This delivers the ceph-storage upgrade script to be invoked as part of the tripleo -# major upgrade workflow. -# -set -eu -set -o pipefail - -UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh - -declare -f special_case_ovs_upgrade_if_needed > $UPGRADE_SCRIPT -# use >> here so we don't lose the declaration we added above -cat >> $UPGRADE_SCRIPT << 'ENDOFCAT' -#!/bin/bash -### DO NOT MODIFY THIS FILE -### This file is automatically delivered to the ceph-storage nodes as part of the -### tripleo upgrades workflow -set -eu - -echo INFO: starting $(basename "$0") - -# Exit if not running -if ! pidof ceph-osd &> /dev/null; then - echo INFO: ceph-osd is not running, skipping - exit 0 -fi - -# Exit if not Hammer -INSTALLED_VERSION=$(ceph --version | awk '{print $3}') -if ! 
[[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then - echo INFO: version of Ceph installed is not 0.94, skipping - exit 0 -fi - -OSD_PIDS=$(pidof ceph-osd) -OSD_IDS=$(ls /var/lib/ceph/osd | awk 'BEGIN { FS = "-" } ; { print $2 }') - -# "so that mirrors aren't rebalanced as if the OSD died" - gfidente / leseb -ceph osd set noout -ceph osd set norebalance -ceph osd set nodeep-scrub -ceph osd set noscrub - -# Stop daemon using Hammer sysvinit script -for OSD_ID in $OSD_IDS; do - service ceph stop osd.${OSD_ID} -done - -# Nice guy will return non-0 only when all failed -timeout 60 bash -c "while kill -0 ${OSD_PIDS} 2> /dev/null; do - sleep 2; -done" - -special_case_ovs_upgrade_if_needed - -# Update (Ceph to Jewel) -yum -y install python-zaqarclient # needed for os-collect-config -yum -y update - -# Restart/Exit if not on Jewel, only in that case we need the changes -UPDATED_VERSION=$(ceph --version | awk '{print $3}') -if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then - echo WARNING: Ceph was not upgraded, restarting daemon - for OSD_ID in $OSD_IDS; do - service ceph start osd.${OSD_ID} - done -elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then - # RPM could own some of these but we can't take risks on the pre-existing files - for d in /var/lib/ceph/osd /var/log/ceph /var/run/ceph /etc/ceph; do - chown -L -R ceph:ceph $d || echo WARNING: chown of $d failed - done - - # Replay udev events with newer rules - udevadm trigger && udevadm settle - - # If on ext4, we need to enforce lower values for name and namespace len - # or ceph-osd will refuse to start, see: http://tracker.ceph.com/issues/16187 - for OSD_ID in $OSD_IDS; do - OSD_FS=$(df -l --output=fstype /var/lib/ceph/osd/ceph-${OSD_ID} | tail -n +2) - if [ ${OSD_FS} = ext4 ]; then - crudini --set /etc/ceph/ceph.conf global osd_max_object_name_len 256 - crudini --set /etc/ceph/ceph.conf global osd_max_object_namespace_len 64 - fi - done - - # Enable systemd unit - systemctl enable ceph-osd.target - for OSD_ID in $OSD_IDS; do - systemctl enable ceph-osd@${OSD_ID} - systemctl start ceph-osd@${OSD_ID} - done - - echo INFO: Ceph was upgraded to Jewel -else - echo ERROR: Ceph was upgraded to an unknown release, daemon is stopped, need manual intervention - exit 1 -fi - -ceph osd unset noout -ceph osd unset norebalance -ceph osd unset nodeep-scrub -ceph osd unset noscrub -ENDOFCAT - -# ensure the permissions are OK -chmod 0755 $UPGRADE_SCRIPT diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh deleted file mode 100755 index 8bdff5e7..00000000 --- a/extraconfig/tasks/major_upgrade_check.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/bin/bash - -set -eu - -check_cluster() -{ - if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then - echo_error "ERROR: upgrade cannot start with some cluster nodes being offline" - exit 1 - fi -} - -check_pcsd() -{ - if pcs status 2>&1 | grep -E 'Offline'; then - echo_error "ERROR: upgrade cannot start with some pcsd daemon offline" - exit 1 - fi -} - -mysql_need_update() -{ - # Shall we upgrade mysql data directory during the stack upgrade? 
- if [ "$mariadb_do_major_upgrade" = "auto" ]; then - ret=$(is_mysql_upgrade_needed) - if [ $ret = "1" ]; then - DO_MYSQL_UPGRADE=1 - else - DO_MYSQL_UPGRADE=0 - fi - echo "mysql upgrade required: $DO_MYSQL_UPGRADE" - elif [ "$mariadb_do_major_upgrade" = "no" ]; then - DO_MYSQL_UPGRADE=0 - else - DO_MYSQL_UPGRADE=1 - fi -} - -check_disk_for_mysql_dump() -{ - # Where to backup current database if mysql need to be upgraded - MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp - MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup - # Spare disk ratio for extra safety - MYSQL_BACKUP_SIZE_RATIO=1.2 - - mysql_need_update - - if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then - if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - - if [ -d "$MYSQL_BACKUP_DIR" ]; then - echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously" - exit 1 - fi - mkdir "$MYSQL_BACKUP_DIR" - if [ $? -ne 0 ]; then - echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR" - exit 1 - fi - - # the /root/.my.cnf is needed because we set the mysql root - # password from liberty onwards - backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction" - # While not ideal, this step allows us to calculate exactly how much space the dump - # will need. Our main goal here is avoiding any chance of corruption due to disk space - # exhaustion - backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c) - database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }') - free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1) - - # we need at least space for a new mysql database + dump of the existing one, - # times a small factor for additional safety room - # note: bash doesn't do floating point math or floats in if statements, - # so use python to apply the ratio and cast it back to integer - required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))") - if [ $required_space -ge $free_space ]; then - echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)" - exit 1 - fi - fi - fi -} - -check_python_rpm() -{ - # If for some reason rpm-python are missing we want to error out early enough - if ! rpm -q rpm-python &> /dev/null; then - echo_error "ERROR: upgrade cannot start without rpm-python installed" - exit 1 - fi -} - -check_clean_cluster() -{ - if pcs status | grep -q Stopped:; then - echo_error "ERROR: upgrade cannot start with stopped resources on the cluster. Make sure that all the resources are up and running." - exit 1 - fi -} - -check_galera_root_password() -{ - # BZ: 1357112 - if [ ! -e /root/.my.cnf ]; then - echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update." - exit 1 - fi -} diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh deleted file mode 100644 index 7a3e1073..00000000 --- a/extraconfig/tasks/major_upgrade_compute.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -# -# This delivers the compute upgrade script to be invoked as part of the tripleo -# major upgrade workflow. 
-# -set -eu - -UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh - -cat > $UPGRADE_SCRIPT << ENDOFCAT -### DO NOT MODIFY THIS FILE -### This file is automatically delivered to the compute nodes as part of the -### tripleo upgrades workflow - -set -eu - -# pin nova to kilo (messaging +-1) for the nova-compute service - -crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute - -$(declare -f special_case_ovs_upgrade_if_needed) -special_case_ovs_upgrade_if_needed - -yum -y install python-zaqarclient # needed for os-collect-config -yum -y update - -# Due to bug#1640177 we need to restart compute agent -echo "Restarting openstack ceilometer agent compute" -systemctl restart openstack-ceilometer-compute - -ENDOFCAT - -# ensure the permissions are OK -chmod 0755 $UPGRADE_SCRIPT - diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh deleted file mode 100755 index 080831ab..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -set -eu - -check_cluster -check_pcsd -if [[ -n $(is_bootstrap_node) ]]; then - check_clean_cluster -fi -check_python_rpm -check_galera_root_password -check_disk_for_mysql_dump - -# We want to disable fencing during the cluster --stop as it might fence -# nodes where a service fails to stop, which could be fatal during an upgrade -# procedure. So we remember the stonith state. If it was enabled we reenable it -# at the end of this script -if [[ -n $(is_bootstrap_node) ]]; then - STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }') - # We create this empty file if stonith was set to true so we can reenable stonith in step2 - rm -f /var/tmp/stonith-true - if [ $STONITH_STATE == "true" ]; then - touch /var/tmp/stonith-true - fi - pcs property set stonith-enabled=false -fi - -# Migrate to HA NG and fix up rabbitmq queues -# We fix up the rabbitmq ha queues after the migration because it will -# restart the rabbitmq resource. Doing it after the migration means no other -# services will be restart as there are no other constraints -if [[ -n $(is_bootstrap_node) ]]; then - migrate_full_to_ng_ha - rabbitmq_newton_ocata_upgrade -fi - diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh deleted file mode 100755 index 6bfe1239..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh +++ /dev/null @@ -1,176 +0,0 @@ -#!/bin/bash - -set -eu - -cluster_sync_timeout=1800 - -# After migrating the cluster to HA-NG the services not under pacemaker's control -# are still up and running. We need to stop them explicitely otherwise during the yum -# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which -# is going to take a long time because rabbit is down. By having the service stopped -# systemctl try-restart is a noop - -for service in $(services_to_migrate); do - manage_systemd_service stop "${service%%-clone}" - # So the reason for not reusing check_resource_systemd is that - # I have observed systemctl is-active returning unknown with at least - # one service that was stopped (See LP 1627254) - timeout=600 - tstart=$(date +%s) - tend=$(( $tstart + $timeout )) - check_interval=3 - while (( $(date +%s) < $tend )); do - if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then - echo "$service still active, sleeping $check_interval seconds." 
- sleep $check_interval - else - # we do not care if it is inactive, unknown or failed as long as it is - # not running - break - fi - - done -done - -# In case the mysql package is updated, the database on disk must be -# upgraded as well. This typically needs to happen during major -# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...) -# -# Because in-place upgrades are not supported across 2+ major versions -# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle -# https://bugzilla.redhat.com/show_bug.cgi?id=1341968 -# -# The default is to determine automatically if upgrade is needed based -# on mysql package versionning, but this can be overriden manually -# to support specific upgrade scenario - -# Calling this function will set the DO_MYSQL_UPGRADE variable which is used -# later -mysql_need_update - -if [[ -n $(is_bootstrap_node) ]]; then - if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql" - cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR" - fi - - pcs resource disable redis - check_resource redis stopped 600 - pcs resource disable rabbitmq - check_resource rabbitmq stopped 600 - pcs resource disable galera - check_resource galera stopped 600 - pcs resource disable openstack-cinder-volume - check_resource openstack-cinder-volume stopped 600 - # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address: - # https://bugzilla.redhat.com/show_bug.cgi?id=1330688 - for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do - pcs resource disable $vip - check_resource $vip stopped 60 - done - pcs cluster stop --all -fi - - -# Swift isn't controlled by pacemaker -systemctl_swift stop - -tstart=$(date +%s) -while systemctl is-active pacemaker; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > cluster_sync_timeout )) ; then - echo_error "ERROR: cluster shutdown timed out" - exit 1 - fi -done - -# The reason we do an sql dump *and* we move the old dir out of -# the way is because it gives us an extra level of safety in case -# something goes wrong during the upgrade. Once the restore is -# successful we go ahead and remove it. If the directory exists -# we bail out as it means the upgrade process had issues in the last -# run. -if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then - echo_error "ERROR: mysql backup dir already exist" - exit 1 - fi - mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR -fi - -# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 -special_case_ovs_upgrade_if_needed - -yum -y install python-zaqarclient # needed for os-collect-config -yum -y -q update - -# We need to ensure at least those two configuration settings, otherwise -# mariadb 10.1+ won't activate galera replication. -# wsrep_cluster_address must only be set though, its value does not -# matter because it's overriden by the galera resource agent. 
-cat >> /etc/my.cnf.d/galera.cnf <<EOF -[mysqld] -wsrep_on = ON -wsrep_cluster_address = gcomm://localhost -EOF - -if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - # Scripts run via heat have no HOME variable set and this confuses - # mysqladmin - export HOME=/root - - mkdir /var/lib/mysql || /bin/true - chown mysql:mysql /var/lib/mysql - chmod 0755 /var/lib/mysql - restorecon -R /var/lib/mysql/ - mysql_install_db --datadir=/var/lib/mysql --user=mysql - chown -R mysql:mysql /var/lib/mysql/ - - if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then - mysqld_safe --wsrep-new-cluster & - # We have a populated /root/.my.cnf with root/password here so - # we need to temporarily rename it because the newly created - # db is empty and no root password is set - mv /root/.my.cnf /root/.my.cnf.temporary - timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done' - mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql" - mv /root/.my.cnf.temporary /root/.my.cnf - mysqladmin -u root shutdown - # The import was successful so we may remove the folder - rm -r "$MYSQL_BACKUP_DIR" - fi -fi - -# If we reached here without error we can safely blow away the origin -# mysql dir from every controller - -# TODO: What if the upgrade fails on the bootstrap node, but not on -# this controller. Data may be lost. -if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR -fi - -# Let's reset the stonith back to true if it was true, before starting the cluster -if [[ -n $(is_bootstrap_node) ]]; then - if [ -f /var/tmp/stonith-true ]; then - pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true - fi - rm -f /var/tmp/stonith-true -fi - -# Pin messages sent to compute nodes to kilo, these will be upgraded later -crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute" -# https://bugzilla.redhat.com/show_bug.cgi?id=1284047 -# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435 -crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit -# https://bugzilla.redhat.com/show_bug.cgi?id=1284058 -# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists -crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server" -# LP: 1615035, required only for M/N upgrade. -crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager -# LP: 1627450, required only for M/N upgrade -crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler - -crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm - diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh deleted file mode 100755 index a3cbd945..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash - -set -eu - -cluster_form_timeout=600 -cluster_settle_timeout=1800 -galera_sync_timeout=600 - -if [[ -n $(is_bootstrap_node) ]]; then - pcs cluster start --all - - tstart=$(date +%s) - while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > cluster_form_timeout )) ; then - echo_error "ERROR: timed out forming the cluster" - exit 1 - fi - done - - if ! 
timeout -k 10 $cluster_settle_timeout crm_resource --wait; then - echo_error "ERROR: timed out waiting for cluster to finish transition" - exit 1 - fi - - for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do - pcs resource enable $vip - check_resource_pacemaker $vip started 60 - done -fi - -start_or_enable_service galera -check_resource galera started 600 -start_or_enable_service redis -check_resource redis started 600 -# We need mongod which is now a systemd service up and running before calling -# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes -# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely -# we should be good. -# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm -systemctl start mongod -check_resource mongod started 600 - -if [[ -n $(is_bootstrap_node) ]]; then - tstart=$(date +%s) - while ! clustercheck; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > galera_sync_timeout )) ; then - echo_error "ERROR galera sync timed out" - exit 1 - fi - done - - # Run all the db syncs - # TODO: check if this can be triggered in puppet and removed from here - ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types - cinder-manage db sync - glance-manage db_sync - heat-manage --config-file /etc/heat/heat.conf db_sync - keystone-manage db_sync - neutron-db-manage upgrade heads - nova-manage db sync - nova-manage api_db sync - nova-manage db online_data_migrations - sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head -fi diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh deleted file mode 100755 index d2cb9553..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -eu - -start_or_enable_service rabbitmq -check_resource rabbitmq started 600 -start_or_enable_service redis -check_resource redis started 600 -start_or_enable_service openstack-cinder-volume -check_resource openstack-cinder-volume started 600 - -# start httpd so keystone is available for gnocchi -# upgrade to run. -systemctl start httpd - -# Swift isn't controled by pacemaker -systemctl_swift start diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh deleted file mode 100755 index fa95f1f8..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -eu - -if [[ -n $(is_bootstrap_node) ]]; then - # run gnocchi upgrade - gnocchi-upgrade -fi diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh deleted file mode 100755 index d569084d..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -set -eu - -# We need to start the systemd services we explicitely stopped at step _1.sh -# FIXME: Should we let puppet during the convergence step do the service enabling or -# should we add it here? 
-services=$(services_to_migrate) -if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then - services=${services%%openstack-sahara*} -fi -for service in $services; do - manage_systemd_service start "${service%%-clone}" - check_resource_systemd "${service%%-clone}" started 600 -done diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh deleted file mode 100644 index d9d1b4d5..00000000 --- a/extraconfig/tasks/major_upgrade_object_storage.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -# -# This delivers the swift-storage upgrade script to be invoked as part of the tripleo -# major upgrade workflow. -# -set -eu - -UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh - -cat > $UPGRADE_SCRIPT << ENDOFCAT -### DO NOT MODIFY THIS FILE -### This file is automatically delivered to the swift-storage nodes as part of the -### tripleo upgrades workflow - -set -eu - -function systemctl_swift { - action=\$1 - for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \ - openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \ - openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object; do - systemctl \$action \$S - done -} - -$(declare -f special_case_ovs_upgrade_if_needed) -special_case_ovs_upgrade_if_needed - -systemctl_swift stop - -yum -y install python-zaqarclient # needed for os-collect-config -yum -y update - -systemctl_swift start - - - -ENDOFCAT - -# ensure the permissions are OK -chmod 0755 $UPGRADE_SCRIPT - diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml deleted file mode 100644 index b63aafbd..00000000 --- a/extraconfig/tasks/major_upgrade_pacemaker.yaml +++ /dev/null @@ -1,225 +0,0 @@ -heat_template_version: ocata -description: 'Upgrade for Pacemaker deployments' - -parameters: - servers: - type: json - input_values: - type: json - description: input values for the software deployments - - UpgradeLevelNovaCompute: - type: string - description: Nova Compute upgrade level - default: '' - MySqlMajorUpgrade: - type: string - description: Can be auto,yes,no and influences if the major upgrade should do or detect an automatic mysql upgrade - constraints: - - allowed_values: ['auto', 'yes', 'no'] - default: 'auto' - IgnoreCephUpgradeWarnings: - type: boolean - default: false - description: If enabled, Ceph upgrade will be forced even though cluster or PGs status is not clean - KeepSaharaServicesOnUpgrade: - type: boolean - default: true - description: Whether to keep Sahara services when upgrading controller nodes from mitaka to newton - - -resources: - # TODO(jistr): for Mitaka->Newton upgrades and further we can use - # map_merge with input_values instead of feeding params into scripts - # via str_replace on bash snippets - - CephMonUpgradeConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - ignore_ceph_upgrade_warnings='IGNORE_CEPH_UPGRADE_WARNINGS' - params: - IGNORE_CEPH_UPGRADE_WARNINGS: {get_param: IgnoreCephUpgradeWarnings} - - get_file: major_upgrade_ceph_mon.sh - - CephMonUpgradeDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: CephMonUpgradeConfig} - input_values: {get_param: 
input_values} - update_policy: - batch_create: - max_batch_size: 1 - rolling_update: - max_batch_size: 1 - - ControllerPacemakerUpgradeConfig_Step1: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' - params: - UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} - - str_replace: - template: | - #!/bin/bash - mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE' - params: - MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade} - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_check.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_1.sh - - ControllerPacemakerUpgradeDeployment_Step1: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: CephMonUpgradeDeployment - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step1} - input_values: {get_param: input_values} - - BlockStorageUpgradeConfig: - type: OS::Heat::SoftwareConfig - depends_on: ControllerPacemakerUpgradeDeployment_Step1 - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_block_storage.sh - - BlockStorageUpgradeDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, BlockStorage]} - config: {get_resource: BlockStorageUpgradeConfig} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step2: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' - params: - UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} - - str_replace: - template: | - #!/bin/bash - mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE' - params: - MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade} - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_check.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_2.sh - - ControllerPacemakerUpgradeDeployment_Step2: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: BlockStorageUpgradeDeployment - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step2} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step3: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_3.sh - - ControllerPacemakerUpgradeDeployment_Step3: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step2 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step3} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step4: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_4.sh - - ControllerPacemakerUpgradeDeployment_Step4: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: 
ControllerPacemakerUpgradeDeployment_Step3 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step4} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step5: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_5.sh - - ControllerPacemakerUpgradeDeployment_Step5: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step4 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step5} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step6: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - keep_sahara_services_on_upgrade='KEEP_SAHARA_SERVICES_ON_UPGRADE' - params: - KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade} - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_6.sh - - ControllerPacemakerUpgradeDeployment_Step6: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step5 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step6} - input_values: {get_param: input_values} diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml deleted file mode 100644 index c308720b..00000000 --- a/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml +++ /dev/null @@ -1,96 +0,0 @@ -heat_template_version: ocata -description: 'Upgrade for Pacemaker deployments' - -parameters: - - servers: - type: json - input_values: - type: json - description: input values for the software deployments - - UpgradeInitCommand: - type: string - description: | - Command or script snippet to run on all overcloud nodes to - initialize the upgrade process. E.g. a repository switch. 
- default: '' - UpgradeLevelNovaCompute: - type: string - description: Nova Compute upgrade level - default: '' - -resources: - - # For the UpgradeInit also rename /etc/resolv.conf.save for +bug/1567004 - - UpgradeInitConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - "#!/bin/bash\n\n" - - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" - - get_param: UpgradeInitCommand - - # TODO(jistr): for Mitaka->Newton upgrades and further we can use - # map_merge with input_values instead of feeding params into scripts - # via str_replace on bash snippets - - # FIXME(shardy) we have hard-coded per-role *ScriptConfig's here - # Would be better to have a common config for all roles - ComputeDeliverUpgradeScriptConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' - params: - UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_compute.sh - - ObjectStorageDeliverUpgradeScriptConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_object_storage.sh - - CephStorageDeliverUpgradeScriptConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_ceph_storage.sh - -{% for role in roles %} - UpgradeInit{{role.name}}Deployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: UpgradeInitConfig} - input_values: {get_param: input_values} - - {% if not role.name in ['Controller', 'BlockStorage'] %} - {{role.name}}DeliverUpgradeScriptDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}DeliverUpgradeScriptConfig} - input_values: {get_param: input_values} - {% endif %} -{% endfor %} diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh deleted file mode 100644 index ae22a1e7..00000000 --- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh +++ /dev/null @@ -1,200 +0,0 @@ -#!/bin/bash - -# Special pieces of upgrade migration logic go into this -# file. E.g. Pacemaker cluster transitions for existing deployments, -# matching changes to overcloud_controller_pacemaker.pp (Puppet -# handles deployment, this file handles migrations). -# -# This file shouldn't execute any action on its own, all logic should -# be wrapped into bash functions. Upgrade scripts will source this -# file and call the functions defined in this file where appropriate. -# -# The migration functions should be idempotent. If the migration has -# been already applied, it should be possible to call the function -# again without damaging the deployment or failing the upgrade. - -# If the major version of mysql is going to change after the major -# upgrade, the database must be upgraded on disk to avoid failures -# due to internal incompatibilities between major mysql versions -# https://bugs.launchpad.net/tripleo/+bug/1587449 -# This function detects whether a database upgrade is required -# after a mysql package upgrade. 
It returns 0 when no major upgrade -# has to take place, 1 otherwise. -function is_mysql_upgrade_needed { - # The name of the package which provides mysql might differ - # after the upgrade. Consider the generic package name, which - # should capture the major version change (e.g. 5.5 -> 10.1) - local name="mariadb" - local output - local ret - set +e - output=$(yum -q check-update $name) - ret=$? - set -e - if [ $ret -ne 100 ]; then - # no updates so we exit - echo "0" - return - fi - - local currentepoch=$(rpm -q --qf "%{epoch}" $name) - local currentversion=$(rpm -q --qf "%{version}" $name | cut -d. -f-2) - local currentrelease=$(rpm -q --qf "%{release}" $name) - local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name) - local newepoch=$(echo "$newoutput" | awk '{ print $1 }') - local newversion=$(echo "$newoutput" | awk '{ print $2 }' | cut -d. -f-2) - local newrelease=$(echo "$newoutput" | awk '{ print $3 }') - - # With this we trigger the dump restore/path if we change either epoch or - # version in the package If only the release tag changes we do not do it - # FIXME: we could refine this by trying to parse the mariadb version - # into X.Y.Z and trigger the update only if X and/or Y change. - output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc") - if [ "$output" != "-1" ]; then - echo "0" - return - fi - echo "1" -} - -# This function returns the list of services to be migrated away from pacemaker -# and to systemd. The reason to have these services in a separate function is because -# this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2} -# and in the function to migrate the cluster from full HA to HA NG -function services_to_migrate { - # The following PCMK resources the ones the we are going to delete - PCMK_RESOURCE_TODELETE=" - httpd-clone - memcached-clone - mongod-clone - neutron-dhcp-agent-clone - neutron-l3-agent-clone - neutron-metadata-agent-clone - neutron-netns-cleanup-clone - neutron-openvswitch-agent-clone - neutron-ovs-cleanup-clone - neutron-server-clone - openstack-aodh-evaluator-clone - openstack-aodh-listener-clone - openstack-aodh-notifier-clone - openstack-ceilometer-central-clone - openstack-ceilometer-collector-clone - openstack-ceilometer-notification-clone - openstack-cinder-api-clone - openstack-cinder-scheduler-clone - openstack-glance-api-clone - openstack-gnocchi-metricd-clone - openstack-gnocchi-statsd-clone - openstack-heat-api-cfn-clone - openstack-heat-api-clone - openstack-heat-api-cloudwatch-clone - openstack-heat-engine-clone - openstack-nova-api-clone - openstack-nova-conductor-clone - openstack-nova-consoleauth-clone - openstack-nova-novncproxy-clone - openstack-nova-scheduler-clone - openstack-sahara-api-clone - openstack-sahara-engine-clone - " - echo $PCMK_RESOURCE_TODELETE -} - -# This function will migrate a mitaka system where all the resources are managed -# via pacemaker to a newton setup where only a few services will be managed by pacemaker -# On a high-level it will operate as follows: -# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place -# during the conversion -# 2. Remove all the colocation constraints and then the ordering constraints, except the -# ones related to haproxy/VIPs which exist in Newton as well -# 3. Take the cluster out of maintenance-mode -# 4. 
Remove all the resources that won't be managed by pacemaker in newton. The -# outcome will be -# that they are stopped and removed from pacemakers control -# 5. Do a resource cleanup to make sure the cluster is in a clean state -function migrate_full_to_ng_ha { - if [[ -n $(pcmk_running) ]]; then - pcs property set maintenance-mode=true - - # First we go through all the colocation constraints (except the ones - # we want to keep, i.e. the haproxy/ip ones) and we remove those - COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\)) - for constraint in $COL_CONSTRAINTS; do - log_debug "Deleting colocation constraint $constraint from CIB" - pcs constraint remove "$constraint" - done - - # Now we kill all the ordering constraints (except the haproxy/ip ones) - ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\)) - for constraint in $ORD_CONSTRAINTS; do - log_debug "Deleting ordering constraint $constraint from CIB" - pcs constraint remove "$constraint" - done - # At this stage all the pacemaker resources are removed from the CIB. - # Once we remove the maintenance-mode those systemd resources will keep - # on running. They shall be systemd enabled via the puppet converge - # step later on - pcs property set maintenance-mode=false - - # At this stage there are no constraints whatsoever except the haproxy/ip ones - # which we want to keep. We now disable and then delete each resource - # that will move to systemd. - # We want the systemd resources be stopped before doing "yum update", - # that way "systemctl try-restart <service>" is no-op because the - # service was down already - PCS_STATUS_OUTPUT="$(pcs status)" - for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do - if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then - log_debug "Deleting $resource from the CIB" - if ! pcs resource disable "$resource" --wait=600; then - echo_error "ERROR: resource $resource failed to be disabled" - exit 1 - fi - pcs resource delete --force "$resource" - else - log_debug "Service $resource not found as a pacemaker resource, not trying to delete." - fi - done - - # We need to do a pcs resource cleanup here + crm_resource --wait to - # make sure the cluster is in a clean state before we stop everything, - # upgrade and restart everything - pcs resource cleanup - # We are making sure here that the cluster is stable before proceeding - if ! timeout -k 10 600 crm_resource --wait; then - echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting." 
- exit 1 - fi - fi -} - -function disable_standalone_ceilometer_api { - if [[ -n $(is_bootstrap_node) ]]; then - if [[ -n $(is_pacemaker_managed openstack-ceilometer-api) ]]; then - # Disable pacemaker resources for ceilometer-api - manage_pacemaker_service disable openstack-ceilometer-api - check_resource_pacemaker openstack-ceilometer-api stopped 600 - pcs resource delete openstack-ceilometer-api --wait=600 - fi - fi -} - - -# This function will make sure that the rabbitmq ha policies are converted from mitaka to newton -# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}" -# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}" -# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances) -# Note that changing an attribute like this makes the rabbitmq resource restart -function rabbitmq_newton_ocata_upgrade { - if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then - # Number of controller is obtained by counting how many hostnames we - # have in controller_node_names hiera key - nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1)) - nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2))) - if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then - echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues" - exit 1 - fi - pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600 - fi -} diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml deleted file mode 100644 index 45933fb7..00000000 --- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml +++ /dev/null @@ -1,25 +0,0 @@ -heat_template_version: ocata - -description: > - Software-config for performing aodh data migration - -parameters: - servers: - type: json - input_values: - type: json - description: input values for the software deployments -resources: - - AodhMysqlMigrationScriptConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: aodh_data_migration.sh} - - AodhMysqlMigrationScriptDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: AodhMysqlMigrationScriptConfig} - input_values: {get_param: input_values} diff --git a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp deleted file mode 100644 index a8d43663..00000000 --- a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-# This puppet manifest is to be used only during a Mitaka->Newton upgrade
-# It configures ceilometer to be run under httpd but it makes sure to not
-# restart any services. This snippet needs to be called before init as a
-# pre upgrade migration.
-
-Service <|
-  tag == 'ceilometer-service'
-|> {
-  hasrestart => true,
-  restart    => '/bin/true',
-  start      => '/bin/true',
-  stop       => '/bin/true',
-}
-
-if $::hostname == downcase(hiera('bootstrap_nodeid')) {
-  $pacemaker_master = true
-  $sync_db = true
-} else {
-  $pacemaker_master = false
-  $sync_db = false
-}
-
-include ::tripleo::packages
-
-
-if str2bool(hiera('mongodb::server::ipv6', false)) {
-  $mongo_node_ips_with_port_prefixed = prefix(hiera('mongodb_node_ips'), '[')
-  $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
-} else {
-  $mongo_node_ips_with_port = suffix(hiera('mongodb_node_ips'), ':27017')
-}
-$mongodb_replset = hiera('mongodb::server::replset')
-$mongo_node_string = join($mongo_node_ips_with_port, ',')
-$database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
-
-$rabbit_hosts = hiera('rabbitmq_node_ips', undef)
-$rabbit_port = hiera('ceilometer::rabbit_port', 5672)
-$rabbit_endpoints = suffix(any2array(normalize_ip_for_uri($rabbit_hosts)), ":${rabbit_port}")
-
-class { '::ceilometer' :
-  rabbit_hosts => $rabbit_endpoints,
-}
-
-class {'::ceilometer::db':
-  database_connection => $database_connection,
-}
-
-if $sync_db {
-  include ::ceilometer::db::sync
-}
-
-include ::ceilometer::config
-
-class { '::ceilometer::api':
-  enabled           => true,
-  service_name      => 'httpd',
-  keystone_password => hiera('ceilometer::keystone::auth::password'),
-  identity_uri      => hiera('ceilometer::keystone::authtoken::auth_url'),
-  auth_uri          => hiera('ceilometer::keystone::authtoken::auth_uri'),
-  keystone_tenant   => hiera('ceilometer::keystone::authtoken::project_name'),
-}
-
-class { '::apache' :
-  service_enable  => false,
-  service_manage  => true,
-  service_restart => '/bin/true',
-  purge_configs   => false,
-  purge_vhost_dir => false,
-}
-
-# To ensure existing ports are not overridden
-class { '::aodh::wsgi::apache':
-  servername => $::hostname,
-  ssl        => false,
-}
-class { '::gnocchi::wsgi::apache':
-  servername => $::hostname,
-  ssl        => false,
-}
-
-class { '::keystone::wsgi::apache':
-  servername => $::hostname,
-  ssl        => false,
-}
-class { '::ceilometer::wsgi::apache':
-  servername => $::hostname,
-  ssl        => false,
-}
diff --git a/extraconfig/tasks/run_puppet.sh b/extraconfig/tasks/run_puppet.sh
new file mode 100755
index 00000000..b7771e33
--- /dev/null
+++ b/extraconfig/tasks/run_puppet.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+function run_puppet {
+    set -eux
+    local manifest="$1"
+    local role="$2"
+    local step="$3"
+    local rc=0
+
+    export FACTER_deploy_config_name="${role}Deployment_Step${step}"
+    if [ -e "/etc/puppet/hieradata/heat_config_${FACTER_deploy_config_name}.json" ]; then
+        set +e
+        puppet apply --detailed-exitcodes "${manifest}"
+        rc=$?
+ echo "puppet apply exited with exit code $rc" + else + echo "Step${step} doesn't exist for ${role}" + fi + set -e + + if [ $rc -eq 2 -o $rc -eq 0 ]; then + set +xu + return 0 + fi + set +xu + return $rc +} diff --git a/extraconfig/tasks/swift-ring-deploy.yaml b/extraconfig/tasks/swift-ring-deploy.yaml new file mode 100644 index 00000000..d17f78ae --- /dev/null +++ b/extraconfig/tasks/swift-ring-deploy.yaml @@ -0,0 +1,31 @@ +heat_template_version: ocata + +parameters: + servers: + type: json + SwiftRingGetTempurl: + default: '' + description: A temporary Swift URL to download rings from. + type: string + +resources: + SwiftRingDeployConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: swift_ring_get_tempurl + config: | + #!/bin/sh + pushd / + curl --insecure --silent "${swift_ring_get_tempurl}" | tar xz || true + popd + + SwiftRingDeploy: + type: OS::Heat::SoftwareDeployments + properties: + name: SwiftRingDeploy + config: {get_resource: SwiftRingDeployConfig} + servers: {get_param: servers} + input_values: + swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl} diff --git a/extraconfig/tasks/swift-ring-update.yaml b/extraconfig/tasks/swift-ring-update.yaml new file mode 100644 index 00000000..440c6883 --- /dev/null +++ b/extraconfig/tasks/swift-ring-update.yaml @@ -0,0 +1,42 @@ +heat_template_version: ocata + +parameters: + servers: + type: json + SwiftRingPutTempurl: + default: '' + description: A temporary Swift URL to upload rings to. + type: string + +resources: + SwiftRingUpdateConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: swift_ring_put_tempurl + config: | + #!/bin/sh + TMP_DATA=$(mktemp -d) + function cleanup { + rm -Rf "$TMP_DATA" + } + trap cleanup EXIT + # sanity check in case rings are not consistent within cluster + swift-recon --md5 | grep -q "doesn't match" && exit 1 + pushd ${TMP_DATA} + tar -cvzf swift-rings.tar.gz /etc/swift/*.builder /etc/swift/*.ring.gz /etc/swift/backups/* + resp=`curl --insecure --silent -X PUT "${swift_ring_put_tempurl}" --write-out "%{http_code}" --data-binary @swift-rings.tar.gz` + popd + if [ "$resp" != "201" ]; then + exit 1 + fi + + SwiftRingUpdate: + type: OS::Heat::SoftwareDeployments + properties: + name: SwiftRingUpdate + config: {get_resource: SwiftRingUpdateConfig} + servers: {get_param: servers} + input_values: + swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl} diff --git a/extraconfig/tasks/tripleo_upgrade_node.sh b/extraconfig/tasks/tripleo_upgrade_node.sh new file mode 100644 index 00000000..24211ab0 --- /dev/null +++ b/extraconfig/tasks/tripleo_upgrade_node.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# +# This delivers the operator driven upgrade script to be invoked as part of +# the tripleo major upgrade workflow. The utility 'upgrade-non-controller.sh' +# is used from the undercloud to invoke the /root/tripleo_upgrade_node.sh +# +set -eu + +UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh + +cat > $UPGRADE_SCRIPT << ENDOFCAT +### DO NOT MODIFY THIS FILE +### This file is automatically delivered to those nodes where the +### disable_upgrade_deployment flag is set in roles_data.yaml. 
+
+set -eu
+NOVA_COMPUTE=""
+if hiera -c /etc/puppet/hiera.yaml service_names | grep nova_compute ; then
+    NOVA_COMPUTE="true"
+fi
+SWIFT_STORAGE=""
+if hiera -c /etc/puppet/hiera.yaml service_names | grep swift_storage ; then
+    SWIFT_STORAGE="true"
+fi
+
+DEBUG="true"
+SCRIPT_NAME=$(basename $0)
+$(declare -f log_debug)
+$(declare -f manage_systemd_service)
+$(declare -f systemctl_swift)
+
+# pin nova messaging +-1 for the nova-compute service
+if [[ -n \$NOVA_COMPUTE ]]; then
+    crudini --set /etc/nova/nova.conf upgrade_levels compute auto
+fi
+
+if [[ -n \$SWIFT_STORAGE ]]; then
+    systemctl_swift stop
+fi
+
+yum -y update
+
+if [[ -n \$SWIFT_STORAGE ]]; then
+    systemctl_swift start
+fi
+# Due to bug#1640177 we need to restart compute agent
+if [[ -n \$NOVA_COMPUTE ]]; then
+    log_debug "Restarting openstack ceilometer agent compute"
+    systemctl restart openstack-ceilometer-compute
+fi
+
+# Apply puppet manifest to converge just right after the ${ROLE} upgrade
+$(declare -f run_puppet)
+for step in 1 2 3 4 5 6; do
+    log_debug "Running puppet step \$step for ${ROLE}"
+    if ! run_puppet /root/${ROLE}_puppet_config.pp ${ROLE} \${step}; then
+        log_debug "Puppet failure at step \${step}"
+        exit 1
+    fi
+    log_debug "Completed puppet step \$step"
+done
+
+log_debug "TripleO upgrade run completed."
+
+ENDOFCAT
+
+# ensure the permissions are OK
+chmod 0755 $UPGRADE_SCRIPT
+
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index edcc9e8e..3bf72f14 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -10,6 +10,11 @@ echo "Started yum_update.sh on server $deploy_server_id at `date`"
 
 echo -n "false" > $heat_outputs_path.update_managed_packages
 
+if [ -f /.dockerenv ]; then
+    echo "Not running due to running inside a container"
+    exit 0
+fi
+
 if [[ -z "$update_identifier" ]]; then
     echo "Not running due to unset update_identifier"
     exit 0
@@ -42,7 +47,10 @@ if [[ "$list_updates" == "" ]]; then
     exit 0
 fi
 
-pacemaker_status=$(systemctl is-active pacemaker || :)
+pacemaker_status=""
+if hiera -c /etc/puppet/hiera.yaml service_names | grep -q pacemaker; then
+    pacemaker_status=$(systemctl is-active pacemaker)
+fi
 
 # Fix the redis/rabbit resource start/stop timeouts. See https://bugs.launchpad.net/tripleo/+bug/1633455
 # and https://bugs.launchpad.net/tripleo/+bug/1634851
@@ -62,9 +70,6 @@ if [[ "$pacemaker_status" == "active" && \
     fi
 fi
 
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-special_case_ovs_upgrade_if_needed
-
 if [[ "$pacemaker_status" == "active" ]] ; then
     echo "Pacemaker running, stopping cluster node and doing full package update"
     node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
@@ -92,17 +97,6 @@ return_code=$?
 echo "$result"
 echo "yum return code: $return_code"
 
-# Writes any changes caused by alterations to os-net-config and bounces the
-# interfaces *before* restarting the cluster.
-os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
-RETVAL=$?
-if [[ $RETVAL == 2 ]]; then
-    echo "os-net-config: interface configuration files updated successfully"
-elif [[ $RETVAL != 0 ]]; then
-    echo "ERROR: os-net-config configuration failed"
-    exit $RETVAL
-fi
-
 if [[ "$pacemaker_status" == "active" ]] ; then
     echo "Starting cluster node"
     pcs cluster start