author     Jenkins <jenkins@review.openstack.org>      2016-09-16 20:11:47 +0000
committer  Gerrit Code Review <review@openstack.org>   2016-09-16 20:11:47 +0000
commit     75b7f0f0c84028cb13a15ec96cc913b3c472c564 (patch)
tree       ff6de084082666ae36e13a9c9f35f0691b3be2f1 /extraconfig
parent     589e6ad27a1a9e2a804d3f8e35f604dcdae8f773 (diff)
parent     575e42b0287e37d3ef261c040fb3d331d3419801 (diff)
Merge "Refactor upgrade checks."
Diffstat (limited to 'extraconfig')
-rwxr-xr-x  extraconfig/tasks/major_upgrade_check.sh                    104
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh    68
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml                 1
3 files changed, 111 insertions, 62 deletions
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
new file mode 100755
index 00000000..dc7ec71a
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_check.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+set -eu
+
+check_cluster()
+{
+    if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
+        echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
+        exit 1
+    fi
+}
+
+check_pcsd()
+{
+    if pcs status 2>&1 | grep -E 'Offline'; then
+        echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
+        exit 1
+    fi
+}
+
+check_disk_for_mysql_dump()
+{
+    # Where to backup current database if mysql need to be upgraded
+    MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
+    MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
+    # Spare disk ratio for extra safety
+    MYSQL_BACKUP_SIZE_RATIO=1.2
+
+    # Shall we upgrade mysql data directory during the stack upgrade?
+    if [ "$mariadb_do_major_upgrade" = "auto" ]; then
+        ret=$(is_mysql_upgrade_needed)
+        if [ $ret = "1" ]; then
+            DO_MYSQL_UPGRADE=1
+        else
+            DO_MYSQL_UPGRADE=0
+        fi
+        echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
+    elif [ "$mariadb_do_major_upgrade" = "no" ]; then
+        DO_MYSQL_UPGRADE=0
+    else
+        DO_MYSQL_UPGRADE=1
+    fi
+
+    if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+        if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+
+            if [ -d "$MYSQL_BACKUP_DIR" ]; then
+                echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
+                exit 1
+            fi
+            mkdir "$MYSQL_BACKUP_DIR"
+            if [ $? -ne 0 ]; then
+                echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
+                exit 1
+            fi
+
+            # the /root/.my.cnf is needed because we set the mysql root
+            # password from liberty onwards
+            backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
+            # While not ideal, this step allows us to calculate exactly how much space the dump
+            # will need. Our main goal here is avoiding any chance of corruption due to disk space
+            # exhaustion
+            backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
+            database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
+            free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
+
+            # we need at least space for a new mysql database + dump of the existing one,
+            # times a small factor for additional safety room
+            # note: bash doesn't do floating point math or floats in if statements,
+            # so use python to apply the ratio and cast it back to integer
+            required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
+            if [ $required_space -ge $free_space ]; then
+                echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
+                exit 1
+            fi
+        fi
+    fi
+}
+
+check_python_rpm()
+{
+    # If for some reason rpm-python are missing we want to error out early enough
+    if ! rpm -q rpm-python &> /dev/null; then
+        echo_error "ERROR: upgrade cannot start without rpm-python installed"
+        exit 1
+    fi
+}
+
+check_clean_cluster()
+{
+    if crm_mon -1 | grep -A3 Failed; then
+        echo_error "ERROR: upgrade cannot start with failed resources on the cluster. Clean them up before starting: pcs resource cleanup."
+        exit 1
+    fi
+}
+
+check_galera_root_password()
+{
+    # BZ: 1357112
+    if [ ! -e /root/.my.cnf ]; then
+        echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
+        exit 1
+    fi
+}
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
index 0b702630..e81ca086 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
@@ -4,11 +4,12 @@ set -eu
 
 cluster_sync_timeout=1800
 
-if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
-    echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
-    exit 1
-fi
-
+check_cluster
+check_pcsd
+check_clean_cluster
+check_python_rpm
+check_galera_root_password
+check_disk_for_mysql_dump
 
 # We want to disable fencing during the cluster --stop as it might fence
 # nodes where a service fails to stop, which could be fatal during an upgrade
@@ -17,12 +18,6 @@ fi
 STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
 pcs property set stonith-enabled=false
 
-# If for some reason rpm-python are missing we want to error out early enough
-if ! rpm -q rpm-python &> /dev/null; then
-    echo_error "ERROR: upgrade cannot start without rpm-python installed"
-    exit 1
-fi
-
 # In case the mysql package is updated, the database on disk must be
 # upgraded as well. This typically needs to happen during major
 # version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
@@ -35,59 +30,8 @@ fi
 # on mysql package versionning, but this can be overriden manually
 # to support specific upgrade scenario
 
-# Where to backup current database if mysql need to be upgraded
-MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
-MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
-# Spare disk ratio for extra safety
-MYSQL_BACKUP_SIZE_RATIO=1.2
-
-# Shall we upgrade mysql data directory during the stack upgrade?
-if [ "$mariadb_do_major_upgrade" = "auto" ]; then
-    ret=$(is_mysql_upgrade_needed)
-    if [ $ret = "1" ]; then
-        DO_MYSQL_UPGRADE=1
-    else
-        DO_MYSQL_UPGRADE=0
-    fi
-    echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
-elif [ "$mariadb_do_major_upgrade" = "no" ]; then
-    DO_MYSQL_UPGRADE=0
-else
-    DO_MYSQL_UPGRADE=1
-fi
-
 if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
     if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-        if [ -d "$MYSQL_BACKUP_DIR" ]; then
-            echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
-            exit 1
-        fi
-        mkdir "$MYSQL_BACKUP_DIR"
-        if [ $? -ne 0 ]; then
-            echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
-            exit 1
-        fi
-
-        # the /root/.my.cnf is needed because we set the mysql root
-        # password from liberty onwards
-        backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
-        # While not ideal, this step allows us to calculate exactly how much space the dump
-        # will need. Our main goal here is avoiding any chance of corruption due to disk space
-        # exhaustion
-        backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
-        database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
-        free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
-        # we need at least space for a new mysql database + dump of the existing one,
-        # times a small factor for additional safety room
-        # note: bash doesn't do floating point math or floats in if statements,
-        # so use python to apply the ratio and cast it back to integer
-        required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
-        if [ $required_space -ge $free_space ]; then
-            echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
-            exit 1
-        fi
-
         mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
         cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
     fi
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index e030ca05..7244f949 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -75,6 +75,7 @@ resources:
             params:
               MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
           - get_file: pacemaker_common_functions.sh
+          - get_file: major_upgrade_check.sh
           - get_file: major_upgrade_pacemaker_migrations.sh
           - get_file: major_upgrade_controller_pacemaker_1.sh
 