Diffstat (limited to 'extraconfig/tasks/major_upgrade_pacemaker_migrations.sh')
-rw-r--r-- | extraconfig/tasks/major_upgrade_pacemaker_migrations.sh | 33 |
1 file changed, 24 insertions, 9 deletions
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
index d974bb79..df87c93f 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
@@ -109,7 +109,7 @@ function services_to_migrate {
 #    during the conversion
 # 2. Remove all the colocation constraints and then the ordering constraints, except the
 #    ones related to haproxy/VIPs which exist in Newton as well
-# 3. Take the cluster out of maintenance-mode and do a resource cleanup
+# 3. Take the cluster out of maintenance-mode
 # 4. Remove all the resources that won't be managed by pacemaker in newton. The
 #    outcome will be
 #    that they are stopped and removed from pacemakers control
@@ -117,13 +117,9 @@ function services_to_migrate {
 function migrate_full_to_ng_ha {
     if [[ -n $(pcmk_running) ]]; then
         pcs property set maintenance-mode=true
-        # We are making sure here that the property has propagated everywhere
-        if ! timeout -k 10 300 crm_resource --wait; then
-            echo_error "ERROR: cluster remained unstable after setting maintenance-mode for more than 300 seconds, exiting."
-            exit 1
-        fi
-        # First we go through all the colocation constraints (except the ones we want to keep, i.e. the haproxy/ip ones)
-        # and we remove those
+
+        # First we go through all the colocation constraints (except the ones
+        # we want to keep, i.e. the haproxy/ip ones) and we remove those
         COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
         for constraint in $COL_CONSTRAINTS; do
            log_debug "Deleting colocation constraint $constraint from CIB"
@@ -158,7 +154,7 @@ function migrate_full_to_ng_ha {
                 fi
                 pcs resource delete --force "$resource"
             else
-                log_debug "Service $service not found as a pacemaker resource, not trying to delete."
+                log_debug "Service $resource not found as a pacemaker resource, not trying to delete."
             fi
         done
 
@@ -173,3 +169,22 @@ function migrate_full_to_ng_ha {
         fi
     fi
 }
+
+# This function will make sure that the rabbitmq ha policies are converted from mitaka to newton
+# In mitaka we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}"
+# In newton we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}"
+# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances)
+# Note that changing an attribute like this makes the rabbitmq resource restart
+function rabbitmq_mitaka_newton_upgrade {
+    if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then
+        # Number of controller is obtained by counting how many hostnames we
+        # have in controller_node_names hiera key
+        nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
+        nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
+        if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
+            echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues"
+            exit 1
+        fi
+        pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
+    fi
+}
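
For reference, here is a minimal sketch of the CEIL(N/2) arithmetic that the new rabbitmq_mitaka_newton_upgrade function applies when it rewrites the ha-params policy value. The node_list variable below is illustrative only and not part of the patch; the patch itself reads the controller hostnames from the controller_node_names hiera key.

    #!/bin/bash
    # Sketch only: compute CEIL(N/2) queues from a comma-separated controller list,
    # mirroring the nr_controllers/nr_queues arithmetic in the patch above.
    node_list="ctrl-0,ctrl-1,ctrl-2"   # illustrative stand-in for `hiera controller_node_names`

    # N = number of commas + 1 (same counting trick as the patch)
    nr_controllers=$(($(echo -n "$node_list" | grep -o "," | wc -l) + 1))

    # CEIL(N/2) via integer division plus the remainder
    nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))

    echo "controllers=$nr_controllers ha-params=$nr_queues"

With three controllers this prints ha-params=2, and with a single controller it prints ha-params=1, which is why the patch only sanity-checks that the result stays in the range 0 < nr_queues <= nr_controllers before calling pcs resource update.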