Diffstat (limited to 'extraconfig/tasks')
-rwxr-xr-x  extraconfig/tasks/major_upgrade_ceph_mon.sh                    |  25
-rw-r--r--  extraconfig/tasks/major_upgrade_ceph_storage.sh                |   2
-rwxr-xr-x  extraconfig/tasks/major_upgrade_check.sh                       | 104
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh      |  99
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh      |  43
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml                 |  37
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml (renamed from extraconfig/tasks/major_upgrade_pacemaker_init.yaml) |  78
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh        | 113
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml    |  12
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh                | 281
-rwxr-xr-x  extraconfig/tasks/pacemaker_resource_restart.sh                |   6
11 files changed, 585 insertions, 215 deletions
diff --git a/extraconfig/tasks/major_upgrade_ceph_mon.sh b/extraconfig/tasks/major_upgrade_ceph_mon.sh
index 38befbbf..21a2b5bc 100755
--- a/extraconfig/tasks/major_upgrade_ceph_mon.sh
+++ b/extraconfig/tasks/major_upgrade_ceph_mon.sh
@@ -17,6 +17,21 @@ if ! [[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then
exit 0
fi
+CEPH_STATUS=$(ceph health | awk '{print $1}')
+if [ ${CEPH_STATUS} = HEALTH_ERR ]; then
+ echo ERROR: Ceph cluster status is HEALTH_ERR, cannot be upgraded
+ exit 1
+fi
+
+# Useful when upgrading with fewer OSDs than the replica size
+if [ "${ignore_ceph_upgrade_warnings:-false}" != "true" ]; then
+    # Escape the $(...) so the inner shell re-reads the status on every
+    # iteration instead of baking in the value captured above.
+    timeout 300 bash -c "while [ \$(ceph health | awk '{print \$1}') != HEALTH_OK ]; do
+        echo WARNING: Waiting for Ceph cluster status to go HEALTH_OK;
+        sleep 30;
+    done"
+fi
+
MON_PID=$(pidof ceph-mon)
MON_ID=$(hostname -s)
@@ -29,7 +44,7 @@ timeout 60 bash -c "while kill -0 ${MON_PID} 2> /dev/null; do
done"
# Update to Jewel
-yum -y -q update ceph-mon
+yum -y -q update ceph-mon ceph
# Restart/Exit if not on Jewel, only in that case we need the changes
UPDATED_VERSION=$(ceph --version | awk '{print $3}')
@@ -37,11 +52,9 @@ if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then
echo WARNING: Ceph was not upgraded, restarting daemons
service ceph start mon.${MON_ID}
elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
- echo INFO: Ceph was upgraded to Jewel
-
# RPM could own some of these but we can't take risks on the pre-existing files
for d in /var/lib/ceph/mon /var/log/ceph /var/run/ceph /etc/ceph; do
- chown -R ceph:ceph $d
+ chown -R ceph:ceph $d || echo WARNING: chown of $d failed
done
# Replay udev events with newer rules
@@ -54,9 +67,11 @@ elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
# Wait for daemon to be back in the quorum
timeout 300 bash -c "until (ceph quorum_status | jq .quorum_names | grep -sq ${MON_ID}); do
- echo Waiting for mon.${MON_ID} to re-join quorum;
+ echo WARNING: Waiting for mon.${MON_ID} to re-join quorum;
sleep 10;
done"
+
+ echo INFO: Ceph was upgraded to Jewel
else
echo ERROR: Ceph was upgraded to an unknown release, daemon is stopped, need manual intervention
exit 1
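
Note (not part of the diff): as an illustration of the health-wait loop above, a minimal standalone sketch that re-reads the cluster status on every iteration; it only assumes the ceph CLI is on the PATH.

#!/bin/bash
# Sketch only: wait up to 300 seconds for Ceph to report HEALTH_OK, polling
# every 30 seconds, and return non-zero if the cluster never recovers.
wait_for_ceph_health_ok() {
    local deadline=$(( $(date +%s) + 300 ))
    local status
    while (( $(date +%s) < deadline )); do
        status=$(ceph health | awk '{print $1}')
        if [ "$status" = "HEALTH_OK" ]; then
            return 0
        fi
        echo "WARNING: Ceph status is $status, waiting for HEALTH_OK"
        sleep 30
    done
    return 1
}
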
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
index 03a1c1c2..dc80a724 100644
--- a/extraconfig/tasks/major_upgrade_ceph_storage.sh
+++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh
@@ -63,7 +63,7 @@ if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then
elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
# RPM could own some of these but we can't take risks on the pre-existing files
for d in /var/lib/ceph/osd /var/log/ceph /var/run/ceph /etc/ceph; do
- chown -R ceph:ceph $d
+ chown -R ceph:ceph $d || echo WARNING: chown of $d failed
done
# Replay udev events with newer rules
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
new file mode 100755
index 00000000..dc7ec71a
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_check.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+set -eu
+
+check_cluster()
+{
+ if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
+ echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
+ exit 1
+ fi
+}
+
+check_pcsd()
+{
+ if pcs status 2>&1 | grep -E 'Offline'; then
+ echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
+ exit 1
+ fi
+}
+
+check_disk_for_mysql_dump()
+{
+ # Where to back up the current database if mysql needs to be upgraded
+ MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
+ MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
+ # Spare disk ratio for extra safety
+ MYSQL_BACKUP_SIZE_RATIO=1.2
+
+ # Shall we upgrade mysql data directory during the stack upgrade?
+ if [ "$mariadb_do_major_upgrade" = "auto" ]; then
+ ret=$(is_mysql_upgrade_needed)
+ if [ $ret = "1" ]; then
+ DO_MYSQL_UPGRADE=1
+ else
+ DO_MYSQL_UPGRADE=0
+ fi
+ echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
+ elif [ "$mariadb_do_major_upgrade" = "no" ]; then
+ DO_MYSQL_UPGRADE=0
+ else
+ DO_MYSQL_UPGRADE=1
+ fi
+
+ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+
+ if [ -d "$MYSQL_BACKUP_DIR" ]; then
+ echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
+ exit 1
+ fi
+ mkdir "$MYSQL_BACKUP_DIR"
+ if [ $? -ne 0 ]; then
+ echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
+ exit 1
+ fi
+
+ # the /root/.my.cnf is needed because we set the mysql root
+ # password from liberty onwards
+ backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
+ # While not ideal, this step allows us to calculate exactly how much space the dump
+ # will need. Our main goal here is avoiding any chance of corruption due to disk space
+ # exhaustion
+ backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
+ database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
+ free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
+
+ # we need at least space for a new mysql database + dump of the existing one,
+ # times a small factor for additional safety room
+ # note: bash doesn't do floating point math or floats in if statements,
+ # so use python to apply the ratio and cast it back to integer
+ required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
+ if [ $required_space -ge $free_space ]; then
+ echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
+ exit 1
+ fi
+ fi
+ fi
+}
+
+check_python_rpm()
+{
+ # If for some reason rpm-python is missing we want to error out early enough
+ if ! rpm -q rpm-python &> /dev/null; then
+ echo_error "ERROR: upgrade cannot start without rpm-python installed"
+ exit 1
+ fi
+}
+
+check_clean_cluster()
+{
+ if crm_mon -1 | grep -A3 Failed; then
+ echo_error "ERROR: upgrade cannot start with failed resources on the cluster. Clean them up before starting: pcs resource cleanup."
+ exit 1
+ fi
+}
+
+check_galera_root_password()
+{
+ # BZ: 1357112
+ if [ ! -e /root/.my.cnf ]; then
+ echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
+ exit 1
+ fi
+}
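
Note (not part of the diff): a worked example of the space calculation in check_disk_for_mysql_dump. The sizes are made up; only the arithmetic matters: a 2 GiB data directory plus a 1.5 GiB dump, times the 1.2 safety ratio, needs roughly 4.2 GiB free in the backup directory.

#!/bin/bash
# Sketch only: same arithmetic as check_disk_for_mysql_dump, with hard-coded
# example sizes instead of the mysqldump/du/df measurements.
database_size=$((2 * 1024 * 1024 * 1024))        # 2 GiB data directory
backup_size=$((3 * 1024 * 1024 * 1024 / 2))      # 1.5 GiB dump
required_space=$(python -c "print(int(($database_size + $backup_size) * 1.2))")
echo "$required_space"    # roughly 4509715660 bytes, about 4.2 GiB
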
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
index 0b702630..2490ce27 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
@@ -4,11 +4,12 @@ set -eu
cluster_sync_timeout=1800
-if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
- echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
- exit 1
-fi
-
+check_cluster
+check_pcsd
+check_clean_cluster
+check_python_rpm
+check_galera_root_password
+check_disk_for_mysql_dump
# We want to disable fencing during the cluster --stop as it might fence
# nodes where a service fails to stop, which could be fatal during an upgrade
@@ -17,12 +18,22 @@ fi
STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
pcs property set stonith-enabled=false
-# If for some reason rpm-python are missing we want to error out early enough
-if ! rpm -q rpm-python &> /dev/null; then
- echo_error "ERROR: upgrade cannot start without rpm-python installed"
- exit 1
+# Migrate to HA NG
+if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ migrate_full_to_ng_ha
fi
+# After migrating the cluster to HA-NG, the services that are no longer under
+# pacemaker's control are still up and running. We need to stop them explicitly,
+# otherwise during the yum upgrade the rpm %post sections will try to do a
+# "systemctl try-restart <service>", which is going to take a long time because
+# rabbit is down. With the services stopped, systemctl try-restart is a no-op.
+
+for service in $(services_to_migrate); do
+    manage_systemd_service stop "${service%%-clone}"
+    check_resource_systemd "${service%%-clone}" stopped 600
+done
+
# In case the mysql package is updated, the database on disk must be
# upgraded as well. This typically needs to happen during major
# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
@@ -35,75 +46,16 @@ fi
# on mysql package versionning, but this can be overriden manually
# to support specific upgrade scenario
-# Where to backup current database if mysql need to be upgraded
-MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
-MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
-# Spare disk ratio for extra safety
-MYSQL_BACKUP_SIZE_RATIO=1.2
-
-# Shall we upgrade mysql data directory during the stack upgrade?
-if [ "$mariadb_do_major_upgrade" = "auto" ]; then
- ret=$(is_mysql_upgrade_needed)
- if [ $ret = "1" ]; then
- DO_MYSQL_UPGRADE=1
- else
- DO_MYSQL_UPGRADE=0
- fi
- echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
-elif [ "$mariadb_do_major_upgrade" = "no" ]; then
- DO_MYSQL_UPGRADE=0
-else
- DO_MYSQL_UPGRADE=1
-fi
-
if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d "$MYSQL_BACKUP_DIR" ]; then
- echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
- exit 1
- fi
- mkdir "$MYSQL_BACKUP_DIR"
- if [ $? -ne 0 ]; then
- echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
- exit 1
- fi
-
- # the /root/.my.cnf is needed because we set the mysql root
- # password from liberty onwards
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- # While not ideal, this step allows us to calculate exactly how much space the dump
- # will need. Our main goal here is avoiding any chance of corruption due to disk space
- # exhaustion
- backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
- database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
- free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
- # we need at least space for a new mysql database + dump of the existing one,
- # times a small factor for additional safety room
- # note: bash doesn't do floating point math or floats in if statements,
- # so use python to apply the ratio and cast it back to integer
- required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
- if [ $required_space -ge $free_space ]; then
- echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
- exit 1
- fi
-
mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
fi
- pcs resource disable httpd
- check_resource httpd stopped 1800
- pcs resource disable openstack-core
- check_resource openstack-core stopped 1800
pcs resource disable redis
check_resource redis stopped 600
- pcs resource disable mongod
- check_resource mongod stopped 600
pcs resource disable rabbitmq
check_resource rabbitmq stopped 600
- pcs resource disable memcached
- check_resource memcached stopped 600
pcs resource disable galera
check_resource galera stopped 600
# Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
@@ -115,6 +67,7 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
pcs cluster stop --all
fi
+
# Swift isn't controled by pacemaker
systemctl_swift stop
@@ -198,11 +151,5 @@ fi
# Pin messages sent to compute nodes to kilo, these will be upgraded later
crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
-# LP: 1615035, required only for M/N upgrade.
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
+
+crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
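
Note (not part of the diff): a small illustration of the ${service%%-clone} expansion used in the stop loop above. The names returned by services_to_migrate are pacemaker clone resource names; stripping the -clone suffix yields the systemd unit name passed to systemctl.

#!/bin/bash
# Sketch only: strip the pacemaker "-clone" suffix to get the systemd unit name.
for service in openstack-nova-api-clone neutron-server-clone; do
    echo "pacemaker resource ${service} -> systemd unit ${service%%-clone}"
done
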
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
index bc708cce..6bb2fa73 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
@@ -3,10 +3,10 @@
set -eu
cluster_form_timeout=600
-cluster_settle_timeout=600
+cluster_settle_timeout=1800
galera_sync_timeout=600
-if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+if [[ -n $(is_bootstrap_node) ]]; then
pcs cluster start --all
tstart=$(date +%s)
@@ -26,14 +26,14 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
pcs resource enable $vip
- check_resource $vip started 60
+ check_resource_pacemaker $vip started 60
done
+fi
- pcs resource enable galera
- check_resource galera started 600
- pcs resource enable mongod
- check_resource mongod started 600
+start_or_enable_service galera
+check_resource galera started 600
+if [[ -n $(is_bootstrap_node) ]]; then
tstart=$(date +%s)
while ! clustercheck; do
sleep 5
@@ -53,19 +53,22 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
keystone-manage db_sync
neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
nova-manage db sync
- nova-manage api_db sync
-
- pcs resource enable memcached
- check_resource memcached started 600
- pcs resource enable rabbitmq
- check_resource rabbitmq started 600
- pcs resource enable redis
- check_resource redis started 600
- pcs resource enable openstack-core
- check_resource openstack-core started 1800
- pcs resource enable httpd
- check_resource httpd started 1800
+ #TODO(marios):someone from sahara needs to check this:
+ # sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
fi
-# Swift isn't controled by heat
+start_or_enable_service rabbitmq
+check_resource rabbitmq started 600
+start_or_enable_service redis
+check_resource redis started 600
+
+# Swift isn't controlled by pacemaker
systemctl_swift start
+
+# We need to start the systemd services we explicitly stopped in step _1.sh
+# FIXME: Should we let puppet during the convergence step do the service enabling or
+# should we add it here?
+for service in $(services_to_migrate); do
+    manage_systemd_service start "${service%%-clone}"
+    check_resource_systemd "${service%%-clone}" started 600
+done
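
Note (not part of the diff): for reference, a minimal sketch of the galera_sync_timeout wait that the hunk above leaves unchanged. It assumes clustercheck (the Galera health-check helper used on the controllers) is on the PATH and that echo_error from pacemaker_common_functions.sh has been loaded.

#!/bin/bash
# Sketch only: wait until the local galera node reports synced, bounded by
# galera_sync_timeout seconds.
galera_sync_timeout=600
tstart=$(date +%s)
while ! clustercheck; do
    sleep 5
    tnow=$(date +%s)
    if (( tnow - tstart > galera_sync_timeout )); then
        echo_error "ERROR: galera sync timed out"
        exit 1
    fi
done
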
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index c2e14880..7244f949 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -1,16 +1,8 @@
-heat_template_version: 2014-10-16
+heat_template_version: 2016-10-14
description: 'Upgrade for Pacemaker deployments'
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
input_values:
type: json
@@ -26,6 +18,10 @@ parameters:
constraints:
- allowed_values: ['auto', 'yes', 'no']
default: 'auto'
+ IgnoreCephUpgradeWarnings:
+ type: boolean
+ default: false
+ description: If enabled, the Ceph upgrade will be forced even if the cluster or PG status is not clean
resources:
# TODO(jistr): for Mitaka->Newton upgrades and further we can use
@@ -36,14 +32,24 @@ resources:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: {get_file: major_upgrade_ceph_mon.sh}
+ config:
+ list_join:
+ - ''
+ - - str_replace:
+ template: |
+ #!/bin/bash
+ ignore_ceph_upgrade_warnings='IGNORE_CEPH_UPGRADE_WARNINGS'
+ params:
+ IGNORE_CEPH_UPGRADE_WARNINGS: {get_param: IgnoreCephUpgradeWarnings}
+ - get_file: major_upgrade_ceph_mon.sh
CephMonUpgradeDeployment:
type: OS::Heat::SoftwareDeploymentGroup
properties:
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: CephMonUpgradeConfig}
input_values: {get_param: input_values}
+ update_policy:
batch_create:
max_batch_size: 1
rolling_update:
@@ -69,6 +75,7 @@ resources:
params:
MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_check.sh
- get_file: major_upgrade_pacemaker_migrations.sh
- get_file: major_upgrade_controller_pacemaker_1.sh
@@ -76,7 +83,7 @@ resources:
type: OS::Heat::SoftwareDeploymentGroup
depends_on: CephMonUpgradeDeployment
properties:
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
input_values: {get_param: input_values}
@@ -90,7 +97,7 @@ resources:
BlockStorageUpgradeDeployment:
type: OS::Heat::SoftwareDeploymentGroup
properties:
- servers: {get_param: blockstorage_servers}
+ servers: {get_param: [servers, BlockStorage]}
config: {get_resource: BlockStorageUpgradeConfig}
input_values: {get_param: input_values}
@@ -109,7 +116,7 @@ resources:
type: OS::Heat::SoftwareDeploymentGroup
depends_on: BlockStorageUpgradeDeployment
properties:
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
index 623549a0..f6aa3066 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
@@ -3,15 +3,7 @@ description: 'Upgrade for Pacemaker deployments'
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
input_values:
type: json
@@ -43,45 +35,12 @@ resources:
- "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- get_param: UpgradeInitCommand
- UpgradeInitControllerDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: controller_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- UpgradeInitComputeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: compute_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- UpgradeInitBlockStorageDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: blockstorage_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- UpgradeInitObjectStorageDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: objectstorage_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- UpgradeInitCephStorageDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: cephstorage_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
# TODO(jistr): for Mitaka->Newton upgrades and further we can use
# map_merge with input_values instead of feeding params into scripts
# via str_replace on bash snippets
+ # FIXME(shardy) we have hard-coded per-role *ScriptConfig's here
+ # Would be better to have a common config for all roles
ComputeDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
@@ -97,35 +56,32 @@ resources:
UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- get_file: major_upgrade_compute.sh
- ComputeDeliverUpgradeScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: compute_servers}
- config: {get_resource: ComputeDeliverUpgradeScriptConfig}
- input_values: {get_param: input_values}
-
ObjectStorageDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: {get_file: major_upgrade_object_storage.sh}
- ObjectStorageDeliverUpgradeScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: objectstorage_servers}
- config: {get_resource: ObjectStorageDeliverUpgradeScriptConfig}
- input_values: {get_param: input_values}
-
CephStorageDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: {get_file: major_upgrade_ceph_storage.sh}
- CephStorageDeliverUpgradeScriptDeployment:
+{% for role in roles %}
+ UpgradeInit{{role.name}}Deployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ {% if not role.name in ['Controller', 'BlockStorage'] %}
+ {{role.name}}DeliverUpgradeScriptDeployment:
type: OS::Heat::SoftwareDeploymentGroup
properties:
- servers: {get_param: cephstorage_servers}
- config: {get_resource: CephStorageDeliverUpgradeScriptConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}DeliverUpgradeScriptConfig}
input_values: {get_param: input_values}
+ {% endif %}
+{% endfor %}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
index 7ed7012d..b8c5321b 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
@@ -56,3 +56,116 @@ function is_mysql_upgrade_needed {
fi
echo "1"
}
+
+# This function returns the list of services to be migrated away from pacemaker
+# and to systemd. The reason to have these services in a separate function is because
+# this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2}
+# and in the function to migrate the cluster from full HA to HA NG
+function services_to_migrate {
+ # The following PCMK resources are the ones we are going to delete
+ PCMK_RESOURCE_TODELETE="
+ httpd-clone
+ memcached-clone
+ mongod-clone
+ neutron-dhcp-agent-clone
+ neutron-l3-agent-clone
+ neutron-metadata-agent-clone
+ neutron-netns-cleanup-clone
+ neutron-openvswitch-agent-clone
+ neutron-ovs-cleanup-clone
+ neutron-server-clone
+ openstack-aodh-evaluator-clone
+ openstack-aodh-listener-clone
+ openstack-aodh-notifier-clone
+ openstack-ceilometer-api-clone
+ openstack-ceilometer-central-clone
+ openstack-ceilometer-collector-clone
+ openstack-ceilometer-notification-clone
+ openstack-cinder-api-clone
+ openstack-cinder-scheduler-clone
+ openstack-glance-api-clone
+ openstack-glance-registry-clone
+ openstack-gnocchi-metricd-clone
+ openstack-gnocchi-statsd-clone
+ openstack-heat-api-cfn-clone
+ openstack-heat-api-clone
+ openstack-heat-api-cloudwatch-clone
+ openstack-heat-engine-clone
+ openstack-nova-api-clone
+ openstack-nova-conductor-clone
+ openstack-nova-consoleauth-clone
+ openstack-nova-novncproxy-clone
+ openstack-nova-scheduler-clone
+ openstack-sahara-api-clone
+ openstack-sahara-engine-clone
+ "
+ echo $PCMK_RESOURCE_TODELETE
+}
+
+# This function will migrate a mitaka system where all the resources are managed
+# via pacemaker to a newton setup where only a few services will be managed by pacemaker
+# On a high-level it will operate as follows:
+# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
+# during the conversion
+# 2. Remove all the colocation constraints and then the ordering constraints, except the
+# ones related to haproxy/VIPs which exist in Newton as well
+# 3. Remove all the resources that won't be managed by pacemaker in newton. Note that they
+# will show up as ORPHANED but they will keep running normally via systemd. They will be
+# enabled to start at boot by puppet during the converge step
+# 4. Take the cluster out of maintenance-mode and do a resource cleanup
+function migrate_full_to_ng_ha {
+ if [[ -n $(pcmk_running) ]]; then
+ pcs property set maintenance-mode=true
+ # We are making sure here that the property has propagated everywhere
+ if ! timeout -k 10 300 crm_resource --wait; then
+ echo_error "ERROR: cluster remained unstable after setting maintenance-mode for more than 300 seconds, exiting."
+ exit 1
+ fi
+ # First we go through all the colocation constraints (except the ones we want to keep, i.e. the haproxy/ip ones)
+ # and we remove those
+ COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
+ for constraint in $COL_CONSTRAINTS; do
+ log_debug "Deleting colocation constraint $constraint from CIB"
+ pcs constraint remove "$constraint"
+ done
+
+ # Now we kill all the ordering constraints (except the haproxy/ip ones)
+ ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
+ for constraint in $ORD_CONSTRAINTS; do
+ log_debug "Deleting ordering constraint $constraint from CIB"
+ pcs constraint remove "$constraint"
+ done
+
+ # At this stage there are no constraints whatsoever except the haproxy/ip ones
+ # which we want to keep. We now delete each resource that will move to systemd
+ # Note that the corresponding systemd resource will stay running, which means that
+ # later when we do the "yum update", things will be a bit slower because each
+ # "systemctl try-restart <service>" is not a no-op any longer because the service is up
+ # and running and it will be restarted with rabbitmq being down.
+ PCS_STATUS_OUTPUT="$(pcs status)"
+ for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
+ if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
+ log_debug "Deleting $resource from the CIB"
+
+ # We need to add --force because the cluster is in maintenance mode and the resource
+ # is unmanaged. The if serves to make this idempotent
+ pcs resource delete --force "$resource"
+ else
+ log_debug "Service $service not found as a pacemaker resource, not trying to delete."
+ fi
+ done
+
+ # At this stage all the pacemaker resources are removed from the CIB. Once we remove the
+ # maintenance-mode those systemd resources will keep on running. They shall be systemd enabled
+ # via the puppet converge step later on
+ pcs property set maintenance-mode=false
+ # We need to do a pcs resource cleanup here + crm_resource --wait to make sure the
+ # cluster is in a clean state before we stop everything, upgrade and restart everything
+ pcs resource cleanup
+ # We are making sure here that the cluster is stable before proceeding
+ if ! timeout -k 10 600 crm_resource --wait; then
+ echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
+ exit 1
+ fi
+ fi
+}
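
Note (not part of the diff): to make the constraint-removal pipeline in migrate_full_to_ng_ha easier to follow, here it is run against a hand-written sample of "pcs config show" output. The sample lines are assumed and real output varies between pcs versions; the point is how the chain extracts the IDs that "pcs constraint remove" expects while skipping the ip/haproxy constraints we keep.

#!/bin/bash
# Sketch only: same sed/grep/awk/cut chain as above, fed with sample text.
sample='Colocation Constraints:
  memcached-clone with haproxy-clone (score:INFINITY) (id:colocation-memcached-haproxy-INFINITY)
  ip-192.0.2.10 with haproxy-clone (score:INFINITY) (id:colocation-ip-192.0.2.10-haproxy-INFINITY)'
echo "$sample" \
  | sed -n '/^Colocation Constraints:$/,/^$/p' \
  | grep -v "Colocation Constraints:" \
  | egrep -v "ip-.*haproxy" \
  | awk '{print $NF}' | cut -f2 -d: | cut -f1 -d\)
# prints: colocation-memcached-haproxy-INFINITY
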
diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
index 9414ac19..91406fba 100644
--- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
+++ b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
@@ -4,15 +4,7 @@ description: >
Software-config for performing aodh data migration
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
input_values:
type: json
@@ -28,6 +20,6 @@ resources:
AodhMysqlMigrationScriptDeployment:
type: OS::Heat::SoftwareDeploymentGroup
properties:
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: AodhMysqlMigrationScriptConfig}
input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index 7d794c97..4f17b69a 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -2,51 +2,286 @@
set -eu
-function check_resource {
+DEBUG="true" # set false if the verbosity is a problem
+SCRIPT_NAME=$(basename $0)
+function log_debug {
+ if [[ $DEBUG = "true" ]]; then
+ echo "`date` $SCRIPT_NAME tripleo-upgrade $(facter hostname) $1"
+ fi
+}
+
+function is_bootstrap_node {
+ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ log_debug "Node is bootstrap"
+ echo "true"
+ fi
+}
+function check_resource_pacemaker {
if [ "$#" -ne 3 ]; then
- echo_error "ERROR: check_resource function expects 3 parameters, $# given"
- exit 1
+ echo_error "ERROR: check_resource function expects 3 parameters, $# given"
+ exit 1
fi
- service=$1
- state=$2
- timeout=$3
+ local service=$1
+ local state=$2
+ local timeout=$3
+
+ if [[ -z $(is_bootstrap_node) ]] ; then
+ log_debug "Node isn't bootstrap, skipping check for $service to be $state here "
+ return
+ else
+ log_debug "Node is bootstrap checking $service to be $state here"
+ fi
if [ "$state" = "stopped" ]; then
- match_for_incomplete='Started'
+ match_for_incomplete='Started'
else # started
- match_for_incomplete='Stopped'
+ match_for_incomplete='Stopped'
fi
nodes_local=$(pcs status | grep ^Online | sed 's/.*\[ \(.*\) \]/\1/g' | sed 's/ /\|/g')
if timeout -k 10 $timeout crm_resource --wait; then
- node_states=$(pcs status --full | grep "$service" | grep -v Clone | { egrep "$nodes_local" || true; } )
- if echo "$node_states" | grep -q "$match_for_incomplete"; then
- echo_error "ERROR: cluster finished transition but $service was not in $state state, exiting."
- exit 1
- else
- echo "$service has $state"
- fi
- else
- echo_error "ERROR: cluster remained unstable for more than $timeout seconds, exiting."
+ node_states=$(pcs status --full | grep "$service" | grep -v Clone | { egrep "$nodes_local" || true; } )
+ if echo "$node_states" | grep -q "$match_for_incomplete"; then
+ echo_error "ERROR: cluster finished transition but $service was not in $state state, exiting."
exit 1
+ else
+ echo "$service has $state"
+ fi
+ else
+ echo_error "ERROR: cluster remained unstable for more than $timeout seconds, exiting."
+ exit 1
+ fi
+
+}
+
+function pcmk_running {
+ if [[ $(systemctl is-active pacemaker) = "active" ]] ; then
+ echo "true"
+ fi
+}
+
+function is_systemd_unknown {
+ local service=$1
+ if [[ $(systemctl is-active "$service") = "unknown" ]]; then
+ log_debug "$service found to be unkown to systemd"
+ echo "true"
+ fi
+}
+
+function grep_is_cluster_controlled {
+ local service=$1
+ if [[ -n $(systemctl status $service -l | grep Drop-In -A 5 | grep pacemaker) ||
+ -n $(systemctl status $service -l | grep "Cluster Controlled $service") ]] ; then
+ log_debug "$service is pcmk managed from systemctl grep"
+ echo "true"
+ fi
+}
+
+
+function is_systemd_managed {
+ local service=$1
+ #if we have pcmk check to see if it is managed there
+ if [[ -n $(pcmk_running) ]]; then
+ if [[ -z $(pcs status --full | grep $service) && -z $(is_systemd_unknown $service) ]] ; then
+ log_debug "$service found to be systemd managed from pcs status"
+ echo "true"
+ fi
+ else
+ # if it is "unknown" to systemd, then it is pacemaker managed
+ if [[ -n $(is_systemd_unknown $service) ]] ; then
+ return
+ elif [[ -z $(grep_is_cluster_controlled $service) ]] ; then
+ echo "true"
+ fi
+ fi
+}
+
+function is_pacemaker_managed {
+ local service=$1
+ #if we have pcmk check to see if it is managed there
+ if [[ -n $(pcmk_running) ]]; then
+ if [[ -n $(pcs status --full | grep $service) ]]; then
+ log_debug "$service found to be pcmk managed from pcs status"
+ echo "true"
+ fi
+ else
+ # if it is unknown to systemd, then it is pcmk managed
+ if [[ -n $(is_systemd_unknown $service) ]]; then
+ echo "true"
+ elif [[ -n $(grep_is_cluster_controlled $service) ]] ; then
+ echo "true"
+ fi
+ fi
+}
+
+function is_managed {
+ local service=$1
+ if [[ -n $(is_pacemaker_managed $service) || -n $(is_systemd_managed $service) ]]; then
+ echo "true"
+ fi
+}
+
+function check_resource_systemd {
+
+ if [ "$#" -ne 3 ]; then
+ echo_error "ERROR: check_resource function expects 3 parameters, $# given"
+ exit 1
fi
+ local service=$1
+ local state=$2
+ local timeout=$3
+ local check_interval=3
+
+ if [ "$state" = "stopped" ]; then
+ match_for_incomplete='active'
+ else # started
+ match_for_incomplete='inactive'
+ fi
+
+ log_debug "Going to check_resource_systemd for $service to be $state"
+
+ #sanity check is systemd managed:
+ if [[ -z $(is_systemd_managed $service) ]]; then
+ echo "ERROR - $service not found to be systemd managed."
+ exit 1
+ fi
+
+ tstart=$(date +%s)
+ tend=$(( $tstart + $timeout ))
+ while (( $(date +%s) < $tend )); do
+ if [[ "$(systemctl is-active $service)" = $match_for_incomplete ]]; then
+ echo "$service not yet $state, sleeping $check_interval seconds."
+ sleep $check_interval
+ else
+ echo "$service is $state"
+ return
+ fi
+ done
+
+ echo "Timed out waiting for $service to go to $state after $timeout seconds"
+ exit 1
+}
+
+
+function check_resource {
+ local service=$1
+ local pcmk_managed=$(is_pacemaker_managed $service)
+ local systemd_managed=$(is_systemd_managed $service)
+
+ if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
+ log_debug "ERROR $service managed by both systemd and pcmk - SKIPPING"
+ return
+ fi
+
+ if [[ -n $pcmk_managed ]]; then
+ check_resource_pacemaker $@
+ return
+ elif [[ -n $systemd_managed ]]; then
+ check_resource_systemd $@
+ return
+ fi
+ log_debug "ERROR cannot check_resource for $service, not managed here?"
+}
+
+function manage_systemd_service {
+ local action=$1
+ local service=$2
+ log_debug "Going to systemctl $action $service"
+ systemctl $action $service
+}
+
+function manage_pacemaker_service {
+ local action=$1
+ local service=$2
+ # not if pacemaker isn't running!
+ if [[ -z $(pcmk_running) ]]; then
+ echo "$(facter hostname) pacemaker not active, skipping $action $service here"
+ elif [[ -n $(is_bootstrap_node) ]]; then
+ log_debug "Going to pcs resource $action $service"
+ pcs resource $action $service
+ fi
+}
+
+function stop_or_disable_service {
+ local service=$1
+ local pcmk_managed=$(is_pacemaker_managed $service)
+ local systemd_managed=$(is_systemd_managed $service)
+
+ if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
+ log_debug "Skipping stop_or_disable $service due to management conflict"
+ return
+ fi
+
+ log_debug "Stopping or disabling $service"
+ if [[ -n $pcmk_managed ]]; then
+ manage_pacemaker_service disable $service
+ return
+ elif [[ -n $systemd_managed ]]; then
+ manage_systemd_service stop $service
+ return
+ fi
+ log_debug "ERROR: $service not managed here?"
+}
+
+function start_or_enable_service {
+ local service=$1
+ local pcmk_managed=$(is_pacemaker_managed $service)
+ local systemd_managed=$(is_systemd_managed $service)
+
+ if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
+ log_debug "Skipping start_or_enable $service due to management conflict"
+ return
+ fi
+
+ log_debug "Starting or enabling $service"
+ if [[ -n $pcmk_managed ]]; then
+ manage_pacemaker_service enable $service
+ return
+ elif [[ -n $systemd_managed ]]; then
+ manage_systemd_service start $service
+ return
+ fi
+ log_debug "ERROR $service not managed here?"
+}
+
+function restart_service {
+ local service=$1
+ local pcmk_managed=$(is_pacemaker_managed $service)
+ local systemd_managed=$(is_systemd_managed $service)
+
+ if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
+ log_debug "ERROR $service managed by both systemd and pcmk - SKIPPING"
+ return
+ fi
+
+ log_debug "Restarting $service"
+ if [[ -n $pcmk_managed ]]; then
+ manage_pacemaker_service restart $service
+ return
+ elif [[ -n $systemd_managed ]]; then
+ manage_systemd_service restart $service
+ return
+ fi
+ log_debug "ERROR $service not managed here?"
}
function echo_error {
echo "$@" | tee /dev/fd2
}
+# swift is a special case because it is/was never handled by pacemaker
+# when stand-alone swift is used, only swift-proxy is running on controllers
function systemctl_swift {
services=( openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \
openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \
openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy )
- action=$1
+ local action=$1
case $action in
stop)
- services=$(systemctl | grep swift | grep running | awk '{print $1}')
+ services=$(systemctl | grep openstack-swift- | grep running | awk '{print $1}')
;;
start)
enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml 'enable_swift_storage')
@@ -54,9 +289,11 @@ function systemctl_swift {
services=( openstack-swift-proxy )
fi
;;
- *) services=() ;; # for safetly, should never happen
+ *) echo "Unknown action $action passed to systemctl_swift"
+ exit 1
+ ;; # shouldn't ever happen...
esac
- for S in ${services[@]}; do
- systemctl $action $S
+ for service in ${services[@]}; do
+ manage_systemd_service $action $service
done
}
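
Note (not part of the diff): a usage sketch of the helpers added above. In the deployed scripts this file is concatenated with the step scripts by the Heat template (list_join of get_file), so the explicit source below is only for the sketch. The start/stop wrappers pick pacemaker or systemd automatically, and check_resource dispatches to the matching wait function.

#!/bin/bash
# Sketch only: drive one service through a stop/start cycle regardless of
# whether pacemaker or systemd manages it on this node.
source pacemaker_common_functions.sh
stop_or_disable_service rabbitmq
check_resource rabbitmq stopped 600
start_or_enable_service rabbitmq
check_resource rabbitmq started 600
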
diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh
index fd1fd0dc..3da7efec 100755
--- a/extraconfig/tasks/pacemaker_resource_restart.sh
+++ b/extraconfig/tasks/pacemaker_resource_restart.sh
@@ -2,12 +2,9 @@
set -eux
-pacemaker_status=$(systemctl is-active pacemaker)
-
# Run if pacemaker is running, we're the bootstrap node,
# and we're updating the deployment (not creating).
-if [ "$pacemaker_status" = "active" -a \
- "$(hiera bootstrap_nodeid)" = "$(facter hostname)" ]; then
+if [[ -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
TIMEOUT=600
SERVICES_TO_RESTART="$(ls /var/lib/tripleo/pacemaker-restarts)"
@@ -25,5 +22,4 @@ if [ "$pacemaker_status" = "active" -a \
pcs resource restart --wait=$TIMEOUT $service
rm -f /var/lib/tripleo/pacemaker-restarts/$service
done
-
fi