path: root/extraconfig
Diffstat (limited to 'extraconfig')
-rw-r--r--  extraconfig/nova_metadata/krb-service-principals.yaml                        |   6
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml  |   4
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml              |  53
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration           | 131
-rw-r--r--  extraconfig/tasks/aodh_data_migration.sh                                     |  19
-rw-r--r--  extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml           |  62
-rwxr-xr-x  extraconfig/tasks/major_upgrade_check.sh                                     | 109
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh                    |  36
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh                    | 176
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh                    |  68
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh                    |  17
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh                    |   8
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh                    |  15
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml                               | 179
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh                      | 200
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml                  |  25
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp                | 103
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh                              |   9
-rw-r--r--  extraconfig/tasks/post_puppet_pacemaker.yaml                                 |   4
-rwxr-xr-x  extraconfig/tasks/run_puppet.sh                                              |   5
-rw-r--r--  extraconfig/tasks/ssh/host_public_key.yaml                                   |  42
-rw-r--r--  extraconfig/tasks/ssh/known_hosts_config.yaml                                |  36
-rw-r--r--  extraconfig/tasks/swift-ring-deploy.yaml                                     |  31
-rw-r--r--  extraconfig/tasks/swift-ring-update.yaml                                     |  42
-rw-r--r--  extraconfig/tasks/tripleo_upgrade_node.sh                                    |  14
-rwxr-xr-x  extraconfig/tasks/yum_update.sh                                              |  54
26 files changed, 300 insertions(+), 1148 deletions(-)
diff --git a/extraconfig/nova_metadata/krb-service-principals.yaml b/extraconfig/nova_metadata/krb-service-principals.yaml
index c66e6460..56d3cbc0 100644
--- a/extraconfig/nova_metadata/krb-service-principals.yaml
+++ b/extraconfig/nova_metadata/krb-service-principals.yaml
@@ -46,7 +46,7 @@ resources:
# Filter out null values and values that don't contain
# 'metadata_settings', get the values from that key and get the
# unique ones.
- expression: list($.data.where($ != null).where($.containsKey('metadata_settings')).metadata_settings.flatten().distinct())
+ expression: list(coalesce($.data, []).where($ != null).where($.containsKey('metadata_settings')).metadata_settings.flatten().distinct())
data: {get_param: RoleData}
# Generates entries for nova metadata with the following format:
@@ -57,7 +57,7 @@ resources:
properties:
value:
yaql:
- expression: let(fqdns => $.data.fqdns) -> dict($.data.metadata.where($ != null and $.type = 'vip').select([concat('managed_service_', $.service, $.network), concat($.service, '/', $fqdns.get($.network))]))
+ expression: let(fqdns => $.data.fqdns) -> dict(coalesce($.data.metadata, []).where($ != null and $.type = 'vip').select([concat('managed_service_', $.service, $.network), concat($.service, '/', $fqdns.get($.network))]))
data:
metadata: {get_attr: [IncomingMetadataSettings, value]}
fqdns:
@@ -72,7 +72,7 @@ resources:
properties:
value:
yaql:
- expression: dict($.data.where($ != null and $.type = 'node').select([$.service, $.network.replace('_', '')]).groupBy($[0], $[1]))
+ expression: dict(coalesce($.data, []).where($ != null and $.type = 'node').select([$.service, $.network.replace('_', '')]).groupBy($[0], $[1]))
data: {get_attr: [IncomingMetadataSettings, value]}
outputs:
diff --git a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
index c388358a..24557517 100644
--- a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
@@ -21,3 +21,7 @@ parameter_defaults:
rhel_reg_type: ""
rhel_reg_method: ""
rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.1-rpms"
+ rhel_reg_http_proxy_host: ""
+ rhel_reg_http_proxy_port: ""
+ rhel_reg_http_proxy_username: ""
+ rhel_reg_http_proxy_password: ""
diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
index fdf2e957..30a83550 100644
--- a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
@@ -45,6 +45,20 @@ parameters:
type: string
rhel_reg_sat_repo:
type: string
+ rhel_reg_http_proxy_host:
+ type: string
+ rhel_reg_http_proxy_port:
+ type: string
+ rhel_reg_http_proxy_username:
+ type: string
+ rhel_reg_http_proxy_password:
+ type: string
+ UpdateOnRHELRegistration:
+ type: boolean
+ default: false
+ description: |
+ When enabled, the system will perform a yum update after performing the
+ RHEL Registration process.
resources:
@@ -71,6 +85,10 @@ resources:
- name: REG_TYPE
- name: REG_METHOD
- name: REG_SAT_REPO
+ - name: REG_HTTP_PROXY_HOST
+ - name: REG_HTTP_PROXY_PORT
+ - name: REG_HTTP_PROXY_USERNAME
+ - name: REG_HTTP_PROXY_PASSWORD
config: {get_file: scripts/rhel-registration}
RHELRegistrationDeployment:
@@ -99,6 +117,10 @@ resources:
REG_TYPE: {get_param: rhel_reg_type}
REG_METHOD: {get_param: rhel_reg_method}
REG_SAT_REPO: {get_param: rhel_reg_sat_repo}
+ REG_HTTP_PROXY_HOST: {get_param: rhel_reg_http_proxy_host}
+ REG_HTTP_PROXY_PORT: {get_param: rhel_reg_http_proxy_port}
+ REG_HTTP_PROXY_USERNAME: {get_param: rhel_reg_http_proxy_username}
+ REG_HTTP_PROXY_PASSWORD: {get_param: rhel_reg_http_proxy_password}
RHELUnregistration:
type: OS::Heat::SoftwareConfig
@@ -118,6 +140,37 @@ resources:
input_values:
REG_METHOD: {get_param: rhel_reg_method}
+ YumUpdateConfigurationAfterRHELRegistration:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ set -x
+ num_updates=$(yum list -q updates | wc -l)
+ if [ "$num_updates" -eq "0" ]; then
+ echo "No packages require updating"
+ exit 0
+ fi
+ full_command="yum -q -y update"
+ echo "Running: $full_command"
+ result=$($full_command)
+ return_code=$?
+ echo "$result"
+ echo "yum return code: $return_code"
+ exit $return_code
+
+ UpdateDeploymentAfterRHELRegistration:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: RHELRegistrationDeployment
+ conditions:
+ update_requested: {get_param: UpdateOnRHELRegistration}
+ properties:
+ name: UpdateDeploymentAfterRHELRegistration
+ config: {get_resource: YumUpdateConfigurationAfterRHELRegistration}
+ server: {get_param: server}
+ actions: ['CREATE'] # Only do this on CREATE
+
outputs:
deploy_stdout:
description: Deployment reference, used to trigger puppet apply on changes
diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
index 2650a967..d14ed73f 100644
--- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
+++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
@@ -11,12 +11,20 @@ if [ -e $OK ] ; then
exit 0
fi
-retryCount=0
+retry_max_count=10
opts=
+config_opts=
attach_opts=
sat5_opts=
repos="repos --enable rhel-7-server-rpms"
satellite_repo=${REG_SAT_REPO}
+proxy_host=
+proxy_port=
+proxy_url=
+proxy_username=
+proxy_password=
+
+# process variables..
if [ -n "${REG_AUTO_ATTACH:-}" ]; then
opts="$opts --auto-attach"
@@ -97,28 +105,93 @@ if [ -n "${REG_TYPE:-}" ]; then
opts="$opts --type=$REG_TYPE"
fi
-function retry() {
- if [[ $retryCount < 3 ]]; then
- $@
- if ! [[ $? == 0 ]]; then
- retryCount=$(echo $retryCount + 1 | bc)
- echo "WARN: Failed to connect when running '$@', retrying..."
- retry $@
+# Proxy settings (host and port)
+if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then
+ proxy_host="${REG_HTTP_PROXY_HOST}"
+fi
+
+if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then
+ proxy_port="${REG_HTTP_PROXY_PORT}"
+fi
+
+# Proxy settings (user and password)
+if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then
+ proxy_username="${REG_HTTP_PROXY_USERNAME}"
+fi
+
+if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then
+ proxy_password="${REG_HTTP_PROXY_PASSWORD}"
+fi
+
+# Sanity Checks for proxy host/port/user/password
+if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then
+ if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then
+ # Good both values are not empty
+ proxy_url="http://${proxy_host}:${proxy_port}"
+ config_opts="--server.proxy_hostname=${proxy_host} --server.proxy_port=${proxy_port}"
+ sat5_opts="${sat5_opts} --proxy_hostname=${proxy_url}"
+ echo "RHSM Proxy set to: ${proxy_url}"
+ if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then
+ if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then
+ config_opts="${config_opts} --server.proxy_user=${proxy_username} --server.proxy_password=${proxy_password}"
+ sat5_opts="${sat5_opts} --proxyUser=${proxy_username} --proxyPassword=${proxy_password}"
+ else
+ echo "Warning: REG_HTTP_PROXY_PASSWORD cannot be null with non-empty REG_HTTP_PROXY_USERNAME! Skipping..."
+ proxy_username= ; proxy_password=
+ fi
+ else
+ if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then
+ echo "Warning: REG_HTTP_PROXY_USERNAME cannot be null with non-empty REG_HTTP_PROXY_PASSWORD! Skipping..."
+ proxy_username= ; proxy_password=
+ fi
+ fi
else
- retryCount=0
+ echo "Warning: REG_HTTP_PROXY_PORT cannot be null with non-empty REG_HTTP_PROXY_HOST! Skipping..."
+ proxy_host= ; proxy_port= ; proxy_url= ; proxy_username= ; proxy_password=
fi
- else
- echo "ERROR: Failed to connect after 3 attempts when running '$@'"
- exit 1
- fi
+else
+ if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then
+ echo "Warning: REG_HTTP_PROXY_HOST cannot be null with non-empty REG_HTTP_PROXY_PORT! Skipping..."
+ proxy_host= ; proxy_port= ; proxy_url= ; proxy_username= ; proxy_password=
+ fi
+fi
+
+function retry() {
+ # Inhibit -e since we want to retry without exiting..
+ set +e
+ # Retry delay (seconds)
+ retry_delay=2.0
+ retry_count=0
+ mycli="$@"
+ while [ $retry_count -lt ${retry_max_count} ]
+ do
+ echo "INFO: Sleeping ${retry_delay} ..."
+ sleep ${retry_delay}
+ echo "INFO: Executing '${mycli}' ..."
+ ${mycli}
+ if [ $? -eq 0 ]; then
+ echo "INFO: Ran '${mycli}' successfully, not retrying..."
+ break
+ else
+ echo "WARN: Failed to connect when running '${mycli}', retrying (attempt #$retry_count )..."
+ retry_count=$(echo $retry_count + 1 | bc)
+ fi
+ done
+
+ if [ $retry_count -ge ${retry_max_count} ]; then
+ echo "ERROR: Failed to connect after ${retry_max_count} attempts when running '${mycli}'"
+ exit 1
+ fi
+ # Re-enable -e when exiting retry()
+ set -e
}
function detect_satellite_version {
ping_api=$REG_SAT_URL/katello/api/ping
- if curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
+ if curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
echo Satellite 6 detected at $REG_SAT_URL
satellite_version=6
- elif curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+ elif curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
echo Satellite 5 detected at $REG_SAT_URL
satellite_version=5
else
@@ -127,29 +200,49 @@ function detect_satellite_version {
fi
}
+if [ "x${proxy_url}" != "x" ];then
+ # Config subscription-manager for proxy
+ subscription-manager config ${config_opts}
+
+ # Config yum for proxy..
+ sed -i -e '/^proxy=/d' /etc/yum.conf
+ echo "proxy=${proxy_url}" >> /etc/yum.conf
+
+ # Handle optional username/password
+ if [ -n "${proxy_username}" ]; then
+ sed -i -e '/^proxy_username=/d' /etc/yum.conf
+ echo "proxy_username=${proxy_username}" >> /etc/yum.conf
+ fi
+
+ if [ -n "${proxy_password}" ]; then
+ sed -i -e '/^proxy_password=/d' /etc/yum.conf
+ echo "proxy_password=${proxy_password}" >> /etc/yum.conf
+ fi
+
+fi
+
case "${REG_METHOD:-}" in
portal)
retry subscription-manager register $opts
if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then
retry subscription-manager attach $attach_opts
fi
- retry subscription-manager repos --disable '*'
+ retry subscription-manager repos --disable='*'
retry subscription-manager $repos
;;
satellite)
detect_satellite_version
if [ "$satellite_version" = "6" ]; then
repos="$repos --enable ${satellite_repo}"
- curl --retry 3 --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+ curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
retry subscription-manager register $opts
retry subscription-manager $repos
retry yum install -y katello-agent || true # needed for errata reporting to satellite6
katello-package-upload
- retry subscription-manager repos --disable ${satellite_repo}
else
pushd /usr/share/rhn/
- curl --retry 3 --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+ curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
popd
retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
fi
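
A minimal sketch of exercising the proxy handling added above outside of Heat, assuming a hypothetical proxy at proxy.example.com:3128 (the REG_* names are the script inputs shown in this diff; the values are placeholders, and the other REG_* inputs the script normally receives still apply):

    export REG_METHOD=portal
    export REG_HTTP_PROXY_HOST=proxy.example.com    # placeholder host
    export REG_HTTP_PROXY_PORT=3128                 # placeholder port
    export REG_HTTP_PROXY_USERNAME=
    export REG_HTTP_PROXY_PASSWORD=
    bash extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
    # afterwards, the settings the script wrote can be inspected with:
    subscription-manager config --list | grep -i proxy
    grep '^proxy' /etc/yum.conf
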
diff --git a/extraconfig/tasks/aodh_data_migration.sh b/extraconfig/tasks/aodh_data_migration.sh
deleted file mode 100644
index d4c29673..00000000
--- a/extraconfig/tasks/aodh_data_migration.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# This delivers the aodh data migration script to be invoked as part of the tripleo
-# major upgrade workflow to migrate all the alarm data from mongodb to mysql.
-# This needs to run post controller node upgrades so new aodh mysql db configured and
-# running.
-#
-set -eu
-
-#Get existing mongodb connection
-MONGO_DB_CONNECTION="$(crudini --get /etc/ceilometer/ceilometer.conf database connection)"
-
-# Get the aodh database string from hiera data
-MYSQL_DB_CONNECTION="$(crudini --get /etc/aodh/aodh.conf database connection)"
-
-#Run migration
-/usr/bin/aodh-data-migration --nosql-conn $MONGO_DB_CONNECTION --sql-conn $MYSQL_DB_CONNECTION
-
-
diff --git a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
deleted file mode 100644
index cf5d7a84..00000000
--- a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-heat_template_version: ocata
-
-description: >
- Software-config for ceilometer configuration under httpd during upgrades
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-resources:
- CeilometerWsgiMitakaNewtonPreUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- config:
- get_file: mitaka_to_newton_ceilometer_wsgi_upgrade.pp
-
- CeilometerWsgiMitakaNewtonUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - "#!/bin/bash\n\nset -e\n\n"
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - "disable_standalone_ceilometer_api\n\n"
-
- CeilometerWsgiMitakaNewtonPostUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: |
- #!/bin/bash
- set -e
- /usr/bin/systemctl reload httpd
-
- CeilometerWsgiMitakaNewtonPreUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- name: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonPreUpgradeConfig}
-
- CeilometerWsgiMitakaNewtonUpgradeConfigDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
- properties:
- name: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonUpgradeConfig}
-
- CeilometerWsgiMitakaNewtonPostUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
- properties:
- name: CeilometerWsgiMitakaNewtonPostUpgradeDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonPostUpgradeConfig}
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
deleted file mode 100755
index 8bdff5e7..00000000
--- a/extraconfig/tasks/major_upgrade_check.sh
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-check_cluster()
-{
- if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
- echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
- exit 1
- fi
-}
-
-check_pcsd()
-{
- if pcs status 2>&1 | grep -E 'Offline'; then
- echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
- exit 1
- fi
-}
-
-mysql_need_update()
-{
- # Shall we upgrade mysql data directory during the stack upgrade?
- if [ "$mariadb_do_major_upgrade" = "auto" ]; then
- ret=$(is_mysql_upgrade_needed)
- if [ $ret = "1" ]; then
- DO_MYSQL_UPGRADE=1
- else
- DO_MYSQL_UPGRADE=0
- fi
- echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
- elif [ "$mariadb_do_major_upgrade" = "no" ]; then
- DO_MYSQL_UPGRADE=0
- else
- DO_MYSQL_UPGRADE=1
- fi
-}
-
-check_disk_for_mysql_dump()
-{
- # Where to backup current database if mysql need to be upgraded
- MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
- MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
- # Spare disk ratio for extra safety
- MYSQL_BACKUP_SIZE_RATIO=1.2
-
- mysql_need_update
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-
- if [ -d "$MYSQL_BACKUP_DIR" ]; then
- echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
- exit 1
- fi
- mkdir "$MYSQL_BACKUP_DIR"
- if [ $? -ne 0 ]; then
- echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
- exit 1
- fi
-
- # the /root/.my.cnf is needed because we set the mysql root
- # password from liberty onwards
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- # While not ideal, this step allows us to calculate exactly how much space the dump
- # will need. Our main goal here is avoiding any chance of corruption due to disk space
- # exhaustion
- backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
- database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
- free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
- # we need at least space for a new mysql database + dump of the existing one,
- # times a small factor for additional safety room
- # note: bash doesn't do floating point math or floats in if statements,
- # so use python to apply the ratio and cast it back to integer
- required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
- if [ $required_space -ge $free_space ]; then
- echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
- exit 1
- fi
- fi
- fi
-}
-
-check_python_rpm()
-{
- # If for some reason rpm-python are missing we want to error out early enough
- if ! rpm -q rpm-python &> /dev/null; then
- echo_error "ERROR: upgrade cannot start without rpm-python installed"
- exit 1
- fi
-}
-
-check_clean_cluster()
-{
- if pcs status | grep -q Stopped:; then
- echo_error "ERROR: upgrade cannot start with stopped resources on the cluster. Make sure that all the resources are up and running."
- exit 1
- fi
-}
-
-check_galera_root_password()
-{
- # BZ: 1357112
- if [ ! -e /root/.my.cnf ]; then
- echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
- exit 1
- fi
-}
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
deleted file mode 100755
index 080831ab..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-check_cluster
-check_pcsd
-if [[ -n $(is_bootstrap_node) ]]; then
- check_clean_cluster
-fi
-check_python_rpm
-check_galera_root_password
-check_disk_for_mysql_dump
-
-# We want to disable fencing during the cluster --stop as it might fence
-# nodes where a service fails to stop, which could be fatal during an upgrade
-# procedure. So we remember the stonith state. If it was enabled we reenable it
-# at the end of this script
-if [[ -n $(is_bootstrap_node) ]]; then
- STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
- # We create this empty file if stonith was set to true so we can reenable stonith in step2
- rm -f /var/tmp/stonith-true
- if [ $STONITH_STATE == "true" ]; then
- touch /var/tmp/stonith-true
- fi
- pcs property set stonith-enabled=false
-fi
-
-# Migrate to HA NG and fix up rabbitmq queues
-# We fix up the rabbitmq ha queues after the migration because it will
-# restart the rabbitmq resource. Doing it after the migration means no other
-# services will be restart as there are no other constraints
-if [[ -n $(is_bootstrap_node) ]]; then
- migrate_full_to_ng_ha
- rabbitmq_newton_ocata_upgrade
-fi
-
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
deleted file mode 100755
index 6bfe1239..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-cluster_sync_timeout=1800
-
-# After migrating the cluster to HA-NG the services not under pacemaker's control
-# are still up and running. We need to stop them explicitely otherwise during the yum
-# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
-# is going to take a long time because rabbit is down. By having the service stopped
-# systemctl try-restart is a noop
-
-for service in $(services_to_migrate); do
- manage_systemd_service stop "${service%%-clone}"
- # So the reason for not reusing check_resource_systemd is that
- # I have observed systemctl is-active returning unknown with at least
- # one service that was stopped (See LP 1627254)
- timeout=600
- tstart=$(date +%s)
- tend=$(( $tstart + $timeout ))
- check_interval=3
- while (( $(date +%s) < $tend )); do
- if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
- echo "$service still active, sleeping $check_interval seconds."
- sleep $check_interval
- else
- # we do not care if it is inactive, unknown or failed as long as it is
- # not running
- break
- fi
-
- done
-done
-
-# In case the mysql package is updated, the database on disk must be
-# upgraded as well. This typically needs to happen during major
-# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
-#
-# Because in-place upgrades are not supported across 2+ major versions
-# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
-# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
-#
-# The default is to determine automatically if upgrade is needed based
-# on mysql package versionning, but this can be overriden manually
-# to support specific upgrade scenario
-
-# Calling this function will set the DO_MYSQL_UPGRADE variable which is used
-# later
-mysql_need_update
-
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
- cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
- fi
-
- pcs resource disable redis
- check_resource redis stopped 600
- pcs resource disable rabbitmq
- check_resource rabbitmq stopped 600
- pcs resource disable galera
- check_resource galera stopped 600
- pcs resource disable openstack-cinder-volume
- check_resource openstack-cinder-volume stopped 600
- # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
- pcs resource disable $vip
- check_resource $vip stopped 60
- done
- pcs cluster stop --all
-fi
-
-
-# Swift isn't controlled by pacemaker
-systemctl_swift stop
-
-tstart=$(date +%s)
-while systemctl is-active pacemaker; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_sync_timeout )) ; then
- echo_error "ERROR: cluster shutdown timed out"
- exit 1
- fi
-done
-
-# The reason we do an sql dump *and* we move the old dir out of
-# the way is because it gives us an extra level of safety in case
-# something goes wrong during the upgrade. Once the restore is
-# successful we go ahead and remove it. If the directory exists
-# we bail out as it means the upgrade process had issues in the last
-# run.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
- echo_error "ERROR: mysql backup dir already exist"
- exit 1
- fi
- mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-special_case_ovs_upgrade_if_needed
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y -q update
-
-# We need to ensure at least those two configuration settings, otherwise
-# mariadb 10.1+ won't activate galera replication.
-# wsrep_cluster_address must only be set though, its value does not
-# matter because it's overriden by the galera resource agent.
-cat >> /etc/my.cnf.d/galera.cnf <<EOF
-[mysqld]
-wsrep_on = ON
-wsrep_cluster_address = gcomm://localhost
-EOF
-
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- # Scripts run via heat have no HOME variable set and this confuses
- # mysqladmin
- export HOME=/root
-
- mkdir /var/lib/mysql || /bin/true
- chown mysql:mysql /var/lib/mysql
- chmod 0755 /var/lib/mysql
- restorecon -R /var/lib/mysql/
- mysql_install_db --datadir=/var/lib/mysql --user=mysql
- chown -R mysql:mysql /var/lib/mysql/
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- mysqld_safe --wsrep-new-cluster &
- # We have a populated /root/.my.cnf with root/password here so
- # we need to temporarily rename it because the newly created
- # db is empty and no root password is set
- mv /root/.my.cnf /root/.my.cnf.temporary
- timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
- mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
- mv /root/.my.cnf.temporary /root/.my.cnf
- mysqladmin -u root shutdown
- # The import was successful so we may remove the folder
- rm -r "$MYSQL_BACKUP_DIR"
- fi
-fi
-
-# If we reached here without error we can safely blow away the origin
-# mysql dir from every controller
-
-# TODO: What if the upgrade fails on the bootstrap node, but not on
-# this controller. Data may be lost.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Let's reset the stonith back to true if it was true, before starting the cluster
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ -f /var/tmp/stonith-true ]; then
- pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
- fi
- rm -f /var/tmp/stonith-true
-fi
-
-# Pin messages sent to compute nodes to kilo, these will be upgraded later
-crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
-# LP: 1615035, required only for M/N upgrade.
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
-# LP: 1627450, required only for M/N upgrade
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
-
-crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
-
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
deleted file mode 100755
index a3cbd945..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-cluster_form_timeout=600
-cluster_settle_timeout=1800
-galera_sync_timeout=600
-
-if [[ -n $(is_bootstrap_node) ]]; then
- pcs cluster start --all
-
- tstart=$(date +%s)
- while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_form_timeout )) ; then
- echo_error "ERROR: timed out forming the cluster"
- exit 1
- fi
- done
-
- if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
- echo_error "ERROR: timed out waiting for cluster to finish transition"
- exit 1
- fi
-
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
- pcs resource enable $vip
- check_resource_pacemaker $vip started 60
- done
-fi
-
-start_or_enable_service galera
-check_resource galera started 600
-start_or_enable_service redis
-check_resource redis started 600
-# We need mongod which is now a systemd service up and running before calling
-# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
-# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
-# we should be good.
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm
-systemctl start mongod
-check_resource mongod started 600
-
-if [[ -n $(is_bootstrap_node) ]]; then
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo_error "ERROR galera sync timed out"
- exit 1
- fi
- done
-
- # Run all the db syncs
- # TODO: check if this can be triggered in puppet and removed from here
- ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types
- cinder-manage db sync
- glance-manage db_sync
- heat-manage --config-file /etc/heat/heat.conf db_sync
- keystone-manage db_sync
- neutron-db-manage upgrade heads
- nova-manage db sync
- nova-manage api_db sync
- nova-manage db online_data_migrations
- sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
-fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
deleted file mode 100755
index d2cb9553..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-start_or_enable_service rabbitmq
-check_resource rabbitmq started 600
-start_or_enable_service redis
-check_resource redis started 600
-start_or_enable_service openstack-cinder-volume
-check_resource openstack-cinder-volume started 600
-
-# start httpd so keystone is available for gnocchi
-# upgrade to run.
-systemctl start httpd
-
-# Swift isn't controled by pacemaker
-systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
deleted file mode 100755
index fa95f1f8..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-if [[ -n $(is_bootstrap_node) ]]; then
- # run gnocchi upgrade
- gnocchi-upgrade
-fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
deleted file mode 100755
index d569084d..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-# We need to start the systemd services we explicitely stopped at step _1.sh
-# FIXME: Should we let puppet during the convergence step do the service enabling or
-# should we add it here?
-services=$(services_to_migrate)
-if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
- services=${services%%openstack-sahara*}
-fi
-for service in $services; do
- manage_systemd_service start "${service%%-clone}"
- check_resource_systemd "${service%%-clone}" started 600
-done
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
deleted file mode 100644
index 8c91027d..00000000
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ /dev/null
@@ -1,179 +0,0 @@
-heat_template_version: ocata
-description: 'Upgrade for Pacemaker deployments'
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-
- UpgradeLevelNovaCompute:
- type: string
- description: Nova Compute upgrade level
- default: ''
- MySqlMajorUpgrade:
- type: string
- description: Can be auto,yes,no and influences if the major upgrade should do or detect an automatic mysql upgrade
- constraints:
- - allowed_values: ['auto', 'yes', 'no']
- default: 'auto'
- IgnoreCephUpgradeWarnings:
- type: boolean
- default: false
- description: If enabled, Ceph upgrade will be forced even though cluster or PGs status is not clean
- KeepSaharaServicesOnUpgrade:
- type: boolean
- default: true
- description: Whether to keep Sahara services when upgrading controller nodes from mitaka to newton
-
-
-resources:
- # TODO(jistr): for Mitaka->Newton upgrades and further we can use
- # map_merge with input_values instead of feeding params into scripts
- # via str_replace on bash snippets
-
- ControllerPacemakerUpgradeConfig_Step1:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - str_replace:
- template: |
- #!/bin/bash
- mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
- params:
- MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_check.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_1.sh
-
- ControllerPacemakerUpgradeDeployment_Step1:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step2:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - str_replace:
- template: |
- #!/bin/bash
- mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
- params:
- MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_check.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_2.sh
-
- ControllerPacemakerUpgradeDeployment_Step2:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step1
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step3:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_3.sh
-
- ControllerPacemakerUpgradeDeployment_Step3:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step2
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step4:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_4.sh
-
- ControllerPacemakerUpgradeDeployment_Step4:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step3
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step4}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step5:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_5.sh
-
- ControllerPacemakerUpgradeDeployment_Step5:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step4
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step6:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- keep_sahara_services_on_upgrade='KEEP_SAHARA_SERVICES_ON_UPGRADE'
- params:
- KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_6.sh
-
- ControllerPacemakerUpgradeDeployment_Step6:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step5
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step6}
- input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
deleted file mode 100644
index ae22a1e7..00000000
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/bin/bash
-
-# Special pieces of upgrade migration logic go into this
-# file. E.g. Pacemaker cluster transitions for existing deployments,
-# matching changes to overcloud_controller_pacemaker.pp (Puppet
-# handles deployment, this file handles migrations).
-#
-# This file shouldn't execute any action on its own, all logic should
-# be wrapped into bash functions. Upgrade scripts will source this
-# file and call the functions defined in this file where appropriate.
-#
-# The migration functions should be idempotent. If the migration has
-# been already applied, it should be possible to call the function
-# again without damaging the deployment or failing the upgrade.
-
-# If the major version of mysql is going to change after the major
-# upgrade, the database must be upgraded on disk to avoid failures
-# due to internal incompatibilities between major mysql versions
-# https://bugs.launchpad.net/tripleo/+bug/1587449
-# This function detects whether a database upgrade is required
-# after a mysql package upgrade. It returns 0 when no major upgrade
-# has to take place, 1 otherwise.
-function is_mysql_upgrade_needed {
- # The name of the package which provides mysql might differ
- # after the upgrade. Consider the generic package name, which
- # should capture the major version change (e.g. 5.5 -> 10.1)
- local name="mariadb"
- local output
- local ret
- set +e
- output=$(yum -q check-update $name)
- ret=$?
- set -e
- if [ $ret -ne 100 ]; then
- # no updates so we exit
- echo "0"
- return
- fi
-
- local currentepoch=$(rpm -q --qf "%{epoch}" $name)
- local currentversion=$(rpm -q --qf "%{version}" $name | cut -d. -f-2)
- local currentrelease=$(rpm -q --qf "%{release}" $name)
- local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name)
- local newepoch=$(echo "$newoutput" | awk '{ print $1 }')
- local newversion=$(echo "$newoutput" | awk '{ print $2 }' | cut -d. -f-2)
- local newrelease=$(echo "$newoutput" | awk '{ print $3 }')
-
- # With this we trigger the dump restore/path if we change either epoch or
- # version in the package If only the release tag changes we do not do it
- # FIXME: we could refine this by trying to parse the mariadb version
- # into X.Y.Z and trigger the update only if X and/or Y change.
- output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc")
- if [ "$output" != "-1" ]; then
- echo "0"
- return
- fi
- echo "1"
-}
-
-# This function returns the list of services to be migrated away from pacemaker
-# and to systemd. The reason to have these services in a separate function is because
-# this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2}
-# and in the function to migrate the cluster from full HA to HA NG
-function services_to_migrate {
- # The following PCMK resources the ones the we are going to delete
- PCMK_RESOURCE_TODELETE="
- httpd-clone
- memcached-clone
- mongod-clone
- neutron-dhcp-agent-clone
- neutron-l3-agent-clone
- neutron-metadata-agent-clone
- neutron-netns-cleanup-clone
- neutron-openvswitch-agent-clone
- neutron-ovs-cleanup-clone
- neutron-server-clone
- openstack-aodh-evaluator-clone
- openstack-aodh-listener-clone
- openstack-aodh-notifier-clone
- openstack-ceilometer-central-clone
- openstack-ceilometer-collector-clone
- openstack-ceilometer-notification-clone
- openstack-cinder-api-clone
- openstack-cinder-scheduler-clone
- openstack-glance-api-clone
- openstack-gnocchi-metricd-clone
- openstack-gnocchi-statsd-clone
- openstack-heat-api-cfn-clone
- openstack-heat-api-clone
- openstack-heat-api-cloudwatch-clone
- openstack-heat-engine-clone
- openstack-nova-api-clone
- openstack-nova-conductor-clone
- openstack-nova-consoleauth-clone
- openstack-nova-novncproxy-clone
- openstack-nova-scheduler-clone
- openstack-sahara-api-clone
- openstack-sahara-engine-clone
- "
- echo $PCMK_RESOURCE_TODELETE
-}
-
-# This function will migrate a mitaka system where all the resources are managed
-# via pacemaker to a newton setup where only a few services will be managed by pacemaker
-# On a high-level it will operate as follows:
-# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
-# during the conversion
-# 2. Remove all the colocation constraints and then the ordering constraints, except the
-# ones related to haproxy/VIPs which exist in Newton as well
-# 3. Take the cluster out of maintenance-mode
-# 4. Remove all the resources that won't be managed by pacemaker in newton. The
-# outcome will be
-# that they are stopped and removed from pacemakers control
-# 5. Do a resource cleanup to make sure the cluster is in a clean state
-function migrate_full_to_ng_ha {
- if [[ -n $(pcmk_running) ]]; then
- pcs property set maintenance-mode=true
-
- # First we go through all the colocation constraints (except the ones
- # we want to keep, i.e. the haproxy/ip ones) and we remove those
- COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
- for constraint in $COL_CONSTRAINTS; do
- log_debug "Deleting colocation constraint $constraint from CIB"
- pcs constraint remove "$constraint"
- done
-
- # Now we kill all the ordering constraints (except the haproxy/ip ones)
- ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
- for constraint in $ORD_CONSTRAINTS; do
- log_debug "Deleting ordering constraint $constraint from CIB"
- pcs constraint remove "$constraint"
- done
- # At this stage all the pacemaker resources are removed from the CIB.
- # Once we remove the maintenance-mode those systemd resources will keep
- # on running. They shall be systemd enabled via the puppet converge
- # step later on
- pcs property set maintenance-mode=false
-
- # At this stage there are no constraints whatsoever except the haproxy/ip ones
- # which we want to keep. We now disable and then delete each resource
- # that will move to systemd.
- # We want the systemd resources be stopped before doing "yum update",
- # that way "systemctl try-restart <service>" is no-op because the
- # service was down already
- PCS_STATUS_OUTPUT="$(pcs status)"
- for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
- if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
- log_debug "Deleting $resource from the CIB"
- if ! pcs resource disable "$resource" --wait=600; then
- echo_error "ERROR: resource $resource failed to be disabled"
- exit 1
- fi
- pcs resource delete --force "$resource"
- else
- log_debug "Service $resource not found as a pacemaker resource, not trying to delete."
- fi
- done
-
- # We need to do a pcs resource cleanup here + crm_resource --wait to
- # make sure the cluster is in a clean state before we stop everything,
- # upgrade and restart everything
- pcs resource cleanup
- # We are making sure here that the cluster is stable before proceeding
- if ! timeout -k 10 600 crm_resource --wait; then
- echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
- exit 1
- fi
- fi
-}
-
-function disable_standalone_ceilometer_api {
- if [[ -n $(is_bootstrap_node) ]]; then
- if [[ -n $(is_pacemaker_managed openstack-ceilometer-api) ]]; then
- # Disable pacemaker resources for ceilometer-api
- manage_pacemaker_service disable openstack-ceilometer-api
- check_resource_pacemaker openstack-ceilometer-api stopped 600
- pcs resource delete openstack-ceilometer-api --wait=600
- fi
- fi
-}
-
-
-# This function will make sure that the rabbitmq ha policies are converted from mitaka to newton
-# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}"
-# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}"
-# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances)
-# Note that changing an attribute like this makes the rabbitmq resource restart
-function rabbitmq_newton_ocata_upgrade {
- if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then
- # Number of controller is obtained by counting how many hostnames we
- # have in controller_node_names hiera key
- nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
- nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
- if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
- echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues"
- exit 1
- fi
- pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
- fi
-}
diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
deleted file mode 100644
index 45933fb7..00000000
--- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-heat_template_version: ocata
-
-description: >
- Software-config for performing aodh data migration
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-resources:
-
- AodhMysqlMigrationScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: aodh_data_migration.sh}
-
- AodhMysqlMigrationScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: AodhMysqlMigrationScriptConfig}
- input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
deleted file mode 100644
index a8d43663..00000000
--- a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This puppet manifest is to be used only during a Mitaka->Newton upgrade
-# It configures ceilometer to be run under httpd but it makes sure to not
-# restart any services. This snippet needs to be called before init as a
-# pre upgrade migration.
-
-Service <|
- tag == 'ceilometer-service'
-|> {
- hasrestart => true,
- restart => '/bin/true',
- start => '/bin/true',
- stop => '/bin/true',
-}
-
-if $::hostname == downcase(hiera('bootstrap_nodeid')) {
- $pacemaker_master = true
- $sync_db = true
-} else {
- $pacemaker_master = false
- $sync_db = false
-}
-
-include ::tripleo::packages
-
-
-if str2bool(hiera('mongodb::server::ipv6', false)) {
- $mongo_node_ips_with_port_prefixed = prefix(hiera('mongodb_node_ips'), '[')
- $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
-} else {
- $mongo_node_ips_with_port = suffix(hiera('mongodb_node_ips'), ':27017')
-}
-$mongodb_replset = hiera('mongodb::server::replset')
-$mongo_node_string = join($mongo_node_ips_with_port, ',')
-$database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
-
-$rabbit_hosts = hiera('rabbitmq_node_ips', undef)
-$rabbit_port = hiera('ceilometer::rabbit_port', 5672)
-$rabbit_endpoints = suffix(any2array(normalize_ip_for_uri($rabbit_hosts)), ":${rabbit_port}")
-
-class { '::ceilometer' :
- rabbit_hosts => $rabbit_endpoints,
-}
-
-class {'::ceilometer::db':
- database_connection => $database_connection,
-}
-
-if $sync_db {
- include ::ceilometer::db::sync
-}
-
-include ::ceilometer::config
-
-class { '::ceilometer::api':
- enabled => true,
- service_name => 'httpd',
- keystone_password => hiera('ceilometer::keystone::auth::password'),
- identity_uri => hiera('ceilometer::keystone::authtoken::auth_url'),
- auth_uri => hiera('ceilometer::keystone::authtoken::auth_uri'),
- keystone_tenant => hiera('ceilometer::keystone::authtoken::project_name'),
-}
-
-class { '::apache' :
- service_enable => false,
- service_manage => true,
- service_restart => '/bin/true',
- purge_configs => false,
- purge_vhost_dir => false,
-}
-
-# To ensure existing ports are not overridden
-class { '::aodh::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-class { '::gnocchi::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-
-class { '::keystone::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-class { '::ceilometer::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index aae4a2de..4480f74d 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -299,9 +299,10 @@ function systemctl_swift {
}
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+# Update condition and add --notriggerun for +bug/1669714
function special_case_ovs_upgrade_if_needed {
- if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
+ if rpm -qa | grep "^openvswitch-2.5.0-14" || rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart" ; then
+ echo "Manual upgrade of openvswitch - ovs-2.5.0-14 or restart in postun detected"
rm -rf OVS_UPGRADE
mkdir OVS_UPGRADE && pushd OVS_UPGRADE
echo "Attempting to downloading latest openvswitch with yumdownloader"
@@ -310,8 +311,8 @@ function special_case_ovs_upgrade_if_needed {
if rpm -U --test $pkg 2>&1 | grep "already installed" ; then
echo "Looks like newer version of $pkg is already installed, skipping"
else
- echo "Updating $pkg with nopostun option"
- rpm -U --replacepkgs --nopostun $pkg
+ echo "Updating $pkg with --nopostun --notriggerun"
+ rpm -U --replacepkgs --nopostun --notriggerun $pkg
fi
done
popd
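
The updated condition above can be checked by hand before an upgrade; a short sketch reusing the same commands the function runs (output is environment-specific):

    # is the installed openvswitch the 2.5.0-14 build that needs --notriggerun?
    rpm -qa | grep "^openvswitch-2.5.0-14"
    # or does its %postun scriptlet try-restart the service (the original bug/1635205 case)?
    rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart"
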
diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.yaml
index a63868c9..a304e55b 100644
--- a/extraconfig/tasks/post_puppet_pacemaker.yaml
+++ b/extraconfig/tasks/post_puppet_pacemaker.yaml
@@ -25,7 +25,7 @@ resources:
ControllerPostPuppetMaintenanceModeDeployment:
type: OS::Heat::SoftwareDeployments
properties:
- servers: {get_param: servers}
+ servers: {get_param: servers}
config: {get_resource: ControllerPostPuppetMaintenanceModeConfig}
input_values: {get_param: input_values}
@@ -33,5 +33,5 @@ resources:
type: OS::TripleO::Tasks::ControllerPostPuppetRestart
depends_on: ControllerPostPuppetMaintenanceModeDeployment
properties:
- servers: {get_param: servers}
+ servers: {get_param: servers}
input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/run_puppet.sh b/extraconfig/tasks/run_puppet.sh
index b7771e33..e3f6c493 100755
--- a/extraconfig/tasks/run_puppet.sh
+++ b/extraconfig/tasks/run_puppet.sh
@@ -10,7 +10,10 @@ function run_puppet {
export FACTER_deploy_config_name="${role}Deployment_Step${step}"
if [ -e "/etc/puppet/hieradata/heat_config_${FACTER_deploy_config_name}.json" ]; then
set +e
- puppet apply --detailed-exitcodes "${manifest}"
+ puppet apply --detailed-exitcodes \
+ --modulepath \
+ /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
+ "${manifest}"
rc=$?
echo "puppet apply exited with exit code $rc"
else
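
For reference, the helper patched above is invoked once per step from tripleo_upgrade_node.sh (shown further down) as run_puppet <manifest> <role> <step>; a hedged sketch with placeholder values following the /root/${ROLE}_puppet_config.pp naming used there:

    # placeholder role and step; assumes the run_puppet function has been sourced or inlined
    run_puppet /root/Compute_puppet_config.pp Compute 1
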
diff --git a/extraconfig/tasks/ssh/host_public_key.yaml b/extraconfig/tasks/ssh/host_public_key.yaml
new file mode 100644
index 00000000..847c8772
--- /dev/null
+++ b/extraconfig/tasks/ssh/host_public_key.yaml
@@ -0,0 +1,42 @@
+heat_template_version: ocata
+
+description: >
+ This is a template which will fetch the ssh host public key.
+
+parameters:
+ server:
+ description: ID of the node to apply this config to
+ type: string
+
+resources:
+ SshHostPubKeyConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ outputs:
+ - name: rsa
+ - name: ecdsa
+ - name: ed25519
+ config: |
+ #!/bin/sh -x
+ test -e '/etc/ssh/ssh_host_rsa_key.pub' && cat /etc/ssh/ssh_host_rsa_key.pub > $heat_outputs_path.rsa
+ test -e '/etc/ssh/ssh_host_ecdsa_key.pub' && cat /etc/ssh/ssh_host_ecdsa_key.pub > $heat_outputs_path.ecdsa
+ test -e '/etc/ssh/ssh_host_ed25519_key.pub' && cat /etc/ssh/ssh_host_ed25519_key.pub > $heat_outputs_path.ed25519
+
+ SshHostPubKeyDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ config: {get_resource: SshHostPubKeyConfig}
+ server: {get_param: server}
+
+
+outputs:
+ ecdsa:
+ description: Host ssh public key (ecdsa)
+ value: {get_attr: [SshHostPubKeyDeployment, ecdsa]}
+ rsa:
+ description: Host ssh public key (rsa)
+ value: {get_attr: [SshHostPubKeyDeployment, rsa]}
+ ed25519:
+ description: Host ssh public key (ed25519)
+ value: {get_attr: [SshHostPubKeyDeployment, ed25519]}
diff --git a/extraconfig/tasks/ssh/known_hosts_config.yaml b/extraconfig/tasks/ssh/known_hosts_config.yaml
new file mode 100644
index 00000000..2ebcb63c
--- /dev/null
+++ b/extraconfig/tasks/ssh/known_hosts_config.yaml
@@ -0,0 +1,36 @@
+heat_template_version: ocata
+description: 'SSH Known Hosts Config'
+
+parameters:
+ known_hosts:
+ type: string
+
+resources:
+
+ SSHKnownHostsConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: known_hosts
+ default: {get_param: known_hosts}
+ config: |
+ #!/bin/bash
+ set -eux
+ set -o pipefail
+
+ echo "Creating ssh known hosts file"
+
+ if [ ! -z "${known_hosts}" ]; then
+ echo "${known_hosts}"
+ echo -ne "${known_hosts}" > /etc/ssh/ssh_known_hosts
+ chmod 0644 /etc/ssh/ssh_known_hosts
+ else
+ rm -f /etc/ssh/ssh_known_hosts
+ echo "No ssh known hosts"
+ fi
+
+outputs:
+ OS::stack_id:
+ description: The SSHKnownHostsConfig resource.
+    value: {get_resource: SSHKnownHostsConfig}
\ No newline at end of file
diff --git a/extraconfig/tasks/swift-ring-deploy.yaml b/extraconfig/tasks/swift-ring-deploy.yaml
deleted file mode 100644
index d17f78ae..00000000
--- a/extraconfig/tasks/swift-ring-deploy.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-heat_template_version: ocata
-
-parameters:
- servers:
- type: json
- SwiftRingGetTempurl:
- default: ''
- description: A temporary Swift URL to download rings from.
- type: string
-
-resources:
- SwiftRingDeployConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- inputs:
- - name: swift_ring_get_tempurl
- config: |
- #!/bin/sh
- pushd /
- curl --insecure --silent "${swift_ring_get_tempurl}" | tar xz || true
- popd
-
- SwiftRingDeploy:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: SwiftRingDeploy
- config: {get_resource: SwiftRingDeployConfig}
- servers: {get_param: servers}
- input_values:
- swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl}
diff --git a/extraconfig/tasks/swift-ring-update.yaml b/extraconfig/tasks/swift-ring-update.yaml
deleted file mode 100644
index 440c6883..00000000
--- a/extraconfig/tasks/swift-ring-update.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-heat_template_version: ocata
-
-parameters:
- servers:
- type: json
- SwiftRingPutTempurl:
- default: ''
- description: A temporary Swift URL to upload rings to.
- type: string
-
-resources:
- SwiftRingUpdateConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- inputs:
- - name: swift_ring_put_tempurl
- config: |
- #!/bin/sh
- TMP_DATA=$(mktemp -d)
- function cleanup {
- rm -Rf "$TMP_DATA"
- }
- trap cleanup EXIT
- # sanity check in case rings are not consistent within cluster
- swift-recon --md5 | grep -q "doesn't match" && exit 1
- pushd ${TMP_DATA}
- tar -cvzf swift-rings.tar.gz /etc/swift/*.builder /etc/swift/*.ring.gz /etc/swift/backups/*
- resp=`curl --insecure --silent -X PUT "${swift_ring_put_tempurl}" --write-out "%{http_code}" --data-binary @swift-rings.tar.gz`
- popd
- if [ "$resp" != "201" ]; then
- exit 1
- fi
-
- SwiftRingUpdate:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: SwiftRingUpdate
- config: {get_resource: SwiftRingUpdateConfig}
- servers: {get_param: servers}
- input_values:
- swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl}
diff --git a/extraconfig/tasks/tripleo_upgrade_node.sh b/extraconfig/tasks/tripleo_upgrade_node.sh
index c2565410..a5a312dc 100644
--- a/extraconfig/tasks/tripleo_upgrade_node.sh
+++ b/extraconfig/tasks/tripleo_upgrade_node.sh
@@ -28,37 +28,43 @@ SCRIPT_NAME=$(basename $0)
$(declare -f log_debug)
$(declare -f manage_systemd_service)
$(declare -f systemctl_swift)
+$(declare -f special_case_ovs_upgrade_if_needed)
# pin nova messaging +-1 for the nova-compute service
if [[ -n \$NOVA_COMPUTE ]]; then
crudini --set /etc/nova/nova.conf upgrade_levels compute auto
fi
-$(declare -f special_case_ovs_upgrade_if_needed)
special_case_ovs_upgrade_if_needed
-yum -y install python-zaqarclient # needed for os-collect-config
if [[ -n \$SWIFT_STORAGE ]]; then
systemctl_swift stop
fi
+
yum -y update
+
if [[ -n \$SWIFT_STORAGE ]]; then
systemctl_swift start
fi
# Due to bug#1640177 we need to restart compute agent
if [[ -n \$NOVA_COMPUTE ]]; then
- echo "Restarting openstack ceilometer agent compute"
+ log_debug "Restarting openstack ceilometer agent compute"
systemctl restart openstack-ceilometer-compute
fi
# Apply puppet manifest to converge just right after the ${ROLE} upgrade
$(declare -f run_puppet)
for step in 1 2 3 4 5 6; do
+ log_debug "Running puppet step \$step for ${ROLE}"
if ! run_puppet /root/${ROLE}_puppet_config.pp ${ROLE} \${step}; then
- echo "Puppet failure at step \${step}"
+ log_debug "Puppet failure at step \${step}"
exit 1
fi
+ log_debug "Completed puppet step \$step"
done
+
+log_debug "TripleO upgrade run completed."
+
ENDOFCAT
# ensure the permissions are OK
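
The script above is produced through an unquoted heredoc (ENDOFCAT): the $(declare -f ...) lines expand at generation time and embed full function bodies into the generated file, while escaped variables such as \$NOVA_COMPUTE stay literal until the generated script runs. A minimal standalone sketch of the same pattern, with an illustrative function name and path only (indented here purely for readability; a real heredoc terminator must start at column 0):

    log_debug() { echo "DEBUG: $*"; }
    cat > /tmp/generated_snippet.sh <<ENDOFCAT
    #!/bin/bash
    # the next line expands now, embedding the function definition verbatim
    $(declare -f log_debug)
    log_debug "running inside the generated script"
    ENDOFCAT
    bash /tmp/generated_snippet.sh
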
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index c66dd01f..20a5b658 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -40,14 +40,25 @@ touch "$timestamp_file"
command_arguments=${command_arguments:-}
-list_updates=$(yum list updates)
-
-if [[ "$list_updates" == "" ]]; then
+# yum check-update exits 100 if updates are available
+set +e
+check_update=$(yum check-update 2>&1)
+check_update_exit=$?
+set -e
+
+if [[ "$check_update_exit" == "1" ]]; then
+ echo "Failed to check for package updates"
+ echo "$check_update"
+ exit 1
+elif [[ "$check_update_exit" != "100" ]]; then
echo "No packages require updating"
exit 0
fi
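
For context, the branch above relies on yum's exit-code convention for check-update (0 = nothing to do, 100 = updates available, anything else = error); a minimal standalone illustration, not part of the source:

    set +e
    yum -q check-update > /dev/null 2>&1
    rc=$?
    set -e
    case "$rc" in
        0)   echo "No packages require updating" ;;
        100) echo "Updates are available" ;;
        *)   echo "yum check-update failed (rc=$rc)" ;;
    esac
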
-pacemaker_status=$(systemctl is-active pacemaker || :)
+pacemaker_status=""
+if hiera -c /etc/puppet/hiera.yaml service_names | grep -q pacemaker; then
+ pacemaker_status=$(systemctl is-active pacemaker)
+fi
# Fix the redis/rabbit resource start/stop timeouts. See https://bugs.launchpad.net/tripleo/+bug/1633455
# and https://bugs.launchpad.net/tripleo/+bug/1634851
@@ -67,7 +78,7 @@ if [[ "$pacemaker_status" == "active" && \
fi
fi
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+# special case https://bugs.launchpad.net/tripleo/+bug/1635205 +bug/1669714
special_case_ovs_upgrade_if_needed
if [[ "$pacemaker_status" == "active" ]] ; then
@@ -97,17 +108,6 @@ return_code=$?
echo "$result"
echo "yum return code: $return_code"
-# Writes any changes caused by alterations to os-net-config and bounces the
-# interfaces *before* restarting the cluster.
-os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
-RETVAL=$?
-if [[ $RETVAL == 2 ]]; then
- echo "os-net-config: interface configuration files updated successfully"
-elif [[ $RETVAL != 0 ]]; then
- echo "ERROR: os-net-config configuration failed"
- exit $RETVAL
-fi
-
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Starting cluster node"
pcs cluster start
@@ -124,15 +124,19 @@ if [[ "$pacemaker_status" == "active" ]] ; then
fi
done
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo "ERROR galera sync timed out"
- exit 1
- fi
- done
+ RETVAL=$( pcs resource show galera-master | grep wsrep_cluster_address | grep -q `crm_node -n` ; echo $? )
+
+ if [[ $RETVAL -eq 0 && -e /etc/sysconfig/clustercheck ]]; then
+ tstart=$(date +%s)
+ while ! clustercheck; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > galera_sync_timeout )) ; then
+ echo "ERROR galera sync timed out"
+ exit 1
+ fi
+ done
+ fi
echo "Waiting for pacemaker cluster to settle"
if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then