Diffstat (limited to 'extraconfig')
 -rw-r--r--  extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml |  4
 -rw-r--r--  extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml             | 16
 -rw-r--r--  extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration          | 82
 -rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh                   |  5
 -rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml                              |  4
 -rwxr-xr-x  extraconfig/tasks/run_puppet.sh                                             | 27
 -rw-r--r--  extraconfig/tasks/swift-ring-deploy.yaml                                    | 31
 -rw-r--r--  extraconfig/tasks/swift-ring-update.yaml                                    | 42
 -rw-r--r--  extraconfig/tasks/tripleo_upgrade_node.sh                                   | 33
 -rwxr-xr-x  extraconfig/tasks/yum_update.sh                                             |  8
10 files changed, 232 insertions(+), 20 deletions(-)
diff --git a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
index c388358a..24557517 100644
--- a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
@@ -21,3 +21,7 @@ parameter_defaults:
rhel_reg_type: ""
rhel_reg_method: ""
rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.1-rpms"
+ rhel_reg_http_proxy_host: ""
+ rhel_reg_http_proxy_port: ""
+ rhel_reg_http_proxy_username: ""
+ rhel_reg_http_proxy_password: ""
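
The four new parameter defaults above are consumed at deploy time like any other registration setting. A hedged illustration follows (the environment file path comes from this repo; the deploy invocation is the standard TripleO CLI, not part of this change):

    # Illustrative only: supply the registration defaults, including the new
    # proxy settings, via the environment file when deploying the overcloud.
    openstack overcloud deploy --templates \
        -e extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
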
diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
index fdf2e957..e8316c53 100644
--- a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
@@ -45,6 +45,14 @@ parameters:
type: string
rhel_reg_sat_repo:
type: string
+ rhel_reg_http_proxy_host:
+ type: string
+ rhel_reg_http_proxy_port:
+ type: string
+ rhel_reg_http_proxy_username:
+ type: string
+ rhel_reg_http_proxy_password:
+ type: string
resources:
@@ -71,6 +79,10 @@ resources:
- name: REG_TYPE
- name: REG_METHOD
- name: REG_SAT_REPO
+ - name: REG_HTTP_PROXY_HOST
+ - name: REG_HTTP_PROXY_PORT
+ - name: REG_HTTP_PROXY_USERNAME
+ - name: REG_HTTP_PROXY_PASSWORD
config: {get_file: scripts/rhel-registration}
RHELRegistrationDeployment:
@@ -99,6 +111,10 @@ resources:
REG_TYPE: {get_param: rhel_reg_type}
REG_METHOD: {get_param: rhel_reg_method}
REG_SAT_REPO: {get_param: rhel_reg_sat_repo}
+ REG_HTTP_PROXY_HOST: {get_param: rhel_reg_http_proxy_host}
+ REG_HTTP_PROXY_PORT: {get_param: rhel_reg_http_proxy_port}
+ REG_HTTP_PROXY_USERNAME: {get_param: rhel_reg_http_proxy_username}
+ REG_HTTP_PROXY_PASSWORD: {get_param: rhel_reg_http_proxy_password}
RHELUnregistration:
type: OS::Heat::SoftwareConfig
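
For readers less familiar with Heat software configs: each entry under `inputs` above is exposed to the registration script as an environment variable of the same name, populated from the `input_values` of the deployment resource. A minimal sketch of the consuming side (variable names are from this diff; everything else is illustrative):

    # Sketch: inside the deployed script, each Heat input arrives as an
    # environment variable; the ${VAR:-} form guards against unset inputs.
    if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then
        echo "registration will use proxy host ${REG_HTTP_PROXY_HOST}"
    fi
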
diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
index 2650a967..6f83cc4b 100644
--- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
+++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
@@ -13,10 +13,18 @@ fi
retryCount=0
opts=
+config_opts=
attach_opts=
sat5_opts=
repos="repos --enable rhel-7-server-rpms"
satellite_repo=${REG_SAT_REPO}
+proxy_host=
+proxy_port=
+proxy_url=
+proxy_username=
+proxy_password=
+
+# Process the input variables.
if [ -n "${REG_AUTO_ATTACH:-}" ]; then
opts="$opts --auto-attach"
@@ -97,6 +105,57 @@ if [ -n "${REG_TYPE:-}" ]; then
opts="$opts --type=$REG_TYPE"
fi
+# Proxy settings (host and port)
+if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then
+ proxy_host="${REG_HTTP_PROXY_HOST}"
+fi
+
+if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then
+ proxy_port="${REG_HTTP_PROXY_PORT}"
+fi
+
+# Proxy settings (user and password)
+if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then
+ proxy_username="${REG_HTTP_PROXY_USERNAME}"
+fi
+
+if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then
+ proxy_password="${REG_HTTP_PROXY_PASSWORD}"
+fi
+
+# Sanity Checks for proxy host/port/user/password
+if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then
+ if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then
+ # Good: both host and port are set
+ proxy_url="http://${proxy_host}:${proxy_port}"
+ config_opts="--server.proxy_hostname=${proxy_host} --server.proxy_port=${proxy_port}"
+ sat5_opts="${sat5_opts} --proxy_hostname=${proxy_url}"
+ echo "RHSM Proxy set to: ${proxy_url}"
+ if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then
+ if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then
+ config_opts="${config_opts} --server.proxy_user=${proxy_username} --server.proxy_password=${proxy_password}"
+ sat5_opts="${sat5_opts} --proxyUser=${proxy_username} --proxyPassword=${proxy_password}"
+ else
+ echo "Warning: REG_HTTP_PROXY_PASSWORD cannot be null with non-empty REG_HTTP_PROXY_USERNAME! Skipping..."
+ proxy_username= ; proxy_password=
+ fi
+ else
+ if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then
+ echo "Warning: REG_HTTP_PROXY_USERNAME cannot be null with non-empty REG_HTTP_PROXY_PASSWORD! Skipping..."
+ proxy_username= ; proxy_password=
+ fi
+ fi
+ else
+ echo "Warning: REG_HTTP_PROXY_PORT cannot be null with non-empty REG_HTTP_PROXY_HOST! Skipping..."
+ proxy_host= ; proxy_port= ; proxy_url= ; proxy_username= ; proxy_password=
+ fi
+else
+ if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then
+ echo "Warning: REG_HTTP_PROXY_HOST cannot be null with non-empty REG_HTTP_PROXY_PORT! Skipping..."
+ proxy_host= ; proxy_port= ; proxy_url= ; proxy_username= ; proxy_password=
+ fi
+fi
+
function retry() {
if [[ $retryCount < 3 ]]; then
$@
@@ -127,13 +186,34 @@ function detect_satellite_version {
fi
}
+if [ "x${proxy_url}" != "x" ];then
+ # Configure subscription-manager to use the proxy
+ subscription-manager config ${config_opts}
+
+ # Configure yum to use the proxy
+ sed -i -e '/^proxy=/d' /etc/yum.conf
+ echo "proxy=${proxy_url}" >> /etc/yum.conf
+
+ # Handle optional username/password
+ if [ -n "${proxy_username}" ]; then
+ sed -i -e '/^proxy_username=/d' /etc/yum.conf
+ echo "proxy_username=${proxy_username}" >> /etc/yum.conf
+ fi
+
+ if [ -n "${proxy_password}" ]; then
+ sed -i -e '/^proxy_password=/d' /etc/yum.conf
+ echo "proxy_password=${proxy_password}" >> /etc/yum.conf
+ fi
+
+fi
+
case "${REG_METHOD:-}" in
portal)
retry subscription-manager register $opts
if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then
retry subscription-manager attach $attach_opts
fi
- retry subscription-manager repos --disable '*'
+ retry subscription-manager repos --disable='*'
retry subscription-manager $repos
;;
satellite)
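
Once the script has run with a proxy configured, its effect can be checked on the node. A hedged verification sketch (both commands are standard, but output formatting varies by subscription-manager and yum version):

    # Sketch: confirm the proxy settings written by the registration script.
    grep '^proxy' /etc/yum.conf
    subscription-manager config --list | grep -i proxy
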
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
index 6bfe1239..4b323854 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
@@ -41,7 +41,7 @@ done
# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
#
# The default is to determine automatically if upgrade is needed based
-# on mysql package versionning, but this can be overriden manually
+# on mysql package versioning, but this can be overridden manually
# to support specific upgrade scenario
# Calling this function will set the DO_MYSQL_UPGRADE variable which is used
@@ -50,6 +50,7 @@ mysql_need_update
if [[ -n $(is_bootstrap_node) ]]; then
if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
fi
@@ -108,7 +109,7 @@ yum -y -q update
# We need to ensure at least those two configuration settings, otherwise
# mariadb 10.1+ won't activate galera replication.
# wsrep_cluster_address must only be set though, its value does not
-# matter because it's overriden by the galera resource agent.
+# matter because it's overridden by the galera resource agent.
cat >> /etc/my.cnf.d/galera.cnf <<EOF
[mysqld]
wsrep_on = ON
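
Should the upgrade need to be rolled back, the dump taken above is the recovery point. A hedged restore sketch (assumes MYSQL_BACKUP_DIR matches the directory used during the upgrade, and reuses the same credentials file as the backup):

    # Sketch: restore the pre-upgrade database dump on the bootstrap node.
    mysql --defaults-extra-file=/root/.my.cnf -u root \
        < "$MYSQL_BACKUP_DIR/openstack_database.sql"
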
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index 8c91027d..74d3be71 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -18,10 +18,6 @@ parameters:
constraints:
- allowed_values: ['auto', 'yes', 'no']
default: 'auto'
- IgnoreCephUpgradeWarnings:
- type: boolean
- default: false
- description: If enabled, Ceph upgrade will be forced even though cluster or PGs status is not clean
KeepSaharaServicesOnUpgrade:
type: boolean
default: true
diff --git a/extraconfig/tasks/run_puppet.sh b/extraconfig/tasks/run_puppet.sh
new file mode 100755
index 00000000..b7771e33
--- /dev/null
+++ b/extraconfig/tasks/run_puppet.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+function run_puppet {
+ set -eux
+ local manifest="$1"
+ local role="$2"
+ local step="$3"
+ local rc=0
+
+ export FACTER_deploy_config_name="${role}Deployment_Step${step}"
+ if [ -e "/etc/puppet/hieradata/heat_config_${FACTER_deploy_config_name}.json" ]; then
+ set +e
+ puppet apply --detailed-exitcodes "${manifest}"
+ rc=$?
+ echo "puppet apply exited with exit code $rc"
+ else
+ echo "Step${step} doesn't exist for ${role}"
+ fi
+ set -e
+
+ if [ $rc -eq 2 -o $rc -eq 0 ]; then
+ set +xu
+ return 0
+ fi
+ set +xu
+ return $rc
+}
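
This helper is injected into the node upgrade script via `declare -f` (see tripleo_upgrade_node.sh below). A standalone usage sketch (role name, manifest path, and step number are illustrative):

    # Sketch: run one converge step by hand on a node.
    source extraconfig/tasks/run_puppet.sh
    run_puppet /root/Controller_puppet_config.pp Controller 1 \
        || echo "puppet converge step failed"
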
diff --git a/extraconfig/tasks/swift-ring-deploy.yaml b/extraconfig/tasks/swift-ring-deploy.yaml
new file mode 100644
index 00000000..d17f78ae
--- /dev/null
+++ b/extraconfig/tasks/swift-ring-deploy.yaml
@@ -0,0 +1,31 @@
+heat_template_version: ocata
+
+parameters:
+ servers:
+ type: json
+ SwiftRingGetTempurl:
+ default: ''
+ description: A temporary Swift URL to download rings from.
+ type: string
+
+resources:
+ SwiftRingDeployConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: swift_ring_get_tempurl
+ config: |
+ #!/bin/sh
+ pushd /
+ curl --insecure --silent "${swift_ring_get_tempurl}" | tar xz || true
+ popd
+
+ SwiftRingDeploy:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ name: SwiftRingDeploy
+ config: {get_resource: SwiftRingDeployConfig}
+ servers: {get_param: servers}
+ input_values:
+ swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl}
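
The `SwiftRingGetTempurl` value is expected to be a pre-signed Swift temporary URL. A hedged sketch of how such a URL might be produced out of band (account path, container, object name, and key are illustrative; `swift tempurl` prints only the signed path, which must be prefixed with the cluster endpoint):

    # Sketch: generate a GET tempurl for a rings tarball stored in Swift.
    KEY=$(openssl rand -hex 16)
    swift post -m "Temp-URL-Key:${KEY}"
    swift tempurl GET 3600 \
        /v1/AUTH_admin/overcloud-swift-rings/swift-rings.tar.gz "${KEY}"
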
diff --git a/extraconfig/tasks/swift-ring-update.yaml b/extraconfig/tasks/swift-ring-update.yaml
new file mode 100644
index 00000000..440c6883
--- /dev/null
+++ b/extraconfig/tasks/swift-ring-update.yaml
@@ -0,0 +1,42 @@
+heat_template_version: ocata
+
+parameters:
+ servers:
+ type: json
+ SwiftRingPutTempurl:
+ default: ''
+ description: A temporary Swift URL to upload rings to.
+ type: string
+
+resources:
+ SwiftRingUpdateConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: swift_ring_put_tempurl
+ config: |
+ #!/bin/sh
+ TMP_DATA=$(mktemp -d)
+ function cleanup {
+ rm -Rf "$TMP_DATA"
+ }
+ trap cleanup EXIT
+ # sanity check in case rings are not consistent within cluster
+ swift-recon --md5 | grep -q "doesn't match" && exit 1
+ pushd ${TMP_DATA}
+ tar -cvzf swift-rings.tar.gz /etc/swift/*.builder /etc/swift/*.ring.gz /etc/swift/backups/*
+ resp=`curl --insecure --silent -X PUT "${swift_ring_put_tempurl}" --write-out "%{http_code}" --data-binary @swift-rings.tar.gz`
+ popd
+ if [ "$resp" != "201" ]; then
+ exit 1
+ fi
+
+ SwiftRingUpdate:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ name: SwiftRingUpdate
+ config: {get_resource: SwiftRingUpdateConfig}
+ servers: {get_param: servers}
+ input_values:
+ swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl}
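
The `swift-recon --md5` guard above refuses to upload rings while nodes disagree. The same check can be run interactively before an update (a sketch; the grep string matches swift-recon's mismatch report):

    # Sketch: verify ring consistency across the cluster by hand.
    if swift-recon --md5 | grep -q "doesn't match"; then
        echo "ring md5 mismatch detected; fix rings before updating"
    fi
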
diff --git a/extraconfig/tasks/tripleo_upgrade_node.sh b/extraconfig/tasks/tripleo_upgrade_node.sh
index 27ba33a8..24211ab0 100644
--- a/extraconfig/tasks/tripleo_upgrade_node.sh
+++ b/extraconfig/tasks/tripleo_upgrade_node.sh
@@ -15,9 +15,13 @@ cat > $UPGRADE_SCRIPT << ENDOFCAT
set -eu
NOVA_COMPUTE=""
-if systemctl show 'openstack-nova-compute' --property ActiveState | grep '\bactive\b'; then
+if hiera -c /etc/puppet/hiera.yaml service_names | grep nova_compute ; then
NOVA_COMPUTE="true"
fi
+SWIFT_STORAGE=""
+if hiera -c /etc/puppet/hiera.yaml service_names | grep swift_storage ; then
+ SWIFT_STORAGE="true"
+fi
DEBUG="true"
SCRIPT_NAME=$(basename $0)
@@ -30,22 +34,33 @@ if [[ -n \$NOVA_COMPUTE ]]; then
crudini --set /etc/nova/nova.conf upgrade_levels compute auto
fi
-$(declare -f special_case_ovs_upgrade_if_needed)
-special_case_ovs_upgrade_if_needed
+if [[ -n \$SWIFT_STORAGE ]]; then
+ systemctl_swift stop
+fi
-yum -y install python-zaqarclient # needed for os-collect-config
-systemctl_swift stop
yum -y update
-systemctl_swift start
+if [[ -n \$SWIFT_STORAGE ]]; then
+ systemctl_swift start
+fi
# Due to bug#1640177 we need to restart compute agent
if [[ -n \$NOVA_COMPUTE ]]; then
- echo "Restarting openstack ceilometer agent compute"
+ log_debug "Restarting openstack ceilometer agent compute"
systemctl restart openstack-ceilometer-compute
fi
-# Apply puppet manifest to converge just right after the \$ROLE upgrade
-puppet apply /root/${ROLE}_puppet_config.pp
+# Apply puppet manifest to converge just right after the ${ROLE} upgrade
+$(declare -f run_puppet)
+for step in 1 2 3 4 5 6; do
+ log_debug "Running puppet step \$step for ${ROLE}"
+ if ! run_puppet /root/${ROLE}_puppet_config.pp ${ROLE} \${step}; then
+ log_debug "Puppet failure at step \${step}"
+ exit 1
+ fi
+ log_debug "Completed puppet step \$step"
+done
+
+log_debug "TripleO upgrade run completed."
ENDOFCAT
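
Both the nova-compute and swift-storage gates above rely on the role's composable service list. The lookup can be reproduced on any deployed node (a sketch; the hiera key and config path are the ones used in this diff):

    # Sketch: inspect which services hiera reports for this node's role.
    hiera -c /etc/puppet/hiera.yaml service_names
    hiera -c /etc/puppet/hiera.yaml service_names | grep -q swift_storage \
        && echo "node hosts swift storage services"
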
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index c66dd01f..4c87373e 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -47,7 +47,10 @@ if [[ "$list_updates" == "" ]]; then
exit 0
fi
-pacemaker_status=$(systemctl is-active pacemaker || :)
+pacemaker_status=""
+if hiera -c /etc/puppet/hiera.yaml service_names | grep -q pacemaker; then
+ pacemaker_status=$(systemctl is-active pacemaker)
+fi
# Fix the redis/rabbit resource start/stop timeouts. See https://bugs.launchpad.net/tripleo/+bug/1633455
# and https://bugs.launchpad.net/tripleo/+bug/1634851
@@ -67,9 +70,6 @@ if [[ "$pacemaker_status" == "active" && \
fi
fi
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-special_case_ovs_upgrade_if_needed
-
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Pacemaker running, stopping cluster node and doing full package update"
node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")