Diffstat (limited to 'extraconfig/tasks')
-rw-r--r-- | extraconfig/tasks/major_upgrade_block_storage.sh           |   8
-rw-r--r-- | extraconfig/tasks/major_upgrade_ceph_storage.sh            |  35
-rw-r--r-- | extraconfig/tasks/major_upgrade_compute.sh                 |  26
-rwxr-xr-x | extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh  |  55
-rwxr-xr-x | extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh  |  67
-rw-r--r-- | extraconfig/tasks/major_upgrade_object_storage.sh          |  39
-rw-r--r-- | extraconfig/tasks/major_upgrade_pacemaker.yaml             |  85
-rw-r--r-- | extraconfig/tasks/major_upgrade_pacemaker_init.yaml        | 128
-rw-r--r-- | extraconfig/tasks/major_upgrade_pacemaker_migrations.sh    |  14
-rw-r--r-- | extraconfig/tasks/noop.yaml                                |  16
-rwxr-xr-x | extraconfig/tasks/pacemaker_common_functions.sh            |  61
-rwxr-xr-x | extraconfig/tasks/pacemaker_resource_restart.sh            |  32
-rw-r--r-- | extraconfig/tasks/post_puppet_pacemaker.yaml               |   6
-rwxr-xr-x | extraconfig/tasks/yum_update.sh                            |   3
-rw-r--r-- | extraconfig/tasks/yum_update_noop.yaml                     |  29
15 files changed, 571 insertions(+), 33 deletions(-)
diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh new file mode 100644 index 00000000..07666245 --- /dev/null +++ b/extraconfig/tasks/major_upgrade_block_storage.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# +# This runs an upgrade of Cinder Block Storage nodes. +# +set -eu + +yum -y install python-zaqarclient # needed for os-collect-config +yum -y -q update diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh new file mode 100644 index 00000000..de42b16d --- /dev/null +++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# +# This delivers the ceph-storage upgrade script to be invoked as part of the tripleo +# major upgrade workflow. +# +set -eu + +UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh + +cat > $UPGRADE_SCRIPT << ENDOFCAT +### DO NOT MODIFY THIS FILE +### This file is automatically delivered to the ceph-storage nodes as part of the +### tripleo upgrades workflow + + +function systemctl_ceph { + action=\$1 + systemctl \$action ceph +} + +# "so that mirrors aren't rebalanced as if the OSD died" - gfidente +ceph osd set noout + +systemctl_ceph stop +yum -y install python-zaqarclient # needed for os-collect-config +yum -y update +systemctl_ceph start + +ceph osd unset noout + +ENDOFCAT + +# ensure the permissions are OK +chmod 0755 $UPGRADE_SCRIPT + diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh new file mode 100644 index 00000000..78628c8c --- /dev/null +++ b/extraconfig/tasks/major_upgrade_compute.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# +# This delivers the compute upgrade script to be invoked as part of the tripleo +# major upgrade workflow. +# +set -eu + +UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh + +cat > $UPGRADE_SCRIPT << ENDOFCAT +### DO NOT MODIFY THIS FILE +### This file is automatically delivered to the compute nodes as part of the +### tripleo upgrades workflow + +# pin nova to kilo (messaging +-1) for the nova-compute service + +crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute + +yum -y install python-zaqarclient # needed for os-collect-config +yum -y update + +ENDOFCAT + +# ensure the permissions are OK +chmod 0755 $UPGRADE_SCRIPT + diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh new file mode 100755 index 00000000..bf2ee330 --- /dev/null +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +set -eu + +cluster_sync_timeout=600 + +if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then + echo_error "ERROR: upgrade cannot start with some cluster nodes being offline" + exit 1 +fi + +if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then + pcs resource disable httpd + check_resource httpd stopped 1800 + if pcs status | grep openstack-keystone; then + pcs resource disable openstack-keystone + check_resource openstack-keystone stopped 1800 + fi + pcs resource disable redis + check_resource redis stopped 600 + pcs resource disable mongod + check_resource mongod stopped 600 + pcs resource disable rabbitmq + check_resource rabbitmq stopped 600 + pcs resource disable memcached + check_resource memcached stopped 600 + pcs resource disable galera + check_resource galera stopped 600 + pcs cluster stop --all +fi + +# Swift isn't controled by pacemaker +systemctl_swift stop + 
+tstart=$(date +%s) +while systemctl is-active pacemaker; do + sleep 5 + tnow=$(date +%s) + if (( tnow-tstart > cluster_sync_timeout )) ; then + echo_error "ERROR: cluster shutdown timed out" + exit 1 + fi +done + +yum -y install python-zaqarclient # needed for os-collect-config +yum -y -q update + +# Pin messages sent to compute nodes to kilo, these will be upgraded later +crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute" +# https://bugzilla.redhat.com/show_bug.cgi?id=1284047 +# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435 +crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit +# https://bugzilla.redhat.com/show_bug.cgi?id=1284058 +# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists +crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server" diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh new file mode 100755 index 00000000..10bea573 --- /dev/null +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +set -eu + +cluster_form_timeout=600 +cluster_settle_timeout=600 +galera_sync_timeout=600 + +if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then + pcs cluster start --all + + tstart=$(date +%s) + while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do + sleep 5 + tnow=$(date +%s) + if (( tnow-tstart > cluster_form_timeout )) ; then + echo_error "ERROR: timed out forming the cluster" + exit 1 + fi + done + + if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then + echo_error "ERROR: timed out waiting for cluster to finish transition" + exit 1 + fi + + pcs resource enable galera + check_resource galera started 600 + pcs resource enable mongod + check_resource mongod started 600 + + tstart=$(date +%s) + while ! clustercheck; do + sleep 5 + tnow=$(date +%s) + if (( tnow-tstart > galera_sync_timeout )) ; then + echo_error "ERROR galera sync timed out" + exit 1 + fi + done + + # Run all the db syncs + # TODO: check if this can be triggered in puppet and removed from here + ceilometer-dbsync --config-file=/etc/ceilometer/ceilometer.conf + cinder-manage db sync + glance-manage --config-file=/etc/glance/glance-registry.conf db_sync + heat-manage --config-file /etc/heat/heat.conf db_sync + keystone-manage db_sync + neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head + nova-manage db sync + + pcs resource enable memcached + check_resource memcached started 600 + pcs resource enable rabbitmq + check_resource rabbitmq started 600 + pcs resource enable redis + check_resource redis started 600 + if pcs status | grep openstack-keystone; then + pcs resource enable openstack-keystone + check_resource openstack-keystone started 1800 + fi + pcs resource enable httpd + check_resource httpd started 1800 +fi + +# Swift isn't controled by heat +systemctl_swift start diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh new file mode 100644 index 00000000..931f4f42 --- /dev/null +++ b/extraconfig/tasks/major_upgrade_object_storage.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# +# This delivers the swift-storage upgrade script to be invoked as part of the tripleo +# major upgrade workflow. 
+# +set -eu + +UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh + +cat > $UPGRADE_SCRIPT << ENDOFCAT +### DO NOT MODIFY THIS FILE +### This file is automatically delivered to the swift-storage nodes as part of the +### tripleo upgrades workflow + + +function systemctl_swift { + action=\$1 + for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \ + openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \ + openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object; do + systemctl \$action \$S + done +} + + +systemctl_swift stop + +yum -y install python-zaqarclient # needed for os-collect-config +yum -y update + +systemctl_swift start + + + +ENDOFCAT + +# ensure the permissions are OK +chmod 0755 $UPGRADE_SCRIPT + diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml new file mode 100644 index 00000000..4af3186c --- /dev/null +++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml @@ -0,0 +1,85 @@ +heat_template_version: 2014-10-16 +description: 'Upgrade for Pacemaker deployments' + +parameters: + controller_servers: + type: json + compute_servers: + type: json + blockstorage_servers: + type: json + objectstorage_servers: + type: json + cephstorage_servers: + type: json + input_values: + type: json + description: input values for the software deployments + + UpgradeLevelNovaCompute: + type: string + description: Nova Compute upgrade level + default: '' + +resources: + # TODO(jistr): for Mitaka->Newton upgrades and further we can use + # map_merge with input_values instead of feeding params into scripts + # via str_replace on bash snippets + + ControllerPacemakerUpgradeConfig_Step1: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - str_replace: + template: | + #!/bin/bash + upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' + params: + UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} + - get_file: pacemaker_common_functions.sh + - get_file: major_upgrade_pacemaker_migrations.sh + - get_file: major_upgrade_controller_pacemaker_1.sh + + ControllerPacemakerUpgradeDeployment_Step1: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: controller_servers} + config: {get_resource: ControllerPacemakerUpgradeConfig_Step1} + input_values: {get_param: input_values} + + BlockStorageUpgradeConfig: + type: OS::Heat::SoftwareConfig + depends_on: ControllerPacemakerUpgradeDeployment_Step1 + properties: + group: script + config: {get_file: major_upgrade_block_storage.sh} + + BlockStorageUpgradeDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: blockstorage_servers} + config: {get_resource: BlockStorageUpgradeConfig} + input_values: {get_param: input_values} + + ControllerPacemakerUpgradeConfig_Step2: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - get_file: pacemaker_common_functions.sh + - get_file: major_upgrade_pacemaker_migrations.sh + - get_file: major_upgrade_controller_pacemaker_2.sh + + ControllerPacemakerUpgradeDeployment_Step2: + type: OS::Heat::SoftwareDeploymentGroup + depends_on: BlockStorageUpgradeDeployment + properties: + servers: {get_param: controller_servers} + config: {get_resource: ControllerPacemakerUpgradeConfig_Step2} + input_values: {get_param: 
input_values} + diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.yaml new file mode 100644 index 00000000..f662bc3d --- /dev/null +++ b/extraconfig/tasks/major_upgrade_pacemaker_init.yaml @@ -0,0 +1,128 @@ +heat_template_version: 2014-10-16 +description: 'Upgrade for Pacemaker deployments' + +parameters: + + controller_servers: + type: json + compute_servers: + type: json + blockstorage_servers: + type: json + objectstorage_servers: + type: json + cephstorage_servers: + type: json + input_values: + type: json + description: input values for the software deployments + + UpgradeInitCommand: + type: string + description: | + Command or script snippet to run on all overcloud nodes to + initialize the upgrade process. E.g. a repository switch. + default: '' + UpgradeLevelNovaCompute: + type: string + description: Nova Compute upgrade level + default: '' + +resources: + + UpgradeInitConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - "#!/bin/bash\n\n" + - get_param: UpgradeInitCommand + + UpgradeInitControllerDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: controller_servers} + config: {get_resource: UpgradeInitConfig} + input_values: {get_param: input_values} + + UpgradeInitComputeDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: compute_servers} + config: {get_resource: UpgradeInitConfig} + input_values: {get_param: input_values} + + UpgradeInitBlockStorageDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: blockstorage_servers} + config: {get_resource: UpgradeInitConfig} + input_values: {get_param: input_values} + + UpgradeInitObjectStorageDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: objectstorage_servers} + config: {get_resource: UpgradeInitConfig} + input_values: {get_param: input_values} + + UpgradeInitCephStorageDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: cephstorage_servers} + config: {get_resource: UpgradeInitConfig} + input_values: {get_param: input_values} + + # TODO(jistr): for Mitaka->Newton upgrades and further we can use + # map_merge with input_values instead of feeding params into scripts + # via str_replace on bash snippets + + ComputeDeliverUpgradeScriptConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - str_replace: + template: | + #!/bin/bash + upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' + params: + UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} + - get_file: major_upgrade_compute.sh + + ComputeDeliverUpgradeScriptDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: compute_servers} + config: {get_resource: ComputeDeliverUpgradeScriptConfig} + input_values: {get_param: input_values} + + ObjectStorageDeliverUpgradeScriptConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: major_upgrade_object_storage.sh} + + ObjectStorageDeliverUpgradeScriptDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: objectstorage_servers} + config: {get_resource: ObjectStorageDeliverUpgradeScriptConfig} + input_values: {get_param: input_values} + + CephStorageDeliverUpgradeScriptConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: 
major_upgrade_ceph_storage.sh} + + CephStorageDeliverUpgradeScriptDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: cephstorage_servers} + config: {get_resource: CephStorageDeliverUpgradeScriptConfig} + input_values: {get_param: input_values} diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh new file mode 100644 index 00000000..7fd26945 --- /dev/null +++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Special pieces of upgrade migration logic go into this +# file. E.g. Pacemaker cluster transitions for existing deployments, +# matching changes to overcloud_controller_pacemaker.pp (Puppet +# handles deployment, this file handles migrations). +# +# This file shouldn't execute any action on its own, all logic should +# be wrapped into bash functions. Upgrade scripts will source this +# file and call the functions defined in this file where appropriate. +# +# The migration functions should be idempotent. If the migration has +# been already applied, it should be possible to call the function +# again without damaging the deployment or failing the upgrade. diff --git a/extraconfig/tasks/noop.yaml b/extraconfig/tasks/noop.yaml index 0cff7469..dbb863be 100644 --- a/extraconfig/tasks/noop.yaml +++ b/extraconfig/tasks/noop.yaml @@ -4,6 +4,22 @@ description: 'No-op task' parameters: servers: type: json + default: [] + controller_servers: + type: json + default: [] + compute_servers: + type: json + default: [] + blockstorage_servers: + type: json + default: [] + objectstorage_servers: + type: json + default: [] + cephstorage_servers: + type: json + default: [] input_values: type: json default: {} diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh new file mode 100755 index 00000000..0808763e --- /dev/null +++ b/extraconfig/tasks/pacemaker_common_functions.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +set -eu + +function check_resource { + + if [ "$#" -ne 3 ]; then + echo_error "ERROR: check_resource function expects 3 parameters, $# given" + exit 1 + fi + + service=$1 + state=$2 + timeout=$3 + + if [ "$state" = "stopped" ]; then + match_for_incomplete='Started' + else # started + match_for_incomplete='Stopped' + fi + + if timeout -k 10 $timeout crm_resource --wait; then + node_states=$(pcs status --full | grep "$service" | grep -v Clone) + if echo "$node_states" | grep -q "$match_for_incomplete"; then + echo_error "ERROR: cluster finished transition but $service was not in $state state, exiting." + exit 1 + else + echo "$service has $state" + fi + else + echo_error "ERROR: cluster remained unstable for more than $timeout seconds, exiting." 
+ exit 1 + fi + +} + +function echo_error { + echo "$@" | tee /dev/fd2 +} + +function systemctl_swift { + services=( openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \ + openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \ + openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy ) + action=$1 + case $action in + stop) + services=$(systemctl | grep swift | grep running | awk '{print $1}') + ;; + start) + enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml 'enable_swift_storage') + if [[ $enable_swift_storage != "true" ]]; then + services=( openstack-swift-proxy ) + fi + ;; + *) services=() ;; # for safetly, should never happen + esac + for S in ${services[@]}; do + systemctl $action $S + done +} diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh index 12201097..b2bdc55a 100755 --- a/extraconfig/tasks/pacemaker_resource_restart.sh +++ b/extraconfig/tasks/pacemaker_resource_restart.sh @@ -3,38 +3,6 @@ set -eux pacemaker_status=$(systemctl is-active pacemaker) -check_interval=3 - -function check_resource { - - service=$1 - state=$2 - timeout=$3 - tstart=$(date +%s) - tend=$(( $tstart + $timeout )) - - if [ "$state" = "stopped" ]; then - match_for_incomplete='Started' - else # started - match_for_incomplete='Stopped' - fi - - while (( $(date +%s) < $tend )); do - node_states=$(pcs status --full | grep "$service" | grep -v Clone) - if echo "$node_states" | grep -q "$match_for_incomplete"; then - echo "$service not yet $state, sleeping $check_interval seconds." - sleep $check_interval - else - echo "$service has $state" - timeout -k 10 $timeout crm_resource --wait - return - fi - done - - echo "$service never $state after $timeout seconds" | tee /dev/fd/2 - exit 1 - -} # Run if pacemaker is running, we're the bootstrap node, # and we're updating the deployment (not creating). diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.yaml index 7de41d94..fbed9ce5 100644 --- a/extraconfig/tasks/post_puppet_pacemaker.yaml +++ b/extraconfig/tasks/post_puppet_pacemaker.yaml @@ -33,7 +33,11 @@ resources: type: OS::Heat::SoftwareConfig properties: group: script - config: {get_file: pacemaker_resource_restart.sh} + config: + list_join: + - '' + - - get_file: pacemaker_common_functions.sh + - get_file: pacemaker_resource_restart.sh ControllerPostPuppetRestartDeployment: type: OS::Heat::SoftwareDeployments diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh index 869b1a42..59e4be45 100755 --- a/extraconfig/tasks/yum_update.sh +++ b/extraconfig/tasks/yum_update.sh @@ -128,6 +128,9 @@ openstack-nova-scheduler" # mongod start timeout is higher, setting only stop timeout pcs -f $pacemaker_dumpfile resource update mongod op start timeout=370s op stop timeout=200s + echo "Making sure rabbitmq has the notify=true meta parameter" + pcs -f $pacemaker_dumpfile resource update rabbitmq meta notify=true + echo "Applying new Pacemaker config" if ! 
pcs cluster cib-push $pacemaker_dumpfile; then echo "ERROR failed to apply new pacemaker config" diff --git a/extraconfig/tasks/yum_update_noop.yaml b/extraconfig/tasks/yum_update_noop.yaml new file mode 100644 index 00000000..b759d9c5 --- /dev/null +++ b/extraconfig/tasks/yum_update_noop.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2014-10-16 +description: 'No-op yum update task' + +resources: + + config: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: | + #!/bin/bash + echo -n "false" > $heat_outputs_path.update_managed_packages + inputs: + - name: update_identifier + description: yum will only run for previously unused values of update_identifier + default: '' + - name: command + description: yum sub-command to run, defaults to "update" + default: update + - name: command_arguments + description: yum command arguments, defaults to "" + default: '' + outputs: + - name: update_managed_packages + description: boolean value indicating whether to upgrade managed packages + +outputs: + OS::stack_id: + value: {get_resource: config} |
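
A note on the heredocs used by major_upgrade_compute.sh, major_upgrade_ceph_storage.sh and major_upgrade_object_storage.sh: variables written as \$1 or \$action are escaped so they survive into the delivered /root/tripleo_upgrade_node.sh and only expand when that script later runs on the node, while $upgrade_level_nova_compute is left unescaped so it is expanded at delivery time from the value Heat injects via str_replace. A minimal sketch of that distinction, using a stand-in variable and a hypothetical output path since the Heat-injected value isn't available outside the deployment:

#!/bin/bash
# Minimal sketch of the heredoc expansion behaviour relied on above.
# DELIVERY_TIME_VALUE stands in for the Heat-injected
# $upgrade_level_nova_compute; the output path is hypothetical.
set -eu

DELIVERY_TIME_VALUE='kilo'
OUT=/tmp/delivered_script_example.sh

cat > "$OUT" << ENDOFCAT
#!/bin/bash
# Unescaped: expanded now, while the wrapper delivers this script.
echo "value baked in at delivery time: $DELIVERY_TIME_VALUE"
# Escaped: stays a literal \$1 until the delivered script runs on the node.
echo "first argument seen at run time: \$1"
ENDOFCAT

chmod 0755 "$OUT"
"$OUT" some-argument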
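
The block-storage script runs directly through its SoftwareDeploymentGroup, but the compute, ceph-storage and swift-storage scripts are only written to /root/tripleo_upgrade_node.sh; something still has to execute them on each node once the controllers are done. A hedged sketch of a manual per-node invocation, assuming SSH access as heat-admin and a hypothetical node address (the supported entry point is the tripleoclient upgrade tooling, not this snippet):

#!/bin/bash
# Sketch only: run the delivered upgrade script on one non-controller node.
# The node address and SSH user are assumptions for illustration.
set -eu

NODE=${1:-overcloud-novacompute-0.localdomain}

ssh -tt "heat-admin@${NODE}" "sudo /root/tripleo_upgrade_node.sh"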
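
For reference, the crudini calls in major_upgrade_controller_pacemaker_1.sh and major_upgrade_compute.sh pin RPC messages to nova-compute at the old release level so upgraded controllers keep talking to not-yet-upgraded computes. A small sketch of what the pin looks like once applied, assuming "kilo" as the UpgradeLevelNovaCompute value (taken from the script comments, not from a real deployment):

#!/bin/bash
# Sketch: apply and verify the compute RPC pin (value assumed to be "kilo").
set -eu

crudini --set /etc/nova/nova.conf upgrade_levels compute kilo
crudini --get /etc/nova/nova.conf upgrade_levels compute   # prints: kilo

# Resulting /etc/nova/nova.conf fragment:
#   [upgrade_levels]
#   compute = kilo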