author     Steven Hardy <shardy@redhat.com>          2017-07-13 13:40:48 +0100
committer  Athlan-Guyot sofer <sathlang@redhat.com>  2017-08-11 17:25:02 +0000
commit     7f6305980df5427fd23c9c5b1b0307b1249945f6 (patch)
tree       af692718ef42b4fa6437b22f7d65fedb32d718a8 /common
parent     4e5ba442189d704deaaa1a922bbcaf5c28e9de40 (diff)
Consolidate puppet/docker deployments with one deploy steps workflow
If we consolidate these we can focus on one implementation (the new
ansible based one used for docker-steps).

Change-Id: Iec0ad2278d62040bf03613fc9556b199c6a80546
Depends-On: Ifa2afa915e0fee368fb2506c02de75bf5efe82d5
Diffstat (limited to 'common')
-rw-r--r--  common/deploy-steps-playbook.yaml    86
-rw-r--r--  common/deploy-steps.j2               296
-rw-r--r--  common/major_upgrade_steps.j2.yaml   225
-rw-r--r--  common/post-upgrade.j2.yaml            4
-rw-r--r--  common/post.j2.yaml                    1
5 files changed, 612 insertions, 0 deletions
diff --git a/common/deploy-steps-playbook.yaml b/common/deploy-steps-playbook.yaml
new file mode 100644
index 00000000..b884e0e7
--- /dev/null
+++ b/common/deploy-steps-playbook.yaml
@@ -0,0 +1,86 @@
+- hosts: localhost
+ connection: local
+ tasks:
+ #####################################################
+ # Per step puppet configuration of the baremetal host
+ #####################################################
+ - name: Write the config_step hieradata
+ copy: content="{{dict(step=step|int)|to_json}}" dest=/etc/puppet/hieradata/config_step.json force=true
+ - name: Run puppet host configuration for step {{step}}
+ command: >-
+ puppet apply
+ --modulepath=/etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
+ --logdest syslog --logdest console --color=false
+ /var/lib/tripleo-config/puppet_step_config.pp
+ changed_when: false
+ check_mode: no
+ register: outputs
+ failed_when: false
+ no_log: true
+ - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
+ when: outputs is defined
+ failed_when: outputs|failed
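+ # The pattern used above (register + failed_when: false + no_log, followed by a
+ # debug task that prints the combined stderr/stdout and re-checks outputs|failed)
+ # surfaces the full puppet output in the ansible log while still failing the
+ # play if the command itself failed.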
+ ######################################
+ # Generate config via docker-puppet.py
+ ######################################
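+ # docker-puppet.py reads /var/lib/docker-puppet/docker-puppet.json (written by
+ # the HostPrep tasks in deploy-steps.j2) and runs puppet inside containers to
+ # generate their config files; it only needs to run once, hence the step == "1"
+ # guard below.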
+ - name: Run docker-puppet tasks (generate config)
+ shell: python /var/lib/docker-puppet/docker-puppet.py
+ environment:
+ NET_HOST: 'true'
+ DEBUG: '{{docker_puppet_debug}}'
+ when: step == "1"
+ changed_when: false
+ check_mode: no
+ register: outputs
+ failed_when: false
+ no_log: true
+ - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
+ when: outputs is defined
+ failed_when: outputs|failed
+ ##################################################
+ # Per step starting of the containers using paunch
+ ##################################################
+ - name: Check if /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json exists
+ stat:
+ path: /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json
+ register: docker_config_json
+ # Note docker-puppet.py generates the hashed-*.json file, which is a copy of
+ # the *step_n.json with a hash of the generated external config added
+ # This acts as a salt to enable restarting the container if config changes
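+ # As an illustration only (hypothetical values), a hashed entry could look like
+ #   {"keystone": {"image": "...", "environment": ["TRIPLEO_CONFIG_HASH=0a1b2c..."]}}
+ # so a change in the generated config yields a different hash and paunch
+ # recreates the affected container on the next run.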
+ - name: Start containers for step {{step}}
+ command: >-
+ paunch --debug apply
+ --file /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json
+ --config-id tripleo_step{{step}} --managed-by tripleo-{{role_name}}
+ when: docker_config_json.stat.exists
+ changed_when: false
+ check_mode: no
+ register: outputs
+ failed_when: false
+ no_log: true
+ - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
+ when: outputs is defined
+ failed_when: outputs|failed
+ ########################################################
+ # Bootstrap tasks, only performed on bootstrap_server_id
+ ########################################################
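+ # bootstrap_server_id is passed in from deploy-steps.j2 as the server id of the
+ # first node of the primary role, so the tasks below run on exactly one node.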
+ - name: Check if /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json exists
+ stat:
+ path: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
+ register: docker_puppet_tasks_json
+ - name: Run docker-puppet tasks (bootstrap tasks)
+ shell: python /var/lib/docker-puppet/docker-puppet.py
+ environment:
+ CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
+ NET_HOST: "true"
+ NO_ARCHIVE: "true"
+ STEP: "{{step}}"
+ when: deploy_server_id == bootstrap_server_id and docker_puppet_tasks_json.stat.exists
+ changed_when: false
+ check_mode: no
+ register: outputs
+ failed_when: false
+ no_log: true
+ - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
+ when: outputs is defined
+ failed_when: outputs|failed
diff --git a/common/deploy-steps.j2 b/common/deploy-steps.j2
new file mode 100644
index 00000000..e5d7e98c
--- /dev/null
+++ b/common/deploy-steps.j2
@@ -0,0 +1,296 @@
+# certain initialization steps (run in a container) will occur
+# on the role marked as primary controller or the first role listed
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+ {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+ {%- set _ = primary_role.pop() -%}
+ {%- set _ = primary_role.append(role) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# primary role is: {{primary_role_name}}
+{% set deploy_steps_max = 6 -%}
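+# deploy_steps_max is used as an exclusive bound (range(1, deploy_steps_max)),
+# so deployment steps 1 through 5 are generated below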
+
+heat_template_version: pike
+
+description: >
+ Post-deploy configuration steps via puppet for all roles,
+ as defined in ../roles_data.yaml
+
+parameters:
+ servers:
+ type: json
+ description: Mapping of Role name (e.g. Controller) to a list of servers
+ stack_name:
+ type: string
+ description: Name of the topmost stack
+ role_data:
+ type: json
+ description: Mapping of Role name (e.g. Controller) to the per-role data
+ DeployIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting this to a unique value will re-run any deployment tasks which
+ perform configuration on a Heat stack-update.
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ DockerPuppetDebug:
+ type: string
+ default: ''
+ description: Set to True to enable debug logging with docker-puppet.py
+ ctlplane_service_ips:
+ type: json
+
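+# A WorkflowTasks_StepN workflow is only created when at least one role
+# provides a non-empty service_workflow_tasks entry for that step, per the
+# conditions below.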
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}_Enabled:
+ or:
+ {%- for role in roles %}
+ - not:
+ equals:
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+ - ''
+ - False
+ {%- endfor %}
+{% endfor %}
+
+resources:
+
+ RoleConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: ansible
+ options:
+ modulepath: /usr/share/ansible-modules
+ inputs:
+ - name: step
+ - name: role_name
+ - name: update_identifier
+ - name: bootstrap_server_id
+ - name: docker_puppet_debug
+ config: {get_file: deploy-steps-playbook.yaml}
+
+{%- for step in range(1, deploy_steps_max) %}
+# BEGIN service_workflow_tasks handling
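+# Each WorkflowTasks_StepN Mistral workflow gathers the per-role
+# service_workflow_tasks for this step: the yaql expression below drops roles
+# whose data is empty, selects each role's entry for the step, removes nulls
+# and flattens the result into a single task list.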
+ WorkflowTasks_Step{{step}}:
+ type: OS::Mistral::Workflow
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on:
+ {%- if step == 1 %}
+ {%- for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {%- endfor %}
+ {%- else %}
+ {%- for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {%- endfor %}
+ {%- endif %}
+ properties:
+ name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+ type: direct
+ tasks:
+ yaql:
+ expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+ data:
+ {%- for role in roles %}
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+ {%- endfor %}
+
+ WorkflowTasks_Step{{step}}_Execution:
+ type: OS::Mistral::ExternalResource
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on: WorkflowTasks_Step{{step}}
+ properties:
+ actions:
+ CREATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ role_merged_configs:
+ {%- for r in roles %}
+ {{r.name}}: {get_param: [role_data, {{r.name}}, merged_config_settings]}
+ {%- endfor %}
+ evaluate_env: false
+ UPDATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ role_merged_configs:
+ {%- for r in roles %}
+ {{r.name}}: {get_param: [role_data, {{r.name}}, merged_config_settings]}
+ {%- endfor %}
+ evaluate_env: false
+ always_update: true
+# END service_workflow_tasks handling
+{% endfor %}
+
+{% for role in roles %}
+ # Post deployment steps for all roles
+ # A single config is re-applied with an incrementing step number
+ # {{role.name}} Role steps
+ {{role.name}}ArtifactsConfig:
+ type: ../puppet/deploy-artifacts.yaml
+
+ {{role.name}}ArtifactsDeploy:
+ type: OS::Heat::StructuredDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ArtifactsConfig}
+
+ {{role.name}}HostPrepConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: ansible
+ options:
+ modulepath: /usr/share/ansible-modules
+ config:
+ str_replace:
+ template: _PLAYBOOK
+ params:
+ _PLAYBOOK:
+ - hosts: localhost
+ connection: local
+ vars:
+ puppet_config: {get_param: [role_data, {{role.name}}, puppet_config]}
+ docker_puppet_script: {get_file: ../docker/docker-puppet.py}
+ docker_puppet_tasks: {get_param: [role_data, {{role.name}}, docker_puppet_tasks]}
+ docker_startup_configs: {get_param: [role_data, {{role.name}}, docker_config]}
+ kolla_config: {get_param: [role_data, {{role.name}}, kolla_config]}
+ bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
+ puppet_step_config: {get_param: [role_data, {{role.name}}, step_config]}
+ tasks:
+ # Join host_prep_tasks with the other per-host configuration
+ yaql:
+ expression: $.data.host_prep_tasks + $.data.template_tasks
+ data:
+ host_prep_tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
+ template_tasks:
+{%- raw %}
+ # Write the manifest for baremetal puppet configuration
+ - name: Create /var/lib/tripleo-config directory
+ file: path=/var/lib/tripleo-config state=directory
+ - name: Write the puppet step_config manifest
+ copy: content="{{puppet_step_config}}" dest=/var/lib/tripleo-config/puppet_step_config.pp force=yes
+ # this creates a JSON config file for our docker-puppet.py script
+ - name: Create /var/lib/docker-puppet
+ file: path=/var/lib/docker-puppet state=directory
+ - name: Write docker-puppet-tasks json files
+ copy: content="{{puppet_config | to_json}}" dest=/var/lib/docker-puppet/docker-puppet.json force=yes
+ # FIXME: can we move docker-puppet somewhere so it's installed via a package?
+ - name: Write docker-puppet.py
+ copy: content="{{docker_puppet_script}}" dest=/var/lib/docker-puppet/docker-puppet.py force=yes
+ # Here we are dumping all the docker container startup configuration data
+ # so that we can have access to how they are started outside of heat
+ # and docker-cmd. This lets us create command line tools to test containers.
+ # FIXME do we need the docker-container-startup-configs.json or is the new per-step
+ # data consumed by paunch enough?
+ - name: Write docker-container-startup-configs
+ copy: content="{{docker_startup_configs | to_json}}" dest=/var/lib/docker-container-startup-configs.json force=yes
+ - name: Write per-step docker-container-startup-configs
+ copy: content="{{item.value|to_json}}" dest="/var/lib/tripleo-config/docker-container-startup-config-{{item.key}}.json" force=yes
+ with_dict: "{{docker_startup_configs}}"
+ - name: Create /var/lib/kolla/config_files directory
+ file: path=/var/lib/kolla/config_files state=directory
+ - name: Write kolla config json files
+ copy: content="{{item.value|to_json}}" dest="{{item.key}}" force=yes
+ with_dict: "{{kolla_config}}"
+ ########################################################
+ # Bootstrap tasks, only performed on bootstrap_server_id
+ ########################################################
+ - name: Clean /var/lib/docker-puppet/docker-puppet-tasks*.json files
+ file:
+ path: "{{item}}"
+ state: absent
+ with_fileglob:
+ - /var/lib/docker-puppet/docker-puppet-tasks*.json
+ when: deploy_server_id == bootstrap_server_id
+ - name: Write docker-puppet-tasks json files
+ copy: content="{{item.value|to_json}}" dest=/var/lib/docker-puppet/docker-puppet-tasks{{item.key.replace("step_", "")}}.json force=yes
+ with_dict: "{{docker_puppet_tasks}}"
+ when: deploy_server_id == bootstrap_server_id
+{%- endraw %}
+
+ {{role.name}}HostPrepDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}HostPrepConfig}
+
+ # BEGIN CONFIG STEPS
+
+ {{role.name}}PreConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PreConfig
+ depends_on: {{role.name}}HostPrepDeployment
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ {% for step in range(1, deploy_steps_max) %}
+ {{role.name}}Deployment_Step{{step}}:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ - WorkflowTasks_Step{{step}}_Execution
+ # TODO(gfidente): the following if/else condition
+ # replicates what is already defined for the
+ # WorkflowTasks_StepX resource and can be removed
+ # if https://bugs.launchpad.net/heat/+bug/1700569
+ # is fixed.
+ {%- if step == 1 %}
+ {%- for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {%- endfor %}
+ {%- else %}
+ {%- for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {%- endfor %}
+ {%- endif %}
+ properties:
+ name: {{role.name}}Deployment_Step{{step}}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: RoleConfig}
+ input_values:
+ step: {{step}}
+ role_name: {{role.name}}
+ update_identifier: {get_param: DeployIdentifier}
+ bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
+ docker_puppet_debug: {get_param: DockerPuppetDebug}
+ {% endfor %}
+ # END CONFIG STEPS
+
+ # Note, this should be the last step to execute configuration changes.
+ # Ensure that all {{role.name}}ExtraConfigPost steps are executed
+ # after all the previous deployment steps.
+ {{role.name}}ExtraConfigPost:
+ depends_on:
+ {%- for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ {%- endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+
+ # The {{role.name}}PostConfig steps are in charge of
+ # quiescing all services, i.e. in the Controller case,
+ # we should run a full service reload.
+ {{role.name}}PostConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PostConfig
+ depends_on:
+ {%- for dep in roles %}
+ - {{dep.name}}ExtraConfigPost
+ {%- endfor %}
+ properties:
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+
+{% endfor %}
diff --git a/common/major_upgrade_steps.j2.yaml b/common/major_upgrade_steps.j2.yaml
new file mode 100644
index 00000000..11113eec
--- /dev/null
+++ b/common/major_upgrade_steps.j2.yaml
@@ -0,0 +1,225 @@
+{% set enabled_roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% set batch_upgrade_steps_max = 3 -%}
+{% set upgrade_steps_max = 6 -%}
+{% set deliver_script = {'deliver': False} -%}
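+# deliver_script is a dict (not a scalar) so the role loops below can flag, via
+# .update(), that at least one role sets disable_upgrade_deployment; an ordinary
+# "set" inside a for loop would be discarded by Jinja2 block scoping.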
+heat_template_version: pike
+description: 'Upgrade steps for all roles'
+
+parameters:
+ servers:
+ type: json
+ stack_name:
+ type: string
+ description: Name of the topmost stack
+ role_data:
+ type: json
+ description: Mapping of Role name (e.g. Controller) to the per-role data
+ ctlplane_service_ips:
+ type: json
+ UpdateIdentifier:
+ type: string
+ default: ''
+ description: >
+ Setting to a previously unused value during stack-update will trigger
+ the Upgrade resources to re-run on all roles.
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ NovaPassword:
+ description: The password for the nova service and db account
+ type: string
+ hidden: true
+
+resources:
+
+{% for role in roles if role.disable_upgrade_deployment|default(false) %}
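+ # The config below concatenates a single bash upgrade script for the role:
+ # "#!/bin/bash", "set -eu", ROLE='<role name>' and then the three shell
+ # snippets pulled in with get_file; the deployment group that follows delivers
+ # it to the role's nodes.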
+ {{role.name}}DeliverUpgradeScriptConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "set -eu\n\n"
+ - str_replace:
+ template: |
+ ROLE='ROLE_NAME'
+ params:
+ ROLE_NAME: {{role.name}}
+ - get_file: ../extraconfig/tasks/pacemaker_common_functions.sh
+ - get_file: ../extraconfig/tasks/run_puppet.sh
+ - get_file: ../extraconfig/tasks/tripleo_upgrade_node.sh
+
+ {{role.name}}DeliverUpgradeScriptDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}DeliverUpgradeScriptConfig}
+{% endfor %}
+
+# Upgrade Steps for all roles, batched updates
+# The UpgradeConfig resources could actually be created without
+# serialization, but the event output is easier to follow if we
+# do, and there should be minimal performance hit (creating the
+# config is cheap compared to the time to apply the deployment).
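+# batch_upgrade_steps_max is 3, so batch steps 0..2 are generated; each enabled
+# role's batch deployment uses max_batch_size from role.upgrade_batch_size
+# (default 1), i.e. its nodes are upgraded a few at a time rather than all at once.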
+{% for step in range(0, batch_upgrade_steps_max) %}
+ # Batch config resources step {{step}}
+ {%- for role in roles %}
+ {{role.name}}UpgradeBatchConfig_Step{{step}}:
+ type: OS::TripleO::UpgradeConfig
+ {%- if step > 0 %}
+ depends_on:
+ {%- for role_inside in enabled_roles %}
+ - {{role_inside.name}}UpgradeBatch_Step{{step -1}}
+ {%- endfor %}
+ {% else %}
+ {% for role in roles if role.disable_upgrade_deployment|default(false) %}
+ {% if deliver_script.update({'deliver': True}) %} {% endif %}
+ {% endfor %}
+ {% if deliver_script.deliver %}
+ depends_on:
+ {% for dep in roles if dep.disable_upgrade_deployment|default(false) %}
+ - {{dep.name}}DeliverUpgradeScriptDeployment
+ {% endfor %}
+ {% endif %}
+ {% endif %}
+ properties:
+ UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
+ step: {{step}}
+ {%- endfor %}
+
+ # Batch deployment resources for step {{step}} (only for enabled roles)
+ {%- for role in enabled_roles %}
+ {{role.name}}UpgradeBatch_Step{{step}}:
+ type: OS::Heat::SoftwareDeploymentGroup
+ {%- if step > 0 %}
+ depends_on:
+ {%- for role_inside in enabled_roles %}
+ - {{role_inside.name}}UpgradeBatch_Step{{step -1}}
+ {%- endfor %}
+ {% else %}
+ {% for role in roles if role.disable_upgrade_deployment|default(false) %}
+ {% if deliver_script.update({'deliver': True}) %} {% endif %}
+ {% endfor %}
+ {% if deliver_script.deliver %}
+ depends_on:
+ {% for dep in roles if dep.disable_upgrade_deployment|default(false) %}
+ - {{dep.name}}DeliverUpgradeScriptDeployment
+ {% endfor %}
+ {% endif %}
+ {% endif %}
+ update_policy:
+ batch_create:
+ max_batch_size: {{role.upgrade_batch_size|default(1)}}
+ rolling_update:
+ max_batch_size: {{role.upgrade_batch_size|default(1)}}
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}UpgradeBatchConfig_Step{{step}}}
+ input_values:
+ role: {{role.name}}
+ update_identifier: {get_param: UpdateIdentifier}
+ {%- endfor %}
+{%- endfor %}
+
+# Dump the puppet manifests to be applied later when disable_upgrade_deployment
+# is set to true
+{% for role in roles if role.disable_upgrade_deployment|default(false) %}
+ {{role.name}}DeliverPuppetConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - str_replace:
+ template: |
+ #!/bin/bash
+ cat > /root/{{role.name}}_puppet_config.pp << ENDOFCAT
+ PUPPET_CLASSES
+ ENDOFCAT
+ params:
+ PUPPET_CLASSES: {get_param: [role_data, {{role.name}}, step_config]}
+
+ {{role.name}}DeliverPuppetDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}DeliverPuppetConfig}
+{% endfor %}
+
+# Upgrade Steps for all roles
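+# upgrade_steps_max is 6, so upgrade steps 0..5 are generated; step 0 of every
+# role depends on the final batch step (UpgradeBatch_Step2) of all enabled roles.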
+{%- for step in range(0, upgrade_steps_max) %}
+ # Config resources for step {{step}}
+ {%- for role in roles %}
+ {{role.name}}UpgradeConfig_Step{{step}}:
+ type: OS::TripleO::UpgradeConfig
+ # The UpgradeConfig resources could actually be created without
+ # serialization, but the event output is easier to follow if we
+ # do, and there should be minimal performance hit (creating the
+ # config is cheap compared to the time to apply the deployment).
+ depends_on:
+ {%- for role_inside in enabled_roles %}
+ {%- if step > 0 %}
+ - {{role_inside.name}}Upgrade_Step{{step -1}}
+ {%- else %}
+ - {{role_inside.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+ {%- endif %}
+ {%- endfor %}
+ properties:
+ UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]}
+ step: {{step}}
+ {%- endfor %}
+
+ # Deployment resources for step {{step}} (only for enabled roles)
+ {%- for role in enabled_roles %}
+ {{role.name}}Upgrade_Step{{step}}:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on:
+ {%- for role_inside in enabled_roles %}
+ {%- if step > 0 %}
+ - {{role_inside.name}}Upgrade_Step{{step -1}}
+ {%- else %}
+ - {{role_inside.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+ {%- endif %}
+ {%- endfor %}
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}UpgradeConfig_Step{{step}}}
+ input_values:
+ role: {{role.name}}
+ update_identifier: {get_param: UpdateIdentifier}
+ {%- endfor %}
+{%- endfor %}
+
+ # Post upgrade deployment steps for all roles
+ # This runs the normal configuration (e.g. puppet) steps unless upgrade
+ # is disabled for the role
+ AllNodesPostUpgradeSteps:
+ type: OS::TripleO::PostUpgradeSteps
+ depends_on:
+{%- for dep in enabled_roles %}
+ - {{dep.name}}Upgrade_Step{{upgrade_steps_max - 1}}
+{%- endfor %}
+ properties:
+ servers: {get_param: servers}
+ stack_name: {get_param: stack_name}
+ role_data: {get_param: role_data}
+ ctlplane_service_ips: {get_param: ctlplane_service_ips}
+
+outputs:
+ # Output the config for each role, just use Step1 as the config should be
+ # the same for all steps (only the tag provided differs)
+ upgrade_configs:
+ description: The per-role upgrade configuration used
+ value:
+{% for role in roles %}
+ {{role.name.lower()}}: {get_attr: [{{role.name}}UpgradeConfig_Step1, upgrade_config]}
+{% endfor %}
diff --git a/common/post-upgrade.j2.yaml b/common/post-upgrade.j2.yaml
new file mode 100644
index 00000000..7cd6abdf
--- /dev/null
+++ b/common/post-upgrade.j2.yaml
@@ -0,0 +1,4 @@
+# Note the include here is the same as post.j2.yaml but the data used at
+# the time of rendering is different if any roles disable upgrades
+{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% include 'deploy-steps.j2' %}
diff --git a/common/post.j2.yaml b/common/post.j2.yaml
new file mode 100644
index 00000000..8a70dfa9
--- /dev/null
+++ b/common/post.j2.yaml
@@ -0,0 +1 @@
+{% include 'deploy-steps.j2' %}