From 7f6305980df5427fd23c9c5b1b0307b1249945f6 Mon Sep 17 00:00:00 2001
From: Steven Hardy
Date: Thu, 13 Jul 2017 13:40:48 +0100
Subject: Consolidate puppet/docker deployments with one deploy steps workflow

If we consolidate these we can focus on one implementation (the new
ansible based one used for docker-steps)

Change-Id: Iec0ad2278d62040bf03613fc9556b199c6a80546
Depends-On: Ifa2afa915e0fee368fb2506c02de75bf5efe82d5
---
 docker/deploy-steps-playbook.yaml |  86 -----------
 docker/docker-steps.j2            | 296 --------------------------------------
 docker/post-upgrade.j2.yaml       |   4 -
 docker/post.j2.yaml               |   1 -
 4 files changed, 387 deletions(-)
 delete mode 100644 docker/deploy-steps-playbook.yaml
 delete mode 100644 docker/docker-steps.j2
 delete mode 100644 docker/post-upgrade.j2.yaml
 delete mode 100644 docker/post.j2.yaml

(limited to 'docker')

diff --git a/docker/deploy-steps-playbook.yaml b/docker/deploy-steps-playbook.yaml
deleted file mode 100644
index b884e0e7..00000000
--- a/docker/deploy-steps-playbook.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-- hosts: localhost
-  connection: local
-  tasks:
-    #####################################################
-    # Per step puppet configuration of the baremetal host
-    #####################################################
-    - name: Write the config_step hieradata
-      copy: content="{{dict(step=step|int)|to_json}}" dest=/etc/puppet/hieradata/config_step.json force=true
-    - name: Run puppet host configuration for step {{step}}
-      command: >-
-        puppet apply
-        --modulepath=/etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
-        --logdest syslog --logdest console --color=false
-        /var/lib/tripleo-config/puppet_step_config.pp
-      changed_when: false
-      check_mode: no
-      register: outputs
-      failed_when: false
-      no_log: true
-    - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
-      when: outputs is defined
-      failed_when: outputs|failed
-    ######################################
-    # Generate config via docker-puppet.py
-    ######################################
-    - name: Run docker-puppet tasks (generate config)
-      shell: python /var/lib/docker-puppet/docker-puppet.py
-      environment:
-        NET_HOST: 'true'
-        DEBUG: '{{docker_puppet_debug}}'
-      when: step == "1"
-      changed_when: false
-      check_mode: no
-      register: outputs
-      failed_when: false
-      no_log: true
-    - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
-      when: outputs is defined
-      failed_when: outputs|failed
-    ##################################################
-    # Per step starting of the containers using paunch
-    ##################################################
-    - name: Check if /var/lib/hashed-tripleo-config/docker-container-startup-config-step_{{step}}.json exists
-      stat:
-        path: /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json
-      register: docker_config_json
-    # Note docker-puppet.py generates the hashed-*.json file, which is a copy of
-    # the *step_n.json with a hash of the generated external config added
-    # This acts as a salt to enable restarting the container if config changes
-    - name: Start containers for step {{step}}
-      command: >-
-        paunch --debug apply
-        --file /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json
-        --config-id tripleo_step{{step}} --managed-by tripleo-{{role_name}}
-      when: docker_config_json.stat.exists
-      changed_when: false
-      check_mode: no
-      register: outputs
-      failed_when: false
-      no_log: true
-    - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
-      when: outputs is defined
-      failed_when: outputs|failed
-    ########################################################
-    # Bootstrap tasks, only performed on bootstrap_server_id
-    ########################################################
-    - name: Check if /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json exists
-      stat:
-        path: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
-      register: docker_puppet_tasks_json
-    - name: Run docker-puppet tasks (bootstrap tasks)
-      shell: python /var/lib/docker-puppet/docker-puppet.py
-      environment:
-        CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
-        NET_HOST: "true"
-        NO_ARCHIVE: "true"
-        STEP: "{{step}}"
-      when: deploy_server_id == bootstrap_server_id and docker_puppet_tasks_json.stat.exists
-      changed_when: false
-      check_mode: no
-      register: outputs
-      failed_when: false
-      no_log: true
-    - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
-      when: outputs is defined
-      failed_when: outputs|failed
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
deleted file mode 100644
index 05ff7945..00000000
--- a/docker/docker-steps.j2
+++ /dev/null
@@ -1,296 +0,0 @@
-# certain initialization steps (run in a container) will occur
-# on the role marked as primary controller or the first role listed
-{%- set primary_role = [roles[0]] -%}
-{%- for role in roles -%}
-  {%- if 'primary' in role.tags and 'controller' in role.tags -%}
-    {%- set _ = primary_role.pop() -%}
-    {%- set _ = primary_role.append(role) -%}
-  {%- endif -%}
-{%- endfor -%}
-{%- set primary_role_name = primary_role[0].name -%}
-# primary role is: {{primary_role_name}}
-{% set deploy_steps_max = 6 -%}
-
-heat_template_version: pike
-
-description: >
-  Post-deploy configuration steps via puppet for all roles,
-  as defined in ../roles_data.yaml
-
-parameters:
-  servers:
-    type: json
-    description: Mapping of Role name e.g Controller to a list of servers
-  stack_name:
-    type: string
-    description: Name of the topmost stack
-  role_data:
-    type: json
-    description: Mapping of Role name e.g Controller to the per-role data
-  DeployIdentifier:
-    default: ''
-    type: string
-    description: >
-      Setting this to a unique value will re-run any deployment tasks which
-      perform configuration on a Heat stack-update.
-  EndpointMap:
-    default: {}
-    description: Mapping of service endpoint -> protocol. Typically set
-                 via parameter_defaults in the resource registry.
-    type: json
-  DockerPuppetDebug:
-    type: string
-    default: ''
-    description: Set to True to enable debug logging with docker-puppet.py
-  ctlplane_service_ips:
-    type: json
-
-conditions:
-{% for step in range(1, deploy_steps_max) %}
-  WorkflowTasks_Step{{step}}_Enabled:
-    or:
-    {%- for role in roles %}
-      - not:
-         equals:
-           - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
-           - ''
-      - False
-    {%- endfor %}
-{% endfor %}
-
-resources:
-
-  RoleConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ansible
-      options:
-        modulepath: /usr/share/ansible-modules
-      inputs:
-        - name: step
-        - name: role_name
-        - name: update_identifier
-        - name: bootstrap_server_id
-        - name: docker_puppet_debug
-      config: {get_file: deploy-steps-playbook.yaml}
-
-{%- for step in range(1, deploy_steps_max) %}
-# BEGIN service_workflow_tasks handling
-  WorkflowTasks_Step{{step}}:
-    type: OS::Mistral::Workflow
-    condition: WorkflowTasks_Step{{step}}_Enabled
-    depends_on:
-    {%- if step == 1 %}
-    {%- for dep in roles %}
-      - {{dep.name}}PreConfig
-      - {{dep.name}}ArtifactsDeploy
-    {%- endfor %}
-    {%- else %}
-    {%- for dep in roles %}
-      - {{dep.name}}Deployment_Step{{step -1}}
-    {%- endfor %}
-    {%- endif %}
-    properties:
-      name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
-      type: direct
-      tasks:
-        yaql:
-          expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
-          data:
-          {%- for role in roles %}
-            - get_param: [role_data, {{role.name}}, service_workflow_tasks]
-          {%- endfor %}
-
-  WorkflowTasks_Step{{step}}_Execution:
-    type: OS::Mistral::ExternalResource
-    condition: WorkflowTasks_Step{{step}}_Enabled
-    depends_on: WorkflowTasks_Step{{step}}
-    properties:
-      actions:
-        CREATE:
-          workflow: { get_resource: WorkflowTasks_Step{{step}} }
-          params:
-            env:
-              service_ips: { get_param: ctlplane_service_ips }
-              role_merged_configs:
-                {%- for r in roles %}
-                {{r.name}}: {get_param: [role_data, {{r.name}}, merged_config_settings]}
-                {%- endfor %}
-            evaluate_env: false
-        UPDATE:
-          workflow: { get_resource: WorkflowTasks_Step{{step}} }
-          params:
-            env:
-              service_ips: { get_param: ctlplane_service_ips }
-              role_merged_configs:
-                {%- for r in roles %}
-                {{r.name}}: {get_param: [role_data, {{r.name}}, merged_config_settings]}
-                {%- endfor %}
-            evaluate_env: false
-      always_update: true
-# END service_workflow_tasks handling
-{% endfor %}
-
-{% for role in roles %}
-  # Post deployment steps for all roles
-  # A single config is re-applied with an incrementing step number
-  # {{role.name}} Role steps
-  {{role.name}}ArtifactsConfig:
-    type: ../puppet/deploy-artifacts.yaml
-
-  {{role.name}}ArtifactsDeploy:
-    type: OS::Heat::StructuredDeploymentGroup
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}ArtifactsConfig}
-
-  {{role.name}}HostPrepConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ansible
-      options:
-        modulepath: /usr/share/ansible-modules
-      config:
-        str_replace:
-          template: _PLAYBOOK
-          params:
-            _PLAYBOOK:
-              - hosts: localhost
-                connection: local
-                vars:
-                  puppet_config: {get_param: [role_data, {{role.name}}, puppet_config]}
-                  docker_puppet_script: {get_file: docker-puppet.py}
-                  docker_puppet_tasks: {get_param: [role_data, {{role.name}}, docker_puppet_tasks]}
-                  docker_startup_configs: {get_param: [role_data, {{role.name}}, docker_config]}
-                  kolla_config: {get_param: [role_data, {{role.name}}, kolla_config]}
-                  bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
-                  puppet_step_config: {get_param: [role_data, {{role.name}}, step_config]}
-                tasks:
-                  # Join host_prep_tasks with the other per-host configuration
-                  yaql:
-                    expression: $.data.host_prep_tasks + $.data.template_tasks
-                    data:
-                      host_prep_tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
-                      template_tasks:
-{%- raw %}
-                        # Write the manifest for baremetal puppet configuration
-                        - name: Create /var/lib/tripleo-config directory
-                          file: path=/var/lib/tripleo-config state=directory
-                        - name: Write the puppet step_config manifest
-                          copy: content="{{puppet_step_config}}" dest=/var/lib/tripleo-config/puppet_step_config.pp force=yes
-                        # this creates a JSON config file for our docker-puppet.py script
-                        - name: Create /var/lib/docker-puppet
-                          file: path=/var/lib/docker-puppet state=directory
-                        - name: Write docker-puppet-tasks json files
-                          copy: content="{{puppet_config | to_json}}" dest=/var/lib/docker-puppet/docker-puppet.json force=yes
-                        # FIXME: can we move docker-puppet somewhere so it's installed via a package?
-                        - name: Write docker-puppet.py
-                          copy: content="{{docker_puppet_script}}" dest=/var/lib/docker-puppet/docker-puppet.py force=yes
-                        # Here we are dumping all the docker container startup configuration data
-                        # so that we can have access to how they are started outside of heat
-                        # and docker-cmd. This lets us create command line tools to test containers.
-                        # FIXME do we need the docker-container-startup-configs.json or is the new per-step
-                        # data consumed by paunch enough?
-                        - name: Write docker-container-startup-configs
-                          copy: content="{{docker_startup_configs | to_json}}" dest=/var/lib/docker-container-startup-configs.json force=yes
-                        - name: Write per-step docker-container-startup-configs
-                          copy: content="{{item.value|to_json}}" dest="/var/lib/tripleo-config/docker-container-startup-config-{{item.key}}.json" force=yes
-                          with_dict: "{{docker_startup_configs}}"
-                        - name: Create /var/lib/kolla/config_files directory
-                          file: path=/var/lib/kolla/config_files state=directory
-                        - name: Write kolla config json files
-                          copy: content="{{item.value|to_json}}" dest="{{item.key}}" force=yes
-                          with_dict: "{{kolla_config}}"
-                        ########################################################
-                        # Bootstrap tasks, only performed on bootstrap_server_id
-                        ########################################################
-                        - name: Clean /var/lib/docker-puppet/docker-puppet-tasks*.json files
-                          file:
-                            path: "{{item}}"
-                            state: absent
-                          with_fileglob:
-                            - /var/lib/docker-puppet/docker-puppet-tasks*.json
-                          when: deploy_server_id == bootstrap_server_id
-                        - name: Write docker-puppet-tasks json files
-                          copy: content="{{item.value|to_json}}" dest=/var/lib/docker-puppet/docker-puppet-tasks{{item.key.replace("step_", "")}}.json force=yes
-                          with_dict: "{{docker_puppet_tasks}}"
-                          when: deploy_server_id == bootstrap_server_id
-{%- endraw %}
-
-  {{role.name}}HostPrepDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}HostPrepConfig}
-
-  # BEGIN CONFIG STEPS
-
-  {{role.name}}PreConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PreConfig
-    depends_on: {{role.name}}HostPrepDeployment
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  {% for step in range(1, deploy_steps_max) %}
-  {{role.name}}Deployment_Step{{step}}:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - WorkflowTasks_Step{{step}}_Execution
-    # TODO(gfidente): the following if/else condition
-    # replicates what is already defined for the
-    # WorkflowTasks_StepX resource and can be remove
-    # if https://bugs.launchpad.net/heat/+bug/1700569
-    # is fixed.
-    {%- if step == 1 %}
-    {%- for dep in roles %}
-      - {{dep.name}}PreConfig
-      - {{dep.name}}ArtifactsDeploy
-    {%- endfor %}
-    {%- else %}
-    {%- for dep in roles %}
-      - {{dep.name}}Deployment_Step{{step -1}}
-    {%- endfor %}
-    {%- endif %}
-    properties:
-      name: {{role.name}}Deployment_Step{{step}}
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: RoleConfig}
-      input_values:
-        step: {{step}}
-        role_name: {{role.name}}
-        update_identifier: {get_param: DeployIdentifier}
-        bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
-        docker_puppet_debug: {get_param: DockerPuppetDebug}
-  {% endfor %}
-  # END CONFIG STEPS
-
-  # Note, this should be the last step to execute configuration changes.
-  # Ensure that all {{role.name}}ExtraConfigPost steps are executed
-  # after all the previous deployment steps.
-  {{role.name}}ExtraConfigPost:
-    depends_on:
-  {%- for dep in roles %}
-      - {{dep.name}}Deployment_Step5
-  {%- endfor %}
-    type: OS::TripleO::NodeExtraConfigPost
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-
-  # The {{role.name}}PostConfig steps are in charge of
-  # quiescing all services, i.e. in the Controller case,
-  # we should run a full service reload.
-  {{role.name}}PostConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PostConfig
-    depends_on:
-  {%- for dep in roles %}
-      - {{dep.name}}ExtraConfigPost
-  {%- endfor %}
-    properties:
-      servers: {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-
-{% endfor %}
diff --git a/docker/post-upgrade.j2.yaml b/docker/post-upgrade.j2.yaml
deleted file mode 100644
index 4477f868..00000000
--- a/docker/post-upgrade.j2.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-# Note the include here is the same as post.j2.yaml but the data used at
-# # the time of rendering is different if any roles disable upgrades
-{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
-{% include 'docker-steps.j2' %}
diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml
deleted file mode 100644
index fd956215..00000000
--- a/docker/post.j2.yaml
+++ /dev/null
@@ -1 +0,0 @@
-{% include 'docker-steps.j2' %}
--
cgit 1.2.3-korg
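
For context on the deleted templates above: deploy-steps-playbook.yaml was never run by hand; the RoleConfig resource in docker-steps.j2 wrapped it in a SoftwareConfig with group "ansible", and each {{role.name}}Deployment_Step{{step}} deployment supplied step, role_name, update_identifier, bootstrap_server_id and docker_puppet_debug as inputs, with deploy_server_id injected per server by the Heat deployment. A rough, hypothetical manual equivalent for one step on a node would look like the sketch below; the flags are standard ansible-playbook options, but the variable values and UUID placeholders are illustrative only and assume the /var/lib/tripleo-config and /var/lib/docker-puppet files have already been written by the HostPrep tasks.

    # Hypothetical manual run of the per-step playbook (values illustrative)
    ansible-playbook -i localhost, \
      -e step=1 \
      -e role_name=Controller \
      -e update_identifier=$(date +%s) \
      -e bootstrap_server_id=<server-uuid> \
      -e deploy_server_id=<server-uuid> \
      -e docker_puppet_debug=false \
      deploy-steps-playbook.yaml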