Diffstat (limited to 'docker')
-rw-r--r-- | docker/deploy-steps-playbook.yaml              |  86
-rw-r--r-- | docker/docker-steps.j2                         | 296
-rw-r--r-- | docker/post-upgrade.j2.yaml                    |   4
-rw-r--r-- | docker/post.j2.yaml                            |   1
-rw-r--r-- | docker/services/ceilometer-agent-central.yaml  |   2
-rw-r--r-- | docker/services/ceph-ansible/ceph-base.yaml    |   1
-rw-r--r-- | docker/services/database/mongodb.yaml          |   2
-rw-r--r-- | docker/services/gnocchi-api.yaml               |  16
-rw-r--r-- | docker/services/nova-api.yaml                  |  26
-rw-r--r-- | docker/services/nova-libvirt.yaml              |  56
-rw-r--r-- | docker/services/nova-placement.yaml            |  17
-rw-r--r-- | docker/services/pacemaker/database/mysql.yaml  |  23
12 files changed, 128 insertions, 402 deletions
diff --git a/docker/deploy-steps-playbook.yaml b/docker/deploy-steps-playbook.yaml
deleted file mode 100644
index b884e0e7..00000000
--- a/docker/deploy-steps-playbook.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-- hosts: localhost
-  connection: local
-  tasks:
-    #####################################################
-    # Per step puppet configuration of the baremetal host
-    #####################################################
-    - name: Write the config_step hieradata
-      copy: content="{{dict(step=step|int)|to_json}}" dest=/etc/puppet/hieradata/config_step.json force=true
-    - name: Run puppet host configuration for step {{step}}
-      command: >-
-        puppet apply
-        --modulepath=/etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
-        --logdest syslog --logdest console --color=false
-        /var/lib/tripleo-config/puppet_step_config.pp
-      changed_when: false
-      check_mode: no
-      register: outputs
-      failed_when: false
-      no_log: true
-    - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
-      when: outputs is defined
-      failed_when: outputs|failed
-    ######################################
-    # Generate config via docker-puppet.py
-    ######################################
-    - name: Run docker-puppet tasks (generate config)
-      shell: python /var/lib/docker-puppet/docker-puppet.py
-      environment:
-        NET_HOST: 'true'
-        DEBUG: '{{docker_puppet_debug}}'
-      when: step == "1"
-      changed_when: false
-      check_mode: no
-      register: outputs
-      failed_when: false
-      no_log: true
-    - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
-      when: outputs is defined
-      failed_when: outputs|failed
-    ##################################################
-    # Per step starting of the containers using paunch
-    ##################################################
-    - name: Check if /var/lib/hashed-tripleo-config/docker-container-startup-config-step_{{step}}.json exists
-      stat:
-        path: /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json
-      register: docker_config_json
-    # Note docker-puppet.py generates the hashed-*.json file, which is a copy of
-    # the *step_n.json with a hash of the generated external config added
-    # This acts as a salt to enable restarting the container if config changes
-    - name: Start containers for step {{step}}
-      command: >-
-        paunch --debug apply
-        --file /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json
-        --config-id tripleo_step{{step}} --managed-by tripleo-{{role_name}}
-      when: docker_config_json.stat.exists
-      changed_when: false
-      check_mode: no
-      register: outputs
-      failed_when: false
-      no_log: true
-    - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
-      when: outputs is defined
-      failed_when: outputs|failed
-    ########################################################
-    # Bootstrap tasks, only performed on bootstrap_server_id
-    ########################################################
-    - name: Check if /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json exists
-      stat:
-        path: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
-      register: docker_puppet_tasks_json
-    - name: Run docker-puppet tasks (bootstrap tasks)
-      shell: python /var/lib/docker-puppet/docker-puppet.py
-      environment:
-        CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
-        NET_HOST: "true"
-        NO_ARCHIVE: "true"
-        STEP: "{{step}}"
-      when: deploy_server_id == bootstrap_server_id and docker_puppet_tasks_json.stat.exists
-      changed_when: false
-      check_mode: no
-      register: outputs
-      failed_when: false
-      no_log: true
-    - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
-      when: outputs is defined
-      failed_when: outputs|failed
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
deleted file mode 100644
index 05ff7945..00000000
--- a/docker/docker-steps.j2
+++ /dev/null
@@ -1,296 +0,0 @@
-# certain initialization steps (run in a container) will occur
-# on the role marked as primary controller or the first role listed
-{%- set primary_role = [roles[0]] -%}
-{%- for role in roles -%}
-  {%- if 'primary' in role.tags and 'controller' in role.tags -%}
-    {%- set _ = primary_role.pop() -%}
-    {%- set _ = primary_role.append(role) -%}
-  {%- endif -%}
-{%- endfor -%}
-{%- set primary_role_name = primary_role[0].name -%}
-# primary role is: {{primary_role_name}}
-{% set deploy_steps_max = 6 -%}
-
-heat_template_version: pike
-
-description: >
-  Post-deploy configuration steps via puppet for all roles,
-  as defined in ../roles_data.yaml
-
-parameters:
-  servers:
-    type: json
-    description: Mapping of Role name e.g Controller to a list of servers
-  stack_name:
-    type: string
-    description: Name of the topmost stack
-  role_data:
-    type: json
-    description: Mapping of Role name e.g Controller to the per-role data
-  DeployIdentifier:
-    default: ''
-    type: string
-    description: >
-      Setting this to a unique value will re-run any deployment tasks which
-      perform configuration on a Heat stack-update.
-  EndpointMap:
-    default: {}
-    description: Mapping of service endpoint -> protocol. Typically set
-                 via parameter_defaults in the resource registry.
-    type: json
-  DockerPuppetDebug:
-    type: string
-    default: ''
-    description: Set to True to enable debug logging with docker-puppet.py
-  ctlplane_service_ips:
-    type: json
-
-conditions:
-{% for step in range(1, deploy_steps_max) %}
-  WorkflowTasks_Step{{step}}_Enabled:
-    or:
-    {%- for role in roles %}
-      - not:
-          equals:
-            - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
-            - ''
-      - False
-    {%- endfor %}
-{% endfor %}
-
-resources:
-
-  RoleConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ansible
-      options:
-        modulepath: /usr/share/ansible-modules
-      inputs:
-        - name: step
-        - name: role_name
-        - name: update_identifier
-        - name: bootstrap_server_id
-        - name: docker_puppet_debug
-      config: {get_file: deploy-steps-playbook.yaml}
-
-{%- for step in range(1, deploy_steps_max) %}
-# BEGIN service_workflow_tasks handling
-  WorkflowTasks_Step{{step}}:
-    type: OS::Mistral::Workflow
-    condition: WorkflowTasks_Step{{step}}_Enabled
-    depends_on:
-    {%- if step == 1 %}
-    {%- for dep in roles %}
-      - {{dep.name}}PreConfig
-      - {{dep.name}}ArtifactsDeploy
-    {%- endfor %}
-    {%- else %}
-    {%- for dep in roles %}
-      - {{dep.name}}Deployment_Step{{step -1}}
-    {%- endfor %}
-    {%- endif %}
-    properties:
-      name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
-      type: direct
-      tasks:
-        yaql:
-          expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
-          data:
-          {%- for role in roles %}
-            - get_param: [role_data, {{role.name}}, service_workflow_tasks]
-          {%- endfor %}
-
-  WorkflowTasks_Step{{step}}_Execution:
-    type: OS::Mistral::ExternalResource
-    condition: WorkflowTasks_Step{{step}}_Enabled
-    depends_on: WorkflowTasks_Step{{step}}
-    properties:
-      actions:
-        CREATE:
-          workflow: { get_resource: WorkflowTasks_Step{{step}} }
-          params:
-            env:
-              service_ips: { get_param: ctlplane_service_ips }
-              role_merged_configs:
-                {%- for r in roles %}
-                {{r.name}}: {get_param: [role_data, {{r.name}}, merged_config_settings]}
-                {%- endfor %}
-            evaluate_env: false
-        UPDATE:
-          workflow: { get_resource: WorkflowTasks_Step{{step}} }
-          params:
-            env:
-              service_ips: { get_param: ctlplane_service_ips }
-              role_merged_configs:
-                {%- for r in roles %}
-                {{r.name}}: {get_param: [role_data, {{r.name}}, merged_config_settings]}
-                {%- endfor %}
-            evaluate_env: false
-      always_update: true
-# END service_workflow_tasks handling
-{% endfor %}
-
-{% for role in roles %}
-  # Post deployment steps for all roles
-  # A single config is re-applied with an incrementing step number
-  # {{role.name}} Role steps
-  {{role.name}}ArtifactsConfig:
-    type: ../puppet/deploy-artifacts.yaml
-
-  {{role.name}}ArtifactsDeploy:
-    type: OS::Heat::StructuredDeploymentGroup
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}ArtifactsConfig}
-
-  {{role.name}}HostPrepConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ansible
-      options:
-        modulepath: /usr/share/ansible-modules
-      config:
-        str_replace:
-          template: _PLAYBOOK
-          params:
-            _PLAYBOOK:
-              - hosts: localhost
-                connection: local
-                vars:
-                  puppet_config: {get_param: [role_data, {{role.name}}, puppet_config]}
-                  docker_puppet_script: {get_file: docker-puppet.py}
-                  docker_puppet_tasks: {get_param: [role_data, {{role.name}}, docker_puppet_tasks]}
-                  docker_startup_configs: {get_param: [role_data, {{role.name}}, docker_config]}
-                  kolla_config: {get_param: [role_data, {{role.name}}, kolla_config]}
-                  bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
-                  puppet_step_config: {get_param: [role_data, {{role.name}}, step_config]}
-                tasks:
-                  # Join host_prep_tasks with the other per-host configuration
-                  yaql:
-                    expression: $.data.host_prep_tasks + $.data.template_tasks
-                    data:
-                      host_prep_tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
-                      template_tasks:
-{%- raw %}
-                        # Write the manifest for baremetal puppet configuration
-                        - name: Create /var/lib/tripleo-config directory
-                          file: path=/var/lib/tripleo-config state=directory
-                        - name: Write the puppet step_config manifest
-                          copy: content="{{puppet_step_config}}" dest=/var/lib/tripleo-config/puppet_step_config.pp force=yes
-                        # this creates a JSON config file for our docker-puppet.py script
-                        - name: Create /var/lib/docker-puppet
-                          file: path=/var/lib/docker-puppet state=directory
-                        - name: Write docker-puppet-tasks json files
-                          copy: content="{{puppet_config | to_json}}" dest=/var/lib/docker-puppet/docker-puppet.json force=yes
-                        # FIXME: can we move docker-puppet somewhere so it's installed via a package?
-                        - name: Write docker-puppet.py
-                          copy: content="{{docker_puppet_script}}" dest=/var/lib/docker-puppet/docker-puppet.py force=yes
-                        # Here we are dumping all the docker container startup configuration data
-                        # so that we can have access to how they are started outside of heat
-                        # and docker-cmd. This lets us create command line tools to test containers.
-                        # FIXME do we need the docker-container-startup-configs.json or is the new per-step
-                        # data consumed by paunch enough?
-                        - name: Write docker-container-startup-configs
-                          copy: content="{{docker_startup_configs | to_json}}" dest=/var/lib/docker-container-startup-configs.json force=yes
-                        - name: Write per-step docker-container-startup-configs
-                          copy: content="{{item.value|to_json}}" dest="/var/lib/tripleo-config/docker-container-startup-config-{{item.key}}.json" force=yes
-                          with_dict: "{{docker_startup_configs}}"
-                        - name: Create /var/lib/kolla/config_files directory
-                          file: path=/var/lib/kolla/config_files state=directory
-                        - name: Write kolla config json files
-                          copy: content="{{item.value|to_json}}" dest="{{item.key}}" force=yes
-                          with_dict: "{{kolla_config}}"
-                        ########################################################
-                        # Bootstrap tasks, only performed on bootstrap_server_id
-                        ########################################################
-                        - name: Clean /var/lib/docker-puppet/docker-puppet-tasks*.json files
-                          file:
-                            path: "{{item}}"
-                            state: absent
-                          with_fileglob:
-                            - /var/lib/docker-puppet/docker-puppet-tasks*.json
-                          when: deploy_server_id == bootstrap_server_id
-                        - name: Write docker-puppet-tasks json files
-                          copy: content="{{item.value|to_json}}" dest=/var/lib/docker-puppet/docker-puppet-tasks{{item.key.replace("step_", "")}}.json force=yes
-                          with_dict: "{{docker_puppet_tasks}}"
-                          when: deploy_server_id == bootstrap_server_id
-{%- endraw %}
-
-  {{role.name}}HostPrepDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}HostPrepConfig}
-
-  # BEGIN CONFIG STEPS
-
-  {{role.name}}PreConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PreConfig
-    depends_on: {{role.name}}HostPrepDeployment
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  {% for step in range(1, deploy_steps_max) %}
-  {{role.name}}Deployment_Step{{step}}:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - WorkflowTasks_Step{{step}}_Execution
-    # TODO(gfidente): the following if/else condition
-    # replicates what is already defined for the
-    # WorkflowTasks_StepX resource and can be remove
-    # if https://bugs.launchpad.net/heat/+bug/1700569
-    # is fixed.
-    {%- if step == 1 %}
-    {%- for dep in roles %}
-      - {{dep.name}}PreConfig
-      - {{dep.name}}ArtifactsDeploy
-    {%- endfor %}
-    {%- else %}
-    {%- for dep in roles %}
-      - {{dep.name}}Deployment_Step{{step -1}}
-    {%- endfor %}
-    {%- endif %}
-    properties:
-      name: {{role.name}}Deployment_Step{{step}}
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: RoleConfig}
-      input_values:
-        step: {{step}}
-        role_name: {{role.name}}
-        update_identifier: {get_param: DeployIdentifier}
-        bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
-        docker_puppet_debug: {get_param: DockerPuppetDebug}
-  {% endfor %}
-  # END CONFIG STEPS
-
-  # Note, this should be the last step to execute configuration changes.
-  # Ensure that all {{role.name}}ExtraConfigPost steps are executed
-  # after all the previous deployment steps.
-  {{role.name}}ExtraConfigPost:
-    depends_on:
-    {%- for dep in roles %}
-      - {{dep.name}}Deployment_Step5
-    {%- endfor %}
-    type: OS::TripleO::NodeExtraConfigPost
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-
-  # The {{role.name}}PostConfig steps are in charge of
-  # quiescing all services, i.e. in the Controller case,
-  # we should run a full service reload.
-  {{role.name}}PostConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PostConfig
-    depends_on:
-    {%- for dep in roles %}
-      - {{dep.name}}ExtraConfigPost
-    {%- endfor %}
-    properties:
-      servers: {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-
-{% endfor %}
diff --git a/docker/post-upgrade.j2.yaml b/docker/post-upgrade.j2.yaml
deleted file mode 100644
index 4477f868..00000000
--- a/docker/post-upgrade.j2.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-# Note the include here is the same as post.j2.yaml but the data used at
-# # the time of rendering is different if any roles disable upgrades
-{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
-{% include 'docker-steps.j2' %}
diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml
deleted file mode 100644
index fd956215..00000000
--- a/docker/post.j2.yaml
+++ /dev/null
@@ -1 +0,0 @@
-{% include 'docker-steps.j2' %}
diff --git a/docker/services/ceilometer-agent-central.yaml b/docker/services/ceilometer-agent-central.yaml
index 6caffd15..424c316f 100644
--- a/docker/services/ceilometer-agent-central.yaml
+++ b/docker/services/ceilometer-agent-central.yaml
@@ -115,7 +115,7 @@ outputs:
             command:
               - '/usr/bin/bootstrap_host_exec'
               - 'ceilometer_agent_central'
-              - "su ceilometer -s /bin/bash -c '/usr/bin/ceilometer-upgrade --skip-metering-database'"
+              - "su ceilometer -s /bin/bash -c 'for n in {1..10}; do /usr/bin/ceilometer-upgrade --skip-metering-database && exit 0 || sleep 5; done; exit 1'"
       upgrade_tasks:
         - name: Stop and disable ceilometer agent central service
           tags: step2
diff --git a/docker/services/ceph-ansible/ceph-base.yaml b/docker/services/ceph-ansible/ceph-base.yaml
index 1468415e..85fe0608 100644
--- a/docker/services/ceph-ansible/ceph-base.yaml
+++ b/docker/services/ceph-ansible/ceph-base.yaml
@@ -142,6 +142,7 @@ outputs:
             ceph_docker_image_tag: {str_split: [':', {get_param: DockerCephDaemonImage}, 1]}
             containerized_deployment: true
             public_network: {get_param: [ServiceData, net_cidr_map, {get_param: [ServiceNetMap, CephMonNetwork]}]}
+            monitor_address_block: {get_param: [ServiceData, net_cidr_map, {get_param: [ServiceNetMap, CephMonNetwork]}]}
             cluster_network: {get_param: [ServiceData, net_cidr_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
             user_config: true
             ceph_stable: true
diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml
index 5ba79b31..86bb6d54 100644
--- a/docker/services/database/mongodb.yaml
+++ b/docker/services/database/mongodb.yaml
@@ -116,6 +116,8 @@ outputs:
           with_items:
             - /var/log/containers/mongodb
             - /var/lib/mongodb
+      metadata_settings:
+        get_attr: [MongodbPuppetBase, role_data, metadata_settings]
       upgrade_tasks:
         - name: Stop and disable mongodb service
           tags: step2
diff --git a/docker/services/gnocchi-api.yaml b/docker/services/gnocchi-api.yaml
index 41fe197b..7c6b6766 100644
--- a/docker/services/gnocchi-api.yaml
+++ b/docker/services/gnocchi-api.yaml
@@ -88,6 +88,10 @@ outputs:
               dest: "/"
               merge: true
               preserve_properties: true
+            - source: "/var/lib/kolla/config_files/src-ceph/"
+              dest: "/etc/ceph/"
+              merge: true
+              preserve_properties: true
           permissions:
             - path: /var/log/gnocchi
               owner: gnocchi:gnocchi
@@ -101,7 +105,7 @@ outputs:
             volumes:
               - /var/log/containers/gnocchi:/var/log/gnocchi
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R gnocchi:gnocchi /var/log/gnocchi']
-        step_3:
+        step_4:
           gnocchi_db_sync:
             image: *gnocchi_api_image
             net: host
@@ -114,12 +118,13 @@
                 -
                   - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
                   - /var/log/containers/gnocchi:/var/log/gnocchi
+                  - /etc/ceph:/etc/ceph:ro
             command:
               str_replace:
-                template: "/usr/bin/bootstrap_host_exec gnocchi_api su gnocchi -s /bin/bash -c /usr/bin/gnocchi-upgrade --sacks-number=SACK_NUM"
+                template: /usr/bin/bootstrap_host_exec gnocchi_api su gnocchi -s /bin/bash -c '/usr/bin/gnocchi-upgrade --sacks-number=SACK_NUM'
                 params:
                   SACK_NUM: {get_param: NumberOfStorageSacks}
-        step_4:
+        step_5:
           gnocchi_api:
             image: *gnocchi_api_image
             net: host
@@ -132,6 +137,7 @@
               - /var/lib/kolla/config_files/gnocchi_api.json:/var/lib/kolla/config_files/config.json:ro
               - /var/lib/config-data/puppet-generated/gnocchi/:/var/lib/kolla/config_files/src:ro
               - /var/log/containers/gnocchi:/var/log/gnocchi
+              - /etc/ceph:/var/lib/kolla/config_files/src-ceph:ro
               -
                 if:
                   - internal_tls_enabled
@@ -149,6 +155,10 @@
           file:
            path: /var/log/containers/gnocchi
            state: directory
+        - name: ensure ceph configurations exist
+          file:
+            path: /etc/ceph
+            state: directory
       upgrade_tasks:
         - name: Stop and disable httpd service
           tags: step2
diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml
index da461049..45de265e 100644
--- a/docker/services/nova-api.yaml
+++ b/docker/services/nova-api.yaml
@@ -36,6 +36,13 @@ parameters:
     default: {}
     description: Parameters specific to the role
     type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
 
 resources:
 
@@ -64,9 +71,6 @@ outputs:
         map_merge:
           - get_attr: [NovaApiBase, role_data, config_settings]
          - apache::default_vhost: false
-            nova_wsgi_enabled: false
-            nova::api::service_name: '%{::nova::params::api_service_name}'
-            nova::wsgi::apache_api::ssl: false
       step_config: &step_config
         list_join:
           - "\n"
@@ -82,7 +86,7 @@ outputs:
         config_image: {get_param: DockerNovaConfigImage}
       kolla_config:
         /var/lib/kolla/config_files/nova_api.json:
-          command: /usr/bin/nova-api
+          command: /usr/sbin/httpd -DFOREGROUND
           config_files:
             - source: "/var/lib/kolla/config_files/src/*"
               dest: "/"
@@ -112,7 +116,7 @@ outputs:
             user: root
             volumes:
               - /var/log/containers/nova:/var/log/nova
-            command: ['/bin/bash', '-c', 'chown -R nova:nova /var/log/nova']
+            command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R nova:nova /var/log/nova']
         step_3:
           nova_api_db_sync:
             start_order: 0
@@ -163,7 +167,7 @@ outputs:
             start_order: 2
             image: *nova_api_image
             net: host
-            user: nova
+            user: root
             privileged: true
             restart: always
             volumes:
@@ -173,6 +177,16 @@ outputs:
                 - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
                 - /var/lib/config-data/puppet-generated/nova/:/var/lib/kolla/config_files/src:ro
                 - /var/log/containers/nova:/var/log/nova
+                -
+                  if:
+                    - internal_tls_enabled
+                    - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                    - ''
+                -
+                  if:
+                    - internal_tls_enabled
+                    - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                    - ''
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
           nova_api_cron:
diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml
index 2f3851a5..916b057e 100644
--- a/docker/services/nova-libvirt.yaml
+++ b/docker/services/nova-libvirt.yaml
@@ -56,7 +56,21 @@ parameters:
     description: Port that dockerized nova migration target sshd service
                  binds to.
     type: number
-
+  NovaEnableRbdBackend:
+    default: false
+    description: Whether to enable or not the Rbd backend for Nova
+    type: boolean
+  CinderEnableRbdBackend:
+    default: false
+    description: Whether to enable or not the Rbd backend for Cinder
+    type: boolean
+  CephClientKey:
+    description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring.
+    type: string
+    hidden: true
+  CephClusterFSID:
+    type: string
+    description: The Ceph cluster FSID. Must be a UUID.
 
 conditions:
 
@@ -69,6 +83,15 @@
       - {get_param: UseTLSTransportForLiveMigration}
       - true
 
+  need_libvirt_secret:
+    or:
+      - equals:
+          - {get_param: NovaEnableRbdBackend}
+          - true
+      - equals:
+          - {get_param: CinderEnableRbdBackend}
+          - true
+
 resources:
 
   ContainersCommon:
@@ -102,7 +125,7 @@ outputs:
             - {get_attr: [MySQLClient, role_data, step_config]}
       puppet_config:
         config_volume: nova_libvirt
-        puppet_tags: libvirtd_config,nova_config,file,exec
+        puppet_tags: libvirtd_config,nova_config,file
         step_config: *step_config
         config_image: {get_param: DockerNovaLibvirtConfigImage}
       kolla_config:
@@ -145,21 +168,46 @@ outputs:
                 - /run:/run
                 - /sys/fs/cgroup:/sys/fs/cgroup
                 - /var/lib/nova:/var/lib/nova
-                - /etc/libvirt/secrets:/etc/libvirt/secrets
+                - /etc/libvirt:/etc/libvirt
                 # Needed to use host's virtlogd
                 - /var/run/libvirt:/var/run/libvirt
                 - /var/lib/libvirt:/var/lib/libvirt
-                - /etc/libvirt/qemu:/etc/libvirt/qemu
                 - /var/log/libvirt/qemu:/var/log/libvirt/qemu:ro
                 - /var/log/containers/nova:/var/log/nova
             environment:
              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+        step_4:
+          if:
+            - need_libvirt_secret
+            - nova_libvirt_init_secret:
+                detach: false
+                image: {get_param: DockerNovaLibvirtImage}
+                privileged: false
+                user: root
+                volumes:
+                  list_concat:
+                    - {get_attr: [ContainersCommon, volumes]}
+                    -
+                      - /var/lib/config-data/puppet-generated/nova_libvirt/etc/nova:/etc/nova:ro
+                      - /etc/libvirt:/etc/libvirt
+                      - /var/run/libvirt:/var/run/libvirt
+                      - /var/lib/libvirt:/var/lib/libvirt
+                command:
+                  - /bin/bash
+                  - -c
+                  - str_replace:
+                      template: /usr/bin/virsh secret-define --file /etc/nova/secret.xml && /usr/bin/virsh secret-set-value --secret 'SECRET_UUID' --base64 'SECRET_KEY'
+                      params:
+                        SECRET_UUID: {get_param: CephClusterFSID}
+                        SECRET_KEY: {get_param: CephClientKey}
+            - {}
       host_prep_tasks:
         - name: create libvirt persistent data directories
           file:
             path: "{{ item }}"
            state: directory
           with_items:
+            - /etc/libvirt
             - /etc/libvirt/secrets
             - /etc/libvirt/qemu
             - /var/lib/libvirt
diff --git a/docker/services/nova-placement.yaml b/docker/services/nova-placement.yaml
index d784ace3..26d17560 100644
--- a/docker/services/nova-placement.yaml
+++ b/docker/services/nova-placement.yaml
@@ -36,6 +36,13 @@ parameters:
     default: {}
     description: Parameters specific to the role
     type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
 
 resources:
 
@@ -104,6 +111,16 @@ outputs:
               - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
               - /var/lib/config-data/puppet-generated/nova_placement/:/var/lib/kolla/config_files/src:ro
               - /var/log/containers/nova:/var/log/nova
+              -
+                if:
+                  - internal_tls_enabled
+                  - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                  - ''
+              -
+                if:
+                  - internal_tls_enabled
+                  - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                  - ''
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
       metadata_settings:
diff --git a/docker/services/pacemaker/database/mysql.yaml b/docker/services/pacemaker/database/mysql.yaml
index f12852f8..3fb38349 100644
--- a/docker/services/pacemaker/database/mysql.yaml
+++ b/docker/services/pacemaker/database/mysql.yaml
@@ -32,6 +32,9 @@ parameters:
     type: string
     hidden: true
     default: ''
+  MysqlClustercheckPassword:
+    type: string
+    hidden: true
   RoleName:
     default: ''
     description: Role name on which the service is applied
@@ -118,7 +121,19 @@ outputs:
             image: *mysql_image
             net: host
             # Kolla bootstraps aren't idempotent, explicitly checking if bootstrap was done
-            command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start']
+            command:
+              - 'bash'
+              - '-ec'
+              -
+                list_join:
+                  - "\n"
+                  - - 'if [ -e /var/lib/mysql/mysql ]; then exit 0; fi'
+                    - 'kolla_start'
+                    - 'mysqld_safe --skip-networking --wsrep-on=OFF &'
+                    - 'timeout ${DB_MAX_TIMEOUT} /bin/bash -c ''until mysqladmin -uroot -p"${DB_ROOT_PASSWORD}" ping 2>/dev/null; do sleep 1; done'''
+                    - 'mysql -uroot -p"${DB_ROOT_PASSWORD}" -e "CREATE USER ''clustercheck''@''localhost'' IDENTIFIED BY ''${DB_CLUSTERCHECK_PASSWORD}'';"'
+                    - 'mysql -uroot -p"${DB_ROOT_PASSWORD}" -e "GRANT PROCESS ON *.* TO ''clustercheck''@''localhost'' WITH GRANT OPTION;"'
+                    - 'timeout ${DB_MAX_TIMEOUT} mysqladmin -uroot -p"${DB_ROOT_PASSWORD}" shutdown'
             volumes: &mysql_volumes
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
@@ -131,6 +146,12 @@ outputs:
               - KOLLA_BOOTSTRAP=True
               # NOTE(mandre) skip wsrep cluster status check
              - KOLLA_KUBERNETES=True
+              - DB_MAX_TIMEOUT=60
+              -
+                list_join:
+                  - '='
+                  - - 'DB_CLUSTERCHECK_PASSWORD'
+                    - {get_param: MysqlClustercheckPassword}
               -
                 list_join:
                   - '='