Diffstat (limited to 'docker')
-rw-r--r--  docker/deploy-steps-playbook.yaml               86
-rw-r--r--  docker/docker-steps.j2                         296
-rw-r--r--  docker/post-upgrade.j2.yaml                      4
-rw-r--r--  docker/post.j2.yaml                              1
-rw-r--r--  docker/services/ceilometer-agent-central.yaml    2
-rw-r--r--  docker/services/ceph-ansible/ceph-base.yaml     52
-rw-r--r--  docker/services/database/mongodb.yaml            2
-rw-r--r--  docker/services/gnocchi-api.yaml                16
-rw-r--r--  docker/services/nova-api.yaml                   26
-rw-r--r--  docker/services/nova-libvirt.yaml               56
-rw-r--r--  docker/services/nova-placement.yaml             17
-rw-r--r--  docker/services/pacemaker/database/mysql.yaml   35
12 files changed, 172 insertions, 421 deletions
diff --git a/docker/deploy-steps-playbook.yaml b/docker/deploy-steps-playbook.yaml
deleted file mode 100644
index b884e0e7..00000000
--- a/docker/deploy-steps-playbook.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-- hosts: localhost
- connection: local
- tasks:
- #####################################################
- # Per step puppet configuration of the baremetal host
- #####################################################
- - name: Write the config_step hieradata
- copy: content="{{dict(step=step|int)|to_json}}" dest=/etc/puppet/hieradata/config_step.json force=true
- - name: Run puppet host configuration for step {{step}}
- command: >-
- puppet apply
- --modulepath=/etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
- --logdest syslog --logdest console --color=false
- /var/lib/tripleo-config/puppet_step_config.pp
- changed_when: false
- check_mode: no
- register: outputs
- failed_when: false
- no_log: true
- - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
- when: outputs is defined
- failed_when: outputs|failed
- ######################################
- # Generate config via docker-puppet.py
- ######################################
- - name: Run docker-puppet tasks (generate config)
- shell: python /var/lib/docker-puppet/docker-puppet.py
- environment:
- NET_HOST: 'true'
- DEBUG: '{{docker_puppet_debug}}'
- when: step == "1"
- changed_when: false
- check_mode: no
- register: outputs
- failed_when: false
- no_log: true
- - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
- when: outputs is defined
- failed_when: outputs|failed
- ##################################################
- # Per step starting of the containers using paunch
- ##################################################
- - name: Check if /var/lib/hashed-tripleo-config/docker-container-startup-config-step_{{step}}.json exists
- stat:
- path: /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json
- register: docker_config_json
- # Note docker-puppet.py generates the hashed-*.json file, which is a copy of
- # the *step_n.json with a hash of the generated external config added
- # This acts as a salt to enable restarting the container if config changes
- - name: Start containers for step {{step}}
- command: >-
- paunch --debug apply
- --file /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json
- --config-id tripleo_step{{step}} --managed-by tripleo-{{role_name}}
- when: docker_config_json.stat.exists
- changed_when: false
- check_mode: no
- register: outputs
- failed_when: false
- no_log: true
- - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
- when: outputs is defined
- failed_when: outputs|failed
- ########################################################
- # Bootstrap tasks, only performed on bootstrap_server_id
- ########################################################
- - name: Check if /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json exists
- stat:
- path: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
- register: docker_puppet_tasks_json
- - name: Run docker-puppet tasks (bootstrap tasks)
- shell: python /var/lib/docker-puppet/docker-puppet.py
- environment:
- CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
- NET_HOST: "true"
- NO_ARCHIVE: "true"
- STEP: "{{step}}"
- when: deploy_server_id == bootstrap_server_id and docker_puppet_tasks_json.stat.exists
- changed_when: false
- check_mode: no
- register: outputs
- failed_when: false
- no_log: true
- - debug: var=(outputs.stderr|default('')).split('\n')|union(outputs.stdout_lines|default([]))
- when: outputs is defined
- failed_when: outputs|failed
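
Editor's note: the deleted playbook above relies on docker-puppet.py copying each per-step startup config into a hashed-*.json variant that embeds a hash of the generated external config, so paunch restarts a container whenever its configuration changes. A minimal Python sketch of that salting idea follows; it is not the actual docker-puppet.py code, and the helper name, the input shape, and the env-var injection mechanism are illustrative only.

```python
# Sketch only: salt one step's container startup config with a hash of the
# puppet-generated config so a config change yields a different startup JSON.
import hashlib
import json


def add_config_hash(startup_config, generated_config):
    """Return a salted copy of one step's startup config.

    startup_config:   {container_name: definition} for a single step.
    generated_config: {path: contents} of the generated external config
                      (hypothetical input shape).
    """
    digest = hashlib.sha256(
        json.dumps(generated_config, sort_keys=True).encode()).hexdigest()
    salted = {}
    for name, definition in startup_config.items():
        env = list(definition.get("environment", []))
        env.append("TRIPLEO_CONFIG_HASH=%s" % digest)  # variable name illustrative
        salted[name] = dict(definition, environment=env)
    return salted


if __name__ == "__main__":
    step = {"nova_api": {"image": "nova-api:latest", "environment": []}}
    print(json.dumps(add_config_hash(step, {"/etc/nova/nova.conf": "..."}),
                     indent=2))
```

Because the hash lands inside the container definition itself, an unchanged config hashes to the same JSON and the "Start containers for step" task leaves the running container alone.
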
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
deleted file mode 100644
index 05ff7945..00000000
--- a/docker/docker-steps.j2
+++ /dev/null
@@ -1,296 +0,0 @@
-# certain initialization steps (run in a container) will occur
-# on the role marked as primary controller or the first role listed
-{%- set primary_role = [roles[0]] -%}
-{%- for role in roles -%}
- {%- if 'primary' in role.tags and 'controller' in role.tags -%}
- {%- set _ = primary_role.pop() -%}
- {%- set _ = primary_role.append(role) -%}
- {%- endif -%}
-{%- endfor -%}
-{%- set primary_role_name = primary_role[0].name -%}
-# primary role is: {{primary_role_name}}
-{% set deploy_steps_max = 6 -%}
-
-heat_template_version: pike
-
-description: >
- Post-deploy configuration steps via puppet for all roles,
- as defined in ../roles_data.yaml
-
-parameters:
- servers:
- type: json
- description: Mapping of Role name e.g Controller to a list of servers
- stack_name:
- type: string
- description: Name of the topmost stack
- role_data:
- type: json
- description: Mapping of Role name e.g Controller to the per-role data
- DeployIdentifier:
- default: ''
- type: string
- description: >
- Setting this to a unique value will re-run any deployment tasks which
- perform configuration on a Heat stack-update.
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- DockerPuppetDebug:
- type: string
- default: ''
- description: Set to True to enable debug logging with docker-puppet.py
- ctlplane_service_ips:
- type: json
-
-conditions:
-{% for step in range(1, deploy_steps_max) %}
- WorkflowTasks_Step{{step}}_Enabled:
- or:
- {%- for role in roles %}
- - not:
- equals:
- - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
- - ''
- - False
- {%- endfor %}
-{% endfor %}
-
-resources:
-
- RoleConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: ansible
- options:
- modulepath: /usr/share/ansible-modules
- inputs:
- - name: step
- - name: role_name
- - name: update_identifier
- - name: bootstrap_server_id
- - name: docker_puppet_debug
- config: {get_file: deploy-steps-playbook.yaml}
-
-{%- for step in range(1, deploy_steps_max) %}
-# BEGIN service_workflow_tasks handling
- WorkflowTasks_Step{{step}}:
- type: OS::Mistral::Workflow
- condition: WorkflowTasks_Step{{step}}_Enabled
- depends_on:
- {%- if step == 1 %}
- {%- for dep in roles %}
- - {{dep.name}}PreConfig
- - {{dep.name}}ArtifactsDeploy
- {%- endfor %}
- {%- else %}
- {%- for dep in roles %}
- - {{dep.name}}Deployment_Step{{step -1}}
- {%- endfor %}
- {%- endif %}
- properties:
- name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
- type: direct
- tasks:
- yaql:
- expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
- data:
- {%- for role in roles %}
- - get_param: [role_data, {{role.name}}, service_workflow_tasks]
- {%- endfor %}
-
- WorkflowTasks_Step{{step}}_Execution:
- type: OS::Mistral::ExternalResource
- condition: WorkflowTasks_Step{{step}}_Enabled
- depends_on: WorkflowTasks_Step{{step}}
- properties:
- actions:
- CREATE:
- workflow: { get_resource: WorkflowTasks_Step{{step}} }
- params:
- env:
- service_ips: { get_param: ctlplane_service_ips }
- role_merged_configs:
- {%- for r in roles %}
- {{r.name}}: {get_param: [role_data, {{r.name}}, merged_config_settings]}
- {%- endfor %}
- evaluate_env: false
- UPDATE:
- workflow: { get_resource: WorkflowTasks_Step{{step}} }
- params:
- env:
- service_ips: { get_param: ctlplane_service_ips }
- role_merged_configs:
- {%- for r in roles %}
- {{r.name}}: {get_param: [role_data, {{r.name}}, merged_config_settings]}
- {%- endfor %}
- evaluate_env: false
- always_update: true
-# END service_workflow_tasks handling
-{% endfor %}
-
-{% for role in roles %}
- # Post deployment steps for all roles
- # A single config is re-applied with an incrementing step number
- # {{role.name}} Role steps
- {{role.name}}ArtifactsConfig:
- type: ../puppet/deploy-artifacts.yaml
-
- {{role.name}}ArtifactsDeploy:
- type: OS::Heat::StructuredDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}ArtifactsConfig}
-
- {{role.name}}HostPrepConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: ansible
- options:
- modulepath: /usr/share/ansible-modules
- config:
- str_replace:
- template: _PLAYBOOK
- params:
- _PLAYBOOK:
- - hosts: localhost
- connection: local
- vars:
- puppet_config: {get_param: [role_data, {{role.name}}, puppet_config]}
- docker_puppet_script: {get_file: docker-puppet.py}
- docker_puppet_tasks: {get_param: [role_data, {{role.name}}, docker_puppet_tasks]}
- docker_startup_configs: {get_param: [role_data, {{role.name}}, docker_config]}
- kolla_config: {get_param: [role_data, {{role.name}}, kolla_config]}
- bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
- puppet_step_config: {get_param: [role_data, {{role.name}}, step_config]}
- tasks:
- # Join host_prep_tasks with the other per-host configuration
- yaql:
- expression: $.data.host_prep_tasks + $.data.template_tasks
- data:
- host_prep_tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
- template_tasks:
-{%- raw %}
- # Write the manifest for baremetal puppet configuration
- - name: Create /var/lib/tripleo-config directory
- file: path=/var/lib/tripleo-config state=directory
- - name: Write the puppet step_config manifest
- copy: content="{{puppet_step_config}}" dest=/var/lib/tripleo-config/puppet_step_config.pp force=yes
- # this creates a JSON config file for our docker-puppet.py script
- - name: Create /var/lib/docker-puppet
- file: path=/var/lib/docker-puppet state=directory
- - name: Write docker-puppet-tasks json files
- copy: content="{{puppet_config | to_json}}" dest=/var/lib/docker-puppet/docker-puppet.json force=yes
- # FIXME: can we move docker-puppet somewhere so it's installed via a package?
- - name: Write docker-puppet.py
- copy: content="{{docker_puppet_script}}" dest=/var/lib/docker-puppet/docker-puppet.py force=yes
- # Here we are dumping all the docker container startup configuration data
- # so that we can have access to how they are started outside of heat
- # and docker-cmd. This lets us create command line tools to test containers.
- # FIXME do we need the docker-container-startup-configs.json or is the new per-step
- # data consumed by paunch enough?
- - name: Write docker-container-startup-configs
- copy: content="{{docker_startup_configs | to_json}}" dest=/var/lib/docker-container-startup-configs.json force=yes
- - name: Write per-step docker-container-startup-configs
- copy: content="{{item.value|to_json}}" dest="/var/lib/tripleo-config/docker-container-startup-config-{{item.key}}.json" force=yes
- with_dict: "{{docker_startup_configs}}"
- - name: Create /var/lib/kolla/config_files directory
- file: path=/var/lib/kolla/config_files state=directory
- - name: Write kolla config json files
- copy: content="{{item.value|to_json}}" dest="{{item.key}}" force=yes
- with_dict: "{{kolla_config}}"
- ########################################################
- # Bootstrap tasks, only performed on bootstrap_server_id
- ########################################################
- - name: Clean /var/lib/docker-puppet/docker-puppet-tasks*.json files
- file:
- path: "{{item}}"
- state: absent
- with_fileglob:
- - /var/lib/docker-puppet/docker-puppet-tasks*.json
- when: deploy_server_id == bootstrap_server_id
- - name: Write docker-puppet-tasks json files
- copy: content="{{item.value|to_json}}" dest=/var/lib/docker-puppet/docker-puppet-tasks{{item.key.replace("step_", "")}}.json force=yes
- with_dict: "{{docker_puppet_tasks}}"
- when: deploy_server_id == bootstrap_server_id
-{%- endraw %}
-
- {{role.name}}HostPrepDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}HostPrepConfig}
-
- # BEGIN CONFIG STEPS
-
- {{role.name}}PreConfig:
- type: OS::TripleO::Tasks::{{role.name}}PreConfig
- depends_on: {{role.name}}HostPrepDeployment
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- {% for step in range(1, deploy_steps_max) %}
- {{role.name}}Deployment_Step{{step}}:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - WorkflowTasks_Step{{step}}_Execution
- # TODO(gfidente): the following if/else condition
- # replicates what is already defined for the
- # WorkflowTasks_StepX resource and can be removed
- # if https://bugs.launchpad.net/heat/+bug/1700569
- # is fixed.
- {%- if step == 1 %}
- {%- for dep in roles %}
- - {{dep.name}}PreConfig
- - {{dep.name}}ArtifactsDeploy
- {%- endfor %}
- {%- else %}
- {%- for dep in roles %}
- - {{dep.name}}Deployment_Step{{step -1}}
- {%- endfor %}
- {%- endif %}
- properties:
- name: {{role.name}}Deployment_Step{{step}}
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: RoleConfig}
- input_values:
- step: {{step}}
- role_name: {{role.name}}
- update_identifier: {get_param: DeployIdentifier}
- bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
- docker_puppet_debug: {get_param: DockerPuppetDebug}
- {% endfor %}
- # END CONFIG STEPS
-
- # Note, this should be the last step to execute configuration changes.
- # Ensure that all {{role.name}}ExtraConfigPost steps are executed
- # after all the previous deployment steps.
- {{role.name}}ExtraConfigPost:
- depends_on:
- {%- for dep in roles %}
- - {{dep.name}}Deployment_Step5
- {%- endfor %}
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: [servers, {{role.name}}]}
-
- # The {{role.name}}PostConfig steps are in charge of
- # quiescing all services, i.e. in the Controller case,
- # we should run a full service reload.
- {{role.name}}PostConfig:
- type: OS::TripleO::Tasks::{{role.name}}PostConfig
- depends_on:
- {%- for dep in roles %}
- - {{dep.name}}ExtraConfigPost
- {%- endfor %}
- properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
-
-{% endfor %}
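
Editor's note: the WorkflowTasks_Step resources in the deleted docker-steps.j2 above collect each role's service_workflow_tasks for the current step with the yaql expression `$.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()`. A rough plain-Python equivalent, assuming each data element is either an empty string or a mapping of stepN keys to task lists (function name hypothetical):

```python
# Sketch of the WorkflowTasks_Step yaql in plain Python: keep non-empty role
# entries, pick the tasks registered for this step, drop roles without any,
# and flatten the result into a single task list.
def collect_step_tasks(per_role_workflow_tasks, step):
    tasks = []
    for role_tasks in per_role_workflow_tasks:
        if not role_tasks:                            # $.where($ != '')
            continue
        step_tasks = role_tasks.get("step%d" % step)  # .select($.get('stepN'))
        if step_tasks is None:                        # .where($ != null)
            continue
        tasks.extend(step_tasks)                      # .flatten()
    return tasks


print(collect_step_tasks(
    [{"step2": [{"action": "ceph.install"}]}, "", {"step3": [{"action": "x"}]}],
    step=2))
# -> [{'action': 'ceph.install'}]
```
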
diff --git a/docker/post-upgrade.j2.yaml b/docker/post-upgrade.j2.yaml
deleted file mode 100644
index 4477f868..00000000
--- a/docker/post-upgrade.j2.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-# Note the include here is the same as post.j2.yaml but the data used at
-# the time of rendering is different if any roles disable upgrades
-{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
-{% include 'docker-steps.j2' %}
diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml
deleted file mode 100644
index fd956215..00000000
--- a/docker/post.j2.yaml
+++ /dev/null
@@ -1 +0,0 @@
-{% include 'docker-steps.j2' %}
diff --git a/docker/services/ceilometer-agent-central.yaml b/docker/services/ceilometer-agent-central.yaml
index 6caffd15..424c316f 100644
--- a/docker/services/ceilometer-agent-central.yaml
+++ b/docker/services/ceilometer-agent-central.yaml
@@ -115,7 +115,7 @@ outputs:
command:
- '/usr/bin/bootstrap_host_exec'
- 'ceilometer_agent_central'
- - "su ceilometer -s /bin/bash -c '/usr/bin/ceilometer-upgrade --skip-metering-database'"
+ - "su ceilometer -s /bin/bash -c 'for n in {1..10}; do /usr/bin/ceilometer-upgrade --skip-metering-database && exit 0 || sleep 5; done; exit 1'"
upgrade_tasks:
- name: Stop and disable ceilometer agent central service
tags: step2
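
Editor's note: the changed bootstrap command above wraps ceilometer-upgrade in a shell retry loop, making up to 10 attempts 5 seconds apart, exiting 0 on the first success and 1 only if every attempt fails. The same pattern in Python, as a sketch that simply mirrors the bash one-liner (the retry counts and command are taken from the diff, the helper name is not):

```python
# Retry the upgrade until it succeeds, mirroring:
#   for n in {1..10}; do ceilometer-upgrade ... && exit 0 || sleep 5; done; exit 1
import subprocess
import sys
import time


def retry(cmd, attempts=10, delay=5):
    for _ in range(attempts):
        if subprocess.call(cmd) == 0:
            return 0
        time.sleep(delay)
    return 1


if __name__ == "__main__":
    sys.exit(retry(["/usr/bin/ceilometer-upgrade", "--skip-metering-database"]))
```
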
diff --git a/docker/services/ceph-ansible/ceph-base.yaml b/docker/services/ceph-ansible/ceph-base.yaml
index 1468415e..52c4a65c 100644
--- a/docker/services/ceph-ansible/ceph-base.yaml
+++ b/docker/services/ceph-ansible/ceph-base.yaml
@@ -102,6 +102,33 @@ conditions:
data: {get_param: DockerCephDaemonImage}
expression: $.data.split('/')[0].matches('(\.|:)')
+resources:
+ DockerImageUrlParts:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ host:
+ if:
+ - custom_registry_host
+ - yaql:
+ expression: let(location => $.data.rightSplit(':', 1)[0]) -> regex('(?:https?://)?(.*)/').split($location)[1]
+ data: {get_param: DockerCephDaemonImage}
+ - docker.io
+ image:
+ if:
+ - custom_registry_host
+ - yaql:
+ expression: let(location => $.data.rightSplit(':', 1)[0]) -> regex('(?:https?://)?(.*)/').split($location)[2]
+ data: {get_param: DockerCephDaemonImage}
+ - yaql:
+ expression: $.data.rightSplit(':', 1)[0]
+ data: {get_param: DockerCephDaemonImage}
+ image_tag:
+ yaql:
+ expression: $.data.rightSplit(':', 1)[1]
+ data: {get_param: DockerCephDaemonImage}
+
outputs:
role_data:
description: Role data for the Ceph base service.
@@ -125,23 +152,12 @@ outputs:
ceph_common_ansible_vars:
fsid: { get_param: CephClusterFSID }
docker: true
- ceph_docker_registry:
- if:
- - custom_registry_host
- - yaql:
- expression: regex('(?:https?://)?(.*)/').split($.data)[1]
- data: {str_split: [':', {get_param: DockerCephDaemonImage}, 0]}
- - docker.io
- ceph_docker_image:
- if:
- - custom_registry_host
- - yaql:
- expression: regex('(?:https?://)?(.*)/').split($.data)[2]
- data: {str_split: [':', {get_param: DockerCephDaemonImage}, 0]}
- - {str_split: [':', {get_param: DockerCephDaemonImage}, 0]}
- ceph_docker_image_tag: {str_split: [':', {get_param: DockerCephDaemonImage}, 1]}
+ ceph_docker_registry: {get_attr: [DockerImageUrlParts, value, host]}
+ ceph_docker_image: {get_attr: [DockerImageUrlParts, value, image]}
+ ceph_docker_image_tag: {get_attr: [DockerImageUrlParts, value, image_tag]}
containerized_deployment: true
public_network: {get_param: [ServiceData, net_cidr_map, {get_param: [ServiceNetMap, CephMonNetwork]}]}
+ monitor_address_block: {get_param: [ServiceData, net_cidr_map, {get_param: [ServiceNetMap, CephMonNetwork]}]}
cluster_network: {get_param: [ServiceData, net_cidr_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
user_config: true
ceph_stable: true
@@ -185,11 +201,7 @@ outputs:
CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
GLANCE_POOL: {get_param: GlanceRbdPoolName}
GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
- acls:
- - "u:glance:r--"
- - "u:nova:r--"
- - "u:cinder:r--"
- - "u:gnocchi:r--"
+ mode: "0644"
keys: *openstack_keys
pools: []
ceph_conf_overrides:
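
Editor's note: the new DockerImageUrlParts value resource above splits DockerCephDaemonImage into the registry host, image, and tag that feed the ceph_docker_* variables handed to ceph-ansible, using yaql's rightSplit() and regex().split(). A plain-Python rendering of that parsing, using a hypothetical image reference and assuming the reference always carries a tag, as the template does:

```python
# Sketch of the DockerImageUrlParts parsing: split "host[:port]/image:tag"
# into registry host, image name, and tag the way the yaql expressions do.
import re


def split_image_reference(image, custom_registry_host=True):
    location, tag = image.rsplit(":", 1)              # yaql rightSplit(':', 1)
    if not custom_registry_host:
        # No custom registry: host defaults to docker.io and the image keeps
        # the full location, matching the template's else branches.
        return {"host": "docker.io", "image": location, "image_tag": tag}
    # regex('(?:https?://)?(.*)/').split($location) -> ['', host, image]
    _, host, name = re.split(r"(?:https?://)?(.*)/", location, maxsplit=1)
    return {"host": host, "image": name, "image_tag": tag}


print(split_image_reference("myregistry.example.com:8787/ceph-daemon:v3.0"))
# {'host': 'myregistry.example.com:8787', 'image': 'ceph-daemon',
#  'image_tag': 'v3.0'}
```
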
diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml
index 4853ab9c..9b5c5b8f 100644
--- a/docker/services/database/mongodb.yaml
+++ b/docker/services/database/mongodb.yaml
@@ -154,6 +154,8 @@ outputs:
with_items:
- /var/log/containers/mongodb
- /var/lib/mongodb
+ metadata_settings:
+ get_attr: [MongodbPuppetBase, role_data, metadata_settings]
upgrade_tasks:
- name: Stop and disable mongodb service
tags: step2
diff --git a/docker/services/gnocchi-api.yaml b/docker/services/gnocchi-api.yaml
index 41fe197b..7c6b6766 100644
--- a/docker/services/gnocchi-api.yaml
+++ b/docker/services/gnocchi-api.yaml
@@ -88,6 +88,10 @@ outputs:
dest: "/"
merge: true
preserve_properties: true
+ - source: "/var/lib/kolla/config_files/src-ceph/"
+ dest: "/etc/ceph/"
+ merge: true
+ preserve_properties: true
permissions:
- path: /var/log/gnocchi
owner: gnocchi:gnocchi
@@ -101,7 +105,7 @@ outputs:
volumes:
- /var/log/containers/gnocchi:/var/log/gnocchi
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R gnocchi:gnocchi /var/log/gnocchi']
- step_3:
+ step_4:
gnocchi_db_sync:
image: *gnocchi_api_image
net: host
@@ -114,12 +118,13 @@ outputs:
-
- /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
- /var/log/containers/gnocchi:/var/log/gnocchi
+ - /etc/ceph:/etc/ceph:ro
command:
str_replace:
- template: "/usr/bin/bootstrap_host_exec gnocchi_api su gnocchi -s /bin/bash -c /usr/bin/gnocchi-upgrade --sacks-number=SACK_NUM"
+ template: /usr/bin/bootstrap_host_exec gnocchi_api su gnocchi -s /bin/bash -c '/usr/bin/gnocchi-upgrade --sacks-number=SACK_NUM'
params:
SACK_NUM: {get_param: NumberOfStorageSacks}
- step_4:
+ step_5:
gnocchi_api:
image: *gnocchi_api_image
net: host
@@ -132,6 +137,7 @@ outputs:
- /var/lib/kolla/config_files/gnocchi_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/gnocchi/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/gnocchi:/var/log/gnocchi
+ - /etc/ceph:/var/lib/kolla/config_files/src-ceph:ro
-
if:
- internal_tls_enabled
@@ -149,6 +155,10 @@ outputs:
file:
path: /var/log/containers/gnocchi
state: directory
+ - name: ensure ceph configurations exist
+ file:
+ path: /etc/ceph
+ state: directory
upgrade_tasks:
- name: Stop and disable httpd service
tags: step2
diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml
index da461049..45de265e 100644
--- a/docker/services/nova-api.yaml
+++ b/docker/services/nova-api.yaml
@@ -36,6 +36,13 @@ parameters:
default: {}
description: Parameters specific to the role
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
@@ -64,9 +71,6 @@ outputs:
map_merge:
- get_attr: [NovaApiBase, role_data, config_settings]
- apache::default_vhost: false
- nova_wsgi_enabled: false
- nova::api::service_name: '%{::nova::params::api_service_name}'
- nova::wsgi::apache_api::ssl: false
step_config: &step_config
list_join:
- "\n"
@@ -82,7 +86,7 @@ outputs:
config_image: {get_param: DockerNovaConfigImage}
kolla_config:
/var/lib/kolla/config_files/nova_api.json:
- command: /usr/bin/nova-api
+ command: /usr/sbin/httpd -DFOREGROUND
config_files:
- source: "/var/lib/kolla/config_files/src/*"
dest: "/"
@@ -112,7 +116,7 @@ outputs:
user: root
volumes:
- /var/log/containers/nova:/var/log/nova
- command: ['/bin/bash', '-c', 'chown -R nova:nova /var/log/nova']
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R nova:nova /var/log/nova']
step_3:
nova_api_db_sync:
start_order: 0
@@ -163,7 +167,7 @@ outputs:
start_order: 2
image: *nova_api_image
net: host
- user: nova
+ user: root
privileged: true
restart: always
volumes:
@@ -173,6 +177,16 @@ outputs:
- /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/nova/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/nova:/var/log/nova
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
nova_api_cron:
diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml
index 2f3851a5..916b057e 100644
--- a/docker/services/nova-libvirt.yaml
+++ b/docker/services/nova-libvirt.yaml
@@ -56,7 +56,21 @@ parameters:
description: Port that dockerized nova migration target sshd service
binds to.
type: number
-
+ NovaEnableRbdBackend:
+ default: false
+ description: Whether to enable or not the Rbd backend for Nova
+ type: boolean
+ CinderEnableRbdBackend:
+ default: false
+ description: Whether to enable or not the Rbd backend for Cinder
+ type: boolean
+ CephClientKey:
+ description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring.
+ type: string
+ hidden: true
+ CephClusterFSID:
+ type: string
+ description: The Ceph cluster FSID. Must be a UUID.
conditions:
@@ -69,6 +83,15 @@ conditions:
- {get_param: UseTLSTransportForLiveMigration}
- true
+ need_libvirt_secret:
+ or:
+ - equals:
+ - {get_param: NovaEnableRbdBackend}
+ - true
+ - equals:
+ - {get_param: CinderEnableRbdBackend}
+ - true
+
resources:
ContainersCommon:
@@ -102,7 +125,7 @@ outputs:
- {get_attr: [MySQLClient, role_data, step_config]}
puppet_config:
config_volume: nova_libvirt
- puppet_tags: libvirtd_config,nova_config,file,exec
+ puppet_tags: libvirtd_config,nova_config,file
step_config: *step_config
config_image: {get_param: DockerNovaLibvirtConfigImage}
kolla_config:
@@ -145,21 +168,46 @@ outputs:
- /run:/run
- /sys/fs/cgroup:/sys/fs/cgroup
- /var/lib/nova:/var/lib/nova
- - /etc/libvirt/secrets:/etc/libvirt/secrets
+ - /etc/libvirt:/etc/libvirt
# Needed to use host's virtlogd
- /var/run/libvirt:/var/run/libvirt
- /var/lib/libvirt:/var/lib/libvirt
- - /etc/libvirt/qemu:/etc/libvirt/qemu
- /var/log/libvirt/qemu:/var/log/libvirt/qemu:ro
- /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_4:
+ if:
+ - need_libvirt_secret
+ - nova_libvirt_init_secret:
+ detach: false
+ image: {get_param: DockerNovaLibvirtImage}
+ privileged: false
+ user: root
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/puppet-generated/nova_libvirt/etc/nova:/etc/nova:ro
+ - /etc/libvirt:/etc/libvirt
+ - /var/run/libvirt:/var/run/libvirt
+ - /var/lib/libvirt:/var/lib/libvirt
+ command:
+ - /bin/bash
+ - -c
+ - str_replace:
+ template: /usr/bin/virsh secret-define --file /etc/nova/secret.xml && /usr/bin/virsh secret-set-value --secret 'SECRET_UUID' --base64 'SECRET_KEY'
+ params:
+ SECRET_UUID: {get_param: CephClusterFSID}
+ SECRET_KEY: {get_param: CephClientKey}
+ - {}
host_prep_tasks:
- name: create libvirt persistent data directories
file:
path: "{{ item }}"
state: directory
with_items:
+ - /etc/libvirt
- /etc/libvirt/secrets
- /etc/libvirt/qemu
- /var/lib/libvirt
diff --git a/docker/services/nova-placement.yaml b/docker/services/nova-placement.yaml
index d784ace3..26d17560 100644
--- a/docker/services/nova-placement.yaml
+++ b/docker/services/nova-placement.yaml
@@ -36,6 +36,13 @@ parameters:
default: {}
description: Parameters specific to the role
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
@@ -104,6 +111,16 @@ outputs:
- /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/nova_placement/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/nova:/var/log/nova
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
metadata_settings:
diff --git a/docker/services/pacemaker/database/mysql.yaml b/docker/services/pacemaker/database/mysql.yaml
index 3fb38349..a9e49b28 100644
--- a/docker/services/pacemaker/database/mysql.yaml
+++ b/docker/services/pacemaker/database/mysql.yaml
@@ -43,6 +43,14 @@ parameters:
default: {}
description: Parameters specific to the role
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
resources:
@@ -59,6 +67,10 @@ resources:
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
outputs:
role_data:
description: Containerized service MySQL using composable services.
@@ -79,6 +91,13 @@ outputs:
- 4567
- 4568
- 9200
+ -
+ if:
+ - internal_tls_enabled
+ -
+ tripleo::profile::pacemaker::database::mysql_bundle::ca_file:
+ get_param: InternalTLSCAFile
+ - {}
step_config: ""
# BEGIN DOCKER SETTINGS #
puppet_config:
@@ -103,6 +122,20 @@ outputs:
dest: "/"
merge: true
preserve_properties: true
+ - source: "/var/lib/kolla/config_files/src-tls/*"
+ dest: "/"
+ merge: true
+ optional: true
+ preserve_properties: true
+ permissions:
+ - path: /etc/pki/tls/certs/mysql.crt
+ owner: mysql:mysql
+ perm: '0600'
+ optional: true
+ - path: /etc/pki/tls/private/mysql.key
+ owner: mysql:mysql
+ perm: '0600'
+ optional: true
docker_config:
step_1:
mysql_data_ownership:
@@ -195,6 +228,8 @@ outputs:
file:
path: /var/lib/mysql
state: directory
+ metadata_settings:
+ get_attr: [MysqlPuppetBase, role_data, metadata_settings]
upgrade_tasks:
- name: get bootstrap nodeid
tags: common