143 files changed, 2569 insertions, 2187 deletions
@@ -66,7 +66,7 @@ and should be executed according to the following table:
 +================+=============+=============+=============+=============+=================+
 | keystone       | X           | X           | X           | X           | X               |
 +----------------+-------------+-------------+-------------+-------------+-----------------+
-| glance         | file        | swift       | file        | file        | swift           |
+| glance         | rbd         | swift       | file        | swift + rbd | swift           |
 +----------------+-------------+-------------+-------------+-------------+-----------------+
 | cinder         | rbd         | iscsi       |             |             | iscsi           |
 +----------------+-------------+-------------+-------------+-------------+-----------------+
diff --git a/all-nodes-validation.yaml b/all-nodes-validation.yaml
index 65d01d0f..eea3e40a 100644
--- a/all-nodes-validation.yaml
+++ b/all-nodes-validation.yaml
@@ -10,6 +10,10 @@ parameters:
     default: ''
     description: A string containing a space separated list of IP addresses used to ping test each available network interface.
     type: string
+  ValidateFqdn:
+    default: false
+    description: Optional validation to ensure FQDN as set by Nova matches the name set in /etc/hosts.
+    type: boolean
 
 resources:
   AllNodesValidationsImpl:
@@ -19,6 +23,8 @@ resources:
       inputs:
         - name: ping_test_ips
           default: {get_param: PingTestIps}
+        - name: validate_fqdn
+          default: {get_param: ValidateFqdn}
       config: {get_file: ./validation-scripts/all-nodes.sh}
 
 outputs:
diff --git a/bindep.txt b/bindep.txt
new file mode 100644
index 00000000..4f9b4254
--- /dev/null
+++ b/bindep.txt
@@ -0,0 +1,2 @@
+# This is a cross-platform list tracking distribution packages needed by tests;
+# see http://docs.openstack.org/infra/bindep/ for additional information.
diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index cc22ff92..83b3ac40 100644
--- a/capabilities-map.yaml
+++ b/capabilities-map.yaml
@@ -308,6 +308,11 @@ topics:
         description: >
           Enable various Neutron plugins and backends
         environments:
+          - file: environments/neutron-bgpvpn.yaml
+            title: Neutron BGPVPN Service Plugin
+            description: Enables Neutron BGPVPN Service Plugin
+            requires:
+              - overcloud-resource-registry-puppet.yaml
           - file: environments/neutron-ml2-bigswitch.yaml
             title: BigSwitch Extensions
             description: >
@@ -335,21 +340,11 @@ topics:
            description: Enables Neutron Nuage backend on the controller
            requires:
              - overcloud-resource-registry-puppet.yaml
-          - file: environments/neutron-opencontrail.yaml
-            title: OpenContrail Extensions
-            description: Enables OpenContrail extensions
-            requires:
-              - overcloud-resource-registry-puppet.yaml
           - file: environments/neutron-opendaylight.yaml
             title: OpenDaylight
            description: Enables OpenDaylight
            requires:
              - overcloud-resource-registry-puppet.yaml
-          - file: environments/neutron-opendaylight-l3.yaml
-            title: OpenDaylight with L3 DVR
-            description: Enables OpenDaylight with L3 DVR
-            requires:
-              - overcloud-resource-registry-puppet.yaml
           - file: environments/neutron-ovs-dpdk.yaml
             title: DPDK with OVS
             description: Deploy DPDK with OVS
@@ -544,14 +539,6 @@ topics:
            description:
            requires:
              - overcloud-resource-registry-puppet.yaml
-    - title: Manage Firewall
-      description:
-      environments:
-        - file: environments/manage-firewall.yaml
-          title: Manage Firewall
-          description:
-          requires:
-            - overcloud-resource-registry-puppet.yaml
 
   - title: Operational Tools
     description:
@@ -600,3 +587,8 @@ topics:
            description:
            requires:
              - overcloud-resource-registry-puppet.yaml
+    - title: Keystone CADF auditing
+      description: Enable CADF notifications in Keystone for auditing
+      environments:
+        - file: environments/cadf.yaml
+          title: Keystone CADF auditing
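The ValidateFqdn parameter added above is wired into the validation SoftwareConfig as an input named validate_fqdn; the check itself lives in validation-scripts/all-nodes.sh, which is not shown in this hunk. Purely as an illustration (the real implementation is a bash script and the names below are hypothetical), the intended comparison amounts to:

```python
import socket

def fqdn_matches_hosts(hosts_path="/etc/hosts"):
    """Sketch only: does the FQDN reported by the node appear in /etc/hosts?"""
    fqdn = socket.getfqdn()
    names = set()
    with open(hosts_path) as hosts:
        for line in hosts:
            fields = line.split("#")[0].split()
            # fields[0] is the IP address; the rest are hostnames/aliases
            names.update(fields[1:])
    return fqdn in names
```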
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
index d9496af6..0f079436 100755
--- a/docker/docker-puppet.py
+++ b/docker/docker-puppet.py
@@ -102,6 +102,9 @@ for service in (json_data or []):
     config_image = service[3] or ''
     volumes = service[4] if len(service) > 4 else []
 
+    if not manifest or not config_image:
+        continue
+
     print('---------')
     print('config_volume %s' % config_volume)
     print('puppet_tags %s' % puppet_tags)
@@ -207,6 +210,11 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
     dcmd.extend(['--entrypoint', sh_script])
     env = {}
+    # NOTE(flaper87): Always copy the DOCKER_* environment variables as
+    # they contain the access data for the docker daemon.
+    for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
+        env[k] = os.environ.get(k)
+
     if os.environ.get('NET_HOST', 'false') == 'true':
         print('NET_HOST enabled')
         dcmd.extend(['--net', 'host', '--volume',
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
new file mode 100644
index 00000000..301d838f
--- /dev/null
+++ b/docker/docker-steps.j2
@@ -0,0 +1,350 @@
+# certain initialization steps (run in a container) will occur
+# on the first role listed in the roles file
+{% set primary_role_name = roles[0].name -%}
+
+heat_template_version: ocata
+
+description: >
+  Post-deploy configuration steps via puppet for all roles,
+  as defined in ../roles_data.yaml
+
+parameters:
+  servers:
+    type: json
+    description: Mapping of Role name e.g Controller to a list of servers
+  role_data:
+    type: json
+    description: Mapping of Role name e.g Controller to the per-role data
+  DeployIdentifier:
+    default: ''
+    type: string
+    description: >
+      Setting this to a unique value will re-run any deployment tasks which
+      perform configuration on a Heat stack-update.
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
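The docker-puppet.py hunks above index into entries of /var/lib/docker-puppet/docker-puppet.json, the file written by the GenPuppetConfig resources later in this diff. As a readability aid only (not part of the change; the values are hypothetical, only the positional layout matches what the script reads), one entry and the new environment handling look roughly like this:

```python
import os

# Hypothetical entry in /var/lib/docker-puppet/docker-puppet.json
example_service = [
    "heat",                                         # service[0] -> config_volume
    "heat_config,file,concat,file_line",            # service[1] -> puppet_tags
    "include ::tripleo::profile::base::heat::api",  # service[2] -> manifest (step_config)
    "tripleoupstream/centos-binary-heat-engine:latest",  # service[3] -> config_image
    [],                                             # service[4] -> optional extra volumes
]
# Entries with an empty manifest or config_image are now skipped (see the
# `continue` added above), and the docker client settings are passed through
# so docker can be reached from inside the config container:
env = {k: v for k, v in os.environ.items() if k.startswith("DOCKER")}
```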
+ type: json + +resources: + + # These utility tasks use docker-puppet.py to execute tasks via puppet + # We only execute these on the first node in the primary role + {{primary_role_name}}DockerPuppetTasks: + type: OS::Heat::Value + properties: + type: json + value: + yaql: + expression: + dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1])) + data: + docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]} + +# BEGIN primary_role_name docker-puppet-tasks (run only on a single node) +{% for step in range(1, 6) %} + + {{primary_role_name}}DockerPuppetJsonConfig{{step}}: + type: OS::Heat::StructuredConfig + properties: + group: json-file + config: + /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json: + {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']} + + {{primary_role_name}}DockerPuppetJsonDeployment{{step}}: + type: OS::Heat::SoftwareDeployment + properties: + server: {get_param: [servers, {{primary_role_name}}, '0']} + config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}} + + {{primary_role_name}}DockerPuppetTasksConfig{{step}}: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: docker-puppet.py} + inputs: + - name: CONFIG + - name: NET_HOST + - name: NO_ARCHIVE + - name: STEP + + {{primary_role_name}}DockerPuppetTasksDeployment{{step}}: + type: OS::Heat::SoftwareDeployment + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step{{step}} + - {{dep.name}}ContainersDeployment_Step{{step}} + {% endfor %} + - {{primary_role_name}}DockerPuppetJsonDeployment{{step}} + properties: + name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}} + server: {get_param: [servers, {{primary_role_name}}, '0']} + config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}} + input_values: + CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json + NET_HOST: 'true' + NO_ARCHIVE: 'true' + STEP: {{step}} + +{% endfor %} +# END primary_role_name docker-puppet-tasks + +{% for role in roles %} + # Post deployment steps for all roles + # A single config is re-applied with an incrementing step number + # {{role.name}} Role steps + {{role.name}}ArtifactsConfig: + type: ../puppet/deploy-artifacts.yaml + + {{role.name}}ArtifactsDeploy: + type: OS::Heat::StructuredDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}ArtifactsConfig} + + {{role.name}}PreConfig: + type: OS::TripleO::Tasks::{{role.name}}PreConfig + properties: + servers: {get_param: [servers, {{role.name}}]} + input_values: + update_identifier: {get_param: DeployIdentifier} + + {{role.name}}CreateConfigDir: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: create-config-dir.sh} + + {{role.name}}CreateConfigDirDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}CreateConfigDir} + + {{role.name}}HostPrepAnsible: + type: OS::Heat::Value + properties: + value: + str_replace: + template: CONFIG + params: + CONFIG: + - hosts: localhost + connection: local + tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]} + + {{role.name}}HostPrepConfig: + type: OS::Heat::SoftwareConfig + properties: + group: ansible + options: + modulepath: /usr/share/ansible-modules + config: {get_attr: [{{role.name}}HostPrepAnsible, value]} + + {{role.name}}HostPrepDeployment: + 
type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}HostPrepConfig} + + # this creates a JSON config file for our docker-puppet.py script + {{role.name}}GenPuppetConfig: + type: OS::Heat::StructuredConfig + properties: + group: json-file + config: + /var/lib/docker-puppet/docker-puppet.json: + {get_param: [role_data, {{role.name}}, puppet_config]} + + {{role.name}}GenPuppetDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}GenPuppetConfig} + + {{role.name}}GenerateConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: docker-puppet.py} + + {{role.name}}GenerateConfigDeployment: + type: OS::Heat::SoftwareDeploymentGroup + depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment, {{role.name}}HostPrepDeployment] + properties: + name: {{role.name}}GenerateConfigDeployment + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}GenerateConfig} + + {{role.name}}PuppetStepConfig: + type: OS::Heat::Value + properties: + type: string + value: + yaql: + expression: + # select 'step_config' only from services that do not have a docker_config + $.data.service_names.zip($.data.step_config, $.data.docker_config).where($[2] = null).where($[1] != null).select($[1]).join("\n") + data: + service_names: {get_param: [role_data, {{role.name}}, service_names]} + step_config: {get_param: [role_data, {{role.name}}, step_config]} + docker_config: {get_param: [role_data, {{role.name}}, docker_config]} + + {{role.name}}DockerConfig: + type: OS::Heat::Value + properties: + type: json + value: + yaql: + expression: + # select 'docker_config' only from services that have it + $.data.service_names.zip($.data.docker_config).where($[1] != null).select($[1]).reduce($1.mergeWith($2), {}) + data: + service_names: {get_param: [role_data, {{role.name}}, service_names]} + docker_config: {get_param: [role_data, {{role.name}}, docker_config]} + + # Here we are dumping all the docker container startup configuration data + # so that we can have access to how they are started outside of heat + # and docker-cmd. This lets us create command line tools to start and + # test these containers. 
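The {{role.name}}PuppetStepConfig and {{role.name}}DockerConfig values defined above filter per-service data with yaql. A rough Python equivalent of those two expressions, included only to make the filtering easier to follow (the zip/merge semantics are simplified; yaql's mergeWith merges recursively, while dict.update below is a shallow stand-in):

```python
def puppet_step_config(service_names, step_configs, docker_configs):
    # keep step_config only for services that do not define a docker_config
    return "\n".join(
        sc for _, sc, dc in zip(service_names, step_configs, docker_configs)
        if dc is None and sc is not None
    )

def merged_docker_config(service_names, docker_configs):
    # merge the docker_config maps of the services that define one
    merged = {}
    for _, dc in zip(service_names, docker_configs):
        if dc is not None:
            merged.update(dc)  # shallow approximation of yaql's mergeWith
    return merged
```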
+ {{role.name}}DockerConfigJsonStartupData: + type: OS::Heat::StructuredConfig + properties: + group: json-file + config: + /var/lib/docker-container-startup-configs.json: + {get_attr: [{{role.name}}DockerConfig, value]} + + {{role.name}}DockerConfigJsonStartupDataDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + config: {get_resource: {{role.name}}DockerConfigJsonStartupData} + servers: {get_param: [servers, {{role.name}}]} + + {{role.name}}KollaJsonConfig: + type: OS::Heat::StructuredConfig + properties: + group: json-file + config: + {get_param: [role_data, {{role.name}}, kolla_config]} + + {{role.name}}KollaJsonDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + name: {{role.name}}KollaJsonDeployment + config: {get_resource: {{role.name}}KollaJsonConfig} + servers: {get_param: [servers, {{role.name}}]} + + # BEGIN BAREMETAL CONFIG STEPS + + {% if role.name == 'Controller' %} + ControllerPrePuppet: + type: OS::TripleO::Tasks::ControllerPrePuppet + properties: + servers: {get_param: [servers, Controller]} + input_values: + update_identifier: {get_param: DeployIdentifier} + {% endif %} + + {{role.name}}Config: + type: OS::TripleO::{{role.name}}Config + properties: + StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]} + + {% for step in range(1, 6) %} + + {{role.name}}Deployment_Step{{step}}: + type: OS::Heat::StructuredDeploymentGroup + {% if step == 1 %} + depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] + {% else %} + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step{{step -1}} + - {{dep.name}}ContainersDeployment_Step{{step -1}} + {% endfor %} + - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}} + {% endif %} + properties: + name: {{role.name}}Deployment_Step{{step}} + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}Config} + input_values: + step: {{step}} + update_identifier: {get_param: DeployIdentifier} + + {% endfor %} + # END BAREMETAL CONFIG STEPS + + # BEGIN CONTAINER CONFIG STEPS + {% for step in range(1, 6) %} + + {{role.name}}ContainersConfig_Step{{step}}: + type: OS::Heat::StructuredConfig + properties: + group: docker-cmd + config: + {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]} + + {{role.name}}ContainersDeployment_Step{{step}}: + type: OS::Heat::StructuredDeploymentGroup + {% if step == 1 %} + depends_on: + - {{role.name}}PreConfig + - {{role.name}}KollaJsonDeployment + - {{role.name}}GenPuppetDeployment + - {{role.name}}GenerateConfigDeployment + {% else %} + depends_on: + {% for dep in roles %} + - {{dep.name}}ContainersDeployment_Step{{step -1}} + - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first + - {{dep.name}}Deployment_Step{{step -1}} + {% endfor %} + - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}} + {% endif %} + properties: + name: {{role.name}}ContainersDeployment_Step{{step}} + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}} + + {% endfor %} + # END CONTAINER CONFIG STEPS + + {{role.name}}PostConfig: + type: OS::TripleO::Tasks::{{role.name}}PostConfig + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step5 + - {{primary_role_name}}DockerPuppetTasksDeployment5 + {% endfor %} + properties: + servers: {get_param: servers} + input_values: + update_identifier: {get_param: DeployIdentifier} + + # Note, this should come last, so use depends_on to ensure + # this is created after any other 
resources. + {{role.name}}ExtraConfigPost: + depends_on: + {% for dep in roles %} + - {{dep.name}}PostConfig + {% endfor %} + type: OS::TripleO::NodeExtraConfigPost + properties: + servers: {get_param: [servers, {{role.name}}]} + + {% if role.name == 'Controller' %} + ControllerPostPuppet: + depends_on: + - ControllerExtraConfigPost + type: OS::TripleO::Tasks::ControllerPostPuppet + properties: + servers: {get_param: [servers, Controller]} + input_values: + update_identifier: {get_param: DeployIdentifier} + {% endif %} + +{% endfor %} diff --git a/docker/firstboot/setup_docker_host.sh b/docker/firstboot/setup_docker_host.sh index b2287e91..8b4c6a03 100755 --- a/docker/firstboot/setup_docker_host.sh +++ b/docker/firstboot/setup_docker_host.sh @@ -1,26 +1,8 @@ #!/bin/bash set -eux -# TODO This would be better in puppet +# This file contains setup steps that can't be or have not yet been moved to +# puppet -# TODO remove this when built image includes docker -if [ ! -f "/usr/bin/docker" ]; then - yum -y install docker -fi - -# NOTE(mandre) $docker_namespace_is_registry is not a bash variable but is -# a place holder for text replacement done via heat -if [ "$docker_namespace_is_registry" = "True" ]; then - /usr/bin/systemctl stop docker.service - # if namespace is used with local registry, trim all namespacing - trim_var=$docker_registry - registry_host="${trim_var%%/*}" - /bin/sed -i -r "s/^[# ]*INSECURE_REGISTRY *=.+$/INSECURE_REGISTRY='--insecure-registry $registry_host'/" /etc/sysconfig/docker -fi - -# enable and start docker -/usr/bin/systemctl enable docker.service -/usr/bin/systemctl start docker.service - -# Disable libvirtd +# Disable libvirtd since it conflicts with nova_libvirt container /usr/bin/systemctl disable libvirtd.service /usr/bin/systemctl stop libvirtd.service diff --git a/docker/post-upgrade.j2.yaml b/docker/post-upgrade.j2.yaml new file mode 100644 index 00000000..4477f868 --- /dev/null +++ b/docker/post-upgrade.j2.yaml @@ -0,0 +1,4 @@ +# Note the include here is the same as post.j2.yaml but the data used at +# # the time of rendering is different if any roles disable upgrades +{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%} +{% include 'docker-steps.j2' %} diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml index 65d0c4ee..fd956215 100644 --- a/docker/post.j2.yaml +++ b/docker/post.j2.yaml @@ -1,334 +1 @@ -# certain initialization steps (run in a container) will occur -# on the first role listed in the roles file -{% set primary_role_name = roles[0].name -%} - -heat_template_version: ocata - -description: > - Post-deploy configuration steps via puppet for all roles, - as defined in ../roles_data.yaml - -parameters: - servers: - type: json - description: Mapping of Role name e.g Controller to a list of servers - role_data: - type: json - description: Mapping of Role name e.g Controller to the per-role data - DeployIdentifier: - default: '' - type: string - description: > - Setting this to a unique value will re-run any deployment tasks which - perform configuration on a Heat stack-update. - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. 
- type: json - -resources: - - # These utility tasks use docker-puppet.py to execute tasks via puppet - # We only execute these on the first node in the primary role - {{primary_role_name}}DockerPuppetTasks: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - expression: - dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1])) - data: - docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]} - -# BEGIN primary_role_name docker-puppet-tasks (run only on a single node) -{% for step in range(1, 6) %} - - {{primary_role_name}}DockerPuppetJsonConfig{{step}}: - type: OS::Heat::StructuredConfig - properties: - group: json-file - config: - /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json: - {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']} - - {{primary_role_name}}DockerPuppetJsonDeployment{{step}}: - type: OS::Heat::SoftwareDeployment - properties: - server: {get_param: [servers, {{primary_role_name}}, '0']} - config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}} - - {{primary_role_name}}DockerPuppetTasksConfig{{step}}: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: docker-puppet.py} - inputs: - - name: CONFIG - - name: NET_HOST - - name: NO_ARCHIVE - - name: STEP - - {{primary_role_name}}DockerPuppetTasksDeployment{{step}}: - type: OS::Heat::SoftwareDeployment - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step{{step}} - - {{dep.name}}ContainersDeployment_Step{{step}} - {% endfor %} - - {{primary_role_name}}DockerPuppetJsonDeployment{{step}} - properties: - name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}} - server: {get_param: [servers, {{primary_role_name}}, '0']} - config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}} - input_values: - CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json - NET_HOST: 'true' - NO_ARCHIVE: 'true' - STEP: {{step}} - -{% endfor %} -# END primary_role_name docker-puppet-tasks - -{% for role in roles %} - # Post deployment steps for all roles - # A single config is re-applied with an incrementing step number - # {{role.name}} Role steps - {{role.name}}ArtifactsConfig: - type: ../puppet/deploy-artifacts.yaml - - {{role.name}}ArtifactsDeploy: - type: OS::Heat::StructuredDeploymentGroup - properties: - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}ArtifactsConfig} - - {{role.name}}PreConfig: - type: OS::TripleO::Tasks::{{role.name}}PreConfig - properties: - servers: {get_param: [servers, {{role.name}}]} - input_values: - update_identifier: {get_param: DeployIdentifier} - - {{role.name}}CreateConfigDir: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: create-config-dir.sh} - - {{role.name}}CreateConfigDirDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}CreateConfigDir} - - # this creates a JSON config file for our docker-puppet.py script - {{role.name}}GenPuppetConfig: - type: OS::Heat::StructuredConfig - properties: - group: json-file - config: - /var/lib/docker-puppet/docker-puppet.json: - yaql: - # select only services that have a non-null config_image with - # a step_config as well - expression: - $.data.config_volume.zip($.data.puppet_tags, $.data.step_config, $.data.config_image).where($[3] != null and $[1] != null) - data: - config_volume: 
{get_param: [role_data, {{role.name}}, config_volume]} - step_config: {get_param: [role_data, {{role.name}}, step_config]} - puppet_tags: {get_param: [role_data, {{role.name}}, puppet_tags]} - config_image: {get_param: [role_data, {{role.name}}, config_image]} - - {{role.name}}GenPuppetDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}GenPuppetConfig} - - {{role.name}}GenerateConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: docker-puppet.py} - - {{role.name}}GenerateConfigDeployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment] - properties: - name: {{role.name}}GenerateConfigDeployment - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}GenerateConfig} - - {{role.name}}PuppetStepConfig: - type: OS::Heat::Value - properties: - type: string - value: - yaql: - expression: - # select 'step_config' only from services that do not have a docker_image - $.data.service_names.zip($.data.step_config, $.data.docker_image).where($[2] = null).where($[1] != null).select($[1]).join("\n") - data: - service_names: {get_param: [role_data, {{role.name}}, service_names]} - step_config: {get_param: [role_data, {{role.name}}, step_config]} - docker_image: {get_param: [role_data, {{role.name}}, docker_image]} - - {{role.name}}DockerConfig: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - expression: - # select 'docker_config' only from services that have a docker_image - $.data.service_names.zip($.data.docker_config, $.data.docker_image).where($[2] != null).select($[1]).reduce($1.mergeWith($2), {}) - data: - service_names: {get_param: [role_data, {{role.name}}, service_names]} - docker_config: {get_param: [role_data, {{role.name}}, docker_config]} - docker_image: {get_param: [role_data, {{role.name}}, docker_image]} - - # Here we are dumping all the docker container startup configuration data - # so that we can have access to how they are started outside of heat - # and docker-cmd. This lets us create command line tools to start and - # test these containers. 
- {{role.name}}DockerConfigJsonStartupData: - type: OS::Heat::StructuredConfig - properties: - group: json-file - config: - /var/lib/docker-container-startup-configs.json: - {get_attr: [{{role.name}}DockerConfig, value]} - - {{role.name}}DockerConfigJsonStartupDataDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - config: {get_resource: {{role.name}}DockerConfigJsonStartupData} - servers: {get_param: [servers, {{role.name}}]} - - {{role.name}}KollaJsonConfig: - type: OS::Heat::StructuredConfig - properties: - group: json-file - config: - {get_param: [role_data, {{role.name}}, kolla_config]} - - {{role.name}}KollaJsonDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - name: {{role.name}}KollaJsonDeployment - config: {get_resource: {{role.name}}KollaJsonConfig} - servers: {get_param: [servers, {{role.name}}]} - - # BEGIN BAREMETAL CONFIG STEPS - - {% if role.name == 'Controller' %} - ControllerPrePuppet: - type: OS::TripleO::Tasks::ControllerPrePuppet - properties: - servers: {get_param: [servers, Controller]} - input_values: - update_identifier: {get_param: DeployIdentifier} - {% endif %} - - {{role.name}}Config: - type: OS::TripleO::{{role.name}}Config - properties: - StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]} - - {% for step in range(1, 6) %} - - {{role.name}}Deployment_Step{{step}}: - type: OS::Heat::StructuredDeploymentGroup - {% if step == 1 %} - depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] - {% else %} - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step{{step -1}} - - {{dep.name}}ContainersDeployment_Step{{step -1}} - {% endfor %} - - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}} - {% endif %} - properties: - name: {{role.name}}Deployment_Step{{step}} - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}Config} - input_values: - step: {{step}} - update_identifier: {get_param: DeployIdentifier} - - {% endfor %} - # END BAREMETAL CONFIG STEPS - - # BEGIN CONTAINER CONFIG STEPS - {% for step in range(1, 6) %} - - {{role.name}}ContainersConfig_Step{{step}}: - type: OS::Heat::StructuredConfig - properties: - group: docker-cmd - config: - {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]} - - {{role.name}}ContainersDeployment_Step{{step}}: - type: OS::Heat::StructuredDeploymentGroup - {% if step == 1 %} - depends_on: - - {{role.name}}PreConfig - - {{role.name}}KollaJsonDeployment - - {{role.name}}GenPuppetDeployment - - {{role.name}}GenerateConfigDeployment - {% else %} - depends_on: - {% for dep in roles %} - - {{dep.name}}ContainersDeployment_Step{{step -1}} - - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first - - {{dep.name}}Deployment_Step{{step -1}} - {% endfor %} - - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}} - {% endif %} - properties: - name: {{role.name}}ContainersDeployment_Step{{step}} - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}} - - {% endfor %} - # END CONTAINER CONFIG STEPS - - {{role.name}}PostConfig: - type: OS::TripleO::Tasks::{{role.name}}PostConfig - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step5 - - {{primary_role_name}}DockerPuppetTasksDeployment5 - {% endfor %} - properties: - servers: {get_param: servers} - input_values: - update_identifier: {get_param: DeployIdentifier} - - # Note, this should come last, so use depends_on to ensure - # this is created after any other 
resources. - {{role.name}}ExtraConfigPost: - depends_on: - {% for dep in roles %} - - {{dep.name}}PostConfig - {% endfor %} - type: OS::TripleO::NodeExtraConfigPost - properties: - servers: {get_param: [servers, {{role.name}}]} - - {% if role.name == 'Controller' %} - ControllerPostPuppet: - depends_on: - - ControllerExtraConfigPost - type: OS::TripleO::Tasks::ControllerPostPuppet - properties: - servers: {get_param: [servers, Controller]} - input_values: - update_identifier: {get_param: DeployIdentifier} - {% endif %} - -{% endfor %} +{% include 'docker-steps.j2' %} diff --git a/docker/services/README.rst b/docker/services/README.rst index 881a2a37..219f35eb 100644 --- a/docker/services/README.rst +++ b/docker/services/README.rst @@ -58,27 +58,34 @@ are re-asserted when applying latter ones. the container itself at the /var/lib/kolla/config_files/config.json location and drives how kolla's external config mechanisms work. - * docker_image: The full name of the docker image that will be used. - * docker_config: Data that is passed to the docker-cmd hook to configure a container, or step of containers at each step. See the available steps below and the related docker-cmd hook documentation in the heat-agents project. - * puppet_tags: Puppet resource tag names that are used to generate config - files with puppet. Only the named config resources are used to generate - a config file. Any service that specifies tags will have the default - tags of 'file,concat,file_line' appended to the setting. - Example: keystone_config - - * config_volume: The name of the volume (directory) where config files - will be generated for this service. Use this as the location to - bind mount into the running Kolla container for configuration. - - * config_image: The name of the docker image that will be used for - generating configuration files. This is often the same value as - 'docker_image' above but some containers share a common set of - config files which are generated in a common base container. + * puppet_config: This section is a nested set of key value pairs + that drive the creation of config files using puppet. + Required parameters include: + + * puppet_tags: Puppet resource tag names that are used to generate config + files with puppet. Only the named config resources are used to generate + a config file. Any service that specifies tags will have the default + tags of 'file,concat,file_line' appended to the setting. + Example: keystone_config + + * config_volume: The name of the volume (directory) where config files + will be generated for this service. Use this as the location to + bind mount into the running Kolla container for configuration. + + * config_image: The name of the docker image that will be used for + generating configuration files. This is often the same container + that the runtime service uses. Some services share a common set of + config files which are generated in a common base container. + + * step_config: This setting controls the manifest that is used to + create docker config files via puppet. The puppet tags below are + used along with this manifest to generate a config directory for + this container. * docker_puppet_tasks: This section provides data to drive the docker-puppet.py tool directly. 
The task is executed only once diff --git a/docker/services/aodh-api.yaml b/docker/services/aodh-api.yaml new file mode 100644 index 00000000..ca410d6d --- /dev/null +++ b/docker/services/aodh-api.yaml @@ -0,0 +1,123 @@ +heat_template_version: ocata + +description: > + OpenStack containerized aodh service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerAodhApiImage: + description: image + default: 'centos-binary-aodh-api:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + AodhApiPuppetBase: + type: ../../puppet/services/aodh-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the aodh API role. + value: + service_name: {get_attr: [AodhApiPuppetBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [AodhApiPuppetBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + get_attr: [AodhApiPuppetBase, role_data, step_config] + service_config_settings: {get_attr: [AodhApiPuppetBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + puppet_config: + config_volume: aodh + puppet_tags: aodh_api_paste_ini,aodh_config + step_config: *step_config + config_image: &aodh_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerAodhApiImage} ] + kolla_config: + /var/lib/kolla/config_files/aodh-api.json: + command: /usr/sbin/httpd -DFOREGROUND + config_files: + - dest: /etc/aodh/aodh.conf + owner: aodh + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/aodh/aodh.conf + - dest: /etc/httpd/conf.d/10-aodh_wsgi.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-aodh_wsgi.conf + - dest: /etc/httpd/conf/httpd.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf + - dest: /etc/httpd/conf/ports.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf + - dest: /var/www/cgi-bin/aodh/app + owner: aodh + perm: '0644' + source: /var/lib/kolla/config_files/src/var/www/cgi-bin/aodh/app + docker_config: + step_3: + aodh-init-log: + start_order: 0 + image: *aodh_image + user: root + command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/aodh && chown aodh:aodh /var/log/aodh'] + volumes: + - logs:/var/log + aodh_db_sync: + start_order: 1 + image: *aodh_image + net: host + privileged: false + detach: false + volumes: + - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - logs:/var/log + command: /usr/bin/aodh-dbsync + step_4: + aodh-api: + image: *aodh_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/aodh-api.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/aodh/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/aodh/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro + - 
/etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - logs:/var/log + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable aodh service (running under httpd) + tags: step2 + service: name=httpd state=stopped enabled=no diff --git a/docker/services/aodh-evaluator.yaml b/docker/services/aodh-evaluator.yaml new file mode 100644 index 00000000..d3c8c595 --- /dev/null +++ b/docker/services/aodh-evaluator.yaml @@ -0,0 +1,84 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Aodh Evaluator service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerAodhEvaluatorImage: + description: image + default: 'centos-binary-aodh-evaluator:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + AodhEvaluatorBase: + type: ../../puppet/services/aodh-evaluator.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Aodh API role. + value: + service_name: {get_attr: [AodhEvaluatorBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [AodhEvaluatorBase, role_data, config_settings] + step_config: &step_config + get_attr: [AodhEvaluatorBase, role_data, step_config] + service_config_settings: {get_attr: [AodhEvaluatorBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + puppet_config: + config_volume: aodh + puppet_tags: aodh_config + step_config: *step_config + config_image: &aodh_evaluator_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerAodhEvaluatorImage} ] + kolla_config: + /var/lib/kolla/config_files/aodh-evaluator.json: + command: /usr/bin/aodh-evaluator + config_files: + - dest: /etc/aodh/aodh.conf + owner: aodh + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/aodh/aodh.conf + docker_config: + step_4: + aodh_evaluator: + image: *aodh_evaluator_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/aodh-evaluator.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/aodh/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable openstack-aodh-evaluator service + tags: step2 + service: name=openstack-aodh-evaluator.service state=stopped enabled=no diff --git a/docker/services/aodh-listener.yaml b/docker/services/aodh-listener.yaml new file mode 100644 index 00000000..7aa9618d --- /dev/null +++ b/docker/services/aodh-listener.yaml @@ -0,0 +1,84 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Aodh Listener service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerAodhListenerImage: + description: image + default: 'centos-binary-aodh-listener:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> 
protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + AodhListenerBase: + type: ../../puppet/services/aodh-listener.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Aodh API role. + value: + service_name: {get_attr: [AodhListenerBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [AodhListenerBase, role_data, config_settings] + step_config: &step_config + get_attr: [AodhListenerBase, role_data, step_config] + service_config_settings: {get_attr: [AodhListenerBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + puppet_config: + config_volume: aodh + puppet_tags: aodh_config + step_config: *step_config + config_image: &aodh_listener_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerAodhListenerImage} ] + kolla_config: + /var/lib/kolla/config_files/aodh-listener.json: + command: /usr/bin/aodh-listener + config_files: + - dest: /etc/aodh/aodh.conf + owner: aodh + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/aodh/aodh.conf + docker_config: + step_4: + aodh_listener: + image: *aodh_listener_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/aodh-listener.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/aodh/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable openstack-aodh-listener service + tags: step2 + service: name=openstack-aodh-listener.service state=stopped enabled=no diff --git a/docker/services/aodh-notifier.yaml b/docker/services/aodh-notifier.yaml new file mode 100644 index 00000000..f525d6bd --- /dev/null +++ b/docker/services/aodh-notifier.yaml @@ -0,0 +1,84 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Aodh Notifier service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerAodhNotifierImage: + description: image + default: 'centos-binary-aodh-notifier:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + AodhNotifierBase: + type: ../../puppet/services/aodh-notifier.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Aodh API role. 
+ value: + service_name: {get_attr: [AodhNotifierBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [AodhNotifierBase, role_data, config_settings] + step_config: &step_config + get_attr: [AodhNotifierBase, role_data, step_config] + service_config_settings: {get_attr: [AodhNotifierBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + puppet_config: + config_volume: aodh + puppet_tags: aodh_config + step_config: *step_config + config_image: &aodh_notifier_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerAodhNotifierImage} ] + kolla_config: + /var/lib/kolla/config_files/aodh-notifier.json: + command: /usr/bin/aodh-notifier + config_files: + - dest: /etc/aodh/aodh.conf + owner: aodh + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/aodh/aodh.conf + docker_config: + step_4: + aodh_notifier: + image: *aodh_notifier_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/aodh-notifier.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/aodh/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable openstack-aodh-notifier service + tags: step2 + service: name=openstack-aodh-notifier.service state=stopped enabled=no diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml index 127f8839..68a64a7d 100644 --- a/docker/services/database/mongodb.yaml +++ b/docker/services/database/mongodb.yaml @@ -45,20 +45,20 @@ outputs: map_merge: - get_attr: [MongodbPuppetBase, role_data, config_settings] - mongodb::server::fork: false - step_config: + step_config: &step_config list_join: - "\n" - - "['Mongodb_database', 'Mongodb_user', 'Mongodb_replset'].each |String $val| { noop_resource($val) }" - {get_attr: [MongodbPuppetBase, role_data, step_config]} - upgrade_tasks: {get_attr: [MongodbPuppetBase, role_data, upgrade_tasks]} # BEGIN DOCKER SETTINGS # - docker_image: &mongodb_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ] - puppet_tags: file # set this even though file is the default - config_volume: mongodb - config_image: *mongodb_image + puppet_config: + config_volume: mongodb + puppet_tags: file # set this even though file is the default + step_config: *step_config + config_image: &mongodb_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ] kolla_config: /var/lib/kolla/config_files/mongodb.json: command: /usr/bin/mongod --unixSocketPrefix=/var/run/mongodb --config /etc/mongod.conf run @@ -98,3 +98,7 @@ outputs: volumes: - "mongodb:/var/lib/mongodb/" - "logs:/var/log/kolla:ro" + upgrade_tasks: + - name: Stop and disable mongodb service + tags: step2 + service: name=mongod state=stopped enabled=no diff --git a/docker/services/database/mysql.yaml b/docker/services/database/mysql.yaml index 5809396e..531c1ebd 100644 --- a/docker/services/database/mysql.yaml +++ b/docker/services/database/mysql.yaml @@ -54,20 +54,20 @@ outputs: pid-file: /var/lib/mysql/mariadb.pid mysqld_safe: pid-file: /var/lib/mysql/mariadb.pid - step_config: + step_config: &step_config list_join: - "\n" - - "['Mysql_datadir', 'Mysql_user', 'Mysql_database', 'Mysql_grant', 'Mysql_plugin'].each |String $val| { noop_resource($val) }" - {get_attr: [MysqlPuppetBase, role_data, step_config]} - upgrade_tasks: {get_attr: [MysqlPuppetBase, 
role_data, upgrade_tasks]} # BEGIN DOCKER SETTINGS # - docker_image: &mysql_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ] - puppet_tags: file # set this even though file is the default - config_volume: mysql - config_image: *mysql_image + puppet_config: + config_volume: mysql + puppet_tags: file # set this even though file is the default + step_config: *step_config + config_image: &mysql_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ] kolla_config: /var/lib/kolla/config_files/mysql.json: command: /usr/bin/mysqld_safe @@ -82,17 +82,29 @@ outputs: perm: '0644' docker_config: step_2: - mysql_bootstrap: + mysql_data_ownership: start_order: 0 detach: false image: *mysql_image net: host + user: root + # Kolla does only non-recursive chown + command: ['chown', '-R', 'mysql:', '/var/lib/mysql'] + volumes: + - /var/lib/mysql:/var/lib/mysql + mysql_bootstrap: + start_order: 1 + detach: false + image: *mysql_image + net: host + # Kolla bootstraps aren't idempotent, explicitly checking if bootstrap was done + command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start'] volumes: &mysql_volumes - /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json - /var/lib/config-data/mysql/:/var/lib/kolla/config_files/src:ro - /etc/localtime:/etc/localtime:ro - /etc/hosts:/etc/hosts:ro - - mariadb:/var/lib/mysql/ + - /var/lib/mysql:/var/lib/mysql environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS - KOLLA_BOOTSTRAP=True @@ -110,7 +122,7 @@ outputs: - {get_param: MysqlRootPassword} - {get_param: [DefaultPasswords, mysql_root_password]} mysql: - start_order: 1 + start_order: 2 image: *mysql_image restart: always net: host @@ -128,5 +140,14 @@ outputs: - '/' - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ] volumes: - - "mariadb:/var/lib/mysql/:ro" + - "/var/lib/mysql:/var/lib/mysql/:ro" - "/var/lib/config-data/mysql/root:/root:ro" #provides .my.cnf + host_prep_tasks: + - name: create /var/lib/mysql + file: + path: /var/lib/mysql + state: directory + upgrade_tasks: + - name: Stop and disable mysql service + tags: step2 + service: name=mariadb state=stopped enabled=no diff --git a/docker/services/glance-api.yaml b/docker/services/glance-api.yaml index b8ab9622..77e4aa01 100644 --- a/docker/services/glance-api.yaml +++ b/docker/services/glance-api.yaml @@ -45,16 +45,18 @@ outputs: map_merge: - get_attr: [GlanceApiPuppetBase, role_data, config_settings] - glance::api::sync_db: false - step_config: {get_attr: [GlanceApiPuppetBase, role_data, step_config]} + step_config: &step_config + get_attr: [GlanceApiPuppetBase, role_data, step_config] service_config_settings: {get_attr: [GlanceApiPuppetBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS # - docker_image: &glance_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ] - puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config - config_volume: glance_api - config_image: *glance_image + puppet_config: + config_volume: glance_api + puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config + step_config: *step_config + config_image: &glance_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ] kolla_config: /var/lib/kolla/config_files/glance-api.json: command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file 
/etc/glance/glance-api.conf @@ -94,3 +96,7 @@ outputs: volumes: *glance_volumes environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable glance_api service + tags: step2 + service: name=openstack-glance-api state=stopped enabled=no diff --git a/docker/services/gnocchi-api.yaml b/docker/services/gnocchi-api.yaml new file mode 100644 index 00000000..a64d1507 --- /dev/null +++ b/docker/services/gnocchi-api.yaml @@ -0,0 +1,118 @@ +heat_template_version: ocata + +description: > + OpenStack containerized gnocchi service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerGnocchiApiImage: + description: image + default: 'centos-binary-gnocchi-api:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + GnocchiApiPuppetBase: + type: ../../puppet/services/gnocchi-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the gnocchi API role. + value: + service_name: {get_attr: [GnocchiApiPuppetBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [GnocchiApiPuppetBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + get_attr: [GnocchiApiPuppetBase, role_data, step_config] + service_config_settings: {get_attr: [GnocchiApiPuppetBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + puppet_config: + config_volume: gnocchi + puppet_tags: gnocchi_api_paste_ini,gnocchi_config + step_config: *step_config + config_image: &gnocchi_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiApiImage} ] + kolla_config: + /var/lib/kolla/config_files/gnocchi-api.json: + command: /usr/sbin/httpd -DFOREGROUND + config_files: + - dest: /etc/gnocchi/gnocchi.conf + owner: gnocchi + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/gnocchi/gnocchi.conf + - dest: /etc/httpd/conf.d/10-gnocchi_wsgi.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-gnocchi_wsgi.conf + - dest: /etc/httpd/conf/httpd.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf + - dest: /etc/httpd/conf/ports.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf + - dest: /var/www/cgi-bin/gnocchi/app + owner: gnocchi + perm: '0644' + source: /var/lib/kolla/config_files/src/var/www/cgi-bin/gnocchi/app + docker_config: + step_3: + gnocchi-init-log: + start_order: 0 + image: *gnocchi_image + user: root + command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/gnocchi && chown gnocchi:gnocchi /var/log/gnocchi'] + volumes: + - logs:/var/log + gnocchi_db_sync: + start_order: 1 + image: *gnocchi_image + net: host + detach: false + privileged: false + volumes: + - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - logs:/var/log + command: 
["/usr/bin/gnocchi-upgrade", "--skip-storage"] + step_4: + gnocchi-api: + image: *gnocchi_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/gnocchi-api.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/gnocchi/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/gnocchi/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS diff --git a/docker/services/gnocchi-metricd.yaml b/docker/services/gnocchi-metricd.yaml new file mode 100644 index 00000000..6437e942 --- /dev/null +++ b/docker/services/gnocchi-metricd.yaml @@ -0,0 +1,78 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Gnocchi Metricd service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerGnocchiMetricdImage: + description: image + default: 'centos-binary-gnocchi-metricd:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + GnocchiMetricdBase: + type: ../../puppet/services/gnocchi-metricd.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Gnocchi API role. 
+ value: + service_name: {get_attr: [GnocchiMetricdBase, role_data, service_name]} + config_settings: {get_attr: [GnocchiMetricdBase, role_data, config_settings]} + step_config: &step_config + get_attr: [GnocchiMetricdBase, role_data, step_config] + service_config_settings: {get_attr: [GnocchiMetricdBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + puppet_config: + config_volume: gnocchi + puppet_tags: gnocchi_config + step_config: *step_config + config_image: &gnocchi_metricd_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiMetricdImage} ] + kolla_config: + /var/lib/kolla/config_files/gnocchi-metricd.json: + command: /usr/bin/gnocchi-metricd + config_files: + - dest: /etc/gnocchi/gnocchi.conf + owner: gnocchi + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/gnocchi/gnocchi.conf + docker_config: + step_4: + gnocchi_metricd: + image: *gnocchi_metricd_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/gnocchi-metricd.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/gnocchi/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS diff --git a/docker/services/gnocchi-statsd.yaml b/docker/services/gnocchi-statsd.yaml new file mode 100644 index 00000000..32c16521 --- /dev/null +++ b/docker/services/gnocchi-statsd.yaml @@ -0,0 +1,78 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Gnocchi Statsd service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerGnocchiStatsdImage: + description: image + default: 'centos-binary-gnocchi-statsd:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + GnocchiStatsdBase: + type: ../../puppet/services/gnocchi-statsd.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Gnocchi API role. 
+ value: + service_name: {get_attr: [GnocchiStatsdBase, role_data, service_name]} + config_settings: {get_attr: [GnocchiStatsdBase, role_data, config_settings]} + step_config: &step_config + get_attr: [GnocchiStatsdBase, role_data, step_config] + service_config_settings: {get_attr: [GnocchiStatsdBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + puppet_config: + config_volume: gnocchi + puppet_tags: gnocchi_config + step_config: *step_config + config_image: &gnocchi_statsd_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiStatsdImage} ] + kolla_config: + /var/lib/kolla/config_files/gnocchi-statsd.json: + command: /usr/bin/gnocchi-statsd + config_files: + - dest: /etc/gnocchi/gnocchi.conf + owner: gnocchi + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/gnocchi/gnocchi.conf + docker_config: + step_4: + gnocchi_statsd: + image: *gnocchi_statsd_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/gnocchi-statsd.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/gnocchi/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS diff --git a/docker/services/heat-api-cfn.yaml b/docker/services/heat-api-cfn.yaml index 93632166..2a27efb4 100644 --- a/docker/services/heat-api-cfn.yaml +++ b/docker/services/heat-api-cfn.yaml @@ -51,19 +51,18 @@ outputs: map_merge: - get_attr: [HeatBase, role_data, config_settings] - apache::default_vhost: false - step_config: {get_attr: [HeatBase, role_data, step_config]} + step_config: &step_config + get_attr: [HeatBase, role_data, step_config] service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &heat_api_cfn_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnImage} ] - puppet_tags: heat_config,file,concat,file_line - config_volume: heat - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ] + puppet_config: + config_volume: heat + puppet_tags: heat_config,file,concat,file_line + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ] kolla_config: /var/lib/kolla/config_files/heat_api_cfn.json: command: /usr/bin/heat-api-cfn --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf @@ -75,7 +74,10 @@ outputs: docker_config: step_4: heat_api_cfn: - image: *heat_api_cfn_image + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnImage} ] net: host privileged: false restart: always @@ -88,3 +90,7 @@ outputs: - /dev:/dev environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable heat_api_cfn service + tags: step2 + service: name=openstack-heat-api-cfn state=stopped enabled=no diff --git a/docker/services/heat-api.yaml b/docker/services/heat-api.yaml index 2efabb61..c429870b 100644 --- a/docker/services/heat-api.yaml +++ b/docker/services/heat-api.yaml @@ -51,19 +51,18 @@ outputs: map_merge: - get_attr: [HeatBase, role_data, config_settings] - apache::default_vhost: false - step_config: {get_attr: [HeatBase, role_data, step_config]} + step_config: &step_config + get_attr: [HeatBase, role_data, step_config] service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]} # BEGIN DOCKER 
SETTINGS - docker_image: &heat_api_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiImage} ] - puppet_tags: heat_config,file,concat,file_line - config_volume: heat - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ] + puppet_config: + config_volume: heat + puppet_tags: heat_config,file,concat,file_line + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ] kolla_config: /var/lib/kolla/config_files/heat_api.json: command: /usr/bin/heat-api --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf @@ -75,7 +74,10 @@ outputs: docker_config: step_4: heat_api: - image: *heat_api_image + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiImage} ] net: host privileged: false restart: always @@ -88,3 +90,7 @@ outputs: - /dev:/dev environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable heat_api service + tags: step2 + service: name=openstack-heat-api state=stopped enabled=no diff --git a/docker/services/heat-engine.yaml b/docker/services/heat-engine.yaml index db8c2be5..85a00b1d 100644 --- a/docker/services/heat-engine.yaml +++ b/docker/services/heat-engine.yaml @@ -46,16 +46,18 @@ outputs: map_merge: - get_attr: [HeatBase, role_data, config_settings] - apache::default_vhost: false - step_config: {get_attr: [HeatBase, role_data, step_config]} + step_config: &step_config + get_attr: [HeatBase, role_data, step_config] service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &heat_engine_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ] - puppet_tags: heat_config,file,concat,file_line - config_volume: heat - config_image: *heat_engine_image + puppet_config: + config_volume: heat + puppet_tags: heat_config,file,concat,file_line + step_config: *step_config + config_image: &heat_engine_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ] kolla_config: /var/lib/kolla/config_files/heat_engine.json: command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf @@ -90,3 +92,7 @@ outputs: - /etc/localtime:/etc/localtime:ro environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable heat_engine service + tags: step2 + service: name=openstack-heat-engine state=stopped enabled=no diff --git a/docker/services/ironic-api.yaml b/docker/services/ironic-api.yaml index 80120568..5ae82d46 100644 --- a/docker/services/ironic-api.yaml +++ b/docker/services/ironic-api.yaml @@ -48,19 +48,18 @@ outputs: config_settings: map_merge: - get_attr: [IronicApiBase, role_data, config_settings] - step_config: {get_attr: [IronicApiBase, role_data, step_config]} + step_config: &step_config + get_attr: [IronicApiBase, role_data, step_config] service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &ironic_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ] - puppet_tags: ironic_config - config_volume: ironic - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ] + puppet_config: + config_volume: ironic + puppet_tags: ironic_config + step_config: 
*step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ] kolla_config: /var/lib/kolla/config_files/ironic_api.json: command: /usr/bin/ironic-api @@ -72,7 +71,10 @@ outputs: docker_config: step_3: ironic_db_sync: - image: *ironic_image + image: &ironic_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ] net: host privileged: false detach: false @@ -97,3 +99,7 @@ outputs: - /etc/localtime:/etc/localtime:ro environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable ironic_api service + tags: step2 + service: name=openstack-ironic-api state=stopped enabled=no diff --git a/docker/services/ironic-conductor.yaml b/docker/services/ironic-conductor.yaml index 945ef3fc..8c18a160 100644 --- a/docker/services/ironic-conductor.yaml +++ b/docker/services/ironic-conductor.yaml @@ -55,19 +55,18 @@ outputs: - ironic::pxe::tftp_root: /var/lib/ironic/tftpboot - ironic::pxe::http_root: /var/lib/ironic/httpboot - ironic::conductor::http_root: /var/lib/ironic/httpboot - step_config: {get_attr: [IronicConductorBase, role_data, step_config]} + step_config: &step_config + get_attr: [IronicConductorBase, role_data, step_config] service_config_settings: {get_attr: [IronicConductorBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &ironic_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ] - puppet_tags: ironic_config - config_volume: ironic - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ] + puppet_config: + config_volume: ironic + puppet_tags: ironic_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ] kolla_config: /var/lib/kolla/config_files/ironic_conductor.json: command: /usr/bin/ironic-conductor @@ -86,7 +85,10 @@ outputs: docker_config: step_4: ironic-init-dirs: - image: *ironic_image + image: &ironic_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ] user: root command: ['/bin/bash', '-c', 'mkdir /var/lib/ironic/httpboot && mkdir /var/lib/ironic/tftpboot'] volumes: @@ -109,3 +111,7 @@ outputs: - ironic:/var/lib/ironic environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable ironic_conductor service + tags: step2 + service: name=openstack-ironic-conductor state=stopped enabled=no diff --git a/docker/services/ironic-pxe.yaml b/docker/services/ironic-pxe.yaml index bc7b4677..370b665e 100644 --- a/docker/services/ironic-pxe.yaml +++ b/docker/services/ironic-pxe.yaml @@ -37,19 +37,17 @@ outputs: value: service_name: ironic_pxe config_settings: {} - step_config: '' + step_config: &step_config '' service_config_settings: {} # BEGIN DOCKER SETTINGS - docker_image: &ironic_pxe_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerIronicPxeImage} ] - puppet_tags: ironic_config - config_volume: ironic - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ] + puppet_config: + config_volume: ironic + puppet_tags: ironic_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ] kolla_config: /var/lib/kolla/config_files/ironic_pxe_http.json: command: /usr/sbin/httpd -DFOREGROUND @@ 
-101,7 +99,10 @@ outputs: step_4: ironic_pxe_tftp: start_order: 90 - image: *ironic_pxe_image + image: &ironic_pxe_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicPxeImage} ] net: host privileged: false restart: always diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml index 644326e0..3f8baef7 100644 --- a/docker/services/keystone.yaml +++ b/docker/services/keystone.yaml @@ -30,6 +30,12 @@ parameters: description: The password for the keystone admin account, used for monitoring, querying neutron etc. type: string hidden: true + KeystoneTokenProvider: + description: The keystone token format + type: string + default: 'uuid' + constraints: + - allowed_values: ['uuid', 'fernet'] resources: @@ -40,6 +46,9 @@ resources: ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} +conditions: + keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]} + outputs: role_data: description: Role data for the Keystone API role. @@ -49,20 +58,21 @@ outputs: map_merge: - get_attr: [KeystoneBase, role_data, config_settings] - apache::default_vhost: false - step_config: + step_config: &step_config list_join: - "\n" - - "['Keystone_user', 'Keystone_endpoint', 'Keystone_domain', 'Keystone_tenant', 'Keystone_user_role', 'Keystone_role', 'Keystone_service'].each |String $val| { noop_resource($val) }" - {get_attr: [KeystoneBase, role_data, step_config]} service_config_settings: {get_attr: [KeystoneBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &keystone_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ] - puppet_tags: keystone_config - config_volume: keystone - config_image: *keystone_image + puppet_config: + config_volume: keystone + puppet_tags: keystone_config + step_config: *step_config + config_image: &keystone_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ] kolla_config: /var/lib/kolla/config_files/keystone.json: command: /usr/sbin/httpd -DFOREGROUND @@ -79,6 +89,16 @@ outputs: owner: keystone perm: '0600' source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/1 + - dest: /etc/keystone/fernet-keys/0 + owner: keystone + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/keystone/fernet-keys/0 + optional: {if: [keystone_fernet_tokens, false, true]} + - dest: /etc/keystone/fernet-keys/1 + owner: keystone + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/keystone/fernet-keys/1 + optional: {if: [keystone_fernet_tokens, false, true]} - dest: /etc/httpd/conf.d/10-keystone_wsgi_admin.conf owner: root perm: '0644' @@ -109,7 +129,7 @@ outputs: start_order: 0 image: *keystone_image user: root - command: ['/bin/bash', '-c', 'mkdir /var/log/httpd && mkdir /var/log/keystone && chown keystone:keystone /var/log/keystone'] + command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/keystone && chown keystone:keystone /var/log/keystone'] volumes: - logs:/var/log keystone_db_sync: @@ -152,3 +172,7 @@ outputs: list_join: - '/' - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ] + upgrade_tasks: + - name: Stop and disable keystone service (running under httpd) + tags: step2 + service: name=httpd state=stopped enabled=no diff --git a/docker/services/memcached.yaml b/docker/services/memcached.yaml index d459c825..a78be3c8 100644 --- a/docker/services/memcached.yaml +++ b/docker/services/memcached.yaml @@ -42,16 
+42,18 @@ outputs: value: service_name: {get_attr: [MemcachedBase, role_data, service_name]} config_settings: {get_attr: [MemcachedBase, role_data, config_settings]} - step_config: {get_attr: [MemcachedBase, role_data, step_config]} + step_config: &step_config + get_attr: [MemcachedBase, role_data, step_config] service_config_settings: {get_attr: [MemcachedBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &memcached_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ] - puppet_tags: 'file' - config_volume: 'memcached' - config_image: *memcached_image + puppet_config: + config_volume: 'memcached' + puppet_tags: 'file' + step_config: *step_config + config_image: &memcached_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ] kolla_config: {} docker_config: step_1: @@ -67,3 +69,7 @@ outputs: command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS'] environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable memcached service + tags: step2 + service: name=memcached state=stopped enabled=no diff --git a/docker/services/mistral-api.yaml b/docker/services/mistral-api.yaml index e535a817..4dd3b74c 100644 --- a/docker/services/mistral-api.yaml +++ b/docker/services/mistral-api.yaml @@ -48,19 +48,18 @@ outputs: config_settings: map_merge: - get_attr: [MistralApiBase, role_data, config_settings] - step_config: {get_attr: [MistralApiBase, role_data, step_config]} + step_config: &step_config + get_attr: [MistralApiBase, role_data, step_config] service_config_settings: {get_attr: [MistralApiBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &mistral_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ] - puppet_tags: mistral_config - config_volume: mistral - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ] + puppet_config: + config_volume: mistral + puppet_tags: mistral_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ] kolla_config: /var/lib/kolla/config_files/mistral_api.json: command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api @@ -73,7 +72,10 @@ outputs: step_3: mistral_db_sync: start_order: 1 - image: *mistral_image + image: &mistral_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ] net: host privileged: false detach: false @@ -113,3 +115,7 @@ outputs: - /etc/localtime:/etc/localtime:ro environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable mistral_api service + tags: step2 + service: name=openstack-mistral-api state=stopped enabled=no diff --git a/docker/services/mistral-engine.yaml b/docker/services/mistral-engine.yaml index be4c8af7..fd72e344 100644 --- a/docker/services/mistral-engine.yaml +++ b/docker/services/mistral-engine.yaml @@ -49,19 +49,18 @@ outputs: config_settings: map_merge: - get_attr: [MistralBase, role_data, config_settings] - step_config: {get_attr: [MistralBase, role_data, step_config]} + step_config: &step_config + get_attr: [MistralBase, role_data, step_config] service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]} # 
BEGIN DOCKER SETTINGS - docker_image: &mistral_engine_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ] - puppet_tags: mistral_config - config_volume: mistral - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ] + puppet_config: + config_volume: mistral + puppet_tags: mistral_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ] kolla_config: /var/lib/kolla/config_files/mistral_engine.json: command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/engine.log --server=engine @@ -73,7 +72,10 @@ outputs: docker_config: step_4: mistral_engine: - image: *mistral_engine_image + image: &mistral_engine_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ] net: host privileged: false restart: always @@ -85,3 +87,8 @@ outputs: - /etc/localtime:/etc/localtime:ro environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable mistral_engine service + tags: step2 + service: name=openstack-mistral-engine state=stopped enabled=no + diff --git a/docker/services/mistral-executor.yaml b/docker/services/mistral-executor.yaml index 33608a42..0274ff48 100644 --- a/docker/services/mistral-executor.yaml +++ b/docker/services/mistral-executor.yaml @@ -49,19 +49,18 @@ outputs: config_settings: map_merge: - get_attr: [MistralBase, role_data, config_settings] - step_config: {get_attr: [MistralBase, role_data, step_config]} + step_config: &step_config + get_attr: [MistralBase, role_data, step_config] service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &mistral_executor_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ] - puppet_tags: mistral_config - config_volume: mistral - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ] + puppet_config: + config_volume: mistral + puppet_tags: mistral_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ] kolla_config: /var/lib/kolla/config_files/mistral_executor.json: command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/executor.log --server=executor @@ -73,7 +72,10 @@ outputs: docker_config: step_4: mistral_executor: - image: *mistral_executor_image + image: &mistral_executor_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ] net: host privileged: false restart: always @@ -89,3 +91,7 @@ outputs: - /var/lib/config-data/nova/etc/nova:/etc/nova:ro environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable mistral_executor service + tags: step2 + service: name=openstack-mistral-executor state=stopped enabled=no diff --git a/docker/services/neutron-api.yaml b/docker/services/neutron-api.yaml index dfd1d5c0..ed03de6c 100644 --- a/docker/services/neutron-api.yaml +++ b/docker/services/neutron-api.yaml @@ -49,19 +49,18 @@ outputs: config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] - step_config: {get_attr: [NeutronBase, role_data, step_config]} + step_config: &step_config + get_attr: [NeutronBase, role_data, step_config] 
service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &neutron_api_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ] - puppet_tags: neutron_config,neutron_api_config - config_volume: neutron - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] + puppet_config: + config_volume: neutron + puppet_tags: neutron_config,neutron_api_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] kolla_config: /var/lib/kolla/config_files/neutron_api.json: command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini @@ -77,7 +76,10 @@ outputs: docker_config: step_3: neutron_db_sync: - image: *neutron_api_image + image: &neutron_api_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ] net: host privileged: false detach: false @@ -103,3 +105,7 @@ outputs: - /etc/localtime:/etc/localtime:ro environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable neutron_api service + tags: step2 + service: name=neutron-server state=stopped enabled=no diff --git a/docker/services/neutron-dhcp.yaml b/docker/services/neutron-dhcp.yaml index fc13b3d1..a4854d90 100644 --- a/docker/services/neutron-dhcp.yaml +++ b/docker/services/neutron-dhcp.yaml @@ -49,19 +49,18 @@ outputs: config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] - step_config: {get_attr: [NeutronBase, role_data, step_config]} + step_config: &step_config + get_attr: [NeutronBase, role_data, step_config] service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &neutron_dhcp_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ] - puppet_tags: neutron_config,neutron_dhcp_agent_config - config_volume: neutron - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] + puppet_config: + config_volume: neutron + puppet_tags: neutron_config,neutron_dhcp_agent_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] kolla_config: /var/lib/kolla/config_files/neutron_dhcp.json: command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log @@ -77,7 +76,10 @@ outputs: docker_config: step_4: neutron_dhcp: - image: *neutron_dhcp_image + image: &neutron_dhcp_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ] net: host pid: host privileged: true @@ -91,3 +93,7 @@ outputs: - /run/:/run environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable neutron_dhcp service + tags: step2 + service: name=neutron-dhcp-agent state=stopped enabled=no diff --git a/docker/services/neutron-l3.yaml b/docker/services/neutron-l3.yaml index c74ab4fe..61ad8f4a 100644 --- a/docker/services/neutron-l3.yaml +++ b/docker/services/neutron-l3.yaml @@ -47,17 +47,16 @@ outputs: value: service_name: {get_attr: 
[NeutronL3Base, role_data, service_name]} config_settings: {get_attr: [NeutronL3Base, role_data, config_settings]} - step_config: {get_attr: [NeutronL3Base, role_data, step_config]} - docker_image: &neutron_l3_agent_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNeutronL3AgentImage} ] - puppet_tags: neutron_config,neutron_l3_agent_config - config_volume: neutron - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] + step_config: &step_config + get_attr: [NeutronL3Base, role_data, step_config] + puppet_config: + puppet_tags: neutron_config,neutron_l3_agent_config + config_volume: neutron + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] kolla_config: /var/lib/kolla/config_files/neutron-l3-agent.json: command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini @@ -73,7 +72,10 @@ outputs: docker_config: step_4: neutronl3agent: - image: *neutron_l3_agent_image + image: &neutron_l3_agent_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronL3AgentImage} ] net: host pid: host privileged: true diff --git a/docker/services/neutron-ovs-agent.yaml b/docker/services/neutron-ovs-agent.yaml index ab99da5e..4102693b 100644 --- a/docker/services/neutron-ovs-agent.yaml +++ b/docker/services/neutron-ovs-agent.yaml @@ -42,14 +42,16 @@ outputs: value: service_name: {get_attr: [NeutronOvsAgentBase, role_data, service_name]} config_settings: {get_attr: [NeutronOvsAgentBase, role_data, config_settings]} - step_config: {get_attr: [NeutronOvsAgentBase, role_data, step_config]} - docker_image: &neutron_ovs_agent_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ] - puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2 - config_volume: neutron - config_image: *neutron_ovs_agent_image + step_config: &step_config + get_attr: [NeutronOvsAgentBase, role_data, step_config] + puppet_config: + config_volume: neutron + puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2 + step_config: *step_config + config_image: &neutron_ovs_agent_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ] kolla_config: /var/lib/kolla/config_files/neutron-openvswitch-agent.json: command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini @@ -82,3 +84,7 @@ outputs: - /run:/run environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable neutron_ovs_agent service + tags: step2 + service: name=neutron-openvswitch-agent state=stopped enabled=no diff --git a/docker/services/neutron-plugin-ml2.yaml b/docker/services/neutron-plugin-ml2.yaml index 37ab8db2..34864d3a 100644 --- a/docker/services/neutron-plugin-ml2.yaml +++ b/docker/services/neutron-plugin-ml2.yaml @@ -44,15 +44,17 @@ outputs: config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] - step_config: {get_attr: [NeutronBase, role_data, step_config]} + step_config: &step_config + get_attr: [NeutronBase, role_data, step_config] service_config_settings: {get_attr: 
[NeutronBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &docker_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] - puppet_tags: '' - config_volume: 'neutron' - config_image: *docker_image + puppet_config: + config_volume: 'neutron' + puppet_tags: '' + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] kolla_config: {} docker_config: {} diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml index 0cf1b859..1c57bbf5 100644 --- a/docker/services/nova-api.yaml +++ b/docker/services/nova-api.yaml @@ -49,19 +49,18 @@ outputs: map_merge: - get_attr: [NovaApiBase, role_data, config_settings] - apache::default_vhost: false - step_config: {get_attr: [NovaApiBase, role_data, step_config]} + step_config: &step_config + get_attr: [NovaApiBase, role_data, step_config] service_config_settings: {get_attr: [NovaApiBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &nova_api_image - list_join: + puppet_config: + config_volume: nova + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ] - puppet_tags: nova_config - config_volume: nova - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] + - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] kolla_config: /var/lib/kolla/config_files/nova_api.json: command: /usr/bin/nova-api @@ -74,7 +73,10 @@ outputs: step_3: nova_api_db_sync: start_order: 1 - image: *nova_api_image + image: &nova_api_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ] net: host detach: false volumes: &nova_api_volumes @@ -142,3 +144,7 @@ outputs: - '/usr/bin/nova-manage' - 'cell_v2' - 'discover_hosts' + upgrade_tasks: + - name: Stop and disable nova_api service + tags: step2 + service: name=openstack-nova-api state=stopped enabled=no diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml index 570df95f..7fc00b47 100644 --- a/docker/services/nova-compute.yaml +++ b/docker/services/nova-compute.yaml @@ -43,14 +43,16 @@ outputs: value: service_name: {get_attr: [NovaComputeBase, role_data, service_name]} config_settings: {get_attr: [NovaComputeBase, role_data, config_settings]} - step_config: {get_attr: [NovaComputeBase, role_data, step_config]} - puppet_tags: nova_config,nova_paste_api_ini - docker_image: &nova_compute_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] - config_volume: nova_libvirt - config_image: *nova_compute_image + step_config: &step_config + get_attr: [NovaComputeBase, role_data, step_config] + puppet_config: + config_volume: nova_libvirt + puppet_tags: nova_config,nova_paste_api_ini + step_config: *step_config + config_image: &nova_compute_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] kolla_config: /var/lib/kolla/config_files/nova-compute.json: command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf diff --git a/docker/services/nova-conductor.yaml b/docker/services/nova-conductor.yaml index aa009b4f..09a6d0f6 100644 --- a/docker/services/nova-conductor.yaml +++ b/docker/services/nova-conductor.yaml @@ -47,19 +47,18 @@ outputs: value: service_name: 
{get_attr: [NovaConductorBase, role_data, service_name]} config_settings: {get_attr: [NovaConductorBase, role_data, config_settings]} - step_config: {get_attr: [NovaConductorBase, role_data, step_config]} + step_config: &step_config + get_attr: [NovaConductorBase, role_data, step_config] service_config_settings: {get_attr: [NovaConductorBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &nova_conductor_image - list_join: + puppet_config: + config_volume: nova + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ] - puppet_tags: nova_config - config_volume: nova - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] + - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] kolla_config: /var/lib/kolla/config_files/nova_conductor.json: command: /usr/bin/nova-conductor @@ -71,7 +70,10 @@ outputs: docker_config: step_4: nova_conductor: - image: *nova_conductor_image + image: &nova_conductor_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ] net: host privileged: false restart: always @@ -83,3 +85,7 @@ outputs: - /etc/localtime:/etc/localtime:ro environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable nova_conductor service + tags: step2 + service: name=openstack-nova-conductor state=stopped enabled=no diff --git a/docker/services/nova-ironic.yaml b/docker/services/nova-ironic.yaml index c1858ded..d3c0af44 100644 --- a/docker/services/nova-ironic.yaml +++ b/docker/services/nova-ironic.yaml @@ -45,17 +45,16 @@ outputs: value: service_name: {get_attr: [NovaIronicBase, role_data, service_name]} config_settings: {get_attr: [NovaIronicBase, role_data, config_settings]} - step_config: {get_attr: [NovaIronicBase, role_data, step_config]} - puppet_tags: nova_config,nova_paste_api_ini - docker_image: &nova_ironic_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] - config_volume: nova - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] + step_config: &step_config + get_attr: [NovaIronicBase, role_data, step_config] + puppet_config: + config_volume: nova + puppet_tags: nova_config,nova_paste_api_ini + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] kolla_config: /var/lib/kolla/config_files/nova_ironic.json: command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf @@ -71,7 +70,10 @@ outputs: docker_config: step_5: novacompute: - image: *nova_ironic_image + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] net: host privileged: true user: root diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml index d6e7dc76..e25b2014 100644 --- a/docker/services/nova-libvirt.yaml +++ b/docker/services/nova-libvirt.yaml @@ -48,17 +48,16 @@ outputs: value: service_name: {get_attr: [NovaLibvirtBase, role_data, service_name]} config_settings: {get_attr: [NovaLibvirtBase, role_data, config_settings]} - step_config: {get_attr: [NovaLibvirtBase, role_data, step_config]} - docker_image: &libvirt_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ] - puppet_tags: nova_config - 
config_volume: nova_libvirt - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] + step_config: &step_config + get_attr: [NovaLibvirtBase, role_data, step_config] + puppet_config: + config_volume: nova_libvirt + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] kolla_config: /var/lib/kolla/config_files/nova-libvirt.json: command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf @@ -70,7 +69,10 @@ outputs: docker_config: step_3: nova_libvirt: - image: *libvirt_image + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ] net: host pid: host privileged: true diff --git a/docker/services/nova-metadata.yaml b/docker/services/nova-metadata.yaml index a4baaa27..b452c61b 100644 --- a/docker/services/nova-metadata.yaml +++ b/docker/services/nova-metadata.yaml @@ -37,12 +37,14 @@ outputs: config_settings: map_merge: - get_attr: [NovaMetadataBase, role_data, config_settings] - step_config: {get_attr: [NovaMetadataBase, role_data, step_config]} + step_config: &step_config + get_attr: [NovaMetadataBase, role_data, step_config] service_config_settings: {get_attr: [NovaMetadataBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: '' - puppet_tags: '' - config_volume: '' - config_image: '' + puppet_config: + config_volume: '' + puppet_tags: '' + step_config: *step_config + config_image: '' kolla_config: {} docker_config: {} diff --git a/docker/services/nova-placement.yaml b/docker/services/nova-placement.yaml index f0f7d724..0f32e33f 100644 --- a/docker/services/nova-placement.yaml +++ b/docker/services/nova-placement.yaml @@ -45,19 +45,18 @@ outputs: map_merge: - get_attr: [NovaPlacementBase, role_data, config_settings] - apache::default_vhost: false - step_config: {get_attr: [NovaPlacementBase, role_data, step_config]} + step_config: &step_config + get_attr: [NovaPlacementBase, role_data, step_config] service_config_settings: {get_attr: [NovaPlacementBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &nova_placement_image - list_join: + puppet_config: + config_volume: nova_placement + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: - '/' - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ] - puppet_tags: nova_config - config_volume: nova_placement - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ] kolla_config: /var/lib/kolla/config_files/nova_placement.json: command: /usr/sbin/httpd -DFOREGROUND @@ -70,6 +69,12 @@ outputs: owner: root perm: '0644' source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-placement_wsgi.conf + # puppet generates a stubbed out version of the stock one so we + # copy it in to overwrite the existing one + - dest: /etc/httpd/conf.d/00-nova-placement-api.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/00-nova-placement-api.conf - dest: /etc/httpd/conf/httpd.conf owner: root perm: '0644' @@ -87,7 +92,10 @@ outputs: step_3: nova_placement: start_order: 1 - image: *nova_placement_image + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ] net: host user: root restart: always @@ -99,3 +107,7 @@ outputs: - /etc/localtime:/etc/localtime:ro environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + 
upgrade_tasks: + - name: Stop and disable nova_placement service (running under httpd) + tags: step2 + service: name=httpd state=stopped enabled=no diff --git a/docker/services/nova-scheduler.yaml b/docker/services/nova-scheduler.yaml index a1a98b48..0b64ca37 100644 --- a/docker/services/nova-scheduler.yaml +++ b/docker/services/nova-scheduler.yaml @@ -46,19 +46,18 @@ outputs: value: service_name: {get_attr: [NovaSchedulerBase, role_data, service_name]} config_settings: {get_attr: [NovaSchedulerBase, role_data, config_settings]} - step_config: {get_attr: [NovaSchedulerBase, role_data, step_config]} + step_config: &step_config + get_attr: [NovaSchedulerBase, role_data, step_config] service_config_settings: {get_attr: [NovaSchedulerBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &nova_scheduler_image - list_join: + puppet_config: + config_volume: nova + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaSchedulerImage} ] - puppet_tags: nova_config - config_volume: nova - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] + - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] kolla_config: /var/lib/kolla/config_files/nova_scheduler.json: command: /usr/bin/nova-scheduler @@ -70,7 +69,10 @@ outputs: docker_config: step_4: nova_scheduler: - image: *nova_scheduler_image + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaSchedulerImage} ] net: host privileged: false restart: always @@ -82,3 +84,7 @@ outputs: - /etc/localtime:/etc/localtime:ro environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable nova_scheduler service + tags: step2 + service: name=openstack-nova-scheduler state=stopped enabled=no diff --git a/docker/services/rabbitmq.yaml b/docker/services/rabbitmq.yaml index cea3d8a7..573ec178 100644 --- a/docker/services/rabbitmq.yaml +++ b/docker/services/rabbitmq.yaml @@ -46,16 +46,18 @@ outputs: value: service_name: {get_attr: [RabbitmqBase, role_data, service_name]} config_settings: {get_attr: [RabbitmqBase, role_data, config_settings]} - step_config: {get_attr: [RabbitmqBase, role_data, step_config]} + step_config: &step_config + get_attr: [RabbitmqBase, role_data, step_config] service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &rabbitmq_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ] - puppet_tags: file - config_volume: rabbitmq - config_image: *rabbitmq_image + puppet_config: + config_volume: rabbitmq + puppet_tags: file + step_config: *step_config + config_image: &rabbitmq_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ] kolla_config: /var/lib/kolla/config_files/rabbitmq.json: command: /usr/lib/rabbitmq/bin/rabbitmq-server @@ -117,3 +119,7 @@ outputs: - rabbitmq:/var/lib/rabbitmq/ environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable rabbitmq service + tags: step2 + service: name=rabbitmq-server state=stopped enabled=no diff --git a/docker/services/services.yaml b/docker/services/services.yaml index cd9f4cb5..84c56b5b 100644 --- a/docker/services/services.yaml +++ b/docker/services/services.yaml @@ -67,13 +67,25 @@ outputs: {get_attr: [PuppetServices, role_data, global_config_settings]} step_config: 
{get_attr: [ServiceChain, role_data, step_config]} - docker_image: {get_attr: [ServiceChain, role_data, docker_image]} - puppet_tags: {get_attr: [ServiceChain, role_data, puppet_tags]} - config_volume: {get_attr: [ServiceChain, role_data, config_volume]} - config_image: {get_attr: [ServiceChain, role_data, config_image]} + puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]} kolla_config: map_merge: {get_attr: [ServiceChain, role_data, kolla_config]} docker_config: {get_attr: [ServiceChain, role_data, docker_config]} docker_puppet_tasks: {get_attr: [ServiceChain, role_data, docker_puppet_tasks]} + host_prep_tasks: + yaql: + # Note we use distinct() here to filter any identical tasks + expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct() + data: {get_attr: [ServiceChain, role_data]} + upgrade_tasks: + yaql: + # Note we use distinct() here to filter any identical tasks, e.g. yum update for all services + expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct() + data: {get_attr: [ServiceChain, role_data]} + upgrade_batch_tasks: + yaql: + # Note we use distinct() here to filter any identical tasks, e.g. yum update for all services + expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct() + data: {get_attr: [ServiceChain, role_data]} diff --git a/docker/services/swift-proxy.yaml b/docker/services/swift-proxy.yaml index 09553319..93e21c81 100644 --- a/docker/services/swift-proxy.yaml +++ b/docker/services/swift-proxy.yaml @@ -42,16 +42,18 @@ outputs: value: service_name: {get_attr: [SwiftProxyBase, role_data, service_name]} config_settings: {get_attr: [SwiftProxyBase, role_data, config_settings]} - step_config: {get_attr: [SwiftProxyBase, role_data, step_config]} + step_config: &step_config + get_attr: [SwiftProxyBase, role_data, step_config] service_config_settings: {get_attr: [SwiftProxyBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &swift_proxy_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] - puppet_tags: swift_proxy_config - config_volume: swift - config_image: *swift_proxy_image + puppet_config: + config_volume: swift + puppet_tags: swift_proxy_config + step_config: *step_config + config_image: &swift_proxy_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] kolla_config: /var/lib/kolla/config_files/swift_proxy.json: command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf @@ -74,3 +76,7 @@ outputs: - /dev:/dev environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable swift_proxy service + tags: step2 + service: name=openstack-swift-proxy state=stopped enabled=no diff --git a/docker/services/swift-ringbuilder.yaml b/docker/services/swift-ringbuilder.yaml index de91e7cf..21102505 100644 --- a/docker/services/swift-ringbuilder.yaml +++ b/docker/services/swift-ringbuilder.yaml @@ -66,15 +66,17 @@ outputs: value: service_name: {get_attr: [SwiftRingbuilderBase, role_data, service_name]} config_settings: {get_attr: [SwiftRingbuilderBase, role_data, config_settings]} - step_config: {get_attr: [SwiftRingbuilderBase, role_data, step_config]} + step_config: &step_config + get_attr: [SwiftRingbuilderBase, role_data, step_config] service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]} - puppet_tags: 
exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance # BEGIN DOCKER SETTINGS - docker_image: &docker_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] - config_volume: 'swift' - config_image: *docker_image + puppet_config: + config_volume: 'swift' + puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] kolla_config: {} docker_config: {} diff --git a/docker/services/swift-storage.yaml b/docker/services/swift-storage.yaml index 5b2ec6e6..8e76504c 100644 --- a/docker/services/swift-storage.yaml +++ b/docker/services/swift-storage.yaml @@ -54,16 +54,18 @@ outputs: value: service_name: {get_attr: [SwiftStorageBase, role_data, service_name]} config_settings: {get_attr: [SwiftStorageBase, role_data, config_settings]} - step_config: {get_attr: [SwiftStorageBase, role_data, step_config]} + step_config: &step_config + get_attr: [SwiftStorageBase, role_data, step_config] service_config_settings: {get_attr: [SwiftStorageBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &swift_proxy_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] - puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config - config_volume: swift - config_image: *swift_proxy_image + puppet_config: + config_volume: swift + puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] kolla_config: /var/lib/kolla/config_files/swift_account_auditor.json: command: /usr/bin/swift-account-auditor /etc/swift/account-server.conf @@ -341,3 +343,20 @@ outputs: - swift-srv:/srv - /dev:/dev environment: *kolla_env + upgrade_tasks: + - name: Stop and disable swift storage services + tags: step2 + service: name={{ item }} state=stopped enabled=no + with_items: + - openstack-swift-account-auditor + - openstack-swift-account-reaper + - openstack-swift-account-replicator + - openstack-swift-account + - openstack-swift-container-auditor + - openstack-swift-container-replicator + - openstack-swift-container-updater + - openstack-swift-container + - openstack-swift-object-auditor + - openstack-swift-object-replicator + - openstack-swift-object-updater + - openstack-swift-object diff --git a/docker/services/zaqar.yaml b/docker/services/zaqar.yaml index 9f248ce1..3ec819e0 100644 --- a/docker/services/zaqar.yaml +++ b/docker/services/zaqar.yaml @@ -42,16 +42,18 @@ outputs: value: service_name: {get_attr: [ZaqarBase, role_data, service_name]} config_settings: {get_attr: [ZaqarBase, role_data, config_settings]} - step_config: {get_attr: [ZaqarBase, role_data, step_config]} + step_config: &step_config + get_attr: [ZaqarBase, role_data, step_config] service_config_settings: {get_attr: [ZaqarBase, role_data, service_config_settings]} # BEGIN DOCKER SETTINGS - docker_image: &zaqar_image - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ] - puppet_tags: zaqar_config - config_volume: zaqar - 
config_image: *zaqar_image + puppet_config: + config_volume: zaqar + puppet_tags: zaqar_config + step_config: *step_config + config_image: &zaqar_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ] kolla_config: /var/lib/kolla/config_files/zaqar.json: command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf @@ -97,3 +99,8 @@ outputs: - /etc/localtime:/etc/localtime:ro environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable zaqar service + tags: step2 + service: name=openstack-zaqar.service state=stopped enabled=no + diff --git a/environments/cadf.yaml b/environments/cadf.yaml new file mode 100644 index 00000000..af5c7fdf --- /dev/null +++ b/environments/cadf.yaml @@ -0,0 +1,2 @@ +parameter_defaults: + KeystoneNotificationFormat: cadf diff --git a/environments/docker.yaml b/environments/docker.yaml index cb13c5c3..1571ebc5 100644 --- a/environments/docker.yaml +++ b/environments/docker.yaml @@ -1,5 +1,10 @@ resource_registry: - OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml + # This can be used when you don't want to run puppet on the host, + # e.g. atomic, but it has been replaced with OS::TripleO::Services::Docker + # OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml + OS::TripleO::Services::Docker: ../puppet/services/docker.yaml + # The compute node still needs extra initialization steps + OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml #NOTE (dprince) add roles to be docker enabled as we support them OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml @@ -14,11 +19,6 @@ resource_registry: OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml - # FIXME: these need to go into a environments/services-docker dir? 
- OS::TripleO::Services::NovaIronic: ../docker/services/nova-ironic.yaml - OS::TripleO::Services::IronicApi: ../docker/services/ironic-api.yaml - OS::TripleO::Services::IronicConductor: ../docker/services/ironic-conductor.yaml - OS::TripleO::Services::IronicPxe: ../docker/services/ironic-pxe.yaml OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml @@ -26,18 +26,24 @@ resource_registry: OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml - OS::TripleO::Services::MistralApi: ../docker/services/mistral-api.yaml - OS::TripleO::Services::MistralEngine: ../docker/services/mistral-engine.yaml - OS::TripleO::Services::MistralExecutor: ../docker/services/mistral-executor.yaml - OS::TripleO::Services::Zaqar: ../docker/services/zaqar.yaml OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml + OS::TripleO::Services::GnocchiApi: ../docker/services/gnocchi-api.yaml + OS::TripleO::Services::GnocchiMetricd: ../docker/services/gnocchi-metricd.yaml + OS::TripleO::Services::GnocchiStatsd: ../docker/services/gnocchi-statsd.yaml + + OS::TripleO::Services::AodhApi: ../docker/services/aodh-api.yaml + OS::TripleO::Services::AodhEvaluator: ../docker/services/aodh-evaluator.yaml + OS::TripleO::Services::AodhNotifier: ../docker/services/aodh-notifier.yaml + OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml OS::TripleO::PostDeploySteps: ../docker/post.yaml + OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml + OS::TripleO::Services: ../docker/services/services.yaml parameter_defaults: @@ -50,3 +56,4 @@ parameter_defaults: - OS::TripleO::Services::NovaCompute - OS::TripleO::Services::NovaLibvirt - OS::TripleO::Services::ComputeNeutronOvsAgent + - OS::TripleO::Services::Docker diff --git a/environments/enable-internal-tls.yaml b/environments/enable-internal-tls.yaml index ff4ecfbe..f485e4a5 100644 --- a/environments/enable-internal-tls.yaml +++ b/environments/enable-internal-tls.yaml @@ -2,6 +2,7 @@ # a TLS for in the internal network via certmonger parameter_defaults: EnableInternalTLS: true + RabbitClientUseSSL: true # Required for novajoin to enroll the overcloud nodes ServerMetadata: @@ -11,6 +12,8 @@ resource_registry: OS::TripleO::Services::HAProxyInternalTLS: ../puppet/services/haproxy-internal-tls-certmonger.yaml OS::TripleO::Services::ApacheTLS: ../puppet/services/apache-internal-tls-certmonger.yaml OS::TripleO::Services::MySQLTLS: ../puppet/services/database/mysql-internal-tls-certmonger.yaml + OS::TripleO::Services::RabbitMQTLS: ../puppet/services/rabbitmq-internal-tls-certmonger.yaml + # We use apache as a TLS proxy OS::TripleO::Services::TLSProxyBase: ../puppet/services/apache.yaml diff --git a/environments/major-upgrade-composable-steps-docker.yaml b/environments/major-upgrade-composable-steps-docker.yaml new file mode 100644 index 00000000..5fa2f2d8 --- 
/dev/null +++ b/environments/major-upgrade-composable-steps-docker.yaml @@ -0,0 +1,10 @@ +resource_registry: + # FIXME(shardy) do we need to break major_upgrade_steps.yaml apart to + # enable docker-specific logic, or is just overriding PostUpgradeSteps + # enough (as we want to share the ansible task steps etc.) + OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml +parameter_defaults: + UpgradeLevelNovaCompute: auto + UpgradeInitCommonCommand: | + #!/bin/bash + # Ocata to Pike, put any needed host-level workarounds here diff --git a/environments/major-upgrade-converge-docker.yaml b/environments/major-upgrade-converge-docker.yaml new file mode 100644 index 00000000..463206f1 --- /dev/null +++ b/environments/major-upgrade-converge-docker.yaml @@ -0,0 +1,7 @@ +# Use this to reset any mappings only used for upgrades after the +# update of all nodes is completed +resource_registry: + OS::TripleO::PostDeploySteps: ../docker/post.yaml +parameter_defaults: + UpgradeLevelNovaCompute: '' + UpgradeInitCommonCommand: '' diff --git a/environments/neutron-bgpvpn.yaml b/environments/neutron-bgpvpn.yaml new file mode 100644 index 00000000..dc6c1454 --- /dev/null +++ b/environments/neutron-bgpvpn.yaml @@ -0,0 +1,16 @@ +# A Heat environment file that can be used to deploy the Neutron BGPVPN service +# +# Currently there are four types of service providers for Neutron BGPVPN. +# The default option is a dummy driver that allows the API to be enabled. +# In order to enable another backend, replace the content of BgpvpnServiceProvider with one of the following: +# +# - Bagpipe: BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe.BaGPipeBGPVPNDriver:default +# - OpenContrail: BGPVPN:OpenContrail:networking_bgpvpn.neutron.services.service_drivers.opencontrail.opencontrail.OpenContrailBGPVPNDriver:default +# - OpenDaylight: BGPVPN:OpenDaylight:networking_bgpvpn.neutron.services.service_drivers.opendaylight.odl.OpenDaylightBgpvpnDriver:default +# - Nuage: BGPVPN:Nuage:nuage_neutron.bgpvpn.services.service_drivers.driver.NuageBGPVPNDriver:default +resource_registry: + OS::TripleO::Services::NeutronBgpvpnApi: ../puppet/services/neutron-bgpvpn-api.yaml + +parameter_defaults: + NeutronServicePlugins: 'networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin' + BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default' diff --git a/environments/services-docker/ironic.yaml b/environments/services-docker/ironic.yaml new file mode 100644 index 00000000..e927ecb3 --- /dev/null +++ b/environments/services-docker/ironic.yaml @@ -0,0 +1,5 @@ +resource_registry: + OS::TripleO::Services::IronicApi: ../../docker/services/ironic-api.yaml + OS::TripleO::Services::IronicConductor: ../../docker/services/ironic-conductor.yaml + OS::TripleO::Services::IronicPxe: ../../docker/services/ironic-pxe.yaml + OS::TripleO::Services::NovaIronic: ../../docker/services/nova-ironic.yaml diff --git a/environments/services-docker/mistral.yaml b/environments/services-docker/mistral.yaml new file mode 100644 index 00000000..a215d2a0 --- /dev/null +++ b/environments/services-docker/mistral.yaml @@ -0,0 +1,4 @@ +resource_registry: + OS::TripleO::Services::MistralEngine: ../../docker/services/mistral-engine.yaml + OS::TripleO::Services::MistralApi: ../../docker/services/mistral-api.yaml + OS::TripleO::Services::MistralExecutor: ../../docker/services/mistral-executor.yaml diff --git a/environments/services-docker/zaqar.yaml b/environments/services-docker/zaqar.yaml new file mode 100644 
index 00000000..ca0b3b15 --- /dev/null +++ b/environments/services-docker/zaqar.yaml @@ -0,0 +1,2 @@ +resource_registry: + OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml diff --git a/extraconfig/tasks/aodh_data_migration.sh b/extraconfig/tasks/aodh_data_migration.sh deleted file mode 100644 index d4c29673..00000000 --- a/extraconfig/tasks/aodh_data_migration.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# -# This delivers the aodh data migration script to be invoked as part of the tripleo -# major upgrade workflow to migrate all the alarm data from mongodb to mysql. -# This needs to run post controller node upgrades so new aodh mysql db configured and -# running. -# -set -eu - -#Get existing mongodb connection -MONGO_DB_CONNECTION="$(crudini --get /etc/ceilometer/ceilometer.conf database connection)" - -# Get the aodh database string from hiera data -MYSQL_DB_CONNECTION="$(crudini --get /etc/aodh/aodh.conf database connection)" - -#Run migration -/usr/bin/aodh-data-migration --nosql-conn $MONGO_DB_CONNECTION --sql-conn $MYSQL_DB_CONNECTION - - diff --git a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml deleted file mode 100644 index cf5d7a84..00000000 --- a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml +++ /dev/null @@ -1,62 +0,0 @@ -heat_template_version: ocata - -description: > - Software-config for ceilometer configuration under httpd during upgrades - -parameters: - servers: - type: json - input_values: - type: json - description: input values for the software deployments -resources: - CeilometerWsgiMitakaNewtonPreUpgradeConfig: - type: OS::Heat::SoftwareConfig - properties: - group: puppet - config: - get_file: mitaka_to_newton_ceilometer_wsgi_upgrade.pp - - CeilometerWsgiMitakaNewtonUpgradeConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - "#!/bin/bash\n\nset -e\n\n" - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - "disable_standalone_ceilometer_api\n\n" - - CeilometerWsgiMitakaNewtonPostUpgradeConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: | - #!/bin/bash - set -e - /usr/bin/systemctl reload httpd - - CeilometerWsgiMitakaNewtonPreUpgradeDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - name: CeilometerWsgiMitakaNewtonPreUpgradeDeployment - servers: {get_param: [servers, Controller]} - config: {get_resource: CeilometerWsgiMitakaNewtonPreUpgradeConfig} - - CeilometerWsgiMitakaNewtonUpgradeConfigDeployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: CeilometerWsgiMitakaNewtonPreUpgradeDeployment - properties: - name: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment - servers: {get_param: [servers, Controller]} - config: {get_resource: CeilometerWsgiMitakaNewtonUpgradeConfig} - - CeilometerWsgiMitakaNewtonPostUpgradeDeployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment - properties: - name: CeilometerWsgiMitakaNewtonPostUpgradeDeployment - servers: {get_param: [servers, Controller]} - config: {get_resource: CeilometerWsgiMitakaNewtonPostUpgradeConfig} diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh deleted file mode 100755 index 8bdff5e7..00000000 --- a/extraconfig/tasks/major_upgrade_check.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/bin/bash - -set -eu - -check_cluster() -{ - if pcs 
status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then - echo_error "ERROR: upgrade cannot start with some cluster nodes being offline" - exit 1 - fi -} - -check_pcsd() -{ - if pcs status 2>&1 | grep -E 'Offline'; then - echo_error "ERROR: upgrade cannot start with some pcsd daemon offline" - exit 1 - fi -} - -mysql_need_update() -{ - # Shall we upgrade mysql data directory during the stack upgrade? - if [ "$mariadb_do_major_upgrade" = "auto" ]; then - ret=$(is_mysql_upgrade_needed) - if [ $ret = "1" ]; then - DO_MYSQL_UPGRADE=1 - else - DO_MYSQL_UPGRADE=0 - fi - echo "mysql upgrade required: $DO_MYSQL_UPGRADE" - elif [ "$mariadb_do_major_upgrade" = "no" ]; then - DO_MYSQL_UPGRADE=0 - else - DO_MYSQL_UPGRADE=1 - fi -} - -check_disk_for_mysql_dump() -{ - # Where to backup current database if mysql need to be upgraded - MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp - MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup - # Spare disk ratio for extra safety - MYSQL_BACKUP_SIZE_RATIO=1.2 - - mysql_need_update - - if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then - if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - - if [ -d "$MYSQL_BACKUP_DIR" ]; then - echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously" - exit 1 - fi - mkdir "$MYSQL_BACKUP_DIR" - if [ $? -ne 0 ]; then - echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR" - exit 1 - fi - - # the /root/.my.cnf is needed because we set the mysql root - # password from liberty onwards - backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction" - # While not ideal, this step allows us to calculate exactly how much space the dump - # will need. Our main goal here is avoiding any chance of corruption due to disk space - # exhaustion - backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c) - database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }') - free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1) - - # we need at least space for a new mysql database + dump of the existing one, - # times a small factor for additional safety room - # note: bash doesn't do floating point math or floats in if statements, - # so use python to apply the ratio and cast it back to integer - required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))") - if [ $required_space -ge $free_space ]; then - echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)" - exit 1 - fi - fi - fi -} - -check_python_rpm() -{ - # If for some reason rpm-python are missing we want to error out early enough - if ! rpm -q rpm-python &> /dev/null; then - echo_error "ERROR: upgrade cannot start without rpm-python installed" - exit 1 - fi -} - -check_clean_cluster() -{ - if pcs status | grep -q Stopped:; then - echo_error "ERROR: upgrade cannot start with stopped resources on the cluster. Make sure that all the resources are up and running." - exit 1 - fi -} - -check_galera_root_password() -{ - # BZ: 1357112 - if [ ! -e /root/.my.cnf ]; then - echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update." 
- exit 1 - fi -} diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh deleted file mode 100755 index 080831ab..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -set -eu - -check_cluster -check_pcsd -if [[ -n $(is_bootstrap_node) ]]; then - check_clean_cluster -fi -check_python_rpm -check_galera_root_password -check_disk_for_mysql_dump - -# We want to disable fencing during the cluster --stop as it might fence -# nodes where a service fails to stop, which could be fatal during an upgrade -# procedure. So we remember the stonith state. If it was enabled we reenable it -# at the end of this script -if [[ -n $(is_bootstrap_node) ]]; then - STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }') - # We create this empty file if stonith was set to true so we can reenable stonith in step2 - rm -f /var/tmp/stonith-true - if [ $STONITH_STATE == "true" ]; then - touch /var/tmp/stonith-true - fi - pcs property set stonith-enabled=false -fi - -# Migrate to HA NG and fix up rabbitmq queues -# We fix up the rabbitmq ha queues after the migration because it will -# restart the rabbitmq resource. Doing it after the migration means no other -# services will be restart as there are no other constraints -if [[ -n $(is_bootstrap_node) ]]; then - migrate_full_to_ng_ha - rabbitmq_newton_ocata_upgrade -fi - diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh deleted file mode 100755 index 8b900842..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh +++ /dev/null @@ -1,177 +0,0 @@ -#!/bin/bash - -set -eu - -cluster_sync_timeout=1800 - -# After migrating the cluster to HA-NG the services not under pacemaker's control -# are still up and running. We need to stop them explicitely otherwise during the yum -# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which -# is going to take a long time because rabbit is down. By having the service stopped -# systemctl try-restart is a noop - -for service in $(services_to_migrate); do - manage_systemd_service stop "${service%%-clone}" - # So the reason for not reusing check_resource_systemd is that - # I have observed systemctl is-active returning unknown with at least - # one service that was stopped (See LP 1627254) - timeout=600 - tstart=$(date +%s) - tend=$(( $tstart + $timeout )) - check_interval=3 - while (( $(date +%s) < $tend )); do - if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then - echo "$service still active, sleeping $check_interval seconds." - sleep $check_interval - else - # we do not care if it is inactive, unknown or failed as long as it is - # not running - break - fi - - done -done - -# In case the mysql package is updated, the database on disk must be -# upgraded as well. This typically needs to happen during major -# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...) -# -# Because in-place upgrades are not supported across 2+ major versions -# (e.g. 
5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle -# https://bugzilla.redhat.com/show_bug.cgi?id=1341968 -# -# The default is to determine automatically if upgrade is needed based -# on mysql package versionning, but this can be overriden manually -# to support specific upgrade scenario - -# Calling this function will set the DO_MYSQL_UPGRADE variable which is used -# later -mysql_need_update - -if [[ -n $(is_bootstrap_node) ]]; then - if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction" - mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql" - cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR" - fi - - pcs resource disable redis - check_resource redis stopped 600 - pcs resource disable rabbitmq - check_resource rabbitmq stopped 600 - pcs resource disable galera - check_resource galera stopped 600 - pcs resource disable openstack-cinder-volume - check_resource openstack-cinder-volume stopped 600 - # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address: - # https://bugzilla.redhat.com/show_bug.cgi?id=1330688 - for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do - pcs resource disable $vip - check_resource $vip stopped 60 - done - pcs cluster stop --all -fi - - -# Swift isn't controlled by pacemaker -systemctl_swift stop - -tstart=$(date +%s) -while systemctl is-active pacemaker; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > cluster_sync_timeout )) ; then - echo_error "ERROR: cluster shutdown timed out" - exit 1 - fi -done - -# The reason we do an sql dump *and* we move the old dir out of -# the way is because it gives us an extra level of safety in case -# something goes wrong during the upgrade. Once the restore is -# successful we go ahead and remove it. If the directory exists -# we bail out as it means the upgrade process had issues in the last -# run. -if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then - echo_error "ERROR: mysql backup dir already exist" - exit 1 - fi - mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR -fi - -# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 -special_case_ovs_upgrade_if_needed - -yum -y install python-zaqarclient # needed for os-collect-config -yum -y -q update - -# We need to ensure at least those two configuration settings, otherwise -# mariadb 10.1+ won't activate galera replication. -# wsrep_cluster_address must only be set though, its value does not -# matter because it's overriden by the galera resource agent. -cat >> /etc/my.cnf.d/galera.cnf <<EOF -[mysqld] -wsrep_on = ON -wsrep_cluster_address = gcomm://localhost -EOF - -if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - # Scripts run via heat have no HOME variable set and this confuses - # mysqladmin - export HOME=/root - - mkdir /var/lib/mysql || /bin/true - chown mysql:mysql /var/lib/mysql - chmod 0755 /var/lib/mysql - restorecon -R /var/lib/mysql/ - mysql_install_db --datadir=/var/lib/mysql --user=mysql - chown -R mysql:mysql /var/lib/mysql/ - - if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then - mysqld_safe --wsrep-new-cluster & - # We have a populated /root/.my.cnf with root/password here so - # we need to temporarily rename it because the newly created - # db is empty and no root password is set - mv /root/.my.cnf /root/.my.cnf.temporary - timeout 60 sh -c 'while ! 
mysql -e "" &> /dev/null; do sleep 1; done' - mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql" - mv /root/.my.cnf.temporary /root/.my.cnf - mysqladmin -u root shutdown - # The import was successful so we may remove the folder - rm -r "$MYSQL_BACKUP_DIR" - fi -fi - -# If we reached here without error we can safely blow away the origin -# mysql dir from every controller - -# TODO: What if the upgrade fails on the bootstrap node, but not on -# this controller. Data may be lost. -if [ $DO_MYSQL_UPGRADE -eq 1 ]; then - rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR -fi - -# Let's reset the stonith back to true if it was true, before starting the cluster -if [[ -n $(is_bootstrap_node) ]]; then - if [ -f /var/tmp/stonith-true ]; then - pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true - fi - rm -f /var/tmp/stonith-true -fi - -# Pin messages sent to compute nodes to kilo, these will be upgraded later -crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute" -# https://bugzilla.redhat.com/show_bug.cgi?id=1284047 -# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435 -crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit -# https://bugzilla.redhat.com/show_bug.cgi?id=1284058 -# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists -crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server" -# LP: 1615035, required only for M/N upgrade. -crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager -# LP: 1627450, required only for M/N upgrade -crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler - -crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm - diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh deleted file mode 100755 index a3cbd945..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash - -set -eu - -cluster_form_timeout=600 -cluster_settle_timeout=1800 -galera_sync_timeout=600 - -if [[ -n $(is_bootstrap_node) ]]; then - pcs cluster start --all - - tstart=$(date +%s) - while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > cluster_form_timeout )) ; then - echo_error "ERROR: timed out forming the cluster" - exit 1 - fi - done - - if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then - echo_error "ERROR: timed out waiting for cluster to finish transition" - exit 1 - fi - - for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do - pcs resource enable $vip - check_resource_pacemaker $vip started 60 - done -fi - -start_or_enable_service galera -check_resource galera started 600 -start_or_enable_service redis -check_resource redis started 600 -# We need mongod which is now a systemd service up and running before calling -# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes -# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely -# we should be good. 
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm -systemctl start mongod -check_resource mongod started 600 - -if [[ -n $(is_bootstrap_node) ]]; then - tstart=$(date +%s) - while ! clustercheck; do - sleep 5 - tnow=$(date +%s) - if (( tnow-tstart > galera_sync_timeout )) ; then - echo_error "ERROR galera sync timed out" - exit 1 - fi - done - - # Run all the db syncs - # TODO: check if this can be triggered in puppet and removed from here - ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types - cinder-manage db sync - glance-manage db_sync - heat-manage --config-file /etc/heat/heat.conf db_sync - keystone-manage db_sync - neutron-db-manage upgrade heads - nova-manage db sync - nova-manage api_db sync - nova-manage db online_data_migrations - sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head -fi diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh deleted file mode 100755 index d2cb9553..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -eu - -start_or_enable_service rabbitmq -check_resource rabbitmq started 600 -start_or_enable_service redis -check_resource redis started 600 -start_or_enable_service openstack-cinder-volume -check_resource openstack-cinder-volume started 600 - -# start httpd so keystone is available for gnocchi -# upgrade to run. -systemctl start httpd - -# Swift isn't controled by pacemaker -systemctl_swift start diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh deleted file mode 100755 index fa95f1f8..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -eu - -if [[ -n $(is_bootstrap_node) ]]; then - # run gnocchi upgrade - gnocchi-upgrade -fi diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh deleted file mode 100755 index d569084d..00000000 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -set -eu - -# We need to start the systemd services we explicitely stopped at step _1.sh -# FIXME: Should we let puppet during the convergence step do the service enabling or -# should we add it here? 
-services=$(services_to_migrate) -if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then - services=${services%%openstack-sahara*} -fi -for service in $services; do - manage_systemd_service start "${service%%-clone}" - check_resource_systemd "${service%%-clone}" started 600 -done diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml deleted file mode 100644 index 74d3be71..00000000 --- a/extraconfig/tasks/major_upgrade_pacemaker.yaml +++ /dev/null @@ -1,175 +0,0 @@ -heat_template_version: ocata -description: 'Upgrade for Pacemaker deployments' - -parameters: - servers: - type: json - input_values: - type: json - description: input values for the software deployments - - UpgradeLevelNovaCompute: - type: string - description: Nova Compute upgrade level - default: '' - MySqlMajorUpgrade: - type: string - description: Can be auto,yes,no and influences if the major upgrade should do or detect an automatic mysql upgrade - constraints: - - allowed_values: ['auto', 'yes', 'no'] - default: 'auto' - KeepSaharaServicesOnUpgrade: - type: boolean - default: true - description: Whether to keep Sahara services when upgrading controller nodes from mitaka to newton - - -resources: - # TODO(jistr): for Mitaka->Newton upgrades and further we can use - # map_merge with input_values instead of feeding params into scripts - # via str_replace on bash snippets - - ControllerPacemakerUpgradeConfig_Step1: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' - params: - UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} - - str_replace: - template: | - #!/bin/bash - mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE' - params: - MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade} - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_check.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_1.sh - - ControllerPacemakerUpgradeDeployment_Step1: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step1} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step2: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' - params: - UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} - - str_replace: - template: | - #!/bin/bash - mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE' - params: - MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade} - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_check.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_2.sh - - ControllerPacemakerUpgradeDeployment_Step2: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step1 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step2} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step3: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: 
major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_3.sh - - ControllerPacemakerUpgradeDeployment_Step3: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step2 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step3} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step4: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_4.sh - - ControllerPacemakerUpgradeDeployment_Step4: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step3 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step4} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step5: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_5.sh - - ControllerPacemakerUpgradeDeployment_Step5: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step4 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step5} - input_values: {get_param: input_values} - - ControllerPacemakerUpgradeConfig_Step6: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - keep_sahara_services_on_upgrade='KEEP_SAHARA_SERVICES_ON_UPGRADE' - params: - KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade} - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_pacemaker_migrations.sh - - get_file: major_upgrade_controller_pacemaker_6.sh - - ControllerPacemakerUpgradeDeployment_Step6: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step5 - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step6} - input_values: {get_param: input_values} diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh deleted file mode 100644 index ae22a1e7..00000000 --- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh +++ /dev/null @@ -1,200 +0,0 @@ -#!/bin/bash - -# Special pieces of upgrade migration logic go into this -# file. E.g. Pacemaker cluster transitions for existing deployments, -# matching changes to overcloud_controller_pacemaker.pp (Puppet -# handles deployment, this file handles migrations). -# -# This file shouldn't execute any action on its own, all logic should -# be wrapped into bash functions. Upgrade scripts will source this -# file and call the functions defined in this file where appropriate. -# -# The migration functions should be idempotent. If the migration has -# been already applied, it should be possible to call the function -# again without damaging the deployment or failing the upgrade. 
- -# If the major version of mysql is going to change after the major -# upgrade, the database must be upgraded on disk to avoid failures -# due to internal incompatibilities between major mysql versions -# https://bugs.launchpad.net/tripleo/+bug/1587449 -# This function detects whether a database upgrade is required -# after a mysql package upgrade. It returns 0 when no major upgrade -# has to take place, 1 otherwise. -function is_mysql_upgrade_needed { - # The name of the package which provides mysql might differ - # after the upgrade. Consider the generic package name, which - # should capture the major version change (e.g. 5.5 -> 10.1) - local name="mariadb" - local output - local ret - set +e - output=$(yum -q check-update $name) - ret=$? - set -e - if [ $ret -ne 100 ]; then - # no updates so we exit - echo "0" - return - fi - - local currentepoch=$(rpm -q --qf "%{epoch}" $name) - local currentversion=$(rpm -q --qf "%{version}" $name | cut -d. -f-2) - local currentrelease=$(rpm -q --qf "%{release}" $name) - local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name) - local newepoch=$(echo "$newoutput" | awk '{ print $1 }') - local newversion=$(echo "$newoutput" | awk '{ print $2 }' | cut -d. -f-2) - local newrelease=$(echo "$newoutput" | awk '{ print $3 }') - - # With this we trigger the dump restore/path if we change either epoch or - # version in the package If only the release tag changes we do not do it - # FIXME: we could refine this by trying to parse the mariadb version - # into X.Y.Z and trigger the update only if X and/or Y change. - output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc") - if [ "$output" != "-1" ]; then - echo "0" - return - fi - echo "1" -} - -# This function returns the list of services to be migrated away from pacemaker -# and to systemd. The reason to have these services in a separate function is because -# this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2} -# and in the function to migrate the cluster from full HA to HA NG -function services_to_migrate { - # The following PCMK resources the ones the we are going to delete - PCMK_RESOURCE_TODELETE=" - httpd-clone - memcached-clone - mongod-clone - neutron-dhcp-agent-clone - neutron-l3-agent-clone - neutron-metadata-agent-clone - neutron-netns-cleanup-clone - neutron-openvswitch-agent-clone - neutron-ovs-cleanup-clone - neutron-server-clone - openstack-aodh-evaluator-clone - openstack-aodh-listener-clone - openstack-aodh-notifier-clone - openstack-ceilometer-central-clone - openstack-ceilometer-collector-clone - openstack-ceilometer-notification-clone - openstack-cinder-api-clone - openstack-cinder-scheduler-clone - openstack-glance-api-clone - openstack-gnocchi-metricd-clone - openstack-gnocchi-statsd-clone - openstack-heat-api-cfn-clone - openstack-heat-api-clone - openstack-heat-api-cloudwatch-clone - openstack-heat-engine-clone - openstack-nova-api-clone - openstack-nova-conductor-clone - openstack-nova-consoleauth-clone - openstack-nova-novncproxy-clone - openstack-nova-scheduler-clone - openstack-sahara-api-clone - openstack-sahara-engine-clone - " - echo $PCMK_RESOURCE_TODELETE -} - -# This function will migrate a mitaka system where all the resources are managed -# via pacemaker to a newton setup where only a few services will be managed by pacemaker -# On a high-level it will operate as follows: -# 1. 
Set the cluster in maintenance-mode so no start/stop action will actually take place -# during the conversion -# 2. Remove all the colocation constraints and then the ordering constraints, except the -# ones related to haproxy/VIPs which exist in Newton as well -# 3. Take the cluster out of maintenance-mode -# 4. Remove all the resources that won't be managed by pacemaker in newton. The -# outcome will be -# that they are stopped and removed from pacemakers control -# 5. Do a resource cleanup to make sure the cluster is in a clean state -function migrate_full_to_ng_ha { - if [[ -n $(pcmk_running) ]]; then - pcs property set maintenance-mode=true - - # First we go through all the colocation constraints (except the ones - # we want to keep, i.e. the haproxy/ip ones) and we remove those - COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\)) - for constraint in $COL_CONSTRAINTS; do - log_debug "Deleting colocation constraint $constraint from CIB" - pcs constraint remove "$constraint" - done - - # Now we kill all the ordering constraints (except the haproxy/ip ones) - ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\)) - for constraint in $ORD_CONSTRAINTS; do - log_debug "Deleting ordering constraint $constraint from CIB" - pcs constraint remove "$constraint" - done - # At this stage all the pacemaker resources are removed from the CIB. - # Once we remove the maintenance-mode those systemd resources will keep - # on running. They shall be systemd enabled via the puppet converge - # step later on - pcs property set maintenance-mode=false - - # At this stage there are no constraints whatsoever except the haproxy/ip ones - # which we want to keep. We now disable and then delete each resource - # that will move to systemd. - # We want the systemd resources be stopped before doing "yum update", - # that way "systemctl try-restart <service>" is no-op because the - # service was down already - PCS_STATUS_OUTPUT="$(pcs status)" - for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do - if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then - log_debug "Deleting $resource from the CIB" - if ! pcs resource disable "$resource" --wait=600; then - echo_error "ERROR: resource $resource failed to be disabled" - exit 1 - fi - pcs resource delete --force "$resource" - else - log_debug "Service $resource not found as a pacemaker resource, not trying to delete." - fi - done - - # We need to do a pcs resource cleanup here + crm_resource --wait to - # make sure the cluster is in a clean state before we stop everything, - # upgrade and restart everything - pcs resource cleanup - # We are making sure here that the cluster is stable before proceeding - if ! timeout -k 10 600 crm_resource --wait; then - echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting." 
- exit 1 - fi - fi -} - -function disable_standalone_ceilometer_api { - if [[ -n $(is_bootstrap_node) ]]; then - if [[ -n $(is_pacemaker_managed openstack-ceilometer-api) ]]; then - # Disable pacemaker resources for ceilometer-api - manage_pacemaker_service disable openstack-ceilometer-api - check_resource_pacemaker openstack-ceilometer-api stopped 600 - pcs resource delete openstack-ceilometer-api --wait=600 - fi - fi -} - - -# This function will make sure that the rabbitmq ha policies are converted from mitaka to newton -# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}" -# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}" -# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances) -# Note that changing an attribute like this makes the rabbitmq resource restart -function rabbitmq_newton_ocata_upgrade { - if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then - # Number of controller is obtained by counting how many hostnames we - # have in controller_node_names hiera key - nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1)) - nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2))) - if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then - echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues" - exit 1 - fi - pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600 - fi -} diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml deleted file mode 100644 index 45933fb7..00000000 --- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml +++ /dev/null @@ -1,25 +0,0 @@ -heat_template_version: ocata - -description: > - Software-config for performing aodh data migration - -parameters: - servers: - type: json - input_values: - type: json - description: input values for the software deployments -resources: - - AodhMysqlMigrationScriptConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: aodh_data_migration.sh} - - AodhMysqlMigrationScriptDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, Controller]} - config: {get_resource: AodhMysqlMigrationScriptConfig} - input_values: {get_param: input_values} diff --git a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp deleted file mode 100644 index a8d43663..00000000 --- a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# This puppet manifest is to be used only during a Mitaka->Newton upgrade -# It configures ceilometer to be run under httpd but it makes sure to not -# restart any services. This snippet needs to be called before init as a -# pre upgrade migration. - -Service <| - tag == 'ceilometer-service' -|> { - hasrestart => true, - restart => '/bin/true', - start => '/bin/true', - stop => '/bin/true', -} - -if $::hostname == downcase(hiera('bootstrap_nodeid')) { - $pacemaker_master = true - $sync_db = true -} else { - $pacemaker_master = false - $sync_db = false -} - -include ::tripleo::packages - - -if str2bool(hiera('mongodb::server::ipv6', false)) { - $mongo_node_ips_with_port_prefixed = prefix(hiera('mongodb_node_ips'), '[') - $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017') -} else { - $mongo_node_ips_with_port = suffix(hiera('mongodb_node_ips'), ':27017') -} -$mongodb_replset = hiera('mongodb::server::replset') -$mongo_node_string = join($mongo_node_ips_with_port, ',') -$database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" - -$rabbit_hosts = hiera('rabbitmq_node_ips', undef) -$rabbit_port = hiera('ceilometer::rabbit_port', 5672) -$rabbit_endpoints = suffix(any2array(normalize_ip_for_uri($rabbit_hosts)), ":${rabbit_port}") - -class { '::ceilometer' : - rabbit_hosts => $rabbit_endpoints, -} - -class {'::ceilometer::db': - database_connection => $database_connection, -} - -if $sync_db { - include ::ceilometer::db::sync -} - -include ::ceilometer::config - -class { '::ceilometer::api': - enabled => true, - service_name => 'httpd', - keystone_password => hiera('ceilometer::keystone::auth::password'), - identity_uri => hiera('ceilometer::keystone::authtoken::auth_url'), - auth_uri => hiera('ceilometer::keystone::authtoken::auth_uri'), - keystone_tenant => hiera('ceilometer::keystone::authtoken::project_name'), -} - -class { '::apache' : - service_enable => false, - service_manage => true, - service_restart => '/bin/true', - purge_configs => false, - purge_vhost_dir => false, -} - -# To ensure existing ports are not overridden -class { '::aodh::wsgi::apache': - servername => $::hostname, - ssl => false, -} -class { '::gnocchi::wsgi::apache': - servername => $::hostname, - ssl => false, -} - -class { '::keystone::wsgi::apache': - servername => $::hostname, - ssl => false, -} -class { '::ceilometer::wsgi::apache': - servername => $::hostname, - ssl => false, -} diff --git a/extraconfig/tasks/tripleo_upgrade_node.sh b/extraconfig/tasks/tripleo_upgrade_node.sh index c2565410..24211ab0 100644 --- a/extraconfig/tasks/tripleo_upgrade_node.sh +++ b/extraconfig/tasks/tripleo_upgrade_node.sh @@ -34,31 +34,34 @@ if [[ -n \$NOVA_COMPUTE ]]; then crudini --set /etc/nova/nova.conf upgrade_levels compute auto fi -$(declare -f special_case_ovs_upgrade_if_needed) -special_case_ovs_upgrade_if_needed - -yum -y install python-zaqarclient # needed for os-collect-config if [[ -n \$SWIFT_STORAGE ]]; then systemctl_swift stop fi + yum -y update + if [[ -n \$SWIFT_STORAGE ]]; then systemctl_swift start fi # Due to bug#1640177 we need to restart compute agent if [[ -n \$NOVA_COMPUTE ]]; then - echo "Restarting openstack ceilometer agent compute" + log_debug "Restarting openstack ceilometer agent compute" systemctl restart openstack-ceilometer-compute fi # Apply puppet manifest to converge just right after the ${ROLE} upgrade $(declare -f run_puppet) for step in 1 2 3 4 5 6; do + log_debug "Running puppet step \$step for ${ROLE}" if ! 
run_puppet /root/${ROLE}_puppet_config.pp ${ROLE} \${step}; then - echo "Puppet failure at step \${step}" + log_debug "Puppet failure at step \${step}" exit 1 fi + log_debug "Completed puppet step \$step" done + +log_debug "TripleO upgrade run completed." + ENDOFCAT # ensure the permissions are OK diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh index 6bf415b2..4c87373e 100755 --- a/extraconfig/tasks/yum_update.sh +++ b/extraconfig/tasks/yum_update.sh @@ -70,9 +70,6 @@ if [[ "$pacemaker_status" == "active" && \ fi fi -# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 -special_case_ovs_upgrade_if_needed - if [[ "$pacemaker_status" == "active" ]] ; then echo "Pacemaker running, stopping cluster node and doing full package update" node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*") diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml index b811a5a3..212e9379 100644 --- a/overcloud-resource-registry-puppet.j2.yaml +++ b/overcloud-resource-registry-puppet.j2.yaml @@ -145,6 +145,7 @@ resource_registry: OS::TripleO::Services::Kernel: puppet/services/kernel.yaml OS::TripleO::Services::MySQL: puppet/services/database/mysql.yaml OS::TripleO::Services::MySQLTLS: OS::Heat::None + OS::TripleO::Services::NeutronBgpvpnApi: OS::Heat::None OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml @@ -169,6 +170,7 @@ resource_registry: OS::TripleO::Services::PacemakerRemote: OS::Heat::None OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml + OS::TripleO::Services::RabbitMQTLS: OS::Heat::None OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml OS::TripleO::Services::HAProxyPublicTLS: OS::Heat::None OS::TripleO::Services::HAProxyInternalTLS: OS::Heat::None @@ -249,6 +251,7 @@ resource_registry: OS::TripleO::Services::OctaviaWorker: OS::Heat::None OS::TripleO::Services::MySQLClient: puppet/services/database/mysql-client.yaml OS::TripleO::Services::Vpp: OS::Heat::None + OS::TripleO::Services::Docker: OS::Heat::None parameter_defaults: EnablePackageInstall: false diff --git a/plan-environment.yaml b/plan-environment.yaml new file mode 100644 index 00000000..f629eff3 --- /dev/null +++ b/plan-environment.yaml @@ -0,0 +1,5 @@ +version: 1.0
+
+template: overcloud.yaml
+environments:
+- path: overcloud-resource-registry-puppet.yaml
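(Editor's note on the new plan-environment.yaml above: it appears to record the plan's root template and the ordered list of environment files so tooling can reconstruct the deployment plan. As a minimal sketch only, extra environments would be appended to the same list; the network-isolation entry below is illustrative and is not part of this change:

  version: 1.0
  template: overcloud.yaml
  environments:
  - path: overcloud-resource-registry-puppet.yaml
  - path: environments/network-isolation.yaml  # illustrative extra entry, not in this commit
)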
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml index a5218dbe..51f9abac 100644 --- a/puppet/blockstorage-role.yaml +++ b/puppet/blockstorage-role.yaml @@ -448,6 +448,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: name: UpdateDeployment config: {get_resource: UpdateConfig} diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml index 0867e17f..d7d7f478 100644 --- a/puppet/cephstorage-role.yaml +++ b/puppet/cephstorage-role.yaml @@ -460,6 +460,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: config: {get_resource: UpdateConfig} server: {get_resource: CephStorage} diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml index 1a0294af..ebdd762d 100644 --- a/puppet/compute-role.yaml +++ b/puppet/compute-role.yaml @@ -483,6 +483,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: name: UpdateDeployment config: {get_resource: UpdateConfig} diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml index 825006ba..2f4f583c 100644 --- a/puppet/controller-role.yaml +++ b/puppet/controller-role.yaml @@ -523,6 +523,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: name: UpdateDeployment config: {get_resource: UpdateConfig} diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml index 3daf3fd3..b6d1239a 100644 --- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml +++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml @@ -53,41 +53,40 @@ resources: NetworkMidoNetConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - midonet_data: - mapped_data: - enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController} - enable_cassandra_on_controller: {get_param: EnableCassandraOnController} - midonet_tunnelzone_name: {get_param: TunnelZoneName} - midonet_tunnelzone_type: {get_param: TunnelZoneType} - midonet_libvirt_qemu_data: | - user = "root" - group = "root" - cgroup_device_acl = [ - "/dev/null", "/dev/full", "/dev/zero", - "/dev/random", "/dev/urandom", - "/dev/ptmx", "/dev/kvm", "/dev/kqemu", - "/dev/rtc","/dev/hpet", "/dev/vfio/vfio", - "/dev/net/tun" - ] - tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort} - tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort} - tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort} - tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift} - tripleo::haproxy::midonet_api: true - # Missed Neutron Puppet data - neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver' - neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver' - neutron::plugins::midonet::midonet_api_port: 8081 - neutron::params::midonet_server_package: 'python-networking-midonet' + datafiles: + midonet_data: + mapped_data: + enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController} + enable_cassandra_on_controller: {get_param: EnableCassandraOnController} + midonet_tunnelzone_name: {get_param: TunnelZoneName} + midonet_tunnelzone_type: {get_param: TunnelZoneType} + midonet_libvirt_qemu_data: | + user = "root" + group = 
"root" + cgroup_device_acl = [ + "/dev/null", "/dev/full", "/dev/zero", + "/dev/random", "/dev/urandom", + "/dev/ptmx", "/dev/kvm", "/dev/kqemu", + "/dev/rtc","/dev/hpet", "/dev/vfio/vfio", + "/dev/net/tun" + ] + tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort} + tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort} + tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort} + tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift} + tripleo::haproxy::midonet_api: true + # Missed Neutron Puppet data + neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver' + neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver' + neutron::plugins::midonet::midonet_api_port: 8081 + neutron::params::midonet_server_package: 'python-networking-midonet' - # Make sure the l3 agent does not run - l3_agent_service: false - neutron::agents::l3::manage_service: false - neutron::agents::l3::enabled: false + # Make sure the l3 agent does not run + l3_agent_service: false + neutron::agents::l3::manage_service: false + neutron::agents::l3::enabled: false NetworkMidonetDeploymentControllers: diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml index 9b900bc4..b05fa636 100644 --- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml +++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml @@ -101,31 +101,30 @@ resources: NetworkCiscoConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - neutron_cisco_data: - mapped_data: - neutron::plugins::ml2::cisco::ucsm::ucsm_ip: {get_input: UCSM_ip} - neutron::plugins::ml2::cisco::ucsm::ucsm_username: {get_input: UCSM_username} - neutron::plugins::ml2::cisco::ucsm::ucsm_password: {get_input: UCSM_password} - neutron::plugins::ml2::cisco::ucsm::ucsm_host_list: {get_input: UCSM_host_list} - neutron::plugins::ml2::cisco::ucsm::supported_pci_devs: {get_input: UCSMSupportedPciDevs} - neutron::plugins::ml2::cisco::nexus::nexus_config: {get_input: NexusConfig} - neutron::plugins::ml2::cisco::nexus::managed_physical_network: {get_input: NexusManagedPhysicalNetwork} - neutron::plugins::ml2::cisco::nexus::vlan_name_prefix: {get_input: NexusVlanNamePrefix} - neutron::plugins::ml2::cisco::nexus::svi_round_robin: {get_input: NexusSviRoundRobin} - neutron::plugins::ml2::cisco::nexus::provider_vlan_name_prefix: {get_input: NexusProviderVlanNamePrefix} - neutron::plugins::ml2::cisco::nexus::persistent_switch_config: {get_input: NexusPersistentSwitchConfig} - neutron::plugins::ml2::cisco::nexus::switch_heartbeat_time: {get_input: NexusSwitchHeartbeatTime} - neutron::plugins::ml2::cisco::nexus::switch_replay_count: {get_input: NexusSwitchReplayCount} - neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_create: {get_input: NexusProviderVlanAutoCreate} - neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_trunk: {get_input: NexusProviderVlanAutoTrunk} - neutron::plugins::ml2::cisco::nexus::vxlan_global_config: {get_input: NexusVxlanGlobalConfig} - neutron::plugins::ml2::cisco::nexus::host_key_checks: {get_input: NexusHostKeyChecks} - neutron::plugins::ml2::cisco::type_nexus_vxlan::vni_ranges: {get_input: NexusVxlanVniRanges} - neutron::plugins::ml2::cisco::type_nexus_vxlan::mcast_ranges: {get_input: NexusVxlanMcastRanges} + datafiles: + 
neutron_cisco_data: + mapped_data: + neutron::plugins::ml2::cisco::ucsm::ucsm_ip: {get_input: UCSM_ip} + neutron::plugins::ml2::cisco::ucsm::ucsm_username: {get_input: UCSM_username} + neutron::plugins::ml2::cisco::ucsm::ucsm_password: {get_input: UCSM_password} + neutron::plugins::ml2::cisco::ucsm::ucsm_host_list: {get_input: UCSM_host_list} + neutron::plugins::ml2::cisco::ucsm::supported_pci_devs: {get_input: UCSMSupportedPciDevs} + neutron::plugins::ml2::cisco::nexus::nexus_config: {get_input: NexusConfig} + neutron::plugins::ml2::cisco::nexus::managed_physical_network: {get_input: NexusManagedPhysicalNetwork} + neutron::plugins::ml2::cisco::nexus::vlan_name_prefix: {get_input: NexusVlanNamePrefix} + neutron::plugins::ml2::cisco::nexus::svi_round_robin: {get_input: NexusSviRoundRobin} + neutron::plugins::ml2::cisco::nexus::provider_vlan_name_prefix: {get_input: NexusProviderVlanNamePrefix} + neutron::plugins::ml2::cisco::nexus::persistent_switch_config: {get_input: NexusPersistentSwitchConfig} + neutron::plugins::ml2::cisco::nexus::switch_heartbeat_time: {get_input: NexusSwitchHeartbeatTime} + neutron::plugins::ml2::cisco::nexus::switch_replay_count: {get_input: NexusSwitchReplayCount} + neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_create: {get_input: NexusProviderVlanAutoCreate} + neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_trunk: {get_input: NexusProviderVlanAutoTrunk} + neutron::plugins::ml2::cisco::nexus::vxlan_global_config: {get_input: NexusVxlanGlobalConfig} + neutron::plugins::ml2::cisco::nexus::host_key_checks: {get_input: NexusHostKeyChecks} + neutron::plugins::ml2::cisco::type_nexus_vxlan::vni_ranges: {get_input: NexusVxlanVniRanges} + neutron::plugins::ml2::cisco::type_nexus_vxlan::mcast_ranges: {get_input: NexusVxlanMcastRanges} NetworkCiscoDeployment: type: OS::Heat::StructuredDeployments diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml index 7fe2a842..533c0ee9 100644 --- a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml +++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml @@ -20,14 +20,13 @@ resources: NeutronBigswitchConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - neutron_bigswitch_data: - mapped_data: - neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent} - neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp} + datafiles: + neutron_bigswitch_data: + mapped_data: + neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent} + neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp} NeutronBigswitchDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml index 47c782c7..1d16e909 100644 --- a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml +++ b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml @@ -50,22 +50,21 @@ resources: NovaNuageConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - nova_nuage_data: - mapped_data: - nuage::vrs::active_controller: {get_input: ActiveController} - nuage::vrs::standby_controller: {get_input: StandbyController} - nuage::metadataagent::metadata_port: {get_input: MetadataPort} - 
nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort} - nuage::metadataagent::metadata_secret: {get_input: SharedSecret} - nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion} - nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername} - nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs} - nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType} - nuage::metadataagent::nova_region_name: {get_input: NovaRegionName} + datafiles: + nova_nuage_data: + mapped_data: + nuage::vrs::active_controller: {get_input: ActiveController} + nuage::vrs::standby_controller: {get_input: StandbyController} + nuage::metadataagent::metadata_port: {get_input: MetadataPort} + nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort} + nuage::metadataagent::metadata_secret: {get_input: SharedSecret} + nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion} + nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername} + nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs} + nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType} + nuage::metadataagent::nova_region_name: {get_input: NovaRegionName} NovaNuageDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml index 763ae39a..378f7f98 100644 --- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml +++ b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml @@ -91,35 +91,34 @@ resources: CinderNetappConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - cinder_netapp_data: - mapped_data: - tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend} - cinder::backend::netapp::title: {get_input: NetappBackendName} - cinder::backend::netapp::netapp_login: {get_input: NetappLogin} - cinder::backend::netapp::netapp_password: {get_input: NetappPassword} - cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname} - cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort} - cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier} - cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily} - cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol} - cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType} - cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler} - cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList} - cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver} - cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName} - cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares} - cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig} - cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions} - cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath} - cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps} - cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword} - cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools} - 
cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType} - cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath} + datafiles: + cinder_netapp_data: + mapped_data: + tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend} + cinder::backend::netapp::title: {get_input: NetappBackendName} + cinder::backend::netapp::netapp_login: {get_input: NetappLogin} + cinder::backend::netapp::netapp_password: {get_input: NetappPassword} + cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname} + cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort} + cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier} + cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily} + cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol} + cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType} + cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler} + cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList} + cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver} + cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName} + cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares} + cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig} + cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions} + cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath} + cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps} + cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword} + cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools} + cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType} + cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath} CinderNetappDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml index 0f4806db..1456337f 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml @@ -38,19 +38,18 @@ resources: NeutronBigswitchConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - neutron_bigswitch_data: - mapped_data: - neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers} - neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth} - neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure} - neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval} - neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id} - neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl} - neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory} + datafiles: + neutron_bigswitch_data: + mapped_data: + neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers} + neutron::plugins::ml2::bigswitch::restproxy::server_auth: 
{get_input: restproxy_server_auth} + neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure} + neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval} + neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id} + neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl} + neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory} NeutronBigswitchDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml index 6eae812f..bca6010a 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml @@ -96,48 +96,47 @@ resources: CiscoN1kvConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - cisco_n1kv_data: - mapped_data: - #enable_cisco_n1kv: {get_input: EnableCiscoN1kv} - # VEM Parameters - n1kv_vem_source: {get_input: n1kv_vem_source} - n1kv_vem_version: {get_input: n1kv_vem_version} - neutron::agents::n1kv_vem::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} - neutron::agents::n1kv_vem::n1kv_vsm_domain_id: {get_input: n1kv_vsm_domain_id} - neutron::agents::n1kv_vem::n1kv_vsm_ip_v6: {get_input: n1kv_vsm_ip_v6} - neutron::agents::n1kv_vem::host_mgmt_intf: {get_input: n1kv_vem_host_mgmt_intf} - neutron::agents::n1kv_vem::uplink_profile: {get_input: n1kv_vem_uplink_profile} - neutron::agents::n1kv_vem::vtep_config: {get_input: n1kv_vem_vtep_config} - neutron::agents::n1kv_vem::portdb: {get_input: n1kv_vem_portdb} - neutron::agents::n1kv_vem::vteps_in_same_subnet: {get_input: n1kv_vem_vteps_in_same_subnet} - neutron::agents::n1kv_vem::fastpath_flood: {get_input: n1kv_vem_fastpath_flood} - #VSM Parameter - n1kv_vsm_source: {get_input: n1kv_vsm_source} - n1kv_vsm_version: {get_input: n1kv_vsm_version} - n1k_vsm::phy_if_bridge: {get_input: n1kv_vsm_host_mgmt_intf} - n1k_vsm::vsm_role: {get_input: n1kv_vsm_role} - n1k_vsm::pacemaker_control: {get_input: n1kv_vsm_pacemaker_ctrl} - n1k_vsm::existing_bridge: {get_input: n1kv_vsm_existing_br} - n1k_vsm::vsm_admin_passwd: {get_input: n1kv_vsm_password} - n1k_vsm::vsm_domain_id: {get_input: n1kv_vsm_domain_id} - n1k_vsm::vsm_mgmt_ip: {get_input: n1kv_vsm_ip} - n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask} - n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip} - n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip} - n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan} - # Cisco N1KV driver Parameters - neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} - neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username} - neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_password: {get_input: n1kv_vsm_password} - neutron::plugins::ml2::cisco::nexus1000v::poll_duration: {get_input: n1kv_vsm_poll_duration} - neutron::plugins::ml2::cisco::nexus1000v::http_pool_size: {get_input: n1kv_vsm_http_pool_size} - neutron::plugins::ml2::cisco::nexus1000v::http_timeout: {get_input: n1kv_vsm_http_timeout} - neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_sync_interval: {get_input: n1kv_vsm_sync_interval} - neutron::plugins::ml2::cisco::nexus1000v::max_vsm_retries: {get_input: 
n1kv_max_vsm_retries} + datafiles: + cisco_n1kv_data: + mapped_data: + #enable_cisco_n1kv: {get_input: EnableCiscoN1kv} + # VEM Parameters + n1kv_vem_source: {get_input: n1kv_vem_source} + n1kv_vem_version: {get_input: n1kv_vem_version} + neutron::agents::n1kv_vem::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} + neutron::agents::n1kv_vem::n1kv_vsm_domain_id: {get_input: n1kv_vsm_domain_id} + neutron::agents::n1kv_vem::n1kv_vsm_ip_v6: {get_input: n1kv_vsm_ip_v6} + neutron::agents::n1kv_vem::host_mgmt_intf: {get_input: n1kv_vem_host_mgmt_intf} + neutron::agents::n1kv_vem::uplink_profile: {get_input: n1kv_vem_uplink_profile} + neutron::agents::n1kv_vem::vtep_config: {get_input: n1kv_vem_vtep_config} + neutron::agents::n1kv_vem::portdb: {get_input: n1kv_vem_portdb} + neutron::agents::n1kv_vem::vteps_in_same_subnet: {get_input: n1kv_vem_vteps_in_same_subnet} + neutron::agents::n1kv_vem::fastpath_flood: {get_input: n1kv_vem_fastpath_flood} + #VSM Parameter + n1kv_vsm_source: {get_input: n1kv_vsm_source} + n1kv_vsm_version: {get_input: n1kv_vsm_version} + n1k_vsm::phy_if_bridge: {get_input: n1kv_vsm_host_mgmt_intf} + n1k_vsm::vsm_role: {get_input: n1kv_vsm_role} + n1k_vsm::pacemaker_control: {get_input: n1kv_vsm_pacemaker_ctrl} + n1k_vsm::existing_bridge: {get_input: n1kv_vsm_existing_br} + n1k_vsm::vsm_admin_passwd: {get_input: n1kv_vsm_password} + n1k_vsm::vsm_domain_id: {get_input: n1kv_vsm_domain_id} + n1k_vsm::vsm_mgmt_ip: {get_input: n1kv_vsm_ip} + n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask} + n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip} + n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip} + n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan} + # Cisco N1KV driver Parameters + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username} + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_password: {get_input: n1kv_vsm_password} + neutron::plugins::ml2::cisco::nexus1000v::poll_duration: {get_input: n1kv_vsm_poll_duration} + neutron::plugins::ml2::cisco::nexus1000v::http_pool_size: {get_input: n1kv_vsm_http_pool_size} + neutron::plugins::ml2::cisco::nexus1000v::http_timeout: {get_input: n1kv_vsm_http_timeout} + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_sync_interval: {get_input: n1kv_vsm_sync_interval} + neutron::plugins::ml2::cisco::nexus1000v::max_vsm_retries: {get_input: n1kv_max_vsm_retries} CiscoN1kvDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml index 172484dc..6ee06d78 100644 --- a/puppet/objectstorage-role.yaml +++ b/puppet/objectstorage-role.yaml @@ -447,6 +447,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: config: {get_resource: UpdateConfig} server: {get_resource: SwiftStorage} diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml index 2e1bd6f1..1f68f41f 100644 --- a/puppet/role.role.j2.yaml +++ b/puppet/role.role.j2.yaml @@ -481,6 +481,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: config: {get_resource: UpdateConfig} server: {get_resource: {{role}}} diff --git a/puppet/services/aodh-base.yaml b/puppet/services/aodh-base.yaml index c2c2d023..48a2aecd 100644 --- a/puppet/services/aodh-base.yaml +++ b/puppet/services/aodh-base.yaml @@ -77,8 +77,10 @@ outputs: aodh::rabbit_use_ssl: {get_param: 
RabbitClientUseSSL} aodh::rabbit_port: {get_param: RabbitClientPort} aodh::keystone::authtoken::project_name: 'service' + aodh::keystone::authtoken::user_domain_name: 'Default' + aodh::keystone::authtoken::project_domain_name: 'Default' aodh::keystone::authtoken::password: {get_param: AodhPassword} - aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } aodh::auth::auth_password: {get_param: AodhPassword} aodh::auth::auth_region: 'regionOne' diff --git a/puppet/services/barbican-api.yaml b/puppet/services/barbican-api.yaml index cba92415..d8787c87 100644 --- a/puppet/services/barbican-api.yaml +++ b/puppet/services/barbican-api.yaml @@ -74,7 +74,7 @@ outputs: map_merge: - get_attr: [ApacheServiceBase, role_data, config_settings] - barbican::keystone::authtoken::password: {get_param: BarbicanPassword} - barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} barbican::keystone::authtoken::project_name: 'service' barbican::api::host_href: {get_param: [EndpointMap, BarbicanPublic, uri]} @@ -135,14 +135,14 @@ outputs: nova::compute::barbican_endpoint: get_param: [EndpointMap, BarbicanInternal, uri] nova::compute::barbican_auth_endpoint: - get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix] + get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] cinder_api: cinder::api::keymgr_api_class: > castellan.key_manager.barbican_key_manager.BarbicanKeyManager cinder::api::keymgr_encryption_api_url: get_param: [EndpointMap, BarbicanInternal, uri] cinder::api::keymgr_encryption_auth_url: - get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix] + get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] metadata_settings: get_attr: [ApacheServiceBase, role_data, metadata_settings] upgrade_tasks: diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml index 874c6893..a9c84289 100644 --- a/puppet/services/ceilometer-base.yaml +++ b/puppet/services/ceilometer-base.yaml @@ -98,14 +98,18 @@ outputs: # we include db_sync class in puppet-tripleo ceilometer::db::sync_db: false ceilometer::keystone::authtoken::project_name: 'service' + ceilometer::keystone::authtoken::user_domain_name: 'Default' + ceilometer::keystone::authtoken::project_domain_name: 'Default' ceilometer::keystone::authtoken::password: {get_param: CeilometerPassword} - ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } ceilometer::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword} ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers} ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion} ceilometer::agent::auth::auth_tenant_name: 'service' + ceilometer::agent::auth::auth_user_domain_name: 'Default' + 
ceilometer::agent::auth::auth_project_domain_name: 'Default' ceilometer::agent::auth::auth_endpoint_type: 'internalURL' ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher} ceilometer::collector::event_dispatcher: {get_param: CeilometerEventDispatcher} diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml index c0ea7aaa..958b0e7d 100644 --- a/puppet/services/cinder-api.yaml +++ b/puppet/services/cinder-api.yaml @@ -80,10 +80,12 @@ outputs: map_merge: - get_attr: [CinderBase, role_data, config_settings] - get_attr: [ApacheServiceBase, role_data, config_settings] - - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} cinder::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} cinder::keystone::authtoken::password: {get_param: CinderPassword} cinder::keystone::authtoken::project_name: 'service' + cinder::keystone::authtoken::user_domain_name: 'Default' + cinder::keystone::authtoken::project_domain_name: 'Default' cinder::api::enable_proxy_headers_parsing: true cinder::api::nova_catalog_info: 'compute:nova:internalURL' @@ -161,13 +163,13 @@ outputs: tags: step0,validation - name: check for cinder running under apache (post upgrade) tags: step1 - shell: "apachectl -t -D DUMP_VHOSTS | grep -q cinder" + shell: "httpd -t -D DUMP_VHOSTS | grep -q cinder" register: cinder_apache ignore_errors: true - name: Stop cinder_api service (running under httpd) tags: step1 service: name=httpd state=stopped - when: "cinder_apache.rc == 0" + when: cinder_apache.rc == 0 - name: Stop and disable cinder_api service (pre-upgrade not under httpd) tags: step1 when: cinder_api_enabled.rc == 0 diff --git a/puppet/services/cinder-backend-scaleio.yaml b/puppet/services/cinder-backend-scaleio.yaml index eb709cd5..c4e4aa3d 100644 --- a/puppet/services/cinder-backend-scaleio.yaml +++ b/puppet/services/cinder-backend-scaleio.yaml @@ -106,6 +106,6 @@ outputs: cinder::backend::scaleio::sio_round_volume_capacity: {get_param: CinderScaleIORoundVolumeCapacity} cinder::backend::scaleio::sio_unmap_volume_before_deletion: {get_param: CinderScaleIOUnmapVolumeBeforeDeletion} cinder::backend::scaleio::sio_max_over_subscription_ratio: {get_param: CinderScaleIOMaxOverSubscriptionRatio} - cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOThinProvision} + cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOSanThinProvision} step_config: | include ::tripleo::profile::base::cinder::volume diff --git a/puppet/services/congress.yaml b/puppet/services/congress.yaml index 8bc9f2e3..fd1ee24b 100644 --- a/puppet/services/congress.yaml +++ b/puppet/services/congress.yaml @@ -74,8 +74,10 @@ outputs: congress::server::bind_host: {get_param: [ServiceNetMap, CongressApiNetwork]} congress::keystone::authtoken::project_name: 'service' - congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} - congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + congress::keystone::authtoken::user_domain_name: 'Default' + congress::keystone::authtoken::project_domain_name: 'Default' + congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} + congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} congress::db::mysql::password: 
{get_param: CongressPassword} congress::db::mysql::user: congress diff --git a/puppet/services/docker.yaml b/puppet/services/docker.yaml new file mode 100644 index 00000000..e7da2383 --- /dev/null +++ b/puppet/services/docker.yaml @@ -0,0 +1,43 @@ +heat_template_version: ocata + +description: > + Configures docker on the host + +parameters: + DockerNamespace: + description: namespace + default: tripleoupstream + type: string + DockerNamespaceIsRegistry: + type: boolean + default: false + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +outputs: + role_data: + description: Role data for the docker service + value: + service_name: docker + config_settings: + tripleo::profile::base::docker::docker_namespace: {get_param: DockerNamespace} + tripleo::profile::base::docker::insecure_registry: {get_param: DockerNamespaceIsRegistry} + step_config: | + include ::tripleo::profile::base::docker + upgrade_tasks: + - name: Install docker packages on upgrade if missing + tags: step3 + yum: name=docker state=latest + diff --git a/puppet/services/ec2-api.yaml b/puppet/services/ec2-api.yaml index 70821396..10f6d311 100644 --- a/puppet/services/ec2-api.yaml +++ b/puppet/services/ec2-api.yaml @@ -72,13 +72,13 @@ outputs: ec2api::api::ec2api_listen: str_replace: template: - '"%{::fqdn_$NETWORK}"' + "%{hiera('fqdn_$NETWORK')}" params: $NETWORK: {get_param: [ServiceNetMap, Ec2ApiNetwork]} ec2api::metadata::metadata_listen: str_replace: template: - '"%{::fqdn_$NETWORK}"' + "%{hiera('fqdn_$NETWORK')}" params: $NETWORK: {get_param: [ServiceNetMap, Ec2ApiMetadataNetwork]} ec2api::db::database_connection: diff --git a/puppet/services/etcd.yaml b/puppet/services/etcd.yaml index ec87a75a..5db8bec0 100644 --- a/puppet/services/etcd.yaml +++ b/puppet/services/etcd.yaml @@ -19,9 +19,9 @@ parameters: via parameter_defaults in the resource registry. type: json EtcdInitialClusterToken: - default: 'etcd-tripleo' description: Initial cluster token for the etcd cluster during bootstrap. type: string + hidden: true MonitoringSubscriptionEtcd: default: 'overcloud-etcd' type: string @@ -36,7 +36,7 @@ outputs: etcd::etcd_name: str_replace: template: - '"%{::fqdn_$NETWORK}"' + "%{hiera('fqdn_$NETWORK')}" params: $NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]} # NOTE: bind IP is found in Heat replacing the network name with the local node IP diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml index ce389dc1..b06f9993 100644 --- a/puppet/services/glance-api.yaml +++ b/puppet/services/glance-api.yaml @@ -48,6 +48,68 @@ parameters: EnableInternalTLS: type: boolean default: false + CephClientUserName: + default: openstack + type: string + Debug: + default: '' + description: Set to True to enable debugging on all services. + type: string + GlanceNotifierStrategy: + description: Strategy to use for Glance notification queue + type: string + default: noop + GlanceLogFile: + description: The filepath of the file to use for logging messages from Glance. + type: string + default: '' + GlanceBackend: + default: swift + description: The short name of the Glance backend to use. 
Should be one + of swift, rbd, or file + type: string + constraints: + - allowed_values: ['swift', 'file', 'rbd'] + GlanceNfsEnabled: + default: false + description: > + When using GlanceBackend 'file', mount NFS share for image storage. + type: boolean + GlanceNfsShare: + default: '' + description: > + NFS share to mount for image storage (when GlanceNfsEnabled is true) + type: string + GlanceNfsOptions: + default: 'intr,context=system_u:object_r:glance_var_lib_t:s0' + description: > + NFS mount options for image storage (when GlanceNfsEnabled is true) + type: string + GlanceRbdPoolName: + default: images + type: string + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint conditions: use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]} @@ -62,13 +124,6 @@ resources: EndpointMap: {get_param: EndpointMap} EnableInternalTLS: {get_param: EnableInternalTLS} - GlanceBase: - type: ./glance-base.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - outputs: role_data: description: Role data for the Glance API role. @@ -80,7 +135,6 @@ outputs: - glance config_settings: map_merge: - - get_attr: [GlanceBase, role_data, config_settings] - get_attr: [TLSProxyBase, role_data, config_settings] - glance::api::database_connection: list_join: @@ -132,10 +186,41 @@ outputs: - use_tls_proxy - 'localhost' - {get_param: [ServiceNetMap, GlanceApiNetwork]} + glance_notifier_strategy: {get_param: GlanceNotifierStrategy} + glance_log_file: {get_param: GlanceLogFile} + glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] } + glance::backend::swift::swift_store_user: service:glance + glance::backend::swift::swift_store_key: {get_param: GlancePassword} + glance::backend::swift::swift_store_create_container_on_put: true + glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} + glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} + glance_backend: {get_param: GlanceBackend} + glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName} + glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort} + glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword} + glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + glance::notify::rabbitmq::notification_driver: messagingv2 + tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled} + tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare} + tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions} + service_config_settings: + keystone: + glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]} + glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]} + glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]} + glance::keystone::auth::password: {get_param: GlancePassword } + glance::keystone::auth::region: {get_param: 
KeystoneRegion} + glance::keystone::auth::tenant: 'service' + mysql: + glance::db::mysql::password: {get_param: GlancePassword} + glance::db::mysql::user: glance + glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + glance::db::mysql::dbname: glance + glance::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" step_config: | include ::tripleo::profile::base::glance::api - service_config_settings: - get_attr: [GlanceBase, role_data, service_config_settings] upgrade_tasks: - name: Check if glance_api is deployed command: systemctl is-enabled openstack-glance-api diff --git a/puppet/services/glance-base.yaml b/puppet/services/glance-base.yaml deleted file mode 100644 index f5548982..00000000 --- a/puppet/services/glance-base.yaml +++ /dev/null @@ -1,126 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Glance Common settings with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - CephClientUserName: - default: openstack - type: string - Debug: - default: '' - description: Set to True to enable debugging on all services. - type: string - GlanceNotifierStrategy: - description: Strategy to use for Glance notification queue - type: string - default: noop - GlanceLogFile: - description: The filepath of the file to use for logging messages from Glance. - type: string - default: '' - GlancePassword: - description: The password for the glance service and db account, used by the glance services. - type: string - hidden: true - GlanceBackend: - default: swift - description: The short name of the Glance backend to use. Should be one - of swift, rbd, or file - type: string - constraints: - - allowed_values: ['swift', 'file', 'rbd'] - GlanceNfsEnabled: - default: false - description: > - When using GlanceBackend 'file', mount NFS share for image storage. - type: boolean - GlanceNfsShare: - default: '' - description: > - NFS share to mount for image storage (when GlanceNfsEnabled is true) - type: string - GlanceNfsOptions: - default: 'intr,context=system_u:object_r:glance_var_lib_t:s0' - description: > - NFS mount options for image storage (when GlanceNfsEnabled is true) - type: string - GlanceRbdPoolName: - default: images - type: string - RabbitPassword: - description: The password for RabbitMQ - type: string - hidden: true - RabbitUserName: - default: guest - description: The username for RabbitMQ - type: string - RabbitClientPort: - default: 5672 - description: Set rabbit subscriber port, change this if using SSL - type: number - RabbitClientUseSSL: - default: false - description: > - Rabbit client subscriber parameter to specify - an SSL connection to the RabbitMQ host. - type: string - KeystoneRegion: - type: string - default: 'regionOne' - description: Keystone region for endpoint - -outputs: - role_data: - description: Role data for the Glance common role. 
- value: - service_name: glance_base - config_settings: - glance_notifier_strategy: {get_param: GlanceNotifierStrategy} - glance_log_file: {get_param: GlanceLogFile} - glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] } - glance::backend::swift::swift_store_user: service:glance - glance::backend::swift::swift_store_key: {get_param: GlancePassword} - glance::backend::swift::swift_store_create_container_on_put: true - glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} - glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} - glance_backend: {get_param: GlanceBackend} - glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName} - glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort} - glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword} - glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL} - glance::notify::rabbitmq::notification_driver: messagingv2 - tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled} - tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare} - tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions} - service_config_settings: - keystone: - glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]} - glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]} - glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]} - glance::keystone::auth::password: {get_param: GlancePassword } - glance::keystone::auth::region: {get_param: KeystoneRegion} - glance::keystone::auth::tenant: 'service' - mysql: - glance::db::mysql::password: {get_param: GlancePassword} - glance::db::mysql::user: glance - glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} - glance::db::mysql::dbname: glance - glance::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml index 08a939a6..f4629917 100644 --- a/puppet/services/gnocchi-api.yaml +++ b/puppet/services/gnocchi-api.yaml @@ -83,10 +83,12 @@ outputs: gnocchi::api::enabled: true gnocchi::api::enable_proxy_headers_parsing: true gnocchi::api::service_name: 'httpd' - gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword} gnocchi::keystone::authtoken::project_name: 'service' + gnocchi::keystone::authtoken::user_domain_name: 'Default' + gnocchi::keystone::authtoken::project_domain_name: 'Default' gnocchi::wsgi::apache::ssl: {get_param: EnableInternalTLS} gnocchi::wsgi::apache::servername: str_replace: @@ -103,10 +105,6 @@ outputs: # internal_api_subnet - > IP/CIDR gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]} gnocchi::wsgi::apache::wsgi_process_display_name: 'gnocchi_wsgi' - - gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} - gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} - gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri]} step_config: | include ::tripleo::profile::base::gnocchi::api service_config_settings: 
diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml index c6310056..d7555561 100644 --- a/puppet/services/gnocchi-base.yaml +++ b/puppet/services/gnocchi-base.yaml @@ -70,8 +70,9 @@ outputs: - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' gnocchi::db::sync::extra_opts: '--skip-storage' gnocchi::storage::swift::swift_user: 'service:gnocchi' - gnocchi::storage::swift::swift_auth_version: 2 + gnocchi::storage::swift::swift_auth_version: 3 gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword} + gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName} gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName} gnocchi::storage::ceph::ceph_keyring: diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml index 483f0a45..c4d44853 100644 --- a/puppet/services/heat-api-cfn.yaml +++ b/puppet/services/heat-api-cfn.yaml @@ -38,8 +38,23 @@ parameters: default: tag: openstack.heat.api.cfn path: /var/log/heat/heat-api-cfn.log + EnableInternalTLS: + type: boolean + default: false + +conditions: + heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]} resources: + + ApacheServiceBase: + type: ./apache.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + EnableInternalTLS: {get_param: EnableInternalTLS} + HeatBase: type: ./heat-base.yaml properties: @@ -59,19 +74,32 @@ outputs: config_settings: map_merge: - get_attr: [HeatBase, role_data, config_settings] - - heat::api_cfn::workers: {get_param: HeatWorkers} - tripleo.heat_api_cfn.firewall_rules: + - get_attr: [ApacheServiceBase, role_data, config_settings] + - tripleo.heat_api_cfn.firewall_rules: '125 heat_cfn': dport: - 8000 - 13800 - # NOTE: bind IP is found in Heat replacing the network name with the - # local node IP for the given network; replacement examples - # (eg. for internal_api): + heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]} + heat::wsgi::apache_api_cfn::ssl: {get_param: EnableInternalTLS} + heat::api_cfn::service_name: 'httpd' + # NOTE: bind IP is found in Heat replacing the network name with the local node IP + # for the given network; replacement examples (eg. 
for internal_api): # internal_api -> IP # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR - heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]} + heat::wsgi::apache_api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]} + heat::wsgi::apache_api_cfn::servername: + str_replace: + template: + "%{hiera('fqdn_$NETWORK')}" + params: + $NETWORK: {get_param: [ServiceNetMap, HeatApiCfnNetwork]} + - + if: + - heat_workers_zero + - {} + - heat::wsgi::apache_api_cfn::workers: {get_param: HeatWorkers} step_config: | include ::tripleo::profile::base::heat::api_cfn service_config_settings: @@ -94,7 +122,16 @@ outputs: shell: /usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\bactive\b' when: heat_api_cfn_enabled.rc == 0 tags: step0,validation - - name: Stop heat_api_cfn service + - name: check for heat_api_cfn running under apache (post upgrade) tags: step1 - when: heat_api_cfn_enabled.rc == 0 - service: name=openstack-heat-api-cfn state=stopped + shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_cfn_wsgi" + register: heat_api_cfn_apache + ignore_errors: true + - name: Stop heat_api_cfn service (running under httpd) + tags: step1 + service: name=httpd state=stopped + when: heat_api_cfn_apache.rc == 0 + - name: Stop and disable heat_api_cfn service (pre-upgrade not under httpd) + tags: step1 + when: heat_api_cfn_apache.rc == 0 + service: name=openstack-heat-api-cfn state=stopped enabled=no diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml index 8879bcb2..7f8fa1fe 100644 --- a/puppet/services/heat-api-cloudwatch.yaml +++ b/puppet/services/heat-api-cloudwatch.yaml @@ -30,8 +30,23 @@ parameters: default: tag: openstack.heat.api.cloudwatch path: /var/log/heat/heat-api-cloudwatch.log + EnableInternalTLS: + type: boolean + default: false + +conditions: + heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]} resources: + + ApacheServiceBase: + type: ./apache.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + EnableInternalTLS: {get_param: EnableInternalTLS} + HeatBase: type: ./heat-base.yaml properties: @@ -51,19 +66,34 @@ outputs: config_settings: map_merge: - get_attr: [HeatBase, role_data, config_settings] - - heat::api_cloudwatch::workers: {get_param: HeatWorkers} - tripleo.heat_api_cloudwatch.firewall_rules: + - get_attr: [ApacheServiceBase, role_data, config_settings] + - tripleo.heat_api_cloudwatch.firewall_rules: '125 heat_cloudwatch': dport: - 8003 - 13003 - # NOTE: bind IP is found in Heat replacing the network name with the - # local node IP for the given network; replacement examples - # (eg. for internal_api): + heat::api_cloudwatch::bind_host: + get_param: [ServiceNetMap, HeatApiCloudwatchNetwork] + heat::wsgi::apache_api_cloudwatch::ssl: {get_param: EnableInternalTLS} + heat::api_cloudwatch::service_name: 'httpd' + # NOTE: bind IP is found in Heat replacing the network name with the local node IP + # for the given network; replacement examples (eg. 
for internal_api): # internal_api -> IP # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR - heat::api_cloudwatch::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]} + heat::wsgi::apache_api_cloudwatch::bind_host: + get_param: [ServiceNetMap, HeatApiCloudwatchNetwork] + heat::wsgi::apache_api_cloudwatch::servername: + str_replace: + template: + "%{hiera('fqdn_$NETWORK')}" + params: + $NETWORK: {get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]} + - + if: + - heat_workers_zero + - {} + - heat::wsgi::apache_api_cloudwatch::workers: {get_param: HeatWorkers} step_config: | include ::tripleo::profile::base::heat::api_cloudwatch upgrade_tasks: @@ -76,7 +106,16 @@ outputs: shell: /usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\bactive\b' when: heat_api_cloudwatch_enabled.rc == 0 tags: step0,validation - - name: Stop heat_api_cloudwatch service + - name: check for heat_api_cloudwatch running under apache (post upgrade) + tags: step1 + shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_cloudwatch_wsgi" + register: heat_api_cloudwatch_apache + ignore_errors: true + - name: Stop heat_api_cloudwatch service (running under httpd) + tags: step1 + service: name=httpd state=stopped + when: heat_api_cloudwatch_apache.rc == 0 + - name: Stop and disable heat_api_cloudwatch service (pre-upgrade not under httpd) tags: step1 when: heat_api_cloudwatch_enabled.rc == 0 - service: name=openstack-heat-api-cloudwatch state=stopped + service: name=openstack-heat-api-cloudwatch state=stopped enabled=no diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml index 2464011b..e21369e8 100644 --- a/puppet/services/heat-api.yaml +++ b/puppet/services/heat-api.yaml @@ -38,8 +38,23 @@ parameters: default: tag: openstack.heat.api path: /var/log/heat/heat-api.log + EnableInternalTLS: + type: boolean + default: false + +conditions: + heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]} resources: + + ApacheServiceBase: + type: ./apache.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + EnableInternalTLS: {get_param: EnableInternalTLS} + HeatBase: type: ./heat-base.yaml properties: @@ -59,19 +74,32 @@ outputs: config_settings: map_merge: - get_attr: [HeatBase, role_data, config_settings] - - heat::api::workers: {get_param: HeatWorkers} - tripleo.heat_api.firewall_rules: + - get_attr: [ApacheServiceBase, role_data, config_settings] + - tripleo.heat_api.firewall_rules: '125 heat_api': dport: - 8004 - 13004 - # NOTE: bind IP is found in Heat replacing the network name with the - # local node IP for the given network; replacement examples - # (eg. for internal_api): + heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]} + heat::wsgi::apache_api::ssl: {get_param: EnableInternalTLS} + heat::api::service_name: 'httpd' + # NOTE: bind IP is found in Heat replacing the network name with the local node IP + # for the given network; replacement examples (eg. 
for internal_api): # internal_api -> IP # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR - heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]} + heat::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]} + heat::wsgi::apache_api::servername: + str_replace: + template: + "%{hiera('fqdn_$NETWORK')}" + params: + $NETWORK: {get_param: [ServiceNetMap, HeatApiNetwork]} + - + if: + - heat_workers_zero + - {} + - heat::wsgi::apache_api::workers: {get_param: HeatWorkers} step_config: | include ::tripleo::profile::base::heat::api service_config_settings: @@ -94,7 +122,16 @@ outputs: shell: /usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\bactive\b' when: heat_api_enabled.rc == 0 tags: step0,validation - - name: Stop heat_api service + - name: check for heat_api running under apache (post upgrade) + tags: step1 + shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_wsgi" + register: heat_api_apache + ignore_errors: true + - name: Stop heat_api service (running under httpd) + tags: step1 + service: name=httpd state=stopped + when: heat_api_apache.rc == 0 + - name: Stop and disable heat_api service (pre-upgrade not under httpd) tags: step1 when: heat_api_enabled.rc == 0 - service: name=openstack-heat-api state=stopped + service: name=openstack-heat-api state=stopped enabled=no diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml index e83a9edd..6ada9c25 100644 --- a/puppet/services/heat-base.yaml +++ b/puppet/services/heat-base.yaml @@ -125,7 +125,9 @@ outputs: value: 'role:admin' heat::rabbit_heartbeat_timeout_threshold: 60 heat::keystone::authtoken::project_name: 'service' - heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + heat::keystone::authtoken::user_domain_name: 'Default' + heat::keystone::authtoken::project_domain_name: 'Default' + heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } heat::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } heat::keystone::authtoken::password: {get_param: HeatPassword} heat::keystone::domain::domain_name: 'heat_stack' diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml index 60b009a8..7ae518b5 100644 --- a/puppet/services/horizon.yaml +++ b/puppet/services/horizon.yaml @@ -78,7 +78,7 @@ outputs: access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"' options: ['FollowSymLinks','MultiViews'] horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]} - horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]} + horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} horizon::password_validator: {get_param: [HorizonPasswordValidator]} horizon::password_validator_help: {get_param: [HorizonPasswordValidatorHelp]} horizon::secret_key: diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml index 7aab6f8d..e24d0de6 100644 --- a/puppet/services/ironic-api.yaml +++ b/puppet/services/ironic-api.yaml @@ -49,8 +49,10 @@ outputs: - get_attr: [IronicBase, role_data, config_settings] - ironic::api::authtoken::password: {get_param: IronicPassword} ironic::api::authtoken::project_name: 'service' + ironic::api::authtoken::user_domain_name: 'Default' + ironic::api::authtoken::project_domain_name: 'Default' ironic::api::authtoken::username: 'ironic' - ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + 
ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } ironic::api::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} # NOTE: bind IP is found in Heat replacing the network name with the # local node IP for the given network; replacement examples diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml index f40c8d99..17616867 100644 --- a/puppet/services/keystone.yaml +++ b/puppet/services/keystone.yaml @@ -35,7 +35,7 @@ parameters: KeystoneTokenProvider: description: The keystone token format type: string - default: 'uuid' + default: 'fernet' constraints: - allowed_values: ['uuid', 'fernet'] ServiceNetMap: @@ -232,7 +232,7 @@ outputs: keystone::cron::token_flush::maxdelay: 3600 keystone::roles::admin::service_tenant: 'service' keystone::roles::admin::admin_tenant: 'admin' - keystone::cron::token_flush::destination: '/dev/null' + keystone::cron::token_flush::destination: '/var/log/keystone/keystone-tokenflush.log' keystone::config::keystone_config: ec2/driver: value: 'keystone.contrib.ec2.backends.sql.Ec2' diff --git a/puppet/services/manila-api.yaml b/puppet/services/manila-api.yaml index 7b78c82e..4061ca28 100644 --- a/puppet/services/manila-api.yaml +++ b/puppet/services/manila-api.yaml @@ -48,9 +48,11 @@ outputs: map_merge: - get_attr: [ManilaBase, role_data, config_settings] - manila::keystone::authtoken::password: {get_param: ManilaPassword} - manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + manila::keystone::authtoken::auth_uri: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } manila::keystone::authtoken::project_name: 'service' + manila::keystone::authtoken::user_domain_name: 'Default' + manila::keystone::authtoken::project_domain_name: 'Default' tripleo.manila_api.firewall_rules: '150 manila': dport: diff --git a/puppet/services/monitoring/sensu-base.yaml b/puppet/services/monitoring/sensu-base.yaml index a8303a59..2fa1569c 100644 --- a/puppet/services/monitoring/sensu-base.yaml +++ b/puppet/services/monitoring/sensu-base.yaml @@ -29,7 +29,18 @@ parameters: default: false description: > RabbitMQ client subscriber parameter to specify an SSL connection - to the RabbitMQ host. + to the RabbitMQ host. Set MonitoringRabbitUseSSL to true without + specifying a private key or cert chain to use SSL transport, + but not cert auth. + type: string + MonitoringRabbitSSLPrivateKey: + default: '' + description: Private key to be used by Sensu to connect to RabbitMQ host. + type: string + MonitoringRabbitSSLCertChain: + default: '' + description: > + Private SSL cert chain to be used by Sensu to connect to RabbitMQ host. type: string MonitoringRabbitPassword: description: The RabbitMQ password used for monitoring purposes. 
@@ -71,6 +82,8 @@ outputs: sensu::rabbitmq_password: {get_param: MonitoringRabbitPassword} sensu::rabbitmq_port: {get_param: MonitoringRabbitPort} sensu::rabbitmq_ssl: {get_param: MonitoringRabbitUseSSL} + sensu::rabbitmq_ssl_private_key: {get_param: MonitoringRabbitSSLPrivateKey} + sensu::rabbitmq_ssl_cert_chain: {get_param: MonitoringRabbitSSLCertChain} sensu::rabbitmq_user: {get_param: MonitoringRabbitUserName} sensu::rabbitmq_vhost: {get_param: MonitoringRabbitVhost} sensu::redact: {get_param: SensuRedactVariables} diff --git a/puppet/services/neutron-api.yaml b/puppet/services/neutron-api.yaml index bb191ff0..bb102c08 100644 --- a/puppet/services/neutron-api.yaml +++ b/puppet/services/neutron-api.yaml @@ -128,18 +128,20 @@ outputs: - {get_param: [EndpointMap, MysqlInternal, host]} - '/ovs_neutron' - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' - neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} neutron::server::api_workers: {get_param: NeutronWorkers} neutron::server::rpc_workers: {get_param: NeutronWorkers} neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} neutron::server::enable_proxy_headers_parsing: true neutron::keystone::authtoken::password: {get_param: NeutronPassword} - neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] } + neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneInternal, uri_no_suffix ] } neutron::server::notifications::tenant_name: 'service' neutron::server::notifications::project_name: 'service' neutron::server::notifications::password: {get_param: NovaPassword} neutron::keystone::authtoken::project_name: 'service' + neutron::keystone::authtoken::user_domain_name: 'Default' + neutron::keystone::authtoken::project_domain_name: 'Default' neutron::server::sync_db: true tripleo.neutron_api.firewall_rules: '114 neutron api': diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml index 43657bd9..55361939 100644 --- a/puppet/services/neutron-base.yaml +++ b/puppet/services/neutron-base.yaml @@ -24,7 +24,7 @@ parameters: type: number NeutronDhcpAgentsPerNetwork: type: number - default: 3 + default: 0 description: The number of neutron dhcp agents to schedule per network NeutronCorePlugin: default: 'ml2' @@ -72,24 +72,31 @@ parameters: via parameter_defaults in the resource registry. type: json +conditions: + dhcp_agents_zero: {equals : [{get_param: NeutronDhcpAgentsPerNetwork}, 0]} + outputs: role_data: description: Role data for the Neutron base service. 
value: service_name: neutron_base config_settings: - neutron::rabbit_password: {get_param: RabbitPassword} - neutron::rabbit_user: {get_param: RabbitUserName} - neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL} - neutron::rabbit_port: {get_param: RabbitClientPort} - neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} - neutron::core_plugin: {get_param: NeutronCorePlugin} - neutron::service_plugins: {get_param: NeutronServicePlugins} - neutron::debug: {get_param: Debug} - neutron::purge_config: {get_param: EnableConfigPurge} - neutron::allow_overlapping_ips: true - neutron::rabbit_heartbeat_timeout_threshold: 60 - neutron::host: '%{::fqdn}' - neutron::db::database_db_max_retries: -1 - neutron::db::database_max_retries: -1 - neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu} + map_merge: + - neutron::rabbit_password: {get_param: RabbitPassword} + neutron::rabbit_user: {get_param: RabbitUserName} + neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + neutron::rabbit_port: {get_param: RabbitClientPort} + neutron::core_plugin: {get_param: NeutronCorePlugin} + neutron::service_plugins: {get_param: NeutronServicePlugins} + neutron::debug: {get_param: Debug} + neutron::purge_config: {get_param: EnableConfigPurge} + neutron::allow_overlapping_ips: true + neutron::rabbit_heartbeat_timeout_threshold: 60 + neutron::host: '%{::fqdn}' + neutron::db::database_db_max_retries: -1 + neutron::db::database_max_retries: -1 + neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu} + - if: + - dhcp_agents_zero + - {} + - tripleo::profile::base::neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} diff --git a/puppet/services/neutron-bgpvpn-api.yaml b/puppet/services/neutron-bgpvpn-api.yaml new file mode 100644 index 00000000..f01cf6f1 --- /dev/null +++ b/puppet/services/neutron-bgpvpn-api.yaml @@ -0,0 +1,34 @@ +heat_template_version: ocata + +description: > + BGPVPN API service configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + BgpvpnServiceProvider: + default: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default' + description: Backend to use as a service provider for BGPVPN + type: string + +outputs: + role_data: + description: Role data for the BGPVPN role. 
+ value: + service_name: neutron_bgpvpn_api + config_settings: + neutron::services::bgpvpn::service_providers: {get_param: BgpvpnServiceProvider} + step_config: | + include ::tripleo::profile::base::neutron::bgpvpn diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml index f27b53f2..473c24b4 100644 --- a/puppet/services/nova-api.yaml +++ b/puppet/services/nova-api.yaml @@ -110,8 +110,10 @@ outputs: - 13774 - 8775 nova::keystone::authtoken::project_name: 'service' + nova::keystone::authtoken::user_domain_name: 'Default' + nova::keystone::authtoken::project_domain_name: 'Default' nova::keystone::authtoken::password: {get_param: NovaPassword} - nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} nova::api::enabled: true nova::api::default_floating_pool: {get_param: NovaDefaultFloatingPool} diff --git a/puppet/services/nova-ironic.yaml b/puppet/services/nova-ironic.yaml index 5eb2170a..843f44c5 100644 --- a/puppet/services/nova-ironic.yaml +++ b/puppet/services/nova-ironic.yaml @@ -44,7 +44,7 @@ outputs: nova::compute::vnc_enabled: false nova::ironic::common::password: {get_param: IronicPassword} nova::ironic::common::project_name: 'service' - nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]} + nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} nova::ironic::common::username: 'ironic' nova::ironic::common::api_endpoint: {get_param: [EndpointMap, IronicInternal, uri]} nova::network::neutron::dhcp_domain: '' diff --git a/puppet/services/octavia-base.yaml b/puppet/services/octavia-base.yaml index b537a2bc..a3f616ff 100644 --- a/puppet/services/octavia-base.yaml +++ b/puppet/services/octavia-base.yaml @@ -56,7 +56,7 @@ outputs: octavia::debug: {get_param: Debug} octavia::purge_config: {get_param: EnableConfigPurge} octavia::rabbit_use_ssl: {get_param: RabbitClientUseSSL} - tripleo::profile::base::octavia::rabbit_user: {get_param: RabbitUserName} - tripleo::profile::base::octavia::rabbit_password: {get_param: RabbitPassword} - tripleo::profile::base::octavia::rabbit_port: {get_param: RabbitClientPort} + octavia::rabbit_userid: {get_param: RabbitUserName} + octavia::rabbit_password: {get_param: RabbitPassword} + octavia::rabbit_port: {get_param: RabbitClientPort} diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml index 1e7aa479..e55cd2ee 100644 --- a/puppet/services/opendaylight-api.yaml +++ b/puppet/services/opendaylight-api.yaml @@ -59,7 +59,7 @@ outputs: opendaylight::extra_features: {get_param: OpenDaylightFeatures} opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP} opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]} - opendaylight::nb_connection_protocol: {get_param: OpenDayLightConnectionProtocol} + opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol} tripleo.opendaylight_api.firewall_rules: '137 opendaylight api': dport: @@ -68,3 +68,26 @@ outputs: - 6653 step_config: | include tripleo::profile::base::neutron::opendaylight + upgrade_tasks: + - name: Check if opendaylight is deployed + command: systemctl is-enabled opendaylight + tags: common + ignore_errors: True + register: opendaylight_enabled + - name: "PreUpgrade step0,validation: Check service opendaylight is 
running" + shell: /usr/bin/systemctl show 'opendaylight' --property ActiveState | grep '\bactive\b' + when: opendaylight_enabled.rc == 0 + tags: step0,validation + - name: Stop opendaylight service + tags: step1 + when: opendaylight_enabled.rc == 0 + service: name=opendaylight state=stopped + - name: Removes ODL snapshots, data, journal directories + file: + state: absent + path: /opt/opendaylight/{{item}} + tags: step2 + with_items: + - snapshots + - data + - journal diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml index cfec3c48..3db0848e 100644 --- a/puppet/services/opendaylight-ovs.yaml +++ b/puppet/services/opendaylight-ovs.yaml @@ -73,3 +73,17 @@ outputs: proto: 'gre' step_config: | include tripleo::profile::base::neutron::plugins::ovs::opendaylight + upgrade_tasks: + - name: Check if openvswitch is deployed + command: systemctl is-enabled openvswitch + tags: common + ignore_errors: True + register: openvswitch_enabled + - name: "PreUpgrade step0,validation: Check service openvswitch is running" + shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b' + when: openvswitch_enabled.rc == 0 + tags: step0,validation + - name: Stop openvswitch service + tags: step1 + when: openvswitch_enabled.rc == 0 + service: name=openvswitch state=stopped diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml index 5be58c18..762d0092 100644 --- a/puppet/services/pacemaker.yaml +++ b/puppet/services/pacemaker.yaml @@ -90,7 +90,7 @@ parameters: PacemakerResources: type: comma_delimited_list description: List of resources managed by pacemaker - default: ['rabbitmq','haproxy'] + default: ['rabbitmq','haproxy','galera'] outputs: role_data: @@ -143,5 +143,7 @@ outputs: pacemaker_cluster: state=online - name: Check pacemaker resource tags: step4 - pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=500 + pacemaker_is_active: + resource: "{{ item }}" + max_wait: 500 with_items: {get_param: PacemakerResources} diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml index b018df35..caada950 100644 --- a/puppet/services/pacemaker/rabbitmq.yaml +++ b/puppet/services/pacemaker/rabbitmq.yaml @@ -68,3 +68,5 @@ outputs: fi pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600 when: is_bootstrap_node and migrate_rabbit_ha_mode + metadata_settings: + get_attr: [RabbitMQServiceBase, role_data, metadata_settings] diff --git a/puppet/services/panko-api.yaml b/puppet/services/panko-api.yaml index 254d7c24..eed98257 100644 --- a/puppet/services/panko-api.yaml +++ b/puppet/services/panko-api.yaml @@ -87,7 +87,7 @@ outputs: upgrade_tasks: - name: Check if httpd is deployed command: systemctl is-enabled httpd - tags: step0,validation + tags: common ignore_errors: True register: httpd_enabled - name: "PreUpgrade step0,validation: Check if httpd is running" @@ -99,3 +99,7 @@ outputs: - name: Stop panko-api service (running under httpd) tags: step1 service: name=httpd state=stopped + when: httpd_enabled.rc == 0 + - name: Install openstack-panko-api package if it was not installed + tags: step3 + yum: name=openstack-panko-api state=latest diff --git a/puppet/services/panko-base.yaml b/puppet/services/panko-base.yaml index 998e64ee..fda13450 100644 --- a/puppet/services/panko-base.yaml +++ b/puppet/services/panko-base.yaml @@ -50,8 +50,10 @@ outputs: panko::debug: {get_param: Debug} 
panko::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } panko::keystone::authtoken::project_name: 'service' + panko::keystone::authtoken::user_domain_name: 'Default' + panko::keystone::authtoken::project_domain_name: 'Default' panko::keystone::authtoken::password: {get_param: PankoPassword} - panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } panko::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } panko::auth::auth_password: {get_param: PankoPassword} panko::auth::auth_region: 'regionOne' diff --git a/puppet/services/rabbitmq-internal-tls-certmonger.yaml b/puppet/services/rabbitmq-internal-tls-certmonger.yaml new file mode 100644 index 00000000..39d6b903 --- /dev/null +++ b/puppet/services/rabbitmq-internal-tls-certmonger.yaml @@ -0,0 +1,47 @@ +heat_template_version: ocata + +description: > + RabbitMQ configurations for using TLS via certmonger. + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + # The following parameters are not needed by the template but are + # required to pass the pep8 tests + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: RabbitMQ configurations for using TLS via certmonger. + value: + service_name: rabbitmq_internal_tls_certmonger + config_settings: + generate_service_certificates: true + tripleo::profile::base::rabbitmq::certificate_specs: + service_certificate: '/etc/pki/tls/certs/rabbitmq.crt' + service_key: '/etc/pki/tls/private/rabbitmq.key' + hostname: + str_replace: + template: "%{hiera('fqdn_NETWORK')}" + params: + NETWORK: {get_param: [ServiceNetMap, RabbitmqNetwork]} + principal: + str_replace: + template: "rabbitmq/%{hiera('fqdn_NETWORK')}" + params: + NETWORK: {get_param: [ServiceNetMap, RabbitmqNetwork]} + metadata_settings: + - service: rabbitmq + network: {get_param: [ServiceNetMap, RabbitmqNetwork]} + type: node diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml index 2c4ccbc9..92a0015a 100644 --- a/puppet/services/rabbitmq.yaml +++ b/puppet/services/rabbitmq.yaml @@ -48,6 +48,18 @@ parameters: MonitoringSubscriptionRabbitmq: default: 'overcloud-rabbitmq' type: string + EnableInternalTLS: + type: boolean + default: false + +resources: + + RabbitMQTLS: + type: OS::TripleO::Services::RabbitMQTLS + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} outputs: role_data: @@ -56,51 +68,62 @@ outputs: service_name: rabbitmq monitoring_subscription: {get_param: MonitoringSubscriptionRabbitmq} config_settings: - rabbitmq::file_limit: {get_param: RabbitFDLimit} - rabbitmq::default_user: {get_param: RabbitUserName} - rabbitmq::default_pass: {get_param: RabbitPassword} - rabbit_ipv6: {get_param: RabbitIPv6} - tripleo.rabbitmq.firewall_rules: - '109 rabbitmq': - dport: - - 4369 - - 5672 - - 25672 - rabbitmq::delete_guest_user: false - rabbitmq::wipe_db_on_cookie_change: true - rabbitmq::port: '5672' - rabbitmq::package_provider: yum - rabbitmq::package_source: 
undef - rabbitmq::repos_ensure: false - rabbitmq::tcp_keepalive: true - rabbitmq_environment: - NODE_PORT: '' - NODE_IP_ADDRESS: '' - RABBITMQ_NODENAME: "rabbit@%{::hostname}" - RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"' - 'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}" - rabbitmq_kernel_variables: - inet_dist_listen_min: '25672' - inet_dist_listen_max: '25672' - rabbitmq_config_variables: - cluster_partition_handling: 'pause_minority' - queue_master_locator: '<<"min-masters">>' - loopback_users: '[]' - rabbitmq::erlang_cookie: - yaql: - expression: $.data.passwords.where($ != '').first() - data: - passwords: - - {get_param: RabbitCookie} - - {get_param: [DefaultPasswords, rabbit_cookie]} - # NOTE: bind IP is found in Heat replacing the network name with the - # local node IP for the given network; replacement examples - # (eg. for internal_api): - # internal_api -> IP - # internal_api_uri -> [IP] - # internal_api_subnet - > IP/CIDR - rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]} - rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues} + map_merge: + - get_attr: [RabbitMQTLS, role_data, config_settings] + - + rabbitmq::file_limit: {get_param: RabbitFDLimit} + rabbitmq::default_user: {get_param: RabbitUserName} + rabbitmq::default_pass: {get_param: RabbitPassword} + rabbit_ipv6: {get_param: RabbitIPv6} + tripleo.rabbitmq.firewall_rules: + '109 rabbitmq': + dport: + - 4369 + - 5672 + - 25672 + rabbitmq::delete_guest_user: false + rabbitmq::wipe_db_on_cookie_change: true + rabbitmq::port: '5672' + rabbitmq::package_provider: yum + rabbitmq::package_source: undef + rabbitmq::repos_ensure: false + rabbitmq::tcp_keepalive: true + rabbitmq_environment: + NODE_PORT: '' + NODE_IP_ADDRESS: '' + RABBITMQ_NODENAME: "rabbit@%{::hostname}" + RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"' + 'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}" + rabbitmq_kernel_variables: + inet_dist_listen_min: '25672' + inet_dist_listen_max: '25672' + rabbitmq_config_variables: + cluster_partition_handling: 'pause_minority' + queue_master_locator: '<<"min-masters">>' + loopback_users: '[]' + rabbitmq::erlang_cookie: + yaql: + expression: $.data.passwords.where($ != '').first() + data: + passwords: + - {get_param: RabbitCookie} + - {get_param: [DefaultPasswords, rabbit_cookie]} + # NOTE: bind IP is found in Heat replacing the network name with the + # local node IP for the given network; replacement examples + # (eg. 
for internal_api): + # internal_api -> IP + # internal_api_uri -> [IP] + # internal_api_subnet - > IP/CIDR + rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]} + rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues} + rabbitmq::ssl: {get_param: EnableInternalTLS} + rabbitmq::ssl_port: '5672' + rabbitmq::ssl_depth: 1 + rabbitmq::ssl_only: {get_param: EnableInternalTLS} + rabbitmq::ssl_interface: {get_param: [ServiceNetMap, RabbitmqNetwork]} + # TODO(jaosorior): Remove this once we set a proper default in + # puppet-tripleo + tripleo::profile::base::rabbitmq::enable_internal_tls: {get_param: EnableInternalTLS} step_config: | include ::tripleo::profile::base::rabbitmq upgrade_tasks: @@ -110,4 +133,5 @@ outputs: - name: Start rabbitmq service tags: step4 service: name=rabbitmq-server state=started - + metadata_settings: + get_attr: [RabbitMQTLS, role_data, metadata_settings] diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml index 224989be..d5131f61 100644 --- a/puppet/services/sahara-base.yaml +++ b/puppet/services/sahara-base.yaml @@ -70,12 +70,14 @@ outputs: sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL} sahara::rabbit_port: {get_param: RabbitClientPort} sahara::debug: {get_param: Debug} + # Remove admin_password when https://review.openstack.org/442619 is merged. sahara::admin_password: {get_param: SaharaPassword} - sahara::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } - sahara::identity_uri: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } sahara::use_neutron: true sahara::plugins: {get_param: SaharaPlugins} sahara::rpc_backend: rabbit - sahara::admin_tenant_name: 'service' sahara::db::database_db_max_retries: -1 sahara::db::database_max_retries: -1 + sahara::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} + sahara::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} + sahara::keystone::authtoken::password: {get_param: SaharaPassword} + sahara::keystone::authtoken::project_name: 'service' diff --git a/puppet/services/sshd.yaml b/puppet/services/sshd.yaml index 41e144a0..12998c33 100644 --- a/puppet/services/sshd.yaml +++ b/puppet/services/sshd.yaml @@ -29,6 +29,6 @@ outputs: value: service_name: sshd config_settings: - BannerText: {get_param: BannerText} + tripleo::profile::base::sshd::bannertext: {get_param: BannerText} step_config: | include ::tripleo::profile::base::sshd diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml index 9b0d2de1..0c3cc1ec 100644 --- a/puppet/services/swift-proxy.yaml +++ b/puppet/services/swift-proxy.yaml @@ -31,9 +31,9 @@ parameters: description: Timeout for requests going from swift-proxy to swift a/c/o services. type: number SwiftWorkers: - default: 0 + default: auto description: Number of workers for Swift service. 
- type: number + type: string KeystoneRegion: type: string default: 'regionOne' diff --git a/puppet/services/tacker.yaml b/puppet/services/tacker.yaml index 6f92066e..a4c139b5 100644 --- a/puppet/services/tacker.yaml +++ b/puppet/services/tacker.yaml @@ -75,8 +75,10 @@ outputs: tacker::server::bind_host: {get_param: [ServiceNetMap, TackerApiNetwork]} tacker::keystone::authtoken::project_name: 'service' - tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} - tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + tacker::keystone::authtoken::user_domain_name: 'Default' + tacker::keystone::authtoken::project_domain_name: 'Default' + tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} + tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} tacker::db::mysql::password: {get_param: TackerPassword} tacker::db::mysql::user: tacker diff --git a/puppet/services/vpp.yaml b/puppet/services/vpp.yaml index 59866d39..7c8f8a28 100644 --- a/puppet/services/vpp.yaml +++ b/puppet/services/vpp.yaml @@ -42,6 +42,16 @@ outputs: step_config: | include ::tripleo::profile::base::vpp upgrade_tasks: + - name: Check if vpp is deployed + command: systemctl is-enabled vpp + tags: common + ignore_errors: True + register: vpp_enabled + - name: "PreUpgrade step0,validation: Check service vpp is running" + shell: /usr/bin/systemctl show 'vpp' --property ActiveState | grep '\bactive\b' + when: vpp_enabled.rc == 0 + tags: step0,validation - name: Stop vpp service - tags: step2 + tags: step1 + when: vpp_enabled.rc == 0 service: name=vpp state=stopped diff --git a/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml b/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml index f9afb18d..9343d99e 100644 --- a/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml +++ b/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml @@ -64,6 +64,8 @@ features: - Support for Octavia composable services for LBaaS with Neutron. - Support for Collectd composable services for performance monitoring. - Support for Tacker composable service for VNF management. + - Add the plan-environment.yaml file which will facilitate deployment plan + import and export. upgrade: - Update OpenDaylight deployment to use networking-odl v2 as a mechanism driver. diff --git a/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml b/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml new file mode 100644 index 00000000..50b8167e --- /dev/null +++ b/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml @@ -0,0 +1,6 @@ +--- +features: + - Keystone's default token provider is now fernet instead of UUID +upgrade: + - When upgrading, old tokens will not work anymore due to the provider + changing from UUID to fernet. 
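Note on the RabbitMQ changes above: internal TLS is now driven by the new EnableInternalTLS parameter (which feeds rabbitmq::ssl and rabbitmq::ssl_only) together with the OS::TripleO::Services::RabbitMQTLS resource backed by puppet/services/rabbitmq-internal-tls-certmonger.yaml. A minimal environment-file sketch follows; the file name and the exact resource_registry mapping are illustrative assumptions, not something this change ships.

    # enable-rabbitmq-internal-tls.yaml -- hypothetical environment file
    resource_registry:
      # Point the TLS hook at the new certmonger template added above.
      OS::TripleO::Services::RabbitMQTLS: puppet/services/rabbitmq-internal-tls-certmonger.yaml
    parameter_defaults:
      # Switches rabbitmq::ssl and rabbitmq::ssl_only on in the generated hieradata.
      EnableInternalTLS: true
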
diff --git a/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml b/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml new file mode 100644 index 00000000..2af6aa72 --- /dev/null +++ b/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add support for the BGPVPN Neutron service plugin diff --git a/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml b/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml new file mode 100644 index 00000000..b3a62ced --- /dev/null +++ b/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml @@ -0,0 +1,6 @@ +--- +features: + - The relevant parameters have been added to deploy the heat APIs over httpd. + This means that HeatWorkers now affects httpd instead of the heat APIs + themselves, and that the apache hieradata will also be deployed on the + nodes where the heat APIs run. diff --git a/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml b/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml new file mode 100644 index 00000000..ec22942a --- /dev/null +++ b/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + NeutronDhcpAgents had a default value of 3 that, even though unused in + practice, was a bad default value. Changing the default to a + sentinel value and making the hiera conditional allows deploy-time + logic in puppet to provide a default based on the number of dhcp + agents being deployed. diff --git a/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml b/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml new file mode 100644 index 00000000..da995949 --- /dev/null +++ b/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml @@ -0,0 +1,6 @@ +--- +security: + - | + Secure EtcdInitialClusterToken by removing the default value + and making the parameter hidden. + Fixes `bug 1673266 <https://bugs.launchpad.net/tripleo/+bug/1673266>`__. diff --git a/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml b/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml new file mode 100644 index 00000000..2f2513c9 --- /dev/null +++ b/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml @@ -0,0 +1,4 @@ +--- +features: + - Deploy Gnocchi with Keystone v3 endpoints and make + sure it doesn't rely on Keystone v2 anymore. diff --git a/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml b/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml deleted file mode 100644 index edcc1250..00000000 --- a/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -deprecations: - - The environments/puppet-pacemaker.yaml file is now deprecated and the HA - deployment is now the default. In order to get the non-HA deployment use - environments/nonha-arch.yaml explicitly. diff --git a/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml b/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml new file mode 100644 index 00000000..c744e0f7 --- /dev/null +++ b/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml @@ -0,0 +1,4 @@ +--- +features: + - Sahara is now deployed with keystone_authtoken parameters and moves + forward to the Keystone v3 API. 
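Note on the heat-apis-over-httpd release note above: HeatWorkers keeps its name, but it now sizes the httpd processes serving the heat APIs rather than standalone heat-api workers. A hedged override example, with a worker count picked purely for illustration:

    parameter_defaults:
      # Now controls the httpd (WSGI) processes serving the heat APIs,
      # not standalone heat-api worker processes.
      HeatWorkers: 4
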
diff --git a/roles_data.yaml b/roles_data.yaml index 95b25d98..e0c1c42d 100644 --- a/roles_data.yaml +++ b/roles_data.yaml @@ -52,6 +52,7 @@ - OS::TripleO::Services::HeatEngine - OS::TripleO::Services::MySQL - OS::TripleO::Services::MySQLClient + - OS::TripleO::Services::NeutronBgpvpnApi - OS::TripleO::Services::NeutronDhcpAgent - OS::TripleO::Services::NeutronL3Agent - OS::TripleO::Services::NeutronMetadataAgent @@ -126,6 +127,7 @@ - OS::TripleO::Services::OctaviaHousekeeping - OS::TripleO::Services::OctaviaWorker - OS::TripleO::Services::Vpp + - OS::TripleO::Services::Docker - name: Compute CountDefault: 1 diff --git a/roles_data_undercloud.yaml b/roles_data_undercloud.yaml index 2759429c..9ae8549a 100644 --- a/roles_data_undercloud.yaml +++ b/roles_data_undercloud.yaml @@ -26,6 +26,7 @@ - OS::TripleO::Services::MistralExecutor - OS::TripleO::Services::IronicApi - OS::TripleO::Services::IronicConductor + - OS::TripleO::Services::IronicPxe - OS::TripleO::Services::NovaIronic - OS::TripleO::Services::Zaqar - OS::TripleO::Services::NeutronServer @@ -33,3 +34,10 @@ - OS::TripleO::Services::NeutronCorePlugin - OS::TripleO::Services::NeutronOvsAgent - OS::TripleO::Services::NeutronDhcpAgent + - OS::TripleO::Services::AodhApi + - OS::TripleO::Services::AodhEvaluator + - OS::TripleO::Services::AodhNotifier + - OS::TripleO::Services::AodhListener + - OS::TripleO::Services::GnocchiApi + - OS::TripleO::Services::GnocchiMetricd + - OS::TripleO::Services::GnocchiStatsd diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py index 32987cb2..7c04954a 100755 --- a/tools/yaml-validate.py +++ b/tools/yaml-validate.py @@ -23,6 +23,13 @@ envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml', 'tls-endpoints-public-ip.yaml', 'tls-everywhere-endpoints-dns.yaml'] ENDPOINT_MAP_FILE = 'endpoint_map.yaml' +REQUIRED_DOCKER_SECTIONS = ['service_name', 'docker_config', 'kolla_config', + 'puppet_config', 'config_settings', 'step_config'] +OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks', + 'service_config_settings', 'host_prep_tasks'] +DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'puppet_tags', 'step_config', + 'config_image'] + def exit_usage(): print('Usage %s <yaml file or directory>' % sys.argv[0]) @@ -69,6 +76,7 @@ def validate_hci_compute_services_default(env_filename, env_tpl): return 1 return 0 + def validate_mysql_connection(settings): no_op = lambda *args: False error_status = [0] @@ -109,6 +117,55 @@ def validate_mysql_connection(settings): return error_status[0] +def validate_docker_service(filename, tpl): + if 'outputs' in tpl and 'role_data' in tpl['outputs']: + if 'value' not in tpl['outputs']['role_data']: + print('ERROR: invalid role_data for filename: %s' + % filename) + return 1 + role_data = tpl['outputs']['role_data']['value'] + + for section_name in REQUIRED_DOCKER_SECTIONS: + if section_name not in role_data: + print('ERROR: %s is required in role_data for %s.' + % (section_name, filename)) + return 1 + + for section_name in role_data.keys(): + if section_name in REQUIRED_DOCKER_SECTIONS: + continue + else: + if section_name in OPTIONAL_DOCKER_SECTIONS: + continue + else: + print('ERROR: %s is extra in role_data for %s.' + % (section_name, filename)) + return 1 + + if 'puppet_config' in role_data: + puppet_config = role_data['puppet_config'] + for key in puppet_config: + if key in DOCKER_PUPPET_CONFIG_SECTIONS: + continue + else: + print('ERROR: %s should not be in puppet_config section.' 
+ % key) + return 1 + for key in DOCKER_PUPPET_CONFIG_SECTIONS: + if key not in puppet_config: + print('ERROR: %s is required in puppet_config for %s.' + % (key, filename)) + return 1 + + if 'parameters' in tpl: + for param in required_params: + if param not in tpl['parameters']: + print('ERROR: parameter %s is required for %s.' + % (param, filename)) + return 1 + return 0 + + def validate_service(filename, tpl): if 'outputs' in tpl and 'role_data' in tpl['outputs']: if 'value' not in tpl['outputs']['role_data']: @@ -158,6 +215,10 @@ def validate(filename): filename != './puppet/services/services.yaml'): retval = validate_service(filename, tpl) + if (filename.startswith('./docker/services/') and + filename != './docker/services/services.yaml'): + retval = validate_docker_service(filename, tpl) + if filename.endswith('hyperconverged-ceph.yaml'): retval = validate_hci_compute_services_default(filename, tpl) diff --git a/validation-scripts/all-nodes.sh b/validation-scripts/all-nodes.sh index 0b8b3523..f1f4cc11 100644 --- a/validation-scripts/all-nodes.sh +++ b/validation-scripts/all-nodes.sh @@ -67,5 +67,23 @@ function ping_default_gateways() { echo "SUCCESS" } +# Verify the FQDN from the nova/ironic deployment matches the +# FQDN in the heat templates. +function fqdn_check() { + HOSTNAME=$(hostname) + SHORT_NAME=$(hostname -s) + FQDN_FROM_HOSTS=$(awk '$3 == "'${SHORT_NAME}'"{print $2}' /etc/hosts) + echo -n "Checking hostname vs /etc/hosts entry..." + if [[ $HOSTNAME != $FQDN_FROM_HOSTS ]]; then + echo "FAILURE" + echo -e "System hostname: ${HOSTNAME}\nEntry from /etc/hosts: ${FQDN_FROM_HOSTS}\n" + exit 1 + fi + echo "SUCCESS" +} + ping_controller_ips "$ping_test_ips" ping_default_gateways +if [[ $validate_fqdn == "True" ]]; then + fqdn_check +fi
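Note on the new validate_docker_service() check in tools/yaml-validate.py above: a docker service template passes only if role_data carries every section in REQUIRED_DOCKER_SECTIONS, nothing outside the required plus optional lists, and a puppet_config holding exactly the DOCKER_PUPPET_CONFIG_SECTIONS keys. The skeleton below is a hypothetical ./docker/services/ template that would satisfy that check; the service name, config_volume and image values are placeholders, not part of this change.

    heat_template_version: ocata
    outputs:
      role_data:
        value:
          # All six names from REQUIRED_DOCKER_SECTIONS are present,
          # and no section outside the required/optional lists appears.
          service_name: example_api
          config_settings: {}
          step_config: ''
          kolla_config: {}
          puppet_config:
            # Exactly the four keys listed in DOCKER_PUPPET_CONFIG_SECTIONS.
            config_volume: example
            puppet_tags: example_config
            step_config: ''
            config_image: 'centos-binary-example:latest'
          docker_config:
            step_4:
              example_api:
                image: 'centos-binary-example:latest'
                restart: always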