171 files changed, 5483 insertions, 781 deletions
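One of the larger code changes below is in docker/docker-puppet.py, which stops configuring each config volume serially and instead hands the work to a multiprocessing pool sized by the PROCESS_COUNT environment variable (defaulting to the CPU count). A minimal sketch of that pattern, with a simplified stand-in worker and purely illustrative process_map entries (the real list is built from /var/lib/docker-puppet/docker-puppet.json):

    #!/usr/bin/env python
    # Sketch only: simplified stand-in for the mp_puppet_config worker below.
    import multiprocessing
    import os

    def mp_puppet_config(args):
        # The real worker writes a per-volume shell script and runs
        # "puppet apply" inside a one-off container; here we only unpack
        # the same argument list and report what would be configured.
        config_volume, puppet_tags, manifest, config_image, volumes = args
        print('configuring %s from %s' % (config_volume, config_image))
        return 0

    # Illustrative entries, modelled on the docker_puppet_tasks defined
    # later in this diff.
    process_map = [
        ['mysql_init_tasks', 'file,file_line,concat,mysql_database,mysql_grant,mysql_user',
         'include ::tripleo::profile::base::database::mysql',
         'tripleoupstream/centos-binary-mariadb:latest', []],
        ['mongodb_init_tasks', 'file,file_line,concat,mongodb_database,mongodb_user,mongodb_replset',
         'include ::tripleo::profile::base::database::mongodb',
         'tripleoupstream/centos-binary-mongodb:latest', []],
    ]

    if __name__ == '__main__':
        process_count = int(os.environ.get('PROCESS_COUNT',
                                           multiprocessing.cpu_count()))
        pool = multiprocessing.Pool(process_count)
        pool.map(mp_puppet_config, process_map)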
diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index cc22ff92..66dc1d1d 100644
--- a/capabilities-map.yaml
+++ b/capabilities-map.yaml
@@ -335,21 +335,11 @@ topics:
             description: Enables Neutron Nuage backend on the controller
             requires:
               - overcloud-resource-registry-puppet.yaml
-          - file: environments/neutron-opencontrail.yaml
-            title: OpenContrail Extensions
-            description: Enables OpenContrail extensions
-            requires:
-              - overcloud-resource-registry-puppet.yaml
           - file: environments/neutron-opendaylight.yaml
             title: OpenDaylight
             description: Enables OpenDaylight
             requires:
               - overcloud-resource-registry-puppet.yaml
-          - file: environments/neutron-opendaylight-l3.yaml
-            title: OpenDaylight with L3 DVR
-            description: Enables OpenDaylight with L3 DVR
-            requires:
-              - overcloud-resource-registry-puppet.yaml
           - file: environments/neutron-ovs-dpdk.yaml
             title: DPDK with OVS
             description: Deploy DPDK with OVS
@@ -544,14 +534,6 @@ topics:
             description:
             requires:
               - overcloud-resource-registry-puppet.yaml
-      - title: Manage Firewall
-        description:
-        environments:
-          - file: environments/manage-firewall.yaml
-            title: Manage Firewall
-            description:
-            requires:
-              - overcloud-resource-registry-puppet.yaml
   - title: Operational Tools
     description:
@@ -600,3 +582,8 @@ topics:
             description:
             requires:
               - overcloud-resource-registry-puppet.yaml
+      - title: Keystone CADF auditing
+        description: Enable CADF notifications in Keystone for auditing
+        environments:
+          - file: environments/cadf.yaml
+            title: Keystone CADF auditing
diff --git a/ci/environments/multinode-3nodes.yaml b/ci/environments/multinode-3nodes.yaml
index 03065c6a..d6e2376a 100644
--- a/ci/environments/multinode-3nodes.yaml
+++ b/ci/environments/multinode-3nodes.yaml
@@ -55,6 +55,7 @@
     - OS::TripleO::Services::TripleoFirewall
     - OS::TripleO::Services::NovaCompute
     - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::MySQLClient

 - name: Controller
   CountDefault: 1
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
index 0609dd5f..c946ec8a 100644
--- a/ci/environments/multinode.yaml
+++ b/ci/environments/multinode.yaml
@@ -1,9 +1,20 @@
 resource_registry:
   OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
   OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+  OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml
+  OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
+  OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
+  OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
+  OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
+  OS::TripleO::Services::Keepalived: OS::Heat::None
+  OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml

 parameter_defaults:
   ControllerServices:
+    - OS::TripleO::Services::CephMon
+    - OS::TripleO::Services::CephOSD
     - OS::TripleO::Services::CinderApi
     - OS::TripleO::Services::CinderScheduler
     - OS::TripleO::Services::CinderVolume
@@ -45,5 +56,9 @@ parameter_defaults:
     nova::compute::libvirt::libvirt_virt_type: qemu
     # Required for Centos 7.3 and Qemu 2.6.0
     nova::compute::libvirt::libvirt_cpu_mode: 'none'
+    #NOTE(gfidente): not great but we need this to
deploy on ext4 + #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/ + ceph::profile::params::osd_max_object_name_len: 256 + ceph::profile::params::osd_max_object_namespace_len: 64 SwiftCeilometerPipelineEnabled: False Debug: True diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml index 6710fef7..2251cc0c 100644 --- a/ci/environments/multinode_major_upgrade.yaml +++ b/ci/environments/multinode_major_upgrade.yaml @@ -1,6 +1,15 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml + OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml + OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml + OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml + OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml + OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml + OS::TripleO::Services::Keepalived: OS::Heat::None + OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml parameter_defaults: ControllerServices: @@ -37,6 +46,15 @@ parameter_defaults: - OS::TripleO::Services::Timezone - OS::TripleO::Services::TripleoPackages - OS::TripleO::Services::TripleoFirewall + - OS::TripleO::Services::NovaConductor + - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaPlacement + - OS::TripleO::Services::NovaMetadata + - OS::TripleO::Services::NovaScheduler + - OS::TripleO::Services::NovaCompute + - OS::TripleO::Services::NovaLibvirt + - OS::TripleO::Services::Pacemaker + - OS::TripleO::Services::Horizon ControllerExtraConfig: nova::compute::libvirt::services::libvirt_virt_type: qemu nova::compute::libvirt::libvirt_virt_type: qemu diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml index e09ca705..a6f35711 100644 --- a/ci/environments/scenario001-multinode.yaml +++ b/ci/environments/scenario001-multinode.yaml @@ -1,13 +1,24 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml - OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml - OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml - OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml - OS::TripleO::Services::PankoApi: /usr/share/openstack-tripleo-heat-templates/puppet/services/panko-api.yaml - OS::TripleO::Services::Collectd: /usr/share/openstack-tripleo-heat-templates/puppet/services/metrics/collectd.yaml - OS::TripleO::Services::Tacker: /usr/share/openstack-tripleo-heat-templates/puppet/services/tacker.yaml - OS::TripleO::Services::Congress: /usr/share/openstack-tripleo-heat-templates/puppet/services/congress.yaml + OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml + OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml + OS::TripleO::Services::CephClient: ../../puppet/services/ceph-client.yaml + 
OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml + OS::TripleO::Services::Collectd: ../../puppet/services/metrics/collectd.yaml + OS::TripleO::Services::Tacker: ../../puppet/services/tacker.yaml + OS::TripleO::Services::Congress: ../../puppet/services/congress.yaml + OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml + OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml + OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml + OS::TripleO::Services::Redis: ../../puppet/services/pacemaker/database/redis.yaml + OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml + OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml + OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml + OS::TripleO::Services::Keepalived: OS::Heat::None + OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml parameter_defaults: ControllerServices: diff --git a/ci/environments/scenario002-multinode.yaml b/ci/environments/scenario002-multinode.yaml index 3207d133..cbcfa9b3 100644 --- a/ci/environments/scenario002-multinode.yaml +++ b/ci/environments/scenario002-multinode.yaml @@ -4,6 +4,16 @@ resource_registry: OS::TripleO::Services::BarbicanApi: ../../puppet/services/barbican-api.yaml OS::TripleO::Services::Zaqar: ../../puppet/services/zaqar.yaml OS::TripleO::Services::Ec2Api: ../../puppet/services/ec2-api.yaml + OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml + OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml + OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml + OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml + OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml + OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml + OS::TripleO::Services::Keepalived: OS::Heat::None + OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml parameter_defaults: ControllerServices: diff --git a/ci/environments/scenario003-multinode.yaml b/ci/environments/scenario003-multinode.yaml index 1dc8b13d..6e926f74 100644 --- a/ci/environments/scenario003-multinode.yaml +++ b/ci/environments/scenario003-multinode.yaml @@ -6,6 +6,14 @@ resource_registry: OS::TripleO::Services::MistralApi: ../../puppet/services/mistral-api.yaml OS::TripleO::Services::MistralEngine: ../../puppet/services/mistral-engine.yaml OS::TripleO::Services::MistralExecutor: ../../puppet/services/mistral-executor.yaml + OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml + OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml + OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml + OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml + OS::TripleO::Services::Keepalived: OS::Heat::None + OS::TripleO::Tasks::ControllerPrePuppet: 
../../extraconfig/tasks/pre_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml parameter_defaults: ControllerServices: diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py index fe87ce7a..157bf63f 100755 --- a/docker/docker-puppet.py +++ b/docker/docker-puppet.py @@ -23,6 +23,7 @@ import os import subprocess import sys import tempfile +import multiprocessing # this is to match what we do in deployed-server @@ -45,6 +46,15 @@ def pull_image(name): def rm_container(name): + if os.environ.get('SHOW_DIFF', None): + print('Diffing container: %s' % name) + subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + cmd_stdout, cmd_stderr = subproc.communicate() + print(cmd_stdout) + print(cmd_stderr) + print('Removing container: %s' % name) subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name], stdout=subprocess.PIPE, @@ -53,6 +63,8 @@ def rm_container(name): print(cmd_stdout) print(cmd_stderr) +process_count = int(os.environ.get('PROCESS_COUNT', + multiprocessing.cpu_count())) config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json') print('docker-puppet') @@ -75,12 +87,24 @@ configs = {} for service in (json_data or []): if service is None: continue + if isinstance(service, dict): + service = [ + service.get('config_volume'), + service.get('puppet_tags'), + service.get('step_config'), + service.get('config_image'), + service.get('volumes', []), + ] + config_volume = service[0] or '' puppet_tags = service[1] or '' manifest = service[2] or '' config_image = service[3] or '' volumes = service[4] if len(service) > 4 else [] + if not manifest or not config_image: + continue + print('---------') print('config_volume %s' % config_volume) print('puppet_tags %s' % puppet_tags) @@ -106,34 +130,25 @@ for service in (json_data or []): print('Service compilation completed.\n') -for config_volume in configs: - - service = configs[config_volume] - puppet_tags = service[1] or '' - manifest = service[2] or '' - config_image = service[3] or '' - volumes = service[4] if len(service) > 4 else [] - - if puppet_tags: - puppet_tags = "file,file_line,concat,%s" % puppet_tags - else: - puppet_tags = "file,file_line,concat" +def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)): print('---------') print('config_volume %s' % config_volume) print('puppet_tags %s' % puppet_tags) print('manifest %s' % manifest) print('config_image %s' % config_image) + print('volumes %s' % volumes) hostname = short_hostname() + sh_script = '/var/lib/docker-puppet/docker-puppet-%s.sh' % config_volume - with open('/var/lib/docker-puppet/docker-puppet.sh', 'w') as script_file: + with open(sh_script, 'w') as script_file: os.chmod(script_file.name, 0755) script_file.write("""#!/bin/bash set -ex mkdir -p /etc/puppet cp -a /tmp/puppet-etc/* /etc/puppet rm -Rf /etc/puppet/ssl # not in use and causes permission errors - echo '{"step": 6}' > /etc/puppet/hieradata/docker.json + echo '{"step": %(step)s}' > /etc/puppet/hieradata/docker.json TAGS="" if [ -n "%(puppet_tags)s" ]; then TAGS='--tags "%(puppet_tags)s"' @@ -168,7 +183,8 @@ for config_volume in configs: fi """ % {'puppet_tags': puppet_tags, 'name': config_volume, 'hostname': hostname, - 'no_archive': os.environ.get('NO_ARCHIVE', '')}) + 'no_archive': os.environ.get('NO_ARCHIVE', ''), + 'step': 
os.environ.get('STEP', '6')}) with tempfile.NamedTemporaryFile() as tmp_man: with open(tmp_man.name, 'w') as man_file: @@ -186,12 +202,12 @@ for config_volume in configs: '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw', '--volume', 'tripleo_logs:/var/log/tripleo/', - '--volume', '/var/lib/docker-puppet/docker-puppet.sh:/var/lib/docker-puppet/docker-puppet.sh:ro'] + '--volume', '%s:%s:rw' % (sh_script, sh_script) ] for volume in volumes: dcmd.extend(['--volume', volume]) - dcmd.extend(['--entrypoint', '/var/lib/docker-puppet/docker-puppet.sh']) + dcmd.extend(['--entrypoint', sh_script]) env = {} if os.environ.get('NET_HOST', 'false') == 'true': @@ -207,6 +223,34 @@ for config_volume in configs: print(cmd_stderr) if subproc.returncode != 0: print('Failed running docker-puppet.py for %s' % config_volume) - sys.exit(subproc.returncode) - else: - rm_container('docker-puppet-%s' % config_volume) + rm_container('docker-puppet-%s' % config_volume) + return subproc.returncode + +# Holds all the information for each process to consume. +# Instead of starting them all linearly we run them using a process +# pool. This creates a list of arguments for the above function +# to consume. +process_map = [] + +for config_volume in configs: + + service = configs[config_volume] + puppet_tags = service[1] or '' + manifest = service[2] or '' + config_image = service[3] or '' + volumes = service[4] if len(service) > 4 else [] + + if puppet_tags: + puppet_tags = "file,file_line,concat,%s" % puppet_tags + else: + puppet_tags = "file,file_line,concat" + + process_map.append([config_volume, puppet_tags, manifest, config_image, volumes]) + +for p in process_map: + print '--\n%s' % p + +# Fire off processes to perform each configuration. Defaults +# to the number of CPUs on the system. +p = multiprocessing.Pool(process_count) +p.map(mp_puppet_config, process_map) diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2 new file mode 100644 index 00000000..76232d14 --- /dev/null +++ b/docker/docker-steps.j2 @@ -0,0 +1,325 @@ +# certain initialization steps (run in a container) will occur +# on the first role listed in the roles file +{% set primary_role_name = roles[0].name -%} + +heat_template_version: ocata + +description: > + Post-deploy configuration steps via puppet for all roles, + as defined in ../roles_data.yaml + +parameters: + servers: + type: json + description: Mapping of Role name e.g Controller to a list of servers + role_data: + type: json + description: Mapping of Role name e.g Controller to the per-role data + DeployIdentifier: + default: '' + type: string + description: > + Setting this to a unique value will re-run any deployment tasks which + perform configuration on a Heat stack-update. + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + +resources: + + # These utility tasks use docker-puppet.py to execute tasks via puppet + # We only execute these on the first node in the primary role + {{primary_role_name}}DockerPuppetTasks: + type: OS::Heat::Value + properties: + type: json + value: + yaql: + expression: + dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1])) + data: + docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]} + +# BEGIN primary_role_name docker-puppet-tasks (run only on a single node) +{% for step in range(1, 6) %} + + {{primary_role_name}}DockerPuppetJsonConfig{{step}}: + type: OS::Heat::StructuredConfig + properties: + group: json-file + config: + /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json: + {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']} + + {{primary_role_name}}DockerPuppetJsonDeployment{{step}}: + type: OS::Heat::SoftwareDeployment + properties: + server: {get_param: [servers, {{primary_role_name}}, '0']} + config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}} + + {{primary_role_name}}DockerPuppetTasksConfig{{step}}: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: docker-puppet.py} + inputs: + - name: CONFIG + - name: NET_HOST + - name: NO_ARCHIVE + - name: STEP + + {{primary_role_name}}DockerPuppetTasksDeployment{{step}}: + type: OS::Heat::SoftwareDeployment + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step{{step}} + - {{dep.name}}ContainersDeployment_Step{{step}} + {% endfor %} + - {{primary_role_name}}DockerPuppetJsonDeployment{{step}} + properties: + name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}} + server: {get_param: [servers, {{primary_role_name}}, '0']} + config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}} + input_values: + CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json + NET_HOST: 'true' + NO_ARCHIVE: 'true' + STEP: {{step}} + +{% endfor %} +# END primary_role_name docker-puppet-tasks + +{% for role in roles %} + # Post deployment steps for all roles + # A single config is re-applied with an incrementing step number + # {{role.name}} Role steps + {{role.name}}ArtifactsConfig: + type: ../puppet/deploy-artifacts.yaml + + {{role.name}}ArtifactsDeploy: + type: OS::Heat::StructuredDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}ArtifactsConfig} + + {{role.name}}PreConfig: + type: OS::TripleO::Tasks::{{role.name}}PreConfig + properties: + servers: {get_param: [servers, {{role.name}}]} + input_values: + update_identifier: {get_param: DeployIdentifier} + + {{role.name}}CreateConfigDir: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: create-config-dir.sh} + + {{role.name}}CreateConfigDirDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}CreateConfigDir} + + # this creates a JSON config file for our docker-puppet.py script + {{role.name}}GenPuppetConfig: + type: OS::Heat::StructuredConfig + properties: + group: json-file + config: + /var/lib/docker-puppet/docker-puppet.json: + {get_param: [role_data, {{role.name}}, puppet_config]} + + {{role.name}}GenPuppetDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}GenPuppetConfig} + + 
{{role.name}}GenerateConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: docker-puppet.py} + + {{role.name}}GenerateConfigDeployment: + type: OS::Heat::SoftwareDeploymentGroup + depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment] + properties: + name: {{role.name}}GenerateConfigDeployment + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}GenerateConfig} + + {{role.name}}PuppetStepConfig: + type: OS::Heat::Value + properties: + type: string + value: + yaql: + expression: + # select 'step_config' only from services that do not have a docker_image + $.data.service_names.zip($.data.step_config, $.data.docker_image).where($[2] = null).where($[1] != null).select($[1]).join("\n") + data: + service_names: {get_param: [role_data, {{role.name}}, service_names]} + step_config: {get_param: [role_data, {{role.name}}, step_config]} + docker_image: {get_param: [role_data, {{role.name}}, docker_image]} + + {{role.name}}DockerConfig: + type: OS::Heat::Value + properties: + type: json + value: + yaql: + expression: + # select 'docker_config' only from services that have a docker_image + $.data.service_names.zip($.data.docker_config, $.data.docker_image).where($[2] != null).select($[1]).reduce($1.mergeWith($2), {}) + data: + service_names: {get_param: [role_data, {{role.name}}, service_names]} + docker_config: {get_param: [role_data, {{role.name}}, docker_config]} + docker_image: {get_param: [role_data, {{role.name}}, docker_image]} + + # Here we are dumping all the docker container startup configuration data + # so that we can have access to how they are started outside of heat + # and docker-cmd. This lets us create command line tools to start and + # test these containers. 
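The comment block above is the hook for the docker-toool helper added later in this diff: every container's startup settings are dumped to /var/lib/docker-container-startup-configs.json so they can be inspected or replayed outside of Heat and docker-cmd. A minimal sketch of walking that file, assuming the step_N / container-name layout produced by the docker_config sections of the service templates below:

    import json

    STARTUP_CONFIGS = '/var/lib/docker-container-startup-configs.json'

    with open(STARTUP_CONFIGS) as f:
        data = json.load(f)

    # Top-level keys are deployment steps (step_1 .. step_5); each maps a
    # container name to its docker-cmd style startup settings.
    for step in sorted(data or {}):
        print(step)
        for name, settings in sorted((data[step] or {}).items()):
            print('\t%s (start_order: %s, image: %s)'
                  % (name, settings.get('start_order', 0), settings.get('image')))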
+ {{role.name}}DockerConfigJsonStartupData: + type: OS::Heat::StructuredConfig + properties: + group: json-file + config: + /var/lib/docker-container-startup-configs.json: + {get_attr: [{{role.name}}DockerConfig, value]} + + {{role.name}}DockerConfigJsonStartupDataDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + config: {get_resource: {{role.name}}DockerConfigJsonStartupData} + servers: {get_param: [servers, {{role.name}}]} + + {{role.name}}KollaJsonConfig: + type: OS::Heat::StructuredConfig + properties: + group: json-file + config: + {get_param: [role_data, {{role.name}}, kolla_config]} + + {{role.name}}KollaJsonDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + name: {{role.name}}KollaJsonDeployment + config: {get_resource: {{role.name}}KollaJsonConfig} + servers: {get_param: [servers, {{role.name}}]} + + # BEGIN BAREMETAL CONFIG STEPS + + {% if role.name == 'Controller' %} + ControllerPrePuppet: + type: OS::TripleO::Tasks::ControllerPrePuppet + properties: + servers: {get_param: [servers, Controller]} + input_values: + update_identifier: {get_param: DeployIdentifier} + {% endif %} + + {{role.name}}Config: + type: OS::TripleO::{{role.name}}Config + properties: + StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]} + + {% for step in range(1, 6) %} + + {{role.name}}Deployment_Step{{step}}: + type: OS::Heat::StructuredDeploymentGroup + {% if step == 1 %} + depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] + {% else %} + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step{{step -1}} + - {{dep.name}}ContainersDeployment_Step{{step -1}} + {% endfor %} + - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}} + {% endif %} + properties: + name: {{role.name}}Deployment_Step{{step}} + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}Config} + input_values: + step: {{step}} + update_identifier: {get_param: DeployIdentifier} + + {% endfor %} + # END BAREMETAL CONFIG STEPS + + # BEGIN CONTAINER CONFIG STEPS + {% for step in range(1, 6) %} + + {{role.name}}ContainersConfig_Step{{step}}: + type: OS::Heat::StructuredConfig + properties: + group: docker-cmd + config: + {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]} + + {{role.name}}ContainersDeployment_Step{{step}}: + type: OS::Heat::StructuredDeploymentGroup + {% if step == 1 %} + depends_on: + - {{role.name}}PreConfig + - {{role.name}}KollaJsonDeployment + - {{role.name}}GenPuppetDeployment + - {{role.name}}GenerateConfigDeployment + {% else %} + depends_on: + {% for dep in roles %} + - {{dep.name}}ContainersDeployment_Step{{step -1}} + - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first + - {{dep.name}}Deployment_Step{{step -1}} + {% endfor %} + - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}} + {% endif %} + properties: + name: {{role.name}}ContainersDeployment_Step{{step}} + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}} + + {% endfor %} + # END CONTAINER CONFIG STEPS + + {{role.name}}PostConfig: + type: OS::TripleO::Tasks::{{role.name}}PostConfig + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step5 + - {{primary_role_name}}DockerPuppetTasksDeployment5 + {% endfor %} + properties: + servers: {get_param: servers} + input_values: + update_identifier: {get_param: DeployIdentifier} + + # Note, this should come last, so use depends_on to ensure + # this is created after any other 
resources. + {{role.name}}ExtraConfigPost: + depends_on: + {% for dep in roles %} + - {{dep.name}}PostConfig + {% endfor %} + type: OS::TripleO::NodeExtraConfigPost + properties: + servers: {get_param: [servers, {{role.name}}]} + + {% if role.name == 'Controller' %} + ControllerPostPuppet: + depends_on: + - ControllerExtraConfigPost + type: OS::TripleO::Tasks::ControllerPostPuppet + properties: + servers: {get_param: [servers, Controller]} + input_values: + update_identifier: {get_param: DeployIdentifier} + {% endif %} + +{% endfor %} diff --git a/docker/docker-toool b/docker/docker-toool new file mode 100755 index 00000000..36aba4a7 --- /dev/null +++ b/docker/docker-toool @@ -0,0 +1,189 @@ +#!/usr/bin/env python +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import argparse +import os +import shutil +import sys +import json + +docker_cmd = '/bin/docker' + +# Tool to start docker containers as configured via +# tripleo-heat-templates. +# +# This tool reads data from a json file generated from heat when the +# TripleO stack is run. All the configuration data used to start the +# containerized services is in this file. +# +# By default this tool lists all the containers that are started and +# their start order. +# +# If you wish to see the command line used to start a given container, +# specify it by name using the --container argument. --run can then be +# used with this to actually execute docker to run the container.\n +# +# Other options listed allow you to modify this command line for +# debugging purposes. For example: +# +# docker-toool -c swift-proxy -r -e /bin/bash -u root -i -n test +# +# will run the swift proxy container as user root, executing /bin/bash, +# +# named 'test', and will run interactively (eg -ti). 
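For reference, the startup-configs file this tool parses is keyed by step and then by container name. An illustrative fragment, expressed here as a Python literal and based on the mysql docker_config defined later in this diff rather than on any generated file:

    # Illustrative only -- the real file is generated by Heat from each
    # service template's docker_config output.
    example_startup_configs = {
        'step_2': {
            'mysql_bootstrap': {
                'start_order': 0,
                'image': 'tripleoupstream/centos-binary-mariadb:latest',
                'net': 'host',
                'volumes': [
                    '/var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json',
                    'mariadb:/var/lib/mysql/',
                ],
                'environment': ['KOLLA_CONFIG_STRATEGY=COPY_ALWAYS',
                                'KOLLA_BOOTSTRAP=True'],
            },
            'mysql': {
                'start_order': 1,
                'image': 'tripleoupstream/centos-binary-mariadb:latest',
                'restart': 'always',
                'net': 'host',
            },
        },
    }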
+ + +def parse_opts(argv): + parser = argparse.ArgumentParser("Tool to start docker containers via " + "TripleO configurations") + parser.add_argument('-f', '--config', + help="""File to use as docker startup configuration data.""", + default='/var/lib/docker-container-startup-configs.json') + parser.add_argument('-r', '--run', + action='store_true', + help="""Run the container as specified with --container.""", + default=False) + parser.add_argument('-e', '--command', + help="""Override the command used to run the container.""", + default='') + parser.add_argument('-c', '--container', + help="""Specify a container to run or show the command for.""", + default='') + parser.add_argument('-u', '--user', + help="""User to run container as.""", + default='') + parser.add_argument('-n', '--name', + help="""Name of container.""", + default='') + parser.add_argument('-i', '--interactive', + action='store_true', + help="""Start docker container interactively (-ti).""", + default=False) + opts = parser.parse_args(argv[1:]) + + return opts + +def docker_arg_map(key, value): + value = str(value).encode('ascii', 'ignore') + return { + 'environment': "--env=%s" % value, + # 'image': value, + 'net': "--net=%s" % value, + 'pid': "--pid=%s" % value, + 'privileged': "--privileged=%s" % value.lower(), + #'restart': "--restart=%s" % "false", + 'user': "--user=%s" % value, + 'volumes': "--volume=%s" % value, + 'volumes_from': "--volumes-from=%s" % value, + }.get(key, None) + +def run_docker_container(opts, container_name): + container_found = False + + with open(opts.config) as f: + json_data = json.load(f) + + for step in (json_data or []): + if step is None: + continue + for container in (json_data[step] or []): + if container == container_name: + print('container found: %s' % container) + container_found = True + # A few positional arguments: + command = '' + image = '' + + cmd = [ + docker_cmd, + 'run', + '--name', + opts.name or container + ] + for container_data in (json_data[step][container] or []): + if container_data == "environment": + for env in (json_data[step][container][container_data] or []): + arg = docker_arg_map("environment", env) + if arg: + cmd.append(arg) + elif container_data == "volumes": + for volume in (json_data[step][container][container_data] or []): + arg = docker_arg_map("volumes", volume) + if arg: + cmd.append(arg) + elif container_data == "volumes_from": + for volume in (json_data[step][container][container_data] or []): + arg = docker_arg_map("volumes_from", volume) + if arg: + cmd.append(arg) + elif container_data == 'command': + command = json_data[step][container][container_data] + elif container_data == 'image': + image = json_data[step][container][container_data] + else: + # Only add a restart if we're not interactive + if container_data == 'restart': + if opts.interactive: + continue + if container_data == 'user': + if opts.user: + continue + arg = docker_arg_map(container_data, + json_data[step][container][container_data]) + if arg: + cmd.append(arg) + + if opts.user: + cmd.append('--user') + cmd.append(opts.user) + if opts.interactive: + cmd.append('-ti') + # May as well remove it when we're done too + cmd.append('--rm') + cmd.append(image) + if opts.command: + cmd.append(opts.command) + elif command: + cmd.extend(command) + + print ' '.join(cmd) + + if opts.run: + os.execl(docker_cmd, *cmd) + + if not container_found: + print("Container '%s' not found!" 
% container_name) + +def list_docker_containers(opts): + print opts + with open(opts.config) as f: + json_data = json.load(f) + + for step in (json_data or []): + if step is None: + continue + print step + for container in (json_data[step] or []): + print('\tcontainer: %s' % container) + for container_data in (json_data[step][container] or []): + #print('\t\tcontainer_data: %s' % container_data) + if container_data == "start_order": + print('\t\tstart_order: %s' % json_data[step][container][container_data]) + +opts = parse_opts(sys.argv) + +if opts.container: + run_docker_container(opts, opts.container) +else: + list_docker_containers(opts) + diff --git a/docker/post-upgrade.j2.yaml b/docker/post-upgrade.j2.yaml new file mode 100644 index 00000000..4477f868 --- /dev/null +++ b/docker/post-upgrade.j2.yaml @@ -0,0 +1,4 @@ +# Note the include here is the same as post.j2.yaml but the data used at +# # the time of rendering is different if any roles disable upgrades +{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%} +{% include 'docker-steps.j2' %} diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml index 3473f4ca..fd956215 100644 --- a/docker/post.j2.yaml +++ b/docker/post.j2.yaml @@ -1,314 +1 @@ -# certain initialization steps (run in a container) will occur -# on the first role listed in the roles file -{% set primary_role_name = roles[0].name -%} - -heat_template_version: ocata - -description: > - Post-deploy configuration steps via puppet for all roles, - as defined in ../roles_data.yaml - -parameters: - servers: - type: json - description: Mapping of Role name e.g Controller to a list of servers - role_data: - type: json - description: Mapping of Role name e.g Controller to the per-role data - DeployIdentifier: - default: '' - type: string - description: > - Setting this to a unique value will re-run any deployment tasks which - perform configuration on a Heat stack-update. - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. 
- type: json - -resources: - - # These utility tasks use docker-puppet.py to execute tasks via puppet - # We only execute these on the first node in the primary role - {{primary_role_name}}DockerPuppetTasks: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - expression: - dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1])) - data: - docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]} - -# BEGIN primary_role_name docker-puppet-tasks (run only on a single node) -{% for step in range(1, 6) %} - - {{primary_role_name}}DockerPuppetJsonConfig{{step}}: - type: OS::Heat::StructuredConfig - properties: - group: json-file - config: - /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json: - {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']} - - {{primary_role_name}}DockerPuppetJsonDeployment{{step}}: - type: OS::Heat::SoftwareDeployment - properties: - server: {get_param: [servers, {{primary_role_name}}, '0']} - config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}} - - {{primary_role_name}}DockerPuppetTasksConfig{{step}}: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: docker-puppet.py} - inputs: - - name: CONFIG - - name: NET_HOST - - name: NO_ARCHIVE - - {{primary_role_name}}DockerPuppetTasksDeployment{{step}}: - type: OS::Heat::SoftwareDeployment - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step{{step}} - - {{dep.name}}ContainersDeployment_Step{{step}} - {% endfor %} - - {{primary_role_name}}DockerPuppetJsonDeployment{{step}} - properties: - name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}} - server: {get_param: [servers, {{primary_role_name}}, '0']} - config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}} - input_values: - CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json - NET_HOST: 'true' - NO_ARCHIVE: 'true' - -{% endfor %} -# END primary_role_name docker-puppet-tasks - -{% for role in roles %} - # Post deployment steps for all roles - # A single config is re-applied with an incrementing step number - # {{role.name}} Role steps - {{role.name}}ArtifactsConfig: - type: ../puppet/deploy-artifacts.yaml - - {{role.name}}ArtifactsDeploy: - type: OS::Heat::StructuredDeploymentGroup - properties: - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}ArtifactsConfig} - - {{role.name}}PreConfig: - type: OS::TripleO::Tasks::{{role.name}}PreConfig - properties: - servers: {get_param: [servers, {{role.name}}]} - input_values: - update_identifier: {get_param: DeployIdentifier} - - {{role.name}}CreateConfigDir: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: create-config-dir.sh} - - {{role.name}}CreateConfigDirDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}CreateConfigDir} - - # this creates a JSON config file for our docker-puppet.py script - {{role.name}}GenPuppetConfig: - type: OS::Heat::StructuredConfig - properties: - group: json-file - config: - /var/lib/docker-puppet/docker-puppet.json: - yaql: - # select only services that have a non-null config_image with - # a step_config as well - expression: - $.data.config_volume.zip($.data.puppet_tags, $.data.step_config, $.data.config_image).where($[3] != null and $[1] != null) - data: - config_volume: {get_param: [role_data, 
{{role.name}}, config_volume]} - step_config: {get_param: [role_data, {{role.name}}, step_config]} - puppet_tags: {get_param: [role_data, {{role.name}}, puppet_tags]} - config_image: {get_param: [role_data, {{role.name}}, config_image]} - - {{role.name}}GenPuppetDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}GenPuppetConfig} - - {{role.name}}GenerateConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: {get_file: docker-puppet.py} - - {{role.name}}GenerateConfigDeployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment] - properties: - name: {{role.name}}GenerateConfigDeployment - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}GenerateConfig} - - {{role.name}}PuppetStepConfig: - type: OS::Heat::Value - properties: - type: string - value: - yaql: - expression: - # select 'step_config' only from services that do not have a docker_image - $.data.service_names.zip($.data.step_config, $.data.docker_image).where($[2] = null).where($[1] != null).select($[1]).join("\n") - data: - service_names: {get_param: [role_data, {{role.name}}, service_names]} - step_config: {get_param: [role_data, {{role.name}}, step_config]} - docker_image: {get_param: [role_data, {{role.name}}, docker_image]} - - {{role.name}}DockerConfig: - type: OS::Heat::Value - properties: - type: json - value: - yaql: - expression: - # select 'docker_config' only from services that have a docker_image - $.data.service_names.zip($.data.docker_config, $.data.docker_image).where($[2] != null).select($[1]).reduce($1.mergeWith($2), {}) - data: - service_names: {get_param: [role_data, {{role.name}}, service_names]} - docker_config: {get_param: [role_data, {{role.name}}, docker_config]} - docker_image: {get_param: [role_data, {{role.name}}, docker_image]} - - {{role.name}}KollaJsonConfig: - type: OS::Heat::StructuredConfig - properties: - group: json-file - config: - {get_param: [role_data, {{role.name}}, kolla_config]} - - {{role.name}}KollaJsonDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - name: {{role.name}}KollaJsonDeployment - config: {get_resource: {{role.name}}KollaJsonConfig} - servers: {get_param: [servers, {{role.name}}]} - - # BEGIN BAREMETAL CONFIG STEPS - - {% if role.name == 'Controller' %} - ControllerPrePuppet: - type: OS::TripleO::Tasks::ControllerPrePuppet - properties: - servers: {get_param: [servers, Controller]} - input_values: - update_identifier: {get_param: DeployIdentifier} - {% endif %} - - {{role.name}}Config: - type: OS::TripleO::{{role.name}}Config - properties: - StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]} - - {% for step in range(1, 6) %} - - {{role.name}}Deployment_Step{{step}}: - type: OS::Heat::StructuredDeploymentGroup - {% if step == 1 %} - depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] - {% else %} - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step{{step -1}} - - {{dep.name}}ContainersDeployment_Step{{step -1}} - {% endfor %} - - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}} - {% endif %} - properties: - name: {{role.name}}Deployment_Step{{step}} - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}Config} - input_values: - step: {{step}} - update_identifier: {get_param: DeployIdentifier} - - {% 
endfor %} - # END BAREMETAL CONFIG STEPS - - # BEGIN CONTAINER CONFIG STEPS - {% for step in range(1, 6) %} - - {{role.name}}ContainersConfig_Step{{step}}: - type: OS::Heat::StructuredConfig - properties: - group: docker-cmd - config: - {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]} - - {{role.name}}ContainersDeployment_Step{{step}}: - type: OS::Heat::StructuredDeploymentGroup - {% if step == 1 %} - depends_on: - - {{role.name}}PreConfig - - {{role.name}}KollaJsonDeployment - - {{role.name}}GenPuppetDeployment - - {{role.name}}GenerateConfigDeployment - {% else %} - depends_on: - {% for dep in roles %} - - {{dep.name}}ContainersDeployment_Step{{step -1}} - - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first - - {{dep.name}}Deployment_Step{{step -1}} - {% endfor %} - - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}} - {% endif %} - properties: - name: {{role.name}}ContainersDeployment_Step{{step}} - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}} - - {% endfor %} - # END CONTAINER CONFIG STEPS - - {{role.name}}PostConfig: - type: OS::TripleO::Tasks::{{role.name}}PostConfig - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step5 - - {{primary_role_name}}DockerPuppetTasksDeployment5 - {% endfor %} - properties: - servers: {get_param: servers} - input_values: - update_identifier: {get_param: DeployIdentifier} - - # Note, this should come last, so use depends_on to ensure - # this is created after any other resources. - {{role.name}}ExtraConfigPost: - depends_on: - {% for dep in roles %} - - {{dep.name}}PostConfig - {% endfor %} - type: OS::TripleO::NodeExtraConfigPost - properties: - servers: {get_param: [servers, {{role.name}}]} - - {% if role.name == 'Controller' %} - ControllerPostPuppet: - depends_on: - - ControllerExtraConfigPost - type: OS::TripleO::Tasks::ControllerPostPuppet - properties: - servers: {get_param: [servers, Controller]} - input_values: - update_identifier: {get_param: DeployIdentifier} - {% endif %} - -{% endfor %} +{% include 'docker-steps.j2' %} diff --git a/docker/services/README.rst b/docker/services/README.rst index c054e8c0..881a2a37 100644 --- a/docker/services/README.rst +++ b/docker/services/README.rst @@ -19,8 +19,11 @@ Building Kolla Images TripleO currently relies on Kolla docker containers. Kolla supports container customization and we are making use of this feature within TripleO to inject -puppet (our configuration tool of choice) into the Kolla base images. To -build Kolla images for TripleO adjust your kolla config to build your +puppet (our configuration tool of choice) into the Kolla base images. The +undercloud nova-scheduler also requires openstack-tripleo-common to +provide custom filters. + +To build Kolla images for TripleO adjust your kolla config to build your centos base image with puppet using the example below: .. 
code-block:: @@ -28,6 +31,7 @@ centos base image with puppet using the example below: $ cat template-overrides.j2 {% extends parent_template %} {% set base_centos_binary_packages_append = ['puppet'] %} +{% set nova_scheduler_packages_append = ['openstack-tripleo-common'] %} kolla-build --base centos --template-override template-overrides.j2 diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml new file mode 100644 index 00000000..e83f4f19 --- /dev/null +++ b/docker/services/database/mongodb.yaml @@ -0,0 +1,105 @@ +heat_template_version: ocata + +description: > + MongoDB service deployment using puppet and docker + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMongodbImage: + description: image + default: 'centos-binary-mongodb:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + MongodbPuppetBase: + type: ../../../puppet/services/database/mongodb.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Containerized service Mongodb using composable services. + value: + service_name: {get_attr: [MongodbPuppetBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [MongodbPuppetBase, role_data, config_settings] + - mongodb::server::fork: false + step_config: &step_config + list_join: + - "\n" + - - "['Mongodb_database', 'Mongodb_user', 'Mongodb_replset'].each |String $val| { noop_resource($val) }" + - {get_attr: [MongodbPuppetBase, role_data, step_config]} + # BEGIN DOCKER SETTINGS # + docker_image: &mongodb_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ] + puppet_config: + config_volume: mongodb + puppet_tags: file # set this even though file is the default + step_config: *step_config + config_image: *mongodb_image + kolla_config: + /var/lib/kolla/config_files/mongodb.json: + command: /usr/bin/mongod --unixSocketPrefix=/var/run/mongodb --config /etc/mongod.conf run + config_files: + - dest: /etc/mongod.conf + source: /var/lib/kolla/config_files/src/etc/mongod.conf + owner: mongodb + perm: '0600' + - dest: /etc/mongos.conf + source: /var/lib/kolla/config_files/src/etc/mongos.conf + owner: mongodb + perm: '0600' + docker_config: + step_2: + mongodb: + image: *mongodb_image + net: host + privileged: false + volumes: &mongodb_volumes + - /var/lib/kolla/config_files/mongodb.json:/var/lib/kolla/config_files/config.json + - /var/lib/config-data/mongodb/:/var/lib/kolla/config_files/src:ro + - /etc/localtime:/etc/localtime:ro + - logs:/var/log/kolla + - mongodb:/var/lib/mongodb/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + docker_puppet_tasks: + # MySQL database initialization occurs only on single node + step_2: + config_volume: 'mongodb_init_tasks' + puppet_tags: 'mongodb_database,mongodb_user,mongodb_replset' + step_config: 'include ::tripleo::profile::base::database::mongodb' + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: 
DockerMongodbImage} ] + volumes: + - "mongodb:/var/lib/mongodb/" + - "logs:/var/log/kolla:ro" + upgrade_tasks: + - name: Stop and disable mongodb service + tags: step2 + service: name=mongod state=stopped enabled=no diff --git a/docker/services/database/mysql.yaml b/docker/services/database/mysql.yaml new file mode 100644 index 00000000..c34ebe93 --- /dev/null +++ b/docker/services/database/mysql.yaml @@ -0,0 +1,137 @@ +heat_template_version: ocata + +description: > + MySQL service deployment using puppet + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMysqlImage: + description: image + default: 'centos-binary-mariadb:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + MysqlRootPassword: + type: string + hidden: true + default: '' + +resources: + + MysqlPuppetBase: + type: ../../../puppet/services/database/mysql.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Containerized service MySQL using composable services. + value: + service_name: {get_attr: [MysqlPuppetBase, role_data, service_name]} + config_settings: + map_merge: + - {get_attr: [MysqlPuppetBase, role_data, config_settings]} + # Set PID file to what kolla mariadb bootstrap script expects + - tripleo::profile::base::database::mysql::mysql_server_options: + mysqld: + pid-file: /var/lib/mysql/mariadb.pid + mysqld_safe: + pid-file: /var/lib/mysql/mariadb.pid + step_config: &step_config + list_join: + - "\n" + - - "['Mysql_datadir', 'Mysql_user', 'Mysql_database', 'Mysql_grant', 'Mysql_plugin'].each |String $val| { noop_resource($val) }" + - {get_attr: [MysqlPuppetBase, role_data, step_config]} + # BEGIN DOCKER SETTINGS # + docker_image: &mysql_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ] + puppet_config: + config_volume: mysql + puppet_tags: file # set this even though file is the default + step_config: *step_config + config_image: *mysql_image + kolla_config: + /var/lib/kolla/config_files/mysql.json: + command: /usr/bin/mysqld_safe + config_files: + - dest: /etc/mysql/my.cnf + source: /var/lib/kolla/config_files/src/etc/my.cnf + owner: mysql + perm: '0644' + - dest: /etc/my.cnf.d/galera.cnf + source: /var/lib/kolla/config_files/src/etc/my.cnf.d/galera.cnf + owner: mysql + perm: '0644' + docker_config: + step_2: + mysql_bootstrap: + start_order: 0 + detach: false + image: *mysql_image + net: host + volumes: &mysql_volumes + - /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json + - /var/lib/config-data/mysql/:/var/lib/kolla/config_files/src:ro + - /etc/localtime:/etc/localtime:ro + - /etc/hosts:/etc/hosts:ro + - mariadb:/var/lib/mysql/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + - KOLLA_BOOTSTRAP=True + # NOTE(mandre) skip wsrep cluster status check + - KOLLA_KUBERNETES=True + - + list_join: + - '=' + - - 'DB_ROOT_PASSWORD' + - + yaql: + expression: $.data.passwords.where($ != '').first() + data: + passwords: + - {get_param: MysqlRootPassword} + - 
{get_param: [DefaultPasswords, mysql_root_password]} + mysql: + start_order: 1 + image: *mysql_image + restart: always + net: host + volumes: *mysql_volumes + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + docker_puppet_tasks: + # MySQL database initialization occurs only on single node + step_2: + config_volume: 'mysql_init_tasks' + puppet_tags: 'mysql_database,mysql_grant,mysql_user' + step_config: 'include ::tripleo::profile::base::database::mysql' + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ] + volumes: + - "mariadb:/var/lib/mysql/:ro" + - "/var/lib/config-data/mysql/root:/root:ro" #provides .my.cnf + upgrade_tasks: + - name: Stop and disable mysql service + tags: step2 + service: name=mariadb state=stopped enabled=no diff --git a/docker/services/glance-api.yaml b/docker/services/glance-api.yaml new file mode 100644 index 00000000..73d76ad5 --- /dev/null +++ b/docker/services/glance-api.yaml @@ -0,0 +1,103 @@ +heat_template_version: ocata + +description: > + OpenStack Glance service configured with Puppet + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerGlanceApiImage: + description: image + default: 'centos-binary-glance-api:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + GlanceApiPuppetBase: + type: ../../puppet/services/glance-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Glance API role. 
+ value: + service_name: {get_attr: [GlanceApiPuppetBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [GlanceApiPuppetBase, role_data, config_settings] + - glance::api::sync_db: false + step_config: &step_config + get_attr: [GlanceApiPuppetBase, role_data, step_config] + service_config_settings: {get_attr: [GlanceApiPuppetBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS # + docker_image: &glance_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ] + puppet_config: + config_volume: glance_api + puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config + step_config: *step_config + config_image: *glance_image + kolla_config: + /var/lib/kolla/config_files/glance-api.json: + command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf + config_files: + - dest: /etc/glance/glance-api.conf + owner: glance + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/glance/glance-api.conf + - dest: /etc/glance/glance-swift.conf + owner: glance + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/glance/glance-swift.conf + docker_config: + step_3: + glance_api_db_sync: + image: *glance_image + net: host + privileged: false + detach: false + volumes: &glance_volumes + - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json + - /etc/localtime:/etc/localtime:ro + - /lib/modules:/lib/modules:ro + - /var/lib/config-data/glance_api/:/var/lib/kolla/config_files/src:ro + - /run:/run + - /dev:/dev + - /etc/hosts:/etc/hosts:ro + environment: + - KOLLA_BOOTSTRAP=True + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + step_4: + glance_api: + image: *glance_image + net: host + privileged: false + restart: always + volumes: *glance_volumes + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable glance_api service + tags: step2 + service: name=openstack-glance-api state=stopped enabled=no diff --git a/docker/services/heat-api-cfn.yaml b/docker/services/heat-api-cfn.yaml new file mode 100644 index 00000000..2f54c0f1 --- /dev/null +++ b/docker/services/heat-api-cfn.yaml @@ -0,0 +1,97 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Heat API CFN service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerHeatApiCfnImage: + description: image + default: 'centos-binary-heat-api-cfn:latest' + type: string + # we configure all heat services in the same heat engine container + DockerHeatEngineImage: + description: image + default: 'centos-binary-heat-engine:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + HeatBase: + type: ../../puppet/services/heat-api-cfn.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Heat API CFN role. 
+ value: + service_name: {get_attr: [HeatBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + get_attr: [HeatBase, role_data, step_config] + service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &heat_api_cfn_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnImage} ] + puppet_config: + config_volume: heat + puppet_tags: heat_config,file,concat,file_line + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ] + kolla_config: + /var/lib/kolla/config_files/heat_api_cfn.json: + command: /usr/bin/heat-api-cfn --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf + config_files: + - dest: /etc/heat/heat.conf + owner: heat + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/heat/heat.conf + docker_config: + step_4: + heat_api_cfn: + image: *heat_api_cfn_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /dev:/dev + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable heat_api_cfn service + tags: step2 + service: name=openstack-heat-api-cfn state=stopped enabled=no diff --git a/docker/services/heat-api.yaml b/docker/services/heat-api.yaml new file mode 100644 index 00000000..a212d254 --- /dev/null +++ b/docker/services/heat-api.yaml @@ -0,0 +1,97 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Heat API service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerHeatApiImage: + description: image + default: 'centos-binary-heat-api:latest' + type: string + # we configure all heat services in the same heat engine container + DockerHeatEngineImage: + description: image + default: 'centos-binary-heat-engine:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + HeatBase: + type: ../../puppet/services/heat-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Heat API role. 
+ value: + service_name: {get_attr: [HeatBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + get_attr: [HeatBase, role_data, step_config] + service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &heat_api_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiImage} ] + puppet_config: + config_volume: heat + puppet_tags: heat_config,file,concat,file_line + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ] + kolla_config: + /var/lib/kolla/config_files/heat_api.json: + command: /usr/bin/heat-api --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf + config_files: + - dest: /etc/heat/heat.conf + owner: heat + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/heat/heat.conf + docker_config: + step_4: + heat_api: + image: *heat_api_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /dev:/dev + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable heat_api service + tags: step2 + service: name=openstack-heat-api state=stopped enabled=no diff --git a/docker/services/heat-engine.yaml b/docker/services/heat-engine.yaml new file mode 100644 index 00000000..c60a3840 --- /dev/null +++ b/docker/services/heat-engine.yaml @@ -0,0 +1,99 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Heat Engine service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerHeatEngineImage: + description: image + default: 'centos-binary-heat-engine:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + HeatBase: + type: ../../puppet/services/heat-engine.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Heat Engine role. 
+ value: + service_name: {get_attr: [HeatBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + get_attr: [HeatBase, role_data, step_config] + service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &heat_engine_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ] + puppet_config: + config_volume: heat + puppet_tags: heat_config,file,concat,file_line + step_config: *step_config + config_image: *heat_engine_image + kolla_config: + /var/lib/kolla/config_files/heat_engine.json: + command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf + config_files: + - dest: /etc/heat/heat.conf + owner: heat + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/heat/heat.conf + docker_config: + step_3: + heat_engine_db_sync: + image: *heat_engine_image + net: host + privileged: false + detach: false + volumes: + - /var/lib/config-data/heat/etc/heat:/etc/heat:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + command: ['heat-manage', 'db_sync'] + step_4: + heat_engine: + image: *heat_engine_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable heat_engine service + tags: step2 + service: name=openstack-heat-engine state=stopped enabled=no diff --git a/docker/services/ironic-api.yaml b/docker/services/ironic-api.yaml new file mode 100644 index 00000000..ca42c9ec --- /dev/null +++ b/docker/services/ironic-api.yaml @@ -0,0 +1,106 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Ironic API service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerIronicApiImage: + description: image + default: 'centos-binary-ironic-api:latest' + type: string + DockerIronicConfigImage: + description: image + default: 'centos-binary-ironic-pxe:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + IronicApiBase: + type: ../../puppet/services/ironic-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Ironic API role. 
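The upgrade_tasks lists attached to each service are plain Ansible tasks; during a major upgrade they are gathered into per-step task runs so that, at step2, the baremetal systemd unit is stopped and disabled before the containerized replacement is brought up in a later step. Roughly how one of the tasks above would sit inside a play (the play wrapper and host group are illustrative, not part of the templates):

- hosts: overcloud
  tasks:
    - name: Stop and disable heat_engine service
      tags: step2
      service: name=openstack-heat-engine state=stopped enabled=no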
+ value: + service_name: {get_attr: [IronicApiBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [IronicApiBase, role_data, config_settings] + step_config: &step_config + get_attr: [IronicApiBase, role_data, step_config] + service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &ironic_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ] + puppet_config: + config_volume: ironic + puppet_tags: ironic_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/ironic_api.json: + command: /usr/bin/ironic-api + config_files: + - dest: /etc/ironic/ironic.conf + owner: ironic + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf + docker_config: + step_3: + ironic_db_sync: + image: *ironic_image + net: host + privileged: false + detach: false + volumes: + - /var/lib/config-data/ironic/etc/:/etc/:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + command: ['ironic-dbsync', '--config-file', '/etc/ironic/ironic.conf'] + step_4: + ironic_api: + start_order: 10 + image: *ironic_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable ironic_api service + tags: step2 + service: name=openstack-ironic-api state=stopped enabled=no diff --git a/docker/services/ironic-conductor.yaml b/docker/services/ironic-conductor.yaml new file mode 100644 index 00000000..ff470008 --- /dev/null +++ b/docker/services/ironic-conductor.yaml @@ -0,0 +1,118 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Ironic Conductor service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerIronicConductorImage: + description: image + default: 'centos-binary-ironic-conductor:latest' + type: string + DockerIronicConfigImage: + description: image + default: 'centos-binary-ironic-pxe:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + IronicConductorBase: + type: ../../puppet/services/ironic-conductor.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Ironic Conductor role. 
+ value: + service_name: {get_attr: [IronicConductorBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [IronicConductorBase, role_data, config_settings] + # to avoid hard linking errors we store these on the same + # volume/device as the ironic master_path + - ironic::drivers::pxe::tftp_root: /var/lib/ironic/tftpboot + - ironic::drivers::pxe::tftp_master_path: /var/lib/ironic/tftpboot/master_images + - ironic::pxe::tftp_root: /var/lib/ironic/tftpboot + - ironic::pxe::http_root: /var/lib/ironic/httpboot + - ironic::conductor::http_root: /var/lib/ironic/httpboot + step_config: &step_config + get_attr: [IronicConductorBase, role_data, step_config] + service_config_settings: {get_attr: [IronicConductorBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &ironic_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ] + puppet_config: + config_volume: ironic + puppet_tags: ironic_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/ironic_conductor.json: + command: /usr/bin/ironic-conductor + config_files: + - dest: /etc/ironic/ironic.conf + owner: ironic + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf + permissions: + - path: /var/lib/ironic/httpboot + owner: ironic:ironic + recurse: true + - path: /var/lib/ironic/tftpboot + owner: ironic:ironic + recurse: true + docker_config: + step_4: + ironic-init-dirs: + image: *ironic_image + user: root + command: ['/bin/bash', '-c', 'mkdir /var/lib/ironic/httpboot && mkdir /var/lib/ironic/tftpboot'] + volumes: + - ironic:/var/lib/ironic + ironic_conductor: + start_order: 80 + image: *ironic_image + net: host + privileged: true + restart: always + volumes: + - /var/lib/kolla/config_files/ironic_conductor.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /lib/modules:/lib/modules:ro + - /sys:/sys + - /dev:/dev + - /run:/run #shared? + - ironic:/var/lib/ironic + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable ironic_conductor service + tags: step2 + service: name=openstack-ironic-conductor state=stopped enabled=no diff --git a/docker/services/ironic-pxe.yaml b/docker/services/ironic-pxe.yaml new file mode 100644 index 00000000..25505192 --- /dev/null +++ b/docker/services/ironic-pxe.yaml @@ -0,0 +1,133 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Ironic PXE service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerIronicPxeImage: + description: image + default: 'centos-binary-ironic-pxe:latest' + type: string + DockerIronicConfigImage: + description: image + default: 'centos-binary-ironic-pxe:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +outputs: + role_data: + description: Role data for the Ironic PXE role. 
+ value: + service_name: ironic_pxe + config_settings: {} + step_config: &step_config '' + service_config_settings: {} + # BEGIN DOCKER SETTINGS + docker_image: &ironic_pxe_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicPxeImage} ] + puppet_config: + config_volume: ironic + puppet_tags: ironic_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/ironic_pxe_http.json: + command: /usr/sbin/httpd -DFOREGROUND + config_files: + - dest: /etc/ironic/ironic.conf + owner: ironic + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf + - dest: /etc/httpd/conf.d/10-ipxe_vhost.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-ipxe_vhost.conf + - dest: /etc/httpd/conf/httpd.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf + - dest: /etc/httpd/conf/ports.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf + /var/lib/kolla/config_files/ironic_pxe_tftp.json: + command: /usr/sbin/in.tftpd --foreground --user root --address 0.0.0.0:69 --map-file /var/lib/ironic/tftpboot/map-file /var/lib/ironic/tftpboot + config_files: + - dest: /etc/ironic/ironic.conf + owner: ironic + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf + - dest: /var/lib/ironic/tftpboot/chain.c32 + owner: ironic + perm: '0744' + source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/chain.c32 + - dest: /var/lib/ironic/tftpboot/pxelinux.0 + owner: ironic + perm: '0744' + source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/pxelinux.0 + - dest: /var/lib/ironic/tftpboot/ipxe.efi + owner: ironic + perm: '0744' + source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/ipxe.efi + - dest: /var/lib/ironic/tftpboot/undionly.kpxe + owner: ironic + perm: '0744' + source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/undionly.kpxe + - dest: /var/lib/ironic/tftpboot/map-file + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/map-file + docker_config: + step_4: + ironic_pxe_tftp: + start_order: 90 + image: *ironic_pxe_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/ironic_pxe_tftp.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /dev/log:/dev/log + - ironic:/var/lib/ironic/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + ironic_pxe_http: + start_order: 91 + image: *ironic_pxe_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/ironic/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - ironic:/var/lib/ironic/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml new file mode 100644 index 00000000..358277a5 --- /dev/null +++ b/docker/services/keystone.yaml @@ -0,0 +1,160 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Keystone service + 
+parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerKeystoneImage: + description: image + default: 'centos-binary-keystone:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + AdminPassword: + description: The password for the keystone admin account, used for monitoring, querying neutron etc. + type: string + hidden: true + +resources: + + KeystoneBase: + type: ../../puppet/services/keystone.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Keystone API role. + value: + service_name: {get_attr: [KeystoneBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [KeystoneBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + list_join: + - "\n" + - - "['Keystone_user', 'Keystone_endpoint', 'Keystone_domain', 'Keystone_tenant', 'Keystone_user_role', 'Keystone_role', 'Keystone_service'].each |String $val| { noop_resource($val) }" + - {get_attr: [KeystoneBase, role_data, step_config]} + service_config_settings: {get_attr: [KeystoneBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &keystone_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ] + puppet_config: + config_volume: keystone + puppet_tags: keystone_config + step_config: *step_config + config_image: *keystone_image + kolla_config: + /var/lib/kolla/config_files/keystone.json: + command: /usr/sbin/httpd -DFOREGROUND + config_files: + - dest: /etc/keystone/keystone.conf + owner: keystone + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/keystone/keystone.conf + - dest: /etc/keystone/credential-keys/0 + owner: keystone + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/0 + - dest: /etc/keystone/credential-keys/1 + owner: keystone + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/1 + - dest: /etc/httpd/conf.d/10-keystone_wsgi_admin.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_admin.conf + - dest: /etc/httpd/conf.d/10-keystone_wsgi_main.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_main.conf + - dest: /etc/httpd/conf/httpd.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf + - dest: /etc/httpd/conf/ports.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf + - dest: /var/www/cgi-bin/keystone/keystone-admin + owner: keystone + perm: '0644' + source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-admin + - dest: /var/www/cgi-bin/keystone/keystone-public + owner: keystone + perm: '0644' + source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-public + docker_config: + step_3: + keystone-init-log: + start_order: 0 + image: *keystone_image + user: root + 
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/keystone && chown keystone:keystone /var/log/keystone'] + volumes: + - logs:/var/log + keystone_db_sync: + start_order: 1 + image: *keystone_image + net: host + privileged: false + detach: false + volumes: &keystone_volumes + - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/keystone/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/keystone/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - logs:/var/log + environment: + - KOLLA_BOOTSTRAP=True + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + keystone: + start_order: 1 + image: *keystone_image + net: host + privileged: false + restart: always + volumes: *keystone_volumes + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + keystone_bootstrap: + start_order: 2 + action: exec + command: + [ 'keystone', 'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ] + docker_puppet_tasks: + # Keystone endpoint creation occurs only on single node + step_3: + config_volume: 'keystone_init_tasks' + puppet_tags: 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain' + step_config: 'include ::tripleo::profile::base::keystone' + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ] + upgrade_tasks: + - name: Stop and disable keystone service (running under httpd) + tags: step2 + service: name=httpd state=stopped enabled=no diff --git a/docker/services/memcached.yaml b/docker/services/memcached.yaml new file mode 100644 index 00000000..9467567f --- /dev/null +++ b/docker/services/memcached.yaml @@ -0,0 +1,76 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Memcached services + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMemcachedImage: + description: image + default: 'centos-binary-memcached:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + MemcachedBase: + type: ../../puppet/services/memcached.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Memcached API role. 
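None of these docker/services/*.yaml templates take effect until the corresponding OS::TripleO::Services::* entries are pointed at them in the resource registry; TripleO ships an environment file for that purpose. The snippet below is an illustrative subset with assumed relative paths, not the full mapping:

resource_registry:
  OS::TripleO::Services::Keystone: ../docker/services/keystone.yaml
  OS::TripleO::Services::GlanceApi: ../docker/services/glance-api.yaml
  OS::TripleO::Services::HeatEngine: ../docker/services/heat-engine.yaml
  OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml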
+ value: + service_name: {get_attr: [MemcachedBase, role_data, service_name]} + config_settings: {get_attr: [MemcachedBase, role_data, config_settings]} + step_config: &step_config + get_attr: [MemcachedBase, role_data, step_config] + service_config_settings: {get_attr: [MemcachedBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &memcached_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ] + puppet_config: + config_volume: 'memcached' + puppet_tags: 'file' + step_config: *step_config + config_image: *memcached_image + kolla_config: {} + docker_config: + step_1: + memcached: + image: *memcached_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS'] + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable memcached service + tags: step2 + service: name=memcached state=stopped enabled=no diff --git a/docker/services/mistral-api.yaml b/docker/services/mistral-api.yaml new file mode 100644 index 00000000..7680bc62 --- /dev/null +++ b/docker/services/mistral-api.yaml @@ -0,0 +1,122 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Mistral API service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMistralApiImage: + description: image + default: 'centos-binary-mistral-api:latest' + type: string + DockerMistralConfigImage: + description: image + default: 'centos-binary-mistral-api:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + MistralApiBase: + type: ../../puppet/services/mistral-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Mistral API role. 
+ value: + service_name: {get_attr: [MistralApiBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [MistralApiBase, role_data, config_settings] + step_config: &step_config + get_attr: [MistralApiBase, role_data, step_config] + service_config_settings: {get_attr: [MistralApiBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &mistral_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ] + puppet_config: + config_volume: mistral + puppet_tags: mistral_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/mistral_api.json: + command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api + config_files: + - dest: /etc/mistral/mistral.conf + owner: mistral + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf + docker_config: + step_3: + mistral_db_sync: + start_order: 1 + image: *mistral_image + net: host + privileged: false + detach: false + volumes: + - /var/lib/config-data/mistral/etc/:/etc/:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'upgrade', 'head'] + mistral_db_populate: + start_order: 2 + image: *mistral_image + net: host + privileged: false + detach: false + volumes: + - /var/lib/config-data/mistral/etc/:/etc/:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + # NOTE: dprince this requires that we install openstack-tripleo-common into + # the Mistral API image so that we get tripleo* actions + command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'populate'] + step_4: + mistral_api: + start_order: 15 + image: *mistral_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/mistral_api.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable mistral_api service + tags: step2 + service: name=openstack-mistral-api state=stopped enabled=no diff --git a/docker/services/mistral-engine.yaml b/docker/services/mistral-engine.yaml new file mode 100644 index 00000000..d61ab1c2 --- /dev/null +++ b/docker/services/mistral-engine.yaml @@ -0,0 +1,95 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Mistral Engine service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMistralEngineImage: + description: image + default: 'centos-binary-mistral-engine:latest' + type: string + DockerMistralConfigImage: + description: image + default: 'centos-binary-mistral-api:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. 
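The docker_config steps follow a fixed deployment ordering that is visible across these templates: step_1 carries infrastructure containers such as memcached, step_3 runs the one-shot database sync/populate containers (detach: false, so the step waits for them to exit), step_4 starts the long-running API and agent containers, and step_5 is used for services like the Ironic-backed nova-compute. Within a step, start_order sequences the containers, as with the Mistral db sync and populate containers above (1 and 2 in step_3) and the API (15 in step_4). A skeletal sketch of that layout; the names and image are placeholders:

docker_config:
  step_3:
    example_db_sync:
      image: tripleoupstream/centos-binary-example:latest
      net: host
      detach: false
      command: ['example-manage', 'db_sync']
  step_4:
    example_api:
      start_order: 10
      image: tripleoupstream/centos-binary-example:latest
      net: host
      restart: always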
+ type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + MistralBase: + type: ../../puppet/services/mistral-engine.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Mistral Engine role. + value: + service_name: {get_attr: [MistralBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [MistralBase, role_data, config_settings] + step_config: &step_config + get_attr: [MistralBase, role_data, step_config] + service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &mistral_engine_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ] + puppet_config: + config_volume: mistral + puppet_tags: mistral_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/mistral_engine.json: + command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/engine.log --server=engine + config_files: + - dest: /etc/mistral/mistral.conf + owner: mistral + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf + docker_config: + step_4: + mistral_engine: + image: *mistral_engine_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/mistral_engine.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable mistral_engine service + tags: step2 + service: name=openstack-mistral-engine state=stopped enabled=no + diff --git a/docker/services/mistral-executor.yaml b/docker/services/mistral-executor.yaml new file mode 100644 index 00000000..42286426 --- /dev/null +++ b/docker/services/mistral-executor.yaml @@ -0,0 +1,98 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Mistral Executor service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMistralExecutorImage: + description: image + default: 'centos-binary-mistral-executor:latest' + type: string + DockerMistralConfigImage: + description: image + default: 'centos-binary-mistral-api:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + MistralBase: + type: ../../puppet/services/mistral-executor.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Mistral Executor role. 
+ value: + service_name: {get_attr: [MistralBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [MistralBase, role_data, config_settings] + step_config: &step_config + get_attr: [MistralBase, role_data, step_config] + service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &mistral_executor_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ] + puppet_config: + config_volume: mistral + puppet_tags: mistral_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/mistral_executor.json: + command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/executor.log --server=executor + config_files: + - dest: /etc/mistral/mistral.conf + owner: mistral + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf + docker_config: + step_4: + mistral_executor: + image: *mistral_executor_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/mistral_executor.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + # FIXME: this is required in order for Nova cells + # initialization workflows on the Undercloud. Need to + # exclude this on the overcloud for security reasons. + - /var/lib/config-data/nova/etc/nova:/etc/nova:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable mistral_executor service + tags: step2 + service: name=openstack-mistral-executor state=stopped enabled=no diff --git a/docker/services/neutron-api.yaml b/docker/services/neutron-api.yaml new file mode 100644 index 00000000..71389046 --- /dev/null +++ b/docker/services/neutron-api.yaml @@ -0,0 +1,112 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Neutron API service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNeutronApiImage: + description: image + default: 'centos-binary-neutron-server:latest' + type: string + # we configure all neutron services in the same neutron + DockerNeutronConfigImage: + description: image + default: 'centos-binary-neutron-openvswitch-agent:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + NeutronBase: + type: ../../puppet/services/neutron-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Neutron API role. 
+ value: + service_name: {get_attr: [NeutronBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + step_config: &step_config + get_attr: [NeutronBase, role_data, step_config] + service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &neutron_api_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ] + puppet_config: + config_volume: neutron + puppet_tags: neutron_config,neutron_api_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/neutron_api.json: + command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini + config_files: + - dest: /etc/neutron/neutron.conf + owner: neutron + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf + - dest: /etc/neutron/plugin.ini + owner: neutron + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini + docker_config: + step_3: + neutron_db_sync: + image: *neutron_api_image + net: host + privileged: false + detach: false + # FIXME: we should make config file permissions right + # and run as neutron user + user: root + volumes: + - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro + - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + command: ['neutron-db-manage', 'upgrade', 'heads'] + step_4: + neutron_api: + image: *neutron_api_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable neutron_api service + tags: step2 + service: name=neutron-server state=stopped enabled=no diff --git a/docker/services/neutron-dhcp.yaml b/docker/services/neutron-dhcp.yaml new file mode 100644 index 00000000..ccde63f2 --- /dev/null +++ b/docker/services/neutron-dhcp.yaml @@ -0,0 +1,100 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Neutron DHCP service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNeutronApiImage: + description: image + default: 'centos-binary-neutron-dhcp-agent:latest' + type: string + # we configure all neutron services in the same neutron + DockerNeutronConfigImage: + description: image + default: 'centos-binary-neutron-openvswitch-agent:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. 
+ type: json + DefaultPasswords: + default: {} + type: json + +resources: + + NeutronBase: + type: ../../puppet/services/neutron-dhcp.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Neutron DHCP role. + value: + service_name: {get_attr: [NeutronBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + step_config: &step_config + get_attr: [NeutronBase, role_data, step_config] + service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &neutron_dhcp_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ] + puppet_config: + config_volume: neutron + puppet_tags: neutron_config,neutron_dhcp_agent_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/neutron_dhcp.json: + command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log + config_files: + - dest: /etc/neutron/neutron.conf + owner: neutron + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf + - dest: /etc/neutron/dhcp_agent.ini + owner: neutron + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/neutron/dhcp_agent.ini + docker_config: + step_4: + neutron_dhcp: + image: *neutron_dhcp_image + net: host + pid: host + privileged: true + restart: always + volumes: + - /var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro + - /etc/localtime:/etc/localtime:ro + - /etc/hosts:/etc/hosts:ro + - /lib/modules:/lib/modules:ro + - /run/:/run + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable neutron_dhcp service + tags: step2 + service: name=neutron-dhcp-agent state=stopped enabled=no diff --git a/docker/services/neutron-l3.yaml b/docker/services/neutron-l3.yaml new file mode 100644 index 00000000..d9a78288 --- /dev/null +++ b/docker/services/neutron-l3.yaml @@ -0,0 +1,92 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Neutron L3 agent + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNeutronL3AgentImage: + description: image + default: 'centos-binary-neutron-l3-agent:latest' + type: string + # we configure all neutron services in the same neutron + DockerNeutronConfigImage: + description: image + default: 'centos-binary-neutron-openvswitch-agent:latest' + type: string + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + +resources: + + NeutronL3Base: + type: ../../puppet/services/neutron-l3.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for Neutron L3 agent + value: + service_name: {get_attr: [NeutronL3Base, role_data, service_name]} + config_settings: {get_attr: [NeutronL3Base, role_data, config_settings]} + step_config: &step_config + get_attr: [NeutronL3Base, role_data, step_config] + docker_image: &neutron_l3_agent_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronL3AgentImage} ] + + puppet_config: + puppet_tags: neutron_config,neutron_l3_agent_config + config_volume: neutron + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/neutron-l3-agent.json: + command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini + config_files: + - dest: /etc/neutron/neutron.conf + owner: neutron + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf + - dest: /etc/neutron/l3_agent.ini + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/neutron/l3_agent.ini + docker_config: + step_4: + neutronl3agent: + image: *neutron_l3_agent_image + net: host + pid: host + privileged: true + restart: always + volumes: + - /var/lib/kolla/config_files/neutron-l3-agent.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/neutron:/var/lib/kolla/config_files/src:ro + - /etc/localtime:/etc/localtime:ro + - /lib/modules:/lib/modules:ro + - /run:/run + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS diff --git a/docker/services/neutron-ovs-agent.yaml b/docker/services/neutron-ovs-agent.yaml index ab99da5e..6dcf91d9 100644 --- a/docker/services/neutron-ovs-agent.yaml +++ b/docker/services/neutron-ovs-agent.yaml @@ -42,14 +42,17 @@ outputs: value: service_name: {get_attr: [NeutronOvsAgentBase, role_data, service_name]} config_settings: {get_attr: [NeutronOvsAgentBase, role_data, config_settings]} - step_config: {get_attr: [NeutronOvsAgentBase, role_data, step_config]} + step_config: &step_config + get_attr: [NeutronOvsAgentBase, role_data, step_config] docker_image: &neutron_ovs_agent_image list_join: - '/' - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ] - puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2 - config_volume: neutron - config_image: *neutron_ovs_agent_image + puppet_config: + config_volume: neutron + puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2 + step_config: *step_config + config_image: *neutron_ovs_agent_image kolla_config: /var/lib/kolla/config_files/neutron-openvswitch-agent.json: command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini @@ -82,3 +85,7 @@ outputs: - /run:/run environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable neutron_ovs_agent service + tags: step2 + service: name=neutron-openvswitch-agent state=stopped enabled=no diff --git a/docker/services/neutron-plugin-ml2.yaml 
b/docker/services/neutron-plugin-ml2.yaml new file mode 100644 index 00000000..5d1a348a --- /dev/null +++ b/docker/services/neutron-plugin-ml2.yaml @@ -0,0 +1,61 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Neutron ML2 Plugin configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNeutronConfigImage: + description: image + default: 'centos-binary-neutron-openvswitch-agent:latest' + type: string + DefaultPasswords: + default: {} + type: json + +resources: + + NeutronBase: + type: ../../puppet/services/neutron-plugin-ml2.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Neutron ML2 Plugin role. + value: + service_name: {get_attr: [NeutronBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + step_config: &step_config + get_attr: [NeutronBase, role_data, step_config] + service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &docker_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] + puppet_config: + config_volume: 'neutron' + puppet_tags: '' + step_config: *step_config + config_image: *docker_image + kolla_config: {} + docker_config: {} diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml new file mode 100644 index 00000000..8a892325 --- /dev/null +++ b/docker/services/nova-api.yaml @@ -0,0 +1,151 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Nova API service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNovaApiImage: + description: image + default: 'centos-binary-nova-api:latest' + type: string + DockerNovaBaseImage: + description: image + default: 'centos-binary-nova-base:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + NovaApiBase: + type: ../../puppet/services/nova-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Nova API role. 
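The puppet_config map used by every containerized service above groups what used to be separate puppet_tags, config_volume and config_image keys. It drives a one-shot configuration container: puppet runs inside config_image with the manifest from step_config, restricted to the resource types listed in puppet_tags, and the rendered files land under /var/lib/config-data/<config_volume>/ on the host, which is exactly the tree the service containers bind-mount read-only (e.g. /var/lib/config-data/neutron/). neutron-plugin-ml2 above is a configuration-only service: it supplies puppet_config but empty kolla_config and docker_config, so it contributes files to the neutron config volume without starting a container. A generic sketch of the map, with illustrative values:

puppet_config:
  config_volume: neutron
  puppet_tags: neutron_config,neutron_plugin_ml2
  step_config: 'include ::tripleo::profile::base::neutron::plugins::ml2'
  config_image: tripleoupstream/centos-binary-neutron-openvswitch-agent:latest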
+ value: + service_name: {get_attr: [NovaApiBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [NovaApiBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + get_attr: [NovaApiBase, role_data, step_config] + service_config_settings: {get_attr: [NovaApiBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &nova_api_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ] + puppet_config: + config_volume: nova + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] + kolla_config: + /var/lib/kolla/config_files/nova_api.json: + command: /usr/bin/nova-api + config_files: + - dest: /etc/nova/nova.conf + owner: nova + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/nova/nova.conf + docker_config: + step_3: + nova_api_db_sync: + start_order: 1 + image: *nova_api_image + net: host + detach: false + volumes: &nova_api_volumes + - /var/lib/config-data/nova/etc/:/etc/:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + command: ['/usr/bin/nova-manage', 'api_db', 'sync'] + # FIXME: we probably want to wait on the 'cell_v2 update' in order for this + # to be capable of upgrading a baremetal setup. This is to ensure the name + # of the cell is 'default' + nova_api_map_cell0: + start_order: 2 + image: *nova_api_image + net: host + detach: false + volumes: *nova_api_volumes + command: + - '/usr/bin/nova-manage' + - 'cell_v2' + - 'map_cell0' + nova_api_create_default_cell: + start_order: 3 + image: *nova_api_image + net: host + detach: false + volumes: *nova_api_volumes + # NOTE: allowing the exit code 2 is a dirty way of making + # this idempotent (if the resource already exists a conflict + # is raised) + exit_codes: [0,2] + command: + - '/usr/bin/nova-manage' + - 'cell_v2' + - 'create_cell' + - '--name="default"' + nova_db_sync: + start_order: 4 + image: *nova_api_image + net: host + detach: false + volumes: *nova_api_volumes + command: ['/usr/bin/nova-manage', 'db', 'sync'] + step_4: + nova_api: + start_order: 2 + image: *nova_api_image + net: host + user: nova + privileged: true + restart: always + volumes: + - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + nova_api_discover_hosts: + start_order: 3 + image: *nova_api_image + net: host + detach: false + volumes: *nova_api_volumes + command: + - '/usr/bin/nova-manage' + - 'cell_v2' + - 'discover_hosts' + upgrade_tasks: + - name: Stop and disable nova_api service + tags: step2 + service: name=openstack-nova-api state=stopped enabled=no diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml index 8eebc397..9f4e353a 100644 --- a/docker/services/nova-compute.yaml +++ b/docker/services/nova-compute.yaml @@ -43,14 +43,17 @@ outputs: value: service_name: {get_attr: [NovaComputeBase, role_data, service_name]} config_settings: {get_attr: [NovaComputeBase, role_data, config_settings]} - step_config: {get_attr: [NovaComputeBase, role_data, step_config]} - puppet_tags: nova_config,nova_paste_api_ini + step_config: &step_config + get_attr: [NovaComputeBase, role_data, step_config] docker_image: &nova_compute_image list_join: - '/' - [ {get_param: 
DockerNamespace}, {get_param: DockerNovaComputeImage} ] - config_volume: nova_libvirt - config_image: *nova_compute_image + puppet_config: + config_volume: nova_libvirt + puppet_tags: nova_config,nova_paste_api_ini + step_config: *step_config + config_image: *nova_compute_image kolla_config: /var/lib/kolla/config_files/nova-compute.json: command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf @@ -64,6 +67,7 @@ outputs: perm: '0600' source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf docker_config: + # FIXME: run discover hosts here step_4: novacompute: image: *nova_compute_image diff --git a/docker/services/nova-conductor.yaml b/docker/services/nova-conductor.yaml new file mode 100644 index 00000000..8bc81e32 --- /dev/null +++ b/docker/services/nova-conductor.yaml @@ -0,0 +1,92 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Nova Conductor service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNovaConductorImage: + description: image + default: 'centos-binary-nova-conductor:latest' + type: string + DockerNovaBaseImage: + description: image + default: 'centos-binary-nova-base:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + NovaConductorBase: + type: ../../puppet/services/nova-conductor.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Nova Conductor service. 
+ value: + service_name: {get_attr: [NovaConductorBase, role_data, service_name]} + config_settings: {get_attr: [NovaConductorBase, role_data, config_settings]} + step_config: &step_config + get_attr: [NovaConductorBase, role_data, step_config] + service_config_settings: {get_attr: [NovaConductorBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &nova_conductor_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ] + puppet_config: + config_volume: nova + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] + kolla_config: + /var/lib/kolla/config_files/nova_conductor.json: + command: /usr/bin/nova-conductor + config_files: + - dest: /etc/nova/nova.conf + owner: nova + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/nova/nova.conf + docker_config: + step_4: + nova_conductor: + image: *nova_conductor_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable nova_conductor service + tags: step2 + service: name=openstack-nova-conductor state=stopped enabled=no diff --git a/docker/services/nova-ironic.yaml b/docker/services/nova-ironic.yaml new file mode 100644 index 00000000..5b46010f --- /dev/null +++ b/docker/services/nova-ironic.yaml @@ -0,0 +1,91 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Nova Ironic Compute service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNovaComputeImage: + description: image + default: 'centos-binary-nova-compute-ironic:latest' + type: string + DockerNovaBaseImage: + description: image + default: 'centos-binary-nova-base:latest' + type: string + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + + NovaIronicBase: + type: ../../puppet/services/nova-ironic.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Nova Compute service. 
+ value: + service_name: {get_attr: [NovaIronicBase, role_data, service_name]} + config_settings: {get_attr: [NovaIronicBase, role_data, config_settings]} + step_config: &step_config + get_attr: [NovaIronicBase, role_data, step_config] + docker_image: &nova_ironic_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] + puppet_config: + config_volume: nova + puppet_tags: nova_config,nova_paste_api_ini + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] + kolla_config: + /var/lib/kolla/config_files/nova_ironic.json: + command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf + config_files: + - dest: /etc/nova/nova.conf + owner: nova + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/nova/nova.conf + - dest: /etc/nova/rootwrap.conf + owner: nova + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf + docker_config: + step_5: + novacompute: + image: *nova_ironic_image + net: host + privileged: true + user: root + restart: always + volumes: + - /var/lib/kolla/config_files/nova_ironic.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/nova:/var/lib/kolla/config_files/src:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - /dev:/dev + - /etc/iscsi:/etc/iscsi + - nova_compute:/var/lib/nova/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml index d6e7dc76..ed54f3d9 100644 --- a/docker/services/nova-libvirt.yaml +++ b/docker/services/nova-libvirt.yaml @@ -48,17 +48,20 @@ outputs: value: service_name: {get_attr: [NovaLibvirtBase, role_data, service_name]} config_settings: {get_attr: [NovaLibvirtBase, role_data, config_settings]} - step_config: {get_attr: [NovaLibvirtBase, role_data, step_config]} + step_config: &step_config + get_attr: [NovaLibvirtBase, role_data, step_config] docker_image: &libvirt_image list_join: - '/' - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ] - puppet_tags: nova_config - config_volume: nova_libvirt - config_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] + puppet_config: + config_volume: nova_libvirt + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] kolla_config: /var/lib/kolla/config_files/nova-libvirt.json: command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf diff --git a/docker/services/nova-metadata.yaml b/docker/services/nova-metadata.yaml new file mode 100644 index 00000000..90c4c1c9 --- /dev/null +++ b/docker/services/nova-metadata.yaml @@ -0,0 +1,51 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Nova Metadata service + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. 
+ type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + NovaMetadataBase: + type: ../../puppet/services/nova-metadata.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Nova Metadata service. + value: + service_name: {get_attr: [NovaMetadataBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [NovaMetadataBase, role_data, config_settings] + step_config: &step_config + get_attr: [NovaMetadataBase, role_data, step_config] + service_config_settings: {get_attr: [NovaMetadataBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: '' + puppet_config: + config_volume: '' + puppet_tags: '' + step_config: *step_config + config_image: '' + kolla_config: {} + docker_config: {} diff --git a/docker/services/nova-placement.yaml b/docker/services/nova-placement.yaml new file mode 100644 index 00000000..8da48d37 --- /dev/null +++ b/docker/services/nova-placement.yaml @@ -0,0 +1,114 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Nova Placement API service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNovaPlacementImage: + description: image + default: 'centos-binary-nova-placement-api' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + NovaPlacementBase: + type: ../../puppet/services/nova-placement.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Nova Placement API role. 
+ value: + service_name: {get_attr: [NovaPlacementBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [NovaPlacementBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + get_attr: [NovaPlacementBase, role_data, step_config] + service_config_settings: {get_attr: [NovaPlacementBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &nova_placement_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ] + puppet_config: + config_volume: nova_placement + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ] + kolla_config: + /var/lib/kolla/config_files/nova_placement.json: + command: /usr/sbin/httpd -DFOREGROUND + config_files: + - dest: /etc/nova/nova.conf + owner: nova + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/nova/nova.conf + - dest: /etc/httpd/conf.d/10-placement_wsgi.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-placement_wsgi.conf + # puppet generates a stubbed out version of the stock one so we + # copy it in to overwrite the existing one + - dest: /etc/httpd/conf.d/00-nova-placement-api.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/00-nova-placement-api.conf + - dest: /etc/httpd/conf/httpd.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf + - dest: /etc/httpd/conf/ports.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf + - dest: /var/www/cgi-bin/nova/nova-placement-api + owner: nova + perm: '0644' + source: /var/lib/kolla/config_files/src/var/www/cgi-bin/nova/nova-placement-api + docker_config: + # start this early so it is up before computes start reporting + step_3: + nova_placement: + start_order: 1 + image: *nova_placement_image + net: host + user: root + restart: always + volumes: + - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/nova_placement/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/nova_placement/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable nova_placement service (running under httpd) + tags: step2 + service: name=httpd state=stopped enabled=no diff --git a/docker/services/nova-scheduler.yaml b/docker/services/nova-scheduler.yaml new file mode 100644 index 00000000..c24d5b26 --- /dev/null +++ b/docker/services/nova-scheduler.yaml @@ -0,0 +1,91 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Nova Scheduler service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNovaSchedulerImage: + description: image + default: 'centos-binary-nova-scheduler:latest' + type: string + DockerNovaBaseImage: + description: image + default: 'centos-binary-nova-base:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. 
Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + NovaSchedulerBase: + type: ../../puppet/services/nova-scheduler.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Nova Scheduler service. + value: + service_name: {get_attr: [NovaSchedulerBase, role_data, service_name]} + config_settings: {get_attr: [NovaSchedulerBase, role_data, config_settings]} + step_config: &step_config + get_attr: [NovaSchedulerBase, role_data, step_config] + service_config_settings: {get_attr: [NovaSchedulerBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &nova_scheduler_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaSchedulerImage} ] + puppet_config: + config_volume: nova + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] + kolla_config: + /var/lib/kolla/config_files/nova_scheduler.json: + command: /usr/bin/nova-scheduler + config_files: + - dest: /etc/nova/nova.conf + owner: nova + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/nova/nova.conf + docker_config: + step_4: + nova_scheduler: + image: *nova_scheduler_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable nova_scheduler service + tags: step2 + service: name=openstack-nova-scheduler state=stopped enabled=no diff --git a/docker/services/rabbitmq.yaml b/docker/services/rabbitmq.yaml new file mode 100644 index 00000000..ed440718 --- /dev/null +++ b/docker/services/rabbitmq.yaml @@ -0,0 +1,126 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Rabbitmq service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerRabbitmqImage: + description: image + default: 'centos-binary-rabbitmq:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + RabbitCookie: + type: string + default: '' + hidden: true + +resources: + + RabbitmqBase: + type: ../../puppet/services/rabbitmq.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Rabbitmq API role. 
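Every image reference in these templates is composed by list_join'ing DockerNamespace and the per-service image parameter with '/'. Pointing a deployment at a local registry is therefore a pair of parameter overrides; the registry address below is illustrative only:

  parameter_defaults:
    DockerNamespace: 192.168.24.1:8787/tripleoupstream            # illustrative local registry
    DockerNovaSchedulerImage: centos-binary-nova-scheduler:latest
    # resulting image: 192.168.24.1:8787/tripleoupstream/centos-binary-nova-scheduler:latest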
+ value: + service_name: {get_attr: [RabbitmqBase, role_data, service_name]} + config_settings: {get_attr: [RabbitmqBase, role_data, config_settings]} + step_config: &step_config + get_attr: [RabbitmqBase, role_data, step_config] + service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &rabbitmq_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ] + puppet_config: + config_volume: rabbitmq + puppet_tags: file + step_config: *step_config + config_image: *rabbitmq_image + kolla_config: + /var/lib/kolla/config_files/rabbitmq.json: + command: /usr/lib/rabbitmq/bin/rabbitmq-server + config_files: + - dest: /etc/rabbitmq/rabbitmq.config + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq.config + - dest: /etc/rabbitmq/enabled_plugins + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/rabbitmq/enabled_plugins + - dest: /etc/rabbitmq/rabbitmq-env.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq-env.conf + - dest: /etc/rabbitmq/rabbitmqadmin.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmqadmin.conf + docker_config: + step_1: + rabbitmq_bootstrap: + start_order: 0 + image: *rabbitmq_image + net: host + privileged: false + volumes: + - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - rabbitmq:/var/lib/rabbitmq/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + - KOLLA_BOOTSTRAP=True + - + list_join: + - '=' + - - 'RABBITMQ_CLUSTER_COOKIE' + - + yaql: + expression: $.data.passwords.where($ != '').first() + data: + passwords: + - {get_param: RabbitCookie} + - {get_param: [DefaultPasswords, rabbit_cookie]} + rabbitmq: + start_order: 1 + image: *rabbitmq_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - rabbitmq:/var/lib/rabbitmq/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable rabbitmq service + tags: step2 + service: name=rabbitmq-server state=stopped enabled=no diff --git a/docker/services/services.yaml b/docker/services/services.yaml index cd9f4cb5..3f094ff8 100644 --- a/docker/services/services.yaml +++ b/docker/services/services.yaml @@ -68,12 +68,20 @@ outputs: step_config: {get_attr: [ServiceChain, role_data, step_config]} docker_image: {get_attr: [ServiceChain, role_data, docker_image]} - puppet_tags: {get_attr: [ServiceChain, role_data, puppet_tags]} - config_volume: {get_attr: [ServiceChain, role_data, config_volume]} - config_image: {get_attr: [ServiceChain, role_data, config_image]} + puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]} kolla_config: map_merge: {get_attr: [ServiceChain, role_data, kolla_config]} docker_config: {get_attr: [ServiceChain, role_data, docker_config]} docker_puppet_tasks: {get_attr: [ServiceChain, role_data, docker_puppet_tasks]} + upgrade_tasks: + yaql: + # Note we use distinct() here to filter any identical tasks, e.g yum update for all services + expression: $.data.where($ != 
null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct() + data: {get_attr: [ServiceChain, role_data]} + upgrade_batch_tasks: + yaql: + # Note we use distinct() here to filter any identical tasks, e.g yum update for all services + expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct() + data: {get_attr: [ServiceChain, role_data]} diff --git a/docker/services/swift-proxy.yaml b/docker/services/swift-proxy.yaml new file mode 100644 index 00000000..66118412 --- /dev/null +++ b/docker/services/swift-proxy.yaml @@ -0,0 +1,83 @@ +heat_template_version: ocata + +description: > + OpenStack containerized swift proxy service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerSwiftProxyImage: + description: image + default: 'centos-binary-swift-proxy-server:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + SwiftProxyBase: + type: ../../puppet/services/swift-proxy.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the swift proxy. + value: + service_name: {get_attr: [SwiftProxyBase, role_data, service_name]} + config_settings: {get_attr: [SwiftProxyBase, role_data, config_settings]} + step_config: &step_config + get_attr: [SwiftProxyBase, role_data, step_config] + service_config_settings: {get_attr: [SwiftProxyBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &swift_proxy_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] + puppet_config: + config_volume: swift + puppet_tags: swift_proxy_config + step_config: *step_config + config_image: *swift_proxy_image + kolla_config: + /var/lib/kolla/config_files/swift_proxy.json: + command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf + docker_config: + step_4: + swift_proxy: + image: *swift_proxy_image + net: host + user: swift + restart: always + # I'm mounting /etc/swift as rw. Are the rings written to at all during runtime? 
+ volumes: + - /var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable swift_proxy service + tags: step2 + service: name=openstack-swift-proxy state=stopped enabled=no diff --git a/docker/services/swift-ringbuilder.yaml b/docker/services/swift-ringbuilder.yaml new file mode 100644 index 00000000..027a6956 --- /dev/null +++ b/docker/services/swift-ringbuilder.yaml @@ -0,0 +1,83 @@ +heat_template_version: ocata + +description: > + OpenStack Swift Ringbuilder + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerSwiftProxyImage: + description: image + default: 'centos-binary-swift-proxy-server:latest' + type: string + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + SwiftMinPartHours: + type: number + default: 1 + description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance. + SwiftPartPower: + default: 10 + description: Partition Power to use when building Swift rings + type: number + SwiftRingBuild: + default: true + description: Whether to manage Swift rings or not + type: boolean + SwiftReplicas: + type: number + default: 3 + description: How many replicas to use in the swift rings. + SwiftRawDisks: + default: {} + description: 'A hash of additional raw devices to use as Swift backend (eg. {sdb: {}})' + type: json + SwiftUseLocalDir: + default: true + description: 'Use a local directory for Swift storage services when building rings' + type: boolean + +resources: + + SwiftRingbuilderBase: + type: ../../puppet/services/swift-ringbuilder.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for Swift Ringbuilder configuration in containers. 
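The ringbuilder parameters above are ordinary Heat parameters, so ring layout is driven from parameter_defaults. As an illustration (device names and values are examples, not recommendations), building rings over two raw disks per node instead of the local directory could look like:

  parameter_defaults:
    SwiftReplicas: 3
    SwiftPartPower: 10
    SwiftMinPartHours: 1
    SwiftUseLocalDir: false
    SwiftRawDisks:
      sdb: {}
      sdc: {}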
+ value: + service_name: {get_attr: [SwiftRingbuilderBase, role_data, service_name]} + config_settings: {get_attr: [SwiftRingbuilderBase, role_data, config_settings]} + step_config: &step_config + get_attr: [SwiftRingbuilderBase, role_data, step_config] + service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &docker_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] + puppet_config: + config_volume: 'swift' + puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance + step_config: *step_config + config_image: *docker_image + kolla_config: {} + docker_config: {} diff --git a/docker/services/swift-storage.yaml b/docker/services/swift-storage.yaml new file mode 100644 index 00000000..2eb55632 --- /dev/null +++ b/docker/services/swift-storage.yaml @@ -0,0 +1,363 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Swift Storage services. + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerSwiftProxyImage: + description: image + default: 'centos-binary-swift-proxy-server:latest' + type: string + DockerSwiftAccountImage: + description: image + default: 'centos-binary-swift-account:latest' + type: string + DockerSwiftContainerImage: + description: image + default: 'centos-binary-swift-container:latest' + type: string + DockerSwiftObjectImage: + description: image + default: 'centos-binary-swift-object:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + DefaultPasswords: + default: {} + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + +resources: + + SwiftStorageBase: + type: ../../puppet/services/swift-storage.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the swift storage services. 
+ value: + service_name: {get_attr: [SwiftStorageBase, role_data, service_name]} + config_settings: {get_attr: [SwiftStorageBase, role_data, config_settings]} + step_config: &step_config + get_attr: [SwiftStorageBase, role_data, step_config] + service_config_settings: {get_attr: [SwiftStorageBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &swift_proxy_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] + puppet_config: + config_volume: swift + puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config + step_config: *step_config + config_image: *swift_proxy_image + kolla_config: + /var/lib/kolla/config_files/swift_account_auditor.json: + command: /usr/bin/swift-account-auditor /etc/swift/account-server.conf + /var/lib/kolla/config_files/swift_account_reaper.json: + command: /usr/bin/swift-account-reaper /etc/swift/account-server.conf + /var/lib/kolla/config_files/swift_account_replicator.json: + command: /usr/bin/swift-account-replicator /etc/swift/account-server.conf + /var/lib/kolla/config_files/swift_account_server.json: + command: /usr/bin/swift-account-server /etc/swift/account-server.conf + /var/lib/kolla/config_files/swift_container_auditor.json: + command: /usr/bin/swift-container-auditor /etc/swift/container-server.conf + /var/lib/kolla/config_files/swift_container_replicator.json: + command: /usr/bin/swift-container-replicator /etc/swift/container-server.conf + /var/lib/kolla/config_files/swift_container_updater.json: + command: /usr/bin/swift-container-updater /etc/swift/container-server.conf + /var/lib/kolla/config_files/swift_container_server.json: + command: /usr/bin/swift-container-server /etc/swift/container-server.conf + /var/lib/kolla/config_files/swift_object_auditor.json: + command: /usr/bin/swift-object-auditor /etc/swift/object-server.conf + /var/lib/kolla/config_files/swift_object_expirer.json: + command: /usr/bin/swift-object-expirer /etc/swift/object-expirer.conf + /var/lib/kolla/config_files/swift_object_replicator.json: + command: /usr/bin/swift-object-replicator /etc/swift/object-server.conf + /var/lib/kolla/config_files/swift_object_updater.json: + command: /usr/bin/swift-object-updater /etc/swift/object-server.conf + /var/lib/kolla/config_files/swift_object_server.json: + command: /usr/bin/swift-object-server /etc/swift/object-server.conf + docker_config: + step_3: + # The puppet config sets this up but we don't have a way to mount the named + # volume during the configuration stage. We just need to create this + # directory and make sure it's owned by swift. 
+ swift_setup_srv: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ] + user: root + command: ['/bin/bash', '-c', 'mkdir /srv/node && chown swift:swift /srv/node'] + volumes: + - swift-srv:/srv + step_4: + swift_account_auditor: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: &kolla_env + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + swift_account_reaper: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_account_replicator: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_account_server: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_container_auditor: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_container_replicator: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - 
swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_container_updater: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_container_server: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_object_auditor: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_object_expirer: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_object_replicator: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_object_updater: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_object_server: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ] + net: host + user: swift + restart: always + volumes: + - 
/var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + upgrade_tasks: + - name: Stop and disable swift storage services + tags: step2 + service: name={{ item }} state=stopped enabled=no + with_items: + - openstack-swift-account-auditor + - openstack-swift-account-reaper + - openstack-swift-account-replicator + - openstack-swift-account + - openstack-swift-container-auditor + - openstack-swift-container-replicator + - openstack-swift-container-updater + - openstack-swift-container + - openstack-swift-object-auditor + - openstack-swift-object-replicator + - openstack-swift-object-updater + - openstack-swift-object diff --git a/docker/services/zaqar.yaml b/docker/services/zaqar.yaml new file mode 100644 index 00000000..30905ffe --- /dev/null +++ b/docker/services/zaqar.yaml @@ -0,0 +1,107 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Zaqar services + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerZaqarImage: + description: image + default: 'centos-binary-zaqar:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + ZaqarBase: + type: ../../puppet/services/zaqar.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Zaqar API role. 
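Each service contributes its own upgrade_tasks (ansible tasks tagged with the step they run in), and services.yaml flattens them and removes exact duplicates with distinct(). A hypothetical service that stops its daemon and also asks for a host package update would emit something like the following; the generic update task is the kind of entry that collapses to one occurrence when several services declare it identically:

  upgrade_tasks:
    - name: Stop and disable example service        # hypothetical service, for illustration
      tags: step2
      service: name=openstack-example state=stopped enabled=no
    - name: Update host packages                    # identical across services, deduplicated by distinct()
      tags: step3
      yum: name=* state=latest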
+ value: + service_name: {get_attr: [ZaqarBase, role_data, service_name]} + config_settings: {get_attr: [ZaqarBase, role_data, config_settings]} + step_config: &step_config + get_attr: [ZaqarBase, role_data, step_config] + service_config_settings: {get_attr: [ZaqarBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &zaqar_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ] + puppet_config: + config_volume: zaqar + puppet_tags: zaqar_config + step_config: *step_config + config_image: *zaqar_image + kolla_config: + /var/lib/kolla/config_files/zaqar.json: + command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf + config_files: + - dest: /etc/zaqar/zaqar.conf + owner: zaqar + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf + /var/lib/kolla/config_files/zaqar_websocket.json: + command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf --config-file /etc/zaqar/1.conf + config_files: + - dest: /etc/zaqar/zaqar.conf + owner: zaqar + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf + - dest: /etc/zaqar/1.conf + owner: zaqar + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/zaqar/1.conf + docker_config: + step_4: + zaqar: + image: *zaqar_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + zaqar_websocket: + image: *zaqar_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable zaqar service + tags: step2 + service: name=openstack-zaqar.service state=stopped enabled=no + diff --git a/environments/cadf.yaml b/environments/cadf.yaml new file mode 100644 index 00000000..af5c7fdf --- /dev/null +++ b/environments/cadf.yaml @@ -0,0 +1,2 @@ +parameter_defaults: + KeystoneNotificationFormat: cadf diff --git a/environments/docker.yaml b/environments/docker.yaml index 37612b07..3696f908 100644 --- a/environments/docker.yaml +++ b/environments/docker.yaml @@ -1,12 +1,48 @@ resource_registry: - OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml + # This can be used when you don't want to run puppet on the host, + # e.g atomic, but it has been replaced with OS::TripleO::Services::Docker + # OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml + OS::TripleO::Services::Docker: ../puppet/services/docker.yaml #NOTE (dprince) add roles to be docker enabled as we support them OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml OS::TripleO::Services::ComputeNeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml OS::TripleO::Services::NovaCompute: ../docker/services/nova-compute.yaml + OS::TripleO::Services::Keystone: ../docker/services/keystone.yaml + OS::TripleO::Services::GlanceApi: ../docker/services/glance-api.yaml + OS::TripleO::Services::HeatApi: ../docker/services/heat-api.yaml + OS::TripleO::Services::HeatApiCfn: ../docker/services/heat-api-cfn.yaml + 
OS::TripleO::Services::HeatEngine: ../docker/services/heat-engine.yaml + OS::TripleO::Services::NovaApi: ../docker/services/nova-api.yaml + OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml + OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml + OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml + # FIXME: these need to go into a environments/services-docker dir? + OS::TripleO::Services::NovaIronic: ../docker/services/nova-ironic.yaml + OS::TripleO::Services::IronicApi: ../docker/services/ironic-api.yaml + OS::TripleO::Services::IronicConductor: ../docker/services/ironic-conductor.yaml + OS::TripleO::Services::IronicPxe: ../docker/services/ironic-pxe.yaml + OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml + OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml + OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml + OS::TripleO::Services::NeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml + OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml + OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml + OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml + OS::TripleO::Services::MistralApi: ../docker/services/mistral-api.yaml + OS::TripleO::Services::MistralEngine: ../docker/services/mistral-engine.yaml + OS::TripleO::Services::MistralExecutor: ../docker/services/mistral-executor.yaml + OS::TripleO::Services::Zaqar: ../docker/services/zaqar.yaml + OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml + OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml + OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml + OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml + OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml + OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml OS::TripleO::PostDeploySteps: ../docker/post.yaml + OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml + OS::TripleO::Services: ../docker/services/services.yaml parameter_defaults: diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml index 77fa5a49..f59b0414 100644 --- a/environments/hyperconverged-ceph.yaml +++ b/environments/hyperconverged-ceph.yaml @@ -11,6 +11,7 @@ parameter_defaults: - OS::TripleO::Services::Timezone - OS::TripleO::Services::Ntp - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd - OS::TripleO::Services::NovaCompute - OS::TripleO::Services::NovaLibvirt - OS::TripleO::Services::Kernel @@ -25,4 +26,8 @@ parameter_defaults: - OS::TripleO::Services::OpenDaylightOvs - OS::TripleO::Services::SensuClient - OS::TripleO::Services::FluentdClient + - OS::TripleO::Services::AuditD + - OS::TripleO::Services::Collectd - OS::TripleO::Services::CephOSD + - OS::TripleO::Services::Vpp + - OS::TripleO::Services::MySQLClient diff --git a/environments/major-upgrade-composable-steps-docker.yaml b/environments/major-upgrade-composable-steps-docker.yaml new file mode 100644 index 00000000..5fa2f2d8 --- /dev/null +++ b/environments/major-upgrade-composable-steps-docker.yaml @@ -0,0 +1,10 @@ +resource_registry: + # FIXME(shardy) do we need to break major_upgrade_steps.yaml apart to + # enable docker specific logic, or is just overridding PostUpgradeSteps + # enough (as we want to share the ansible tasks steps etc) + 
OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml +parameter_defaults: + UpgradeLevelNovaCompute: auto + UpgradeInitCommonCommand: | + #!/bin/bash + # Ocata to Pike, put any needed host-level workarounds here diff --git a/environments/major-upgrade-composable-steps.yaml b/environments/major-upgrade-composable-steps.yaml index 9e3cddba..9ecc2251 100644 --- a/environments/major-upgrade-composable-steps.yaml +++ b/environments/major-upgrade-composable-steps.yaml @@ -7,9 +7,9 @@ parameter_defaults: # Newton to Ocata, we need to remove old hiera hook data and # install ansible heat agents and ansible-pacemaker set -eu + yum install -y openstack-heat-agents yum install -y python-heat-agent-* yum install -y ansible-pacemaker rm -f /usr/libexec/os-apply-config/templates/etc/puppet/hiera.yaml rm -f /usr/libexec/os-refresh-config/configure.d/40-hiera-datafiles rm -f /etc/puppet/hieradata/*.yaml - diff --git a/environments/major-upgrade-converge-docker.yaml b/environments/major-upgrade-converge-docker.yaml new file mode 100644 index 00000000..463206f1 --- /dev/null +++ b/environments/major-upgrade-converge-docker.yaml @@ -0,0 +1,7 @@ +# Use this to reset any mappings only used for upgrades after the +# update of all nodes is completed +resource_registry: + OS::TripleO::PostDeploySteps: ../docker/post.yaml +parameter_defaults: + UpgradeLevelNovaCompute: '' + UpgradeInitCommonCommand: '' diff --git a/environments/net-bond-with-vlans-no-external.yaml b/environments/net-bond-with-vlans-no-external.yaml index 75959a0b..cc27d4f0 100644 --- a/environments/net-bond-with-vlans-no-external.yaml +++ b/environments/net-bond-with-vlans-no-external.yaml @@ -20,7 +20,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller-no-external.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml - -# NOTE: with no external interface we should be able to use the -# default Neutron l3_agent.ini setting for the external bridge (br-ex) -# i.e. 
No need to set: NeutronExternalNetworkBridge: "''" diff --git a/environments/net-bond-with-vlans-v6.yaml b/environments/net-bond-with-vlans-v6.yaml index 73dda3d9..dc6fdfe3 100644 --- a/environments/net-bond-with-vlans-v6.yaml +++ b/environments/net-bond-with-vlans-v6.yaml @@ -12,9 +12,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller-v6.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml - -parameter_defaults: - # This sets 'external_network_bridge' in l3_agent.ini to an empty string - # so that external networks act like provider bridge networks (they - # will plug into br-int instead of br-ex) - NeutronExternalNetworkBridge: "''" diff --git a/environments/net-bond-with-vlans.yaml b/environments/net-bond-with-vlans.yaml index de8f8f74..38c31cac 100644 --- a/environments/net-bond-with-vlans.yaml +++ b/environments/net-bond-with-vlans.yaml @@ -11,9 +11,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml - -parameter_defaults: - # This sets 'external_network_bridge' in l3_agent.ini to an empty string - # so that external networks act like provider bridge networks (they - # will plug into br-int instead of br-ex) - NeutronExternalNetworkBridge: "''" diff --git a/environments/net-single-nic-linux-bridge-with-vlans.yaml b/environments/net-single-nic-linux-bridge-with-vlans.yaml index fd80bb9b..f34cfb92 100644 --- a/environments/net-single-nic-linux-bridge-with-vlans.yaml +++ b/environments/net-single-nic-linux-bridge-with-vlans.yaml @@ -11,9 +11,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/controller.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml - -parameter_defaults: - # This sets 'external_network_bridge' in l3_agent.ini to an empty string - # so that external networks act like provider bridge networks (they - # will plug into br-int instead of br-ex) - NeutronExternalNetworkBridge: "''" diff --git a/environments/net-single-nic-with-vlans-no-external.yaml b/environments/net-single-nic-with-vlans-no-external.yaml index c7594b32..65d38137 100644 --- a/environments/net-single-nic-with-vlans-no-external.yaml +++ b/environments/net-single-nic-with-vlans-no-external.yaml @@ -19,7 +19,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller-no-external.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml - -# NOTE: with no external interface we should be able to use the -# default Neutron l3_agent.ini setting for the external bridge (br-ex) -# i.e. 
No need to set: NeutronExternalNetworkBridge: "''" diff --git a/environments/net-single-nic-with-vlans-v6.yaml b/environments/net-single-nic-with-vlans-v6.yaml index 8210bad3..966e5fe9 100644 --- a/environments/net-single-nic-with-vlans-v6.yaml +++ b/environments/net-single-nic-with-vlans-v6.yaml @@ -11,9 +11,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller-v6.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml - -parameter_defaults: - # This sets 'external_network_bridge' in l3_agent.ini to an empty string - # so that external networks act like provider bridge networks (they - # will plug into br-int instead of br-ex) - NeutronExternalNetworkBridge: "''" diff --git a/environments/net-single-nic-with-vlans.yaml b/environments/net-single-nic-with-vlans.yaml index a61bc6e1..b087b3e4 100644 --- a/environments/net-single-nic-with-vlans.yaml +++ b/environments/net-single-nic-with-vlans.yaml @@ -11,9 +11,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml - -parameter_defaults: - # This sets 'external_network_bridge' in l3_agent.ini to an empty string - # so that external networks act like provider bridge networks (they - # will plug into br-int instead of br-ex) - NeutronExternalNetworkBridge: "''" diff --git a/environments/network-environment.yaml b/environments/network-environment.yaml index 796eb806..210b6b03 100644 --- a/environments/network-environment.yaml +++ b/environments/network-environment.yaml @@ -48,8 +48,6 @@ parameter_defaults: # ManagementInterfaceDefaultRoute: 10.0.1.1 # Define the DNS servers (maximum 2) for the overcloud nodes DnsServers: ["8.8.8.8","8.8.4.4"] - # Set to empty string to enable multiple external networks or VLANs - NeutronExternalNetworkBridge: "''" # List of Neutron network types for tenant networks (will be used in order) NeutronNetworkType: 'vxlan,vlan' # The tunnel type for the tenant network (vxlan or gre). Set to '' to disable tunneling. 
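The net-*-with-vlans environments above no longer force NeutronExternalNetworkBridge to an empty string, so the L3 agent default applies. A deployment that still relies on the old behaviour can keep setting the parameter in its own environment file:

  parameter_defaults:
    # Only needed if the legacy external-bridge behaviour is still required
    NeutronExternalNetworkBridge: "''"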
diff --git a/environments/services/vpp.yaml b/environments/services/vpp.yaml new file mode 100644 index 00000000..9bad70f8 --- /dev/null +++ b/environments/services/vpp.yaml @@ -0,0 +1,9 @@ +resource_registry: + OS::TripleO::Services::Vpp: ../../puppet/services/vpp.yaml + +#parameter_defaults: + #VPP main thread core pinning + #VppCpuMainCore: '1' + + #List of cores for VPP worker thread pinning + #VppCpuCorelistWorkers: ['3','4'] diff --git a/environments/undercloud.yaml b/environments/undercloud.yaml index 0fd01920..2540fbe5 100644 --- a/environments/undercloud.yaml +++ b/environments/undercloud.yaml @@ -16,3 +16,4 @@ parameter_defaults: NeutronDhcpAgentsPerNetwork: 2 HeatConvergenceEngine: false HeatMaxResourcesPerStack: -1 + HeatMaxJsonBodySize: 2097152 diff --git a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml index c388358a..24557517 100644 --- a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml +++ b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml @@ -21,3 +21,7 @@ parameter_defaults: rhel_reg_type: "" rhel_reg_method: "" rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.1-rpms" + rhel_reg_http_proxy_host: "" + rhel_reg_http_proxy_port: "" + rhel_reg_http_proxy_username: "" + rhel_reg_http_proxy_password: "" diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml index fdf2e957..e8316c53 100644 --- a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml +++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml @@ -45,6 +45,14 @@ parameters: type: string rhel_reg_sat_repo: type: string + rhel_reg_http_proxy_host: + type: string + rhel_reg_http_proxy_port: + type: string + rhel_reg_http_proxy_username: + type: string + rhel_reg_http_proxy_password: + type: string resources: @@ -71,6 +79,10 @@ resources: - name: REG_TYPE - name: REG_METHOD - name: REG_SAT_REPO + - name: REG_HTTP_PROXY_HOST + - name: REG_HTTP_PROXY_PORT + - name: REG_HTTP_PROXY_USERNAME + - name: REG_HTTP_PROXY_PASSWORD config: {get_file: scripts/rhel-registration} RHELRegistrationDeployment: @@ -99,6 +111,10 @@ resources: REG_TYPE: {get_param: rhel_reg_type} REG_METHOD: {get_param: rhel_reg_method} REG_SAT_REPO: {get_param: rhel_reg_sat_repo} + REG_HTTP_PROXY_HOST: {get_param: rhel_reg_http_proxy_host} + REG_HTTP_PROXY_PORT: {get_param: rhel_reg_http_proxy_port} + REG_HTTP_PROXY_USERNAME: {get_param: rhel_reg_http_proxy_username} + REG_HTTP_PROXY_PASSWORD: {get_param: rhel_reg_http_proxy_password} RHELUnregistration: type: OS::Heat::SoftwareConfig diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration index 2650a967..6f83cc4b 100644 --- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration +++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration @@ -13,10 +13,18 @@ fi retryCount=0 opts= +config_opts= attach_opts= sat5_opts= repos="repos --enable rhel-7-server-rpms" satellite_repo=${REG_SAT_REPO} +proxy_host= +proxy_port= +proxy_url= +proxy_username= +proxy_password= + +# process variables.. 
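The new rhel_reg_http_proxy_* parameters above default to empty strings; to register through a proxy they are set together (the script that follows skips the proxy unless both host and port are supplied, and a username also requires a password). The values here are illustrative only:

  parameter_defaults:
    rhel_reg_http_proxy_host: "proxy.example.com"     # illustrative
    rhel_reg_http_proxy_port: "3128"
    rhel_reg_http_proxy_username: "proxyuser"         # optional, must be paired with a password
    rhel_reg_http_proxy_password: "proxypass"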
if [ -n "${REG_AUTO_ATTACH:-}" ]; then opts="$opts --auto-attach" @@ -97,6 +105,57 @@ if [ -n "${REG_TYPE:-}" ]; then opts="$opts --type=$REG_TYPE" fi +# Proxy settings (host and port) +if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then + proxy_host="${REG_HTTP_PROXY_HOST}" +fi + +if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then + proxy_port="${REG_HTTP_PROXY_PORT}" +fi + +# Proxy settings (user and password) +if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then + proxy_username="${REG_HTTP_PROXY_USERNAME}" +fi + +if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then + proxy_password="${REG_HTTP_PROXY_PASSWORD}" +fi + +# Sanity Checks for proxy host/port/user/password +if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then + if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then + # Good both values are not empty + proxy_url="http://${proxy_host}:${proxy_port}" + config_opts="--server.proxy_hostname=${proxy_host} --server.proxy_port=${proxy_port}" + sat5_opts="${sat5_opts} --proxy_hostname=${proxy_url}" + echo "RHSM Proxy set to: ${proxy_url}" + if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then + if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then + config_opts="${config_opts} --server.proxy_user=${proxy_username} --server.proxy_password=${proxy_password}" + sat5_opts="${sat5_opts} --proxyUser=${proxy_username} --proxyPassword=${proxy_password}" + else + echo "Warning: REG_HTTP_PROXY_PASSWORD cannot be null with non-empty REG_HTTP_PROXY_USERNAME! Skipping..." + proxy_username= ; proxy_password= + fi + else + if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then + echo "Warning: REG_HTTP_PROXY_USERNAME cannot be null with non-empty REG_HTTP_PROXY_PASSWORD! Skipping..." + proxy_username= ; proxy_password= + fi + fi + else + echo "Warning: REG_HTTP_PROXY_PORT cannot be null with non-empty REG_HTTP_PROXY_HOST! Skipping..." + proxy_host= ; proxy_port= ; proxy_url= ; proxy_username= ; proxy_password= + fi +else + if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then + echo "Warning: REG_HTTP_PROXY_HOST cannot be null with non-empty REG_HTTP_PROXY_PORT! Skipping..." + proxy_host= ; proxy_port= ; proxy_url= ; proxy_username= ; proxy_password= + fi +fi + function retry() { if [[ $retryCount < 3 ]]; then $@ @@ -127,13 +186,34 @@ function detect_satellite_version { fi } +if [ "x${proxy_url}" != "x" ];then + # Config subscription-manager for proxy + subscription-manager config ${config_opts} + + # Config yum for proxy.. 
+ sed -i -e '/^proxy=/d' /etc/yum.conf + echo "proxy=${proxy_url}" >> /etc/yum.conf + + # Handle optional username/password + if [ -n "${proxy_username}" ]; then + sed -i -e '/^proxy_username=/d' /etc/yum.conf + echo "proxy_username=${proxy_username}" >> /etc/yum.conf + fi + + if [ -n "${proxy_password}" ]; then + sed -i -e '/^proxy_password=/d' /etc/yum.conf + echo "proxy_password=${proxy_password}" >> /etc/yum.conf + fi + +fi + case "${REG_METHOD:-}" in portal) retry subscription-manager register $opts if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then retry subscription-manager attach $attach_opts fi - retry subscription-manager repos --disable '*' + retry subscription-manager repos --disable='*' retry subscription-manager $repos ;; satellite) diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh index 6bfe1239..4b323854 100755 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh @@ -41,7 +41,7 @@ done # https://bugzilla.redhat.com/show_bug.cgi?id=1341968 # # The default is to determine automatically if upgrade is needed based -# on mysql package versionning, but this can be overriden manually +# on mysql package versioning, but this can be overridden manually # to support specific upgrade scenario # Calling this function will set the DO_MYSQL_UPGRADE variable which is used @@ -50,6 +50,7 @@ mysql_need_update if [[ -n $(is_bootstrap_node) ]]; then if [ $DO_MYSQL_UPGRADE -eq 1 ]; then + backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction" mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql" cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR" fi @@ -108,7 +109,7 @@ yum -y -q update # We need to ensure at least those two configuration settings, otherwise # mariadb 10.1+ won't activate galera replication. # wsrep_cluster_address must only be set though, its value does not -# matter because it's overriden by the galera resource agent. +# matter because it's overridden by the galera resource agent. cat >> /etc/my.cnf.d/galera.cnf <<EOF [mysqld] wsrep_on = ON diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml index 8c91027d..74d3be71 100644 --- a/extraconfig/tasks/major_upgrade_pacemaker.yaml +++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml @@ -18,10 +18,6 @@ parameters: constraints: - allowed_values: ['auto', 'yes', 'no'] default: 'auto' - IgnoreCephUpgradeWarnings: - type: boolean - default: false - description: If enabled, Ceph upgrade will be forced even though cluster or PGs status is not clean KeepSaharaServicesOnUpgrade: type: boolean default: true diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh index f8ab10c2..4c87373e 100755 --- a/extraconfig/tasks/yum_update.sh +++ b/extraconfig/tasks/yum_update.sh @@ -47,7 +47,10 @@ if [[ "$list_updates" == "" ]]; then exit 0 fi -pacemaker_status=$(systemctl is-active pacemaker || :) +pacemaker_status="" +if hiera -c /etc/puppet/hiera.yaml service_names | grep -q pacemaker; then + pacemaker_status=$(systemctl is-active pacemaker) +fi # Fix the redis/rabbit resource start/stop timeouts. 
See https://bugs.launchpad.net/tripleo/+bug/1633455 # and https://bugs.launchpad.net/tripleo/+bug/1634851 diff --git a/firstboot/os-net-config-mappings.yaml b/firstboot/os-net-config-mappings.yaml index d7e0c524..f82bc19f 100644 --- a/firstboot/os-net-config-mappings.yaml +++ b/firstboot/os-net-config-mappings.yaml @@ -9,8 +9,28 @@ description: > nic1: "00:c8:7c:e6:f0:2e" node2: nic1: "00:18:7d:99:0c:b6" - This will result in the first nodeN entry where a mac matches a - local device being written as a mapping file for os-net-config in + node3: + dmiString: 'system-uuid' + id: 'A8C85861-1B16-4803-8689-AFC62984F8F6' + nic1: em3 + # Dell PowerEdge + nodegroup1: + dmiString: "system-product-name" + id: "PowerEdge R630" + nic1: em3 + nic2: em1 + nic3: em2 + # Cisco UCS B200-M4" + nodegroup2: + dmiString: "system-product-name" + id: "UCSB-B200-M4" + nic1: enp7s0 + nic2: enp6s0 + + This will result in the first node* entry where either: + a) a mac matches a local device + or b) a DMI String matches the specified id + being written as a mapping file for os-net-config in /etc/os-net-config/mapping.yaml parameters: @@ -47,15 +67,36 @@ resources: echo '$node_lookup' | python -c " import json import sys + import copy + from subprocess import PIPE, Popen import yaml + + def write_mapping_file(interface_mapping): + with open('/etc/os-net-config/mapping.yaml', 'w') as f: + yaml.safe_dump(interface_mapping, f, default_flow_style=False) + input = sys.stdin.readline() or '{}' data = json.loads(input) for node in data: + interface_mapping = {'interface_mapping': + copy.deepcopy(data[node])} + if 'dmiString' in interface_mapping['interface_mapping']: + del interface_mapping['interface_mapping']['dmiString'] + if 'id' in interface_mapping['interface_mapping']: + del interface_mapping['interface_mapping']['id'] + # Match on mac addresses first if any(x in '$eth_addr'.split(',') for x in data[node].values()): - interface_mapping = {'interface_mapping': data[node]} - with open('/etc/os-net-config/mapping.yaml', 'w') as f: - yaml.safe_dump(interface_mapping, f, default_flow_style=False) + write_mapping_file(interface_mapping) break + # If data contain dmiString and id keys, try to match node(group) + if 'dmiString' in data[node] and 'id' in data[node]: + ps = Popen([ 'dmidecode', + '--string', data[node].get('dmiString') ], + stdout=PIPE) + out, err = ps.communicate() + if data[node].get('id') == out.rstrip(): + write_mapping_file(interface_mapping) + break " params: $node_lookup: {get_param: NetConfigDataLookup} diff --git a/network/ports/net_ip_list_map.yaml b/network/ports/net_ip_list_map.yaml index 5782bbe9..83d875e8 100644 --- a/network/ports/net_ip_list_map.yaml +++ b/network/ports/net_ip_list_map.yaml @@ -35,6 +35,32 @@ parameters: default: [] type: json + InternalApiNetName: + default: internal_api + description: The name of the internal API network. + type: string + ExternalNetName: + default: external + description: The name of the external network. + type: string + ManagementNetName: + default: management + description: The name of the management network. + type: string + StorageNetName: + default: storage + description: The name of the storage network. + type: string + StorageMgmtNetName: + default: storage_mgmt + description: The name of the Storage management network. + type: string + TenantNetName: + default: tenant + description: The name of the tenant network. 
+ type: string + + resources: # This adds the extra "services" on for keystone # so that keystone_admin_api_network and @@ -58,19 +84,33 @@ resources: - keystone_admin_api - keystone_public_api + NetIpMapValue: + type: OS::Heat::Value + properties: + type: json + value: + map_replace: + - ctlplane: {get_param: ControlPlaneIpList} + external: {get_param: ExternalIpList} + internal_api: {get_param: InternalApiIpList} + storage: {get_param: StorageIpList} + storage_mgmt: {get_param: StorageMgmtIpList} + tenant: {get_param: TenantIpList} + management: {get_param: ManagementIpList} + - keys: + external: {get_param: ExternalNetName} + internal_api: {get_param: InternalApiNetName} + storage: {get_param: StorageNetName} + storage_mgmt: {get_param: StorageMgmtNetName} + tenant: {get_param: TenantNetName} + management: {get_param: ManagementNetName} + outputs: net_ip_map: description: > A Hash containing a mapping of network names to assigned lists of IP addresses. - value: - ctlplane: {get_param: ControlPlaneIpList} - external: {get_param: ExternalIpList} - internal_api: {get_param: InternalApiIpList} - storage: {get_param: StorageIpList} - storage_mgmt: {get_param: StorageMgmtIpList} - tenant: {get_param: TenantIpList} - management: {get_param: ManagementIpList} + value: {get_attr: [NetIpMapValue, value]} service_ips: description: > Map of enabled services to a list of their IP addresses @@ -92,14 +132,7 @@ outputs: for_each: SERVICE: {get_attr: [EnabledServicesValue, value]} - values: {get_param: ServiceNetMap} - - values: - ctlplane: {get_param: ControlPlaneIpList} - external: {get_param: ExternalIpList} - internal_api: {get_param: InternalApiIpList} - storage: {get_param: StorageIpList} - storage_mgmt: {get_param: StorageMgmtIpList} - tenant: {get_param: TenantIpList} - management: {get_param: ManagementIpList} + - values: {get_attr: [NetIpMapValue, value]} service_hostnames: description: > Map of enabled services to a list of hostnames where they're running diff --git a/network/ports/net_ip_map.yaml b/network/ports/net_ip_map.yaml index c8cf733f..c974d72e 100644 --- a/network/ports/net_ip_map.yaml +++ b/network/ports/net_ip_map.yaml @@ -69,35 +69,136 @@ parameters: type: string description: IP address with brackets in case of IPv6 + InternalApiNetName: + default: internal_api + description: The name of the internal API network. + type: string + ExternalNetName: + default: external + description: The name of the external network. + type: string + ManagementNetName: + default: management + description: The name of the management network. + type: string + StorageNetName: + default: storage + description: The name of the storage network. + type: string + StorageMgmtNetName: + default: storage_mgmt + description: The name of the Storage management network. + type: string + TenantNetName: + default: tenant + description: The name of the tenant network. 
+ type: string + +resources: + + NetIpMapValue: + type: OS::Heat::Value + properties: + type: json + value: + map_replace: + - ctlplane: {get_param: ControlPlaneIp} + external: {get_param: ExternalIp} + internal_api: {get_param: InternalApiIp} + storage: {get_param: StorageIp} + storage_mgmt: {get_param: StorageMgmtIp} + tenant: {get_param: TenantIp} + management: {get_param: ManagementIp} + ctlplane_subnet: + list_join: + - '' + - - {get_param: ControlPlaneIp} + - '/' + - {get_param: ControlPlaneSubnetCidr} + external_subnet: {get_param: ExternalIpSubnet} + internal_api_subnet: {get_param: InternalApiIpSubnet} + storage_subnet: {get_param: StorageIpSubnet} + storage_mgmt_subnet: {get_param: StorageMgmtIpSubnet} + tenant_subnet: {get_param: TenantIpSubnet} + management_subnet: {get_param: ManagementIpSubnet} + ctlplane_uri: {get_param: ControlPlaneIp} + external_uri: {get_param: ExternalIpUri} + internal_api_uri: {get_param: InternalApiIpUri} + storage_uri: {get_param: StorageIpUri} + storage_mgmt_uri: {get_param: StorageMgmtIpUri} + tenant_uri: {get_param: TenantIpUri} + management_uri: {get_param: ManagementIpUri} + - keys: + external: {get_param: ExternalNetName} + internal_api: {get_param: InternalApiNetName} + storage: {get_param: StorageNetName} + storage_mgmt: {get_param: StorageMgmtNetName} + tenant: {get_param: TenantNetName} + management: {get_param: ManagementNetName} + external_subnet: + str_replace: + template: NAME_subnet + params: + NAME: {get_param: ExternalNetName} + internal_api_subnet: + str_replace: + template: NAME_subnet + params: + NAME: {get_param: InternalApiNetName} + storage_subnet: + str_replace: + template: NAME_subnet + params: + NAME: {get_param: StorageNetName} + storage_mgmt_subnet: + str_replace: + template: NAME_subnet + params: + NAME: {get_param: StorageMgmtNetName} + tenant_subnet: + str_replace: + template: NAME_subnet + params: + NAME: {get_param: TenantNetName} + management_subnet: + str_replace: + template: NAME_subnet + params: + NAME: {get_param: ManagementNetName} + external_uri: + str_replace: + template: NAME_uri + params: + NAME: {get_param: ExternalNetName} + internal_api_uri: + str_replace: + template: NAME_uri + params: + NAME: {get_param: InternalApiNetName} + storage_uri: + str_replace: + template: NAME_uri + params: + NAME: {get_param: StorageNetName} + storage_mgmt_uri: + str_replace: + template: NAME_uri + params: + NAME: {get_param: StorageMgmtNetName} + tenant_uri: + str_replace: + template: NAME_uri + params: + NAME: {get_param: TenantNetName} + management_uri: + str_replace: + template: NAME_uri + params: + NAME: {get_param: ManagementNetName} + outputs: net_ip_map: description: > A Hash containing a mapping of network names to assigned IPs for a specific machine. 
- value: - ctlplane: {get_param: ControlPlaneIp} - external: {get_param: ExternalIp} - internal_api: {get_param: InternalApiIp} - storage: {get_param: StorageIp} - storage_mgmt: {get_param: StorageMgmtIp} - tenant: {get_param: TenantIp} - management: {get_param: ManagementIp} - ctlplane_subnet: - list_join: - - '' - - - {get_param: ControlPlaneIp} - - '/' - - {get_param: ControlPlaneSubnetCidr} - external_subnet: {get_param: ExternalIpSubnet} - internal_api_subnet: {get_param: InternalApiIpSubnet} - storage_subnet: {get_param: StorageIpSubnet} - storage_mgmt_subnet: {get_param: StorageMgmtIpSubnet} - tenant_subnet: {get_param: TenantIpSubnet} - management_subnet: {get_param: ManagementIpSubnet} - ctlplane_uri: {get_param: ControlPlaneIp} - external_uri: {get_param: ExternalIpUri} - internal_api_uri: {get_param: InternalApiIpUri} - storage_uri: {get_param: StorageIpUri} - storage_mgmt_uri: {get_param: StorageMgmtIpUri} - tenant_uri: {get_param: TenantIpUri} - management_uri: {get_param: ManagementIpUri} + value: {get_attr: [NetIpMapValue, value]} diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml index 1360d0be..ae012b21 100644 --- a/overcloud-resource-registry-puppet.j2.yaml +++ b/overcloud-resource-registry-puppet.j2.yaml @@ -248,6 +248,8 @@ resource_registry: OS::TripleO::Services::OctaviaHousekeeping: OS::Heat::None OS::TripleO::Services::OctaviaWorker: OS::Heat::None OS::TripleO::Services::MySQLClient: puppet/services/database/mysql-client.yaml + OS::TripleO::Services::Vpp: OS::Heat::None + OS::TripleO::Services::Docker: OS::Heat::None parameter_defaults: EnablePackageInstall: false diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml index e9447b94..e99f770f 100644 --- a/overcloud.j2.yaml +++ b/overcloud.j2.yaml @@ -243,6 +243,12 @@ resources: NetIpMap: {get_attr: [VipMap, net_ip_map]} ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]} + EndpointMapData: + type: OS::Heat::Value + properties: + type: json + value: {get_attr: [EndpointMap, endpoint_map]} + # Jinja loop for Role in roles_data.yaml {% for role in roles %} # Resources generated for {{role.name}} Role @@ -634,7 +640,7 @@ outputs: value: true KeystoneURL: description: URL for the Overcloud Keystone service - value: {get_attr: [EndpointMap, endpoint_map, KeystonePublic, uri]} + value: {get_attr: [EndpointMapData, value, KeystonePublic, uri]} KeystoneAdminVip: description: Keystone Admin VIP endpoint value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]} @@ -643,7 +649,7 @@ outputs: Mapping of the resources with the needed info for their endpoints. This includes the protocol used, the IP, port and also a full representation of the URI. - value: {get_attr: [EndpointMap, endpoint_map]} + value: {get_attr: [EndpointMapData, value]} HostsEntry: description: | The content that should be appended to your /etc/hosts if you want to get diff --git a/plan-environment.yaml b/plan-environment.yaml new file mode 100644 index 00000000..f629eff3 --- /dev/null +++ b/plan-environment.yaml @@ -0,0 +1,5 @@ +version: 1.0
+
+template: overcloud.yaml
+environments:
+- path: overcloud-resource-registry-puppet.yaml
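Note: the five lines above are the complete default plan-environment.yaml added by this change. As an illustration only (not part of this patch), a deployment-specific plan environment might extend it roughly as sketched below; the name key, the extra network-isolation environment entry, and the parameter_defaults override are assumptions made for the sketch, not values taken from this diff:

version: 1.0
name: overcloud                                # assumed plan name, not set by this patch
template: overcloud.yaml
environments:
- path: overcloud-resource-registry-puppet.yaml
- path: environments/network-isolation.yaml    # assumed extra environment file
parameter_defaults:                            # assumed Heat parameter overrides
  NtpServer: pool.ntp.org

Listing overcloud-resource-registry-puppet.yaml first keeps the default resource registry in place; entries later in the environments list take precedence when the files are merged.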
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index ee43c3a5..7edf17af 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -68,6 +68,32 @@ parameters: type: boolean default: false + InternalApiNetName: + default: internal_api + description: The name of the internal API network. + type: string + ExternalNetName: + default: external + description: The name of the external network. + type: string + ManagementNetName: + default: management + description: The name of the management network. + type: string + StorageNetName: + default: storage + description: The name of the storage network. + type: string + StorageMgmtNetName: + default: storage_mgmt + description: The name of the Storage management network. + type: string + TenantNetName: + default: tenant + description: The name of the tenant network. + type: string + + resources: allNodesConfigImpl: @@ -175,21 +201,21 @@ resources: get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_admin_api_network]}] keystone_public_api_vip: get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_public_api_network]}] - public_virtual_ip: {get_param: [NetVipMap, external]} + public_virtual_ip: {get_param: [NetVipMap, {get_param: ExternalNetName}]} controller_virtual_ip: {get_param: [NetVipMap, ctlplane]} - internal_api_virtual_ip: {get_param: [NetVipMap, internal_api]} - storage_virtual_ip: {get_param: [NetVipMap, storage]} - storage_mgmt_virtual_ip: {get_param: [NetVipMap, storage_mgmt]} + internal_api_virtual_ip: {get_param: [NetVipMap, {get_param: InternalApiNetName}]} + storage_virtual_ip: {get_param: [NetVipMap, {get_param: StorageNetName}]} + storage_mgmt_virtual_ip: {get_param: [NetVipMap, {get_param: StorageMgmtNetName}]} redis_vip: {get_param: RedisVirtualIP} # public_virtual_ip and controller_virtual_ip are needed in # both HAproxy & keepalived. 
- tripleo::haproxy::public_virtual_ip: {get_param: [NetVipMap, external]} + tripleo::haproxy::public_virtual_ip: {get_param: [NetVipMap, {get_param: ExternalNetName}]} tripleo::haproxy::controller_virtual_ip: {get_param: [NetVipMap, ctlplane]} - tripleo::keepalived::public_virtual_ip: {get_param: [NetVipMap, external]} + tripleo::keepalived::public_virtual_ip: {get_param: [NetVipMap, {get_param: ExternalNetName}]} tripleo::keepalived::controller_virtual_ip: {get_param: [NetVipMap, ctlplane]} - tripleo::keepalived::internal_api_virtual_ip: {get_param: [NetVipMap, internal_api]} - tripleo::keepalived::storage_virtual_ip: {get_param: [NetVipMap, storage]} - tripleo::keepalived::storage_mgmt_virtual_ip: {get_param: [NetVipMap, storage_mgmt]} + tripleo::keepalived::internal_api_virtual_ip: {get_param: [NetVipMap, {get_param: InternalApiNetName}]} + tripleo::keepalived::storage_virtual_ip: {get_param: [NetVipMap, {get_param: StorageNetName}]} + tripleo::keepalived::storage_mgmt_virtual_ip: {get_param: [NetVipMap, {get_param: StorageMgmtNetName}]} tripleo::keepalived::redis_virtual_ip: {get_param: RedisVirtualIP} tripleo::redis_notification::haproxy_monitor_ip: {get_param: [NetVipMap, ctlplane]} cloud_name_external: {get_param: cloud_name_external} diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml index a5218dbe..51f9abac 100644 --- a/puppet/blockstorage-role.yaml +++ b/puppet/blockstorage-role.yaml @@ -448,6 +448,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: name: UpdateDeployment config: {get_resource: UpdateConfig} diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml index 0867e17f..d7d7f478 100644 --- a/puppet/cephstorage-role.yaml +++ b/puppet/cephstorage-role.yaml @@ -460,6 +460,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: config: {get_resource: UpdateConfig} server: {get_resource: CephStorage} diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml index 1a0294af..ebdd762d 100644 --- a/puppet/compute-role.yaml +++ b/puppet/compute-role.yaml @@ -483,6 +483,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: name: UpdateDeployment config: {get_resource: UpdateConfig} diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml index 825006ba..2f4f583c 100644 --- a/puppet/controller-role.yaml +++ b/puppet/controller-role.yaml @@ -523,6 +523,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: name: UpdateDeployment config: {get_resource: UpdateConfig} diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml index 3daf3fd3..b6d1239a 100644 --- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml +++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml @@ -53,41 +53,40 @@ resources: NetworkMidoNetConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - midonet_data: - mapped_data: - enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController} - enable_cassandra_on_controller: {get_param: EnableCassandraOnController} - midonet_tunnelzone_name: {get_param: TunnelZoneName} - midonet_tunnelzone_type: {get_param: TunnelZoneType} - midonet_libvirt_qemu_data: | - user = "root" - group = "root" - cgroup_device_acl = 
[ - "/dev/null", "/dev/full", "/dev/zero", - "/dev/random", "/dev/urandom", - "/dev/ptmx", "/dev/kvm", "/dev/kqemu", - "/dev/rtc","/dev/hpet", "/dev/vfio/vfio", - "/dev/net/tun" - ] - tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort} - tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort} - tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort} - tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift} - tripleo::haproxy::midonet_api: true - # Missed Neutron Puppet data - neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver' - neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver' - neutron::plugins::midonet::midonet_api_port: 8081 - neutron::params::midonet_server_package: 'python-networking-midonet' + datafiles: + midonet_data: + mapped_data: + enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController} + enable_cassandra_on_controller: {get_param: EnableCassandraOnController} + midonet_tunnelzone_name: {get_param: TunnelZoneName} + midonet_tunnelzone_type: {get_param: TunnelZoneType} + midonet_libvirt_qemu_data: | + user = "root" + group = "root" + cgroup_device_acl = [ + "/dev/null", "/dev/full", "/dev/zero", + "/dev/random", "/dev/urandom", + "/dev/ptmx", "/dev/kvm", "/dev/kqemu", + "/dev/rtc","/dev/hpet", "/dev/vfio/vfio", + "/dev/net/tun" + ] + tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort} + tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort} + tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort} + tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift} + tripleo::haproxy::midonet_api: true + # Missed Neutron Puppet data + neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver' + neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver' + neutron::plugins::midonet::midonet_api_port: 8081 + neutron::params::midonet_server_package: 'python-networking-midonet' - # Make sure the l3 agent does not run - l3_agent_service: false - neutron::agents::l3::manage_service: false - neutron::agents::l3::enabled: false + # Make sure the l3 agent does not run + l3_agent_service: false + neutron::agents::l3::manage_service: false + neutron::agents::l3::enabled: false NetworkMidonetDeploymentControllers: diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml index 9b900bc4..b05fa636 100644 --- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml +++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml @@ -101,31 +101,30 @@ resources: NetworkCiscoConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - neutron_cisco_data: - mapped_data: - neutron::plugins::ml2::cisco::ucsm::ucsm_ip: {get_input: UCSM_ip} - neutron::plugins::ml2::cisco::ucsm::ucsm_username: {get_input: UCSM_username} - neutron::plugins::ml2::cisco::ucsm::ucsm_password: {get_input: UCSM_password} - neutron::plugins::ml2::cisco::ucsm::ucsm_host_list: {get_input: UCSM_host_list} - neutron::plugins::ml2::cisco::ucsm::supported_pci_devs: {get_input: UCSMSupportedPciDevs} - neutron::plugins::ml2::cisco::nexus::nexus_config: {get_input: NexusConfig} - 
neutron::plugins::ml2::cisco::nexus::managed_physical_network: {get_input: NexusManagedPhysicalNetwork} - neutron::plugins::ml2::cisco::nexus::vlan_name_prefix: {get_input: NexusVlanNamePrefix} - neutron::plugins::ml2::cisco::nexus::svi_round_robin: {get_input: NexusSviRoundRobin} - neutron::plugins::ml2::cisco::nexus::provider_vlan_name_prefix: {get_input: NexusProviderVlanNamePrefix} - neutron::plugins::ml2::cisco::nexus::persistent_switch_config: {get_input: NexusPersistentSwitchConfig} - neutron::plugins::ml2::cisco::nexus::switch_heartbeat_time: {get_input: NexusSwitchHeartbeatTime} - neutron::plugins::ml2::cisco::nexus::switch_replay_count: {get_input: NexusSwitchReplayCount} - neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_create: {get_input: NexusProviderVlanAutoCreate} - neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_trunk: {get_input: NexusProviderVlanAutoTrunk} - neutron::plugins::ml2::cisco::nexus::vxlan_global_config: {get_input: NexusVxlanGlobalConfig} - neutron::plugins::ml2::cisco::nexus::host_key_checks: {get_input: NexusHostKeyChecks} - neutron::plugins::ml2::cisco::type_nexus_vxlan::vni_ranges: {get_input: NexusVxlanVniRanges} - neutron::plugins::ml2::cisco::type_nexus_vxlan::mcast_ranges: {get_input: NexusVxlanMcastRanges} + datafiles: + neutron_cisco_data: + mapped_data: + neutron::plugins::ml2::cisco::ucsm::ucsm_ip: {get_input: UCSM_ip} + neutron::plugins::ml2::cisco::ucsm::ucsm_username: {get_input: UCSM_username} + neutron::plugins::ml2::cisco::ucsm::ucsm_password: {get_input: UCSM_password} + neutron::plugins::ml2::cisco::ucsm::ucsm_host_list: {get_input: UCSM_host_list} + neutron::plugins::ml2::cisco::ucsm::supported_pci_devs: {get_input: UCSMSupportedPciDevs} + neutron::plugins::ml2::cisco::nexus::nexus_config: {get_input: NexusConfig} + neutron::plugins::ml2::cisco::nexus::managed_physical_network: {get_input: NexusManagedPhysicalNetwork} + neutron::plugins::ml2::cisco::nexus::vlan_name_prefix: {get_input: NexusVlanNamePrefix} + neutron::plugins::ml2::cisco::nexus::svi_round_robin: {get_input: NexusSviRoundRobin} + neutron::plugins::ml2::cisco::nexus::provider_vlan_name_prefix: {get_input: NexusProviderVlanNamePrefix} + neutron::plugins::ml2::cisco::nexus::persistent_switch_config: {get_input: NexusPersistentSwitchConfig} + neutron::plugins::ml2::cisco::nexus::switch_heartbeat_time: {get_input: NexusSwitchHeartbeatTime} + neutron::plugins::ml2::cisco::nexus::switch_replay_count: {get_input: NexusSwitchReplayCount} + neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_create: {get_input: NexusProviderVlanAutoCreate} + neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_trunk: {get_input: NexusProviderVlanAutoTrunk} + neutron::plugins::ml2::cisco::nexus::vxlan_global_config: {get_input: NexusVxlanGlobalConfig} + neutron::plugins::ml2::cisco::nexus::host_key_checks: {get_input: NexusHostKeyChecks} + neutron::plugins::ml2::cisco::type_nexus_vxlan::vni_ranges: {get_input: NexusVxlanVniRanges} + neutron::plugins::ml2::cisco::type_nexus_vxlan::mcast_ranges: {get_input: NexusVxlanMcastRanges} NetworkCiscoDeployment: type: OS::Heat::StructuredDeployments diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml index 7fe2a842..533c0ee9 100644 --- a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml +++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml @@ -20,14 +20,13 @@ resources: NeutronBigswitchConfig: type: 
OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - neutron_bigswitch_data: - mapped_data: - neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent} - neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp} + datafiles: + neutron_bigswitch_data: + mapped_data: + neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent} + neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp} NeutronBigswitchDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml index 47c782c7..1d16e909 100644 --- a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml +++ b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml @@ -50,22 +50,21 @@ resources: NovaNuageConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - nova_nuage_data: - mapped_data: - nuage::vrs::active_controller: {get_input: ActiveController} - nuage::vrs::standby_controller: {get_input: StandbyController} - nuage::metadataagent::metadata_port: {get_input: MetadataPort} - nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort} - nuage::metadataagent::metadata_secret: {get_input: SharedSecret} - nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion} - nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername} - nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs} - nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType} - nuage::metadataagent::nova_region_name: {get_input: NovaRegionName} + datafiles: + nova_nuage_data: + mapped_data: + nuage::vrs::active_controller: {get_input: ActiveController} + nuage::vrs::standby_controller: {get_input: StandbyController} + nuage::metadataagent::metadata_port: {get_input: MetadataPort} + nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort} + nuage::metadataagent::metadata_secret: {get_input: SharedSecret} + nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion} + nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername} + nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs} + nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType} + nuage::metadataagent::nova_region_name: {get_input: NovaRegionName} NovaNuageDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml index 763ae39a..378f7f98 100644 --- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml +++ b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml @@ -91,35 +91,34 @@ resources: CinderNetappConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - cinder_netapp_data: - mapped_data: - tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend} - cinder::backend::netapp::title: {get_input: NetappBackendName} - cinder::backend::netapp::netapp_login: {get_input: NetappLogin} - cinder::backend::netapp::netapp_password: {get_input: NetappPassword} - cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname} - 
cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort} - cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier} - cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily} - cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol} - cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType} - cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler} - cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList} - cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver} - cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName} - cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares} - cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig} - cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions} - cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath} - cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps} - cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword} - cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools} - cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType} - cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath} + datafiles: + cinder_netapp_data: + mapped_data: + tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend} + cinder::backend::netapp::title: {get_input: NetappBackendName} + cinder::backend::netapp::netapp_login: {get_input: NetappLogin} + cinder::backend::netapp::netapp_password: {get_input: NetappPassword} + cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname} + cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort} + cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier} + cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily} + cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol} + cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType} + cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler} + cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList} + cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver} + cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName} + cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares} + cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig} + cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions} + cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath} + cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps} + cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword} + cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools} + cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType} + cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath} CinderNetappDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml 
index 0f4806db..1456337f 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml @@ -38,19 +38,18 @@ resources: NeutronBigswitchConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - neutron_bigswitch_data: - mapped_data: - neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers} - neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth} - neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure} - neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval} - neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id} - neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl} - neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory} + datafiles: + neutron_bigswitch_data: + mapped_data: + neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers} + neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth} + neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure} + neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval} + neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id} + neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl} + neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory} NeutronBigswitchDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml index 6eae812f..bca6010a 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml @@ -96,48 +96,47 @@ resources: CiscoN1kvConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - cisco_n1kv_data: - mapped_data: - #enable_cisco_n1kv: {get_input: EnableCiscoN1kv} - # VEM Parameters - n1kv_vem_source: {get_input: n1kv_vem_source} - n1kv_vem_version: {get_input: n1kv_vem_version} - neutron::agents::n1kv_vem::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} - neutron::agents::n1kv_vem::n1kv_vsm_domain_id: {get_input: n1kv_vsm_domain_id} - neutron::agents::n1kv_vem::n1kv_vsm_ip_v6: {get_input: n1kv_vsm_ip_v6} - neutron::agents::n1kv_vem::host_mgmt_intf: {get_input: n1kv_vem_host_mgmt_intf} - neutron::agents::n1kv_vem::uplink_profile: {get_input: n1kv_vem_uplink_profile} - neutron::agents::n1kv_vem::vtep_config: {get_input: n1kv_vem_vtep_config} - neutron::agents::n1kv_vem::portdb: {get_input: n1kv_vem_portdb} - neutron::agents::n1kv_vem::vteps_in_same_subnet: {get_input: n1kv_vem_vteps_in_same_subnet} - neutron::agents::n1kv_vem::fastpath_flood: {get_input: n1kv_vem_fastpath_flood} - #VSM Parameter - n1kv_vsm_source: {get_input: n1kv_vsm_source} - n1kv_vsm_version: {get_input: n1kv_vsm_version} - n1k_vsm::phy_if_bridge: {get_input: n1kv_vsm_host_mgmt_intf} - n1k_vsm::vsm_role: {get_input: n1kv_vsm_role} - 
n1k_vsm::pacemaker_control: {get_input: n1kv_vsm_pacemaker_ctrl} - n1k_vsm::existing_bridge: {get_input: n1kv_vsm_existing_br} - n1k_vsm::vsm_admin_passwd: {get_input: n1kv_vsm_password} - n1k_vsm::vsm_domain_id: {get_input: n1kv_vsm_domain_id} - n1k_vsm::vsm_mgmt_ip: {get_input: n1kv_vsm_ip} - n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask} - n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip} - n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip} - n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan} - # Cisco N1KV driver Parameters - neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} - neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username} - neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_password: {get_input: n1kv_vsm_password} - neutron::plugins::ml2::cisco::nexus1000v::poll_duration: {get_input: n1kv_vsm_poll_duration} - neutron::plugins::ml2::cisco::nexus1000v::http_pool_size: {get_input: n1kv_vsm_http_pool_size} - neutron::plugins::ml2::cisco::nexus1000v::http_timeout: {get_input: n1kv_vsm_http_timeout} - neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_sync_interval: {get_input: n1kv_vsm_sync_interval} - neutron::plugins::ml2::cisco::nexus1000v::max_vsm_retries: {get_input: n1kv_max_vsm_retries} + datafiles: + cisco_n1kv_data: + mapped_data: + #enable_cisco_n1kv: {get_input: EnableCiscoN1kv} + # VEM Parameters + n1kv_vem_source: {get_input: n1kv_vem_source} + n1kv_vem_version: {get_input: n1kv_vem_version} + neutron::agents::n1kv_vem::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} + neutron::agents::n1kv_vem::n1kv_vsm_domain_id: {get_input: n1kv_vsm_domain_id} + neutron::agents::n1kv_vem::n1kv_vsm_ip_v6: {get_input: n1kv_vsm_ip_v6} + neutron::agents::n1kv_vem::host_mgmt_intf: {get_input: n1kv_vem_host_mgmt_intf} + neutron::agents::n1kv_vem::uplink_profile: {get_input: n1kv_vem_uplink_profile} + neutron::agents::n1kv_vem::vtep_config: {get_input: n1kv_vem_vtep_config} + neutron::agents::n1kv_vem::portdb: {get_input: n1kv_vem_portdb} + neutron::agents::n1kv_vem::vteps_in_same_subnet: {get_input: n1kv_vem_vteps_in_same_subnet} + neutron::agents::n1kv_vem::fastpath_flood: {get_input: n1kv_vem_fastpath_flood} + #VSM Parameter + n1kv_vsm_source: {get_input: n1kv_vsm_source} + n1kv_vsm_version: {get_input: n1kv_vsm_version} + n1k_vsm::phy_if_bridge: {get_input: n1kv_vsm_host_mgmt_intf} + n1k_vsm::vsm_role: {get_input: n1kv_vsm_role} + n1k_vsm::pacemaker_control: {get_input: n1kv_vsm_pacemaker_ctrl} + n1k_vsm::existing_bridge: {get_input: n1kv_vsm_existing_br} + n1k_vsm::vsm_admin_passwd: {get_input: n1kv_vsm_password} + n1k_vsm::vsm_domain_id: {get_input: n1kv_vsm_domain_id} + n1k_vsm::vsm_mgmt_ip: {get_input: n1kv_vsm_ip} + n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask} + n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip} + n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip} + n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan} + # Cisco N1KV driver Parameters + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username} + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_password: {get_input: n1kv_vsm_password} + neutron::plugins::ml2::cisco::nexus1000v::poll_duration: {get_input: n1kv_vsm_poll_duration} + neutron::plugins::ml2::cisco::nexus1000v::http_pool_size: {get_input: n1kv_vsm_http_pool_size} + 
neutron::plugins::ml2::cisco::nexus1000v::http_timeout: {get_input: n1kv_vsm_http_timeout} + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_sync_interval: {get_input: n1kv_vsm_sync_interval} + neutron::plugins::ml2::cisco::nexus1000v::max_vsm_retries: {get_input: n1kv_max_vsm_retries} CiscoN1kvDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml index 172484dc..6ee06d78 100644 --- a/puppet/objectstorage-role.yaml +++ b/puppet/objectstorage-role.yaml @@ -447,6 +447,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: config: {get_resource: UpdateConfig} server: {get_resource: SwiftStorage} diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml index 2e1bd6f1..1f68f41f 100644 --- a/puppet/role.role.j2.yaml +++ b/puppet/role.role.j2.yaml @@ -481,6 +481,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: config: {get_resource: UpdateConfig} server: {get_resource: {{role}}} diff --git a/puppet/services/README.rst b/puppet/services/README.rst index e5c11535..f19b6cca 100644 --- a/puppet/services/README.rst +++ b/puppet/services/README.rst @@ -100,11 +100,26 @@ step, "step2" for the second, etc. Steps/tages correlate to the following: - 1) Quiesce the control-plane, e.g disable LoadBalancer, stop pacemaker cluster - - 2) Stop all control-plane services, ready for upgrade - - 3) Perform a package update, (either specific packages or the whole system) + 1) Stop all control-plane services. + + 2) Quiesce the control-plane, e.g. disable LoadBalancer, stop + pacemaker cluster: this will stop the following resources: + - ocata: + - galera + - rabbit + - redis + - haproxy + - vips + - cinder-volumes + - cinder-backup + - manila-share + - rbd-mirror + + The exact order is controlled by the cluster constraints. + + 3) Perform a package update and install new packages: A general + upgrade is done, and only new packages should go into service + ansible tasks.
4) Start services needed for migration tasks (e.g DB) diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml index 4bd9fc47..d7c87b61 100644 --- a/puppet/services/aodh-api.yaml +++ b/puppet/services/aodh-api.yaml @@ -87,5 +87,5 @@ outputs: get_attr: [ApacheServiceBase, role_data, metadata_settings] upgrade_tasks: - name: Stop aodh_api service (running under httpd) - tags: step2 + tags: step1 service: name=httpd state=stopped diff --git a/puppet/services/aodh-evaluator.yaml b/puppet/services/aodh-evaluator.yaml index 56dbb558..b8be4a91 100644 --- a/puppet/services/aodh-evaluator.yaml +++ b/puppet/services/aodh-evaluator.yaml @@ -41,9 +41,16 @@ outputs: step_config: | include tripleo::profile::base::aodh::evaluator upgrade_tasks: + - name: Check if aodh_evaluator is deployed + command: systemctl is-enabled openstack-aodh-evaluator + tags: common + ignore_errors: True + register: aodh_evaluator_enabled - name: "PreUpgrade step0,validation: Check service openstack-aodh-evaluator is running" shell: /usr/bin/systemctl show 'openstack-aodh-evaluator' --property ActiveState | grep '\bactive\b' + when: aodh_evaluator_enabled.rc == 0 tags: step0,validation - name: Stop aodh_evaluator service - tags: step2 + tags: step1 + when: aodh_evaluator_enabled.rc == 0 service: name=openstack-aodh-evaluator state=stopped diff --git a/puppet/services/aodh-listener.yaml b/puppet/services/aodh-listener.yaml index 76db0ca8..f5c9330d 100644 --- a/puppet/services/aodh-listener.yaml +++ b/puppet/services/aodh-listener.yaml @@ -41,9 +41,16 @@ outputs: step_config: | include tripleo::profile::base::aodh::listener upgrade_tasks: + - name: Check if aodh_listener is deployed + command: systemctl is-enabled openstack-aodh-listener + tags: common + ignore_errors: True + register: aodh_listener_enabled - name: "PreUpgrade step0,validation: Check service openstack-aodh-listener is running" shell: /usr/bin/systemctl show 'openstack-aodh-listener' --property ActiveState | grep '\bactive\b' + when: aodh_listener_enabled.rc == 0 tags: step0,validation - name: Stop aodh_listener service - tags: step2 + tags: step1 + when: aodh_listener_enabled.rc == 0 service: name=openstack-aodh-listener state=stopped diff --git a/puppet/services/aodh-notifier.yaml b/puppet/services/aodh-notifier.yaml index 30c67635..84c50dd6 100644 --- a/puppet/services/aodh-notifier.yaml +++ b/puppet/services/aodh-notifier.yaml @@ -41,9 +41,16 @@ outputs: step_config: | include tripleo::profile::base::aodh::notifier upgrade_tasks: + - name: Check if aodh_notifier is deployed + command: systemctl is-enabled openstack-aodh-notifier + tags: common + ignore_errors: True + register: aodh_notifier_enabled - name: "PreUpgrade step0,validation: Check service openstack-aodh-notifier is running" shell: /usr/bin/systemctl show 'openstack-aodh-notifier' --property ActiveState | grep '\bactive\b' + when: aodh_notifier_enabled.rc == 0 tags: step0,validation - name: Stop aodh_notifier service - tags: step2 + tags: step1 + when: aodh_notifier_enabled.rc == 0 service: name=openstack-aodh-notifier state=stopped diff --git a/puppet/services/apache-internal-tls-certmonger.yaml b/puppet/services/apache-internal-tls-certmonger.yaml index 4c21e02a..4c94f440 100644 --- a/puppet/services/apache-internal-tls-certmonger.yaml +++ b/puppet/services/apache-internal-tls-certmonger.yaml @@ -64,6 +64,12 @@ outputs: for_each: $NETWORK: {get_attr: [ApacheNetworks, value]} upgrade_tasks: + - name: Check if httpd is deployed + command: systemctl is-enabled httpd + tags: common + 
ignore_errors: True + register: httpd_enabled - name: "PreUpgrade step0,validation: Check service httpd is running" shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b' + when: httpd_enabled.rc == 0 tags: step0,validation diff --git a/puppet/services/apache.yaml b/puppet/services/apache.yaml index 74ddbde8..2d950151 100644 --- a/puppet/services/apache.yaml +++ b/puppet/services/apache.yaml @@ -67,6 +67,12 @@ outputs: metadata_settings: get_attr: [ApacheTLS, role_data, metadata_settings] upgrade_tasks: + - name: Check if httpd is deployed + command: systemctl is-enabled httpd + tags: common + ignore_errors: True + register: httpd_enabled - name: "PreUpgrade step0,validation: Check service httpd is running" shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b' + when: httpd_enabled.rc == 0 tags: step0,validation diff --git a/puppet/services/auditd.yaml b/puppet/services/auditd.yaml index 639631e1..8085ac8b 100644 --- a/puppet/services/auditd.yaml +++ b/puppet/services/auditd.yaml @@ -32,3 +32,19 @@ outputs: auditd::rules: {get_param: AuditdRules} step_config: | include ::tripleo::profile::base::auditd + upgrade_tasks: + - name: Check if auditd is deployed + command: systemctl is-enabled auditd + tags: common + ignore_errors: True + register: auditd_enabled + - name: "PreUpgrade step0,validation: Check if auditd is running" + shell: > + /usr/bin/systemctl show 'auditd' --property ActiveState | + grep '\bactive\b' + when: auditd_enabled.rc == 0 + tags: step0,validation + - name: Stop auditd service + tags: step2 + when: auditd_enabled.rc == 0 + service: name=auditd state=stopped diff --git a/puppet/services/barbican-api.yaml b/puppet/services/barbican-api.yaml index ffc4c83a..cba92415 100644 --- a/puppet/services/barbican-api.yaml +++ b/puppet/services/barbican-api.yaml @@ -146,6 +146,16 @@ outputs: metadata_settings: get_attr: [ApacheServiceBase, role_data, metadata_settings] upgrade_tasks: + - name: Check if barbican_api is deployed + command: systemctl is-enabled openstack-barbican-api + tags: common + ignore_errors: True + register: barbican_api_enabled - name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running" shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b' + when: barbican_api_enabled.rc == 0 tags: step0,validation + - name: Install openstack-barbican-api package if it was disabled + tags: step3 + yum: name=openstack-barbican-api state=latest + when: barbican_api_enabled.rc != 0 diff --git a/puppet/services/ceilometer-agent-central.yaml b/puppet/services/ceilometer-agent-central.yaml index cf8a8a8e..80823526 100644 --- a/puppet/services/ceilometer-agent-central.yaml +++ b/puppet/services/ceilometer-agent-central.yaml @@ -52,12 +52,20 @@ outputs: map_merge: - get_attr: [CeilometerServiceBase, role_data, config_settings] - ceilometer_redis_password: {get_param: RedisPassword} + central_namespace: true step_config: | - include ::tripleo::profile::base::ceilometer::agent::central + include ::tripleo::profile::base::ceilometer::agent::polling upgrade_tasks: + - name: Check if ceilometer_agent_central is deployed + command: systemctl is-enabled openstack-ceilometer-central + tags: common + ignore_errors: True + register: ceilometer_agent_central_enabled - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-central is running" shell: /usr/bin/systemctl show 'openstack-ceilometer-central' --property ActiveState | grep '\bactive\b' + when: 
ceilometer_agent_central_enabled.rc == 0 tags: step0,validation - name: Stop ceilometer_agent_central service - tags: step2 + tags: step1 + when: ceilometer_agent_central_enabled.rc == 0 service: name=openstack-ceilometer-central state=stopped diff --git a/puppet/services/ceilometer-agent-compute.yaml b/puppet/services/ceilometer-agent-compute.yaml index 00042914..546bcd98 100644 --- a/puppet/services/ceilometer-agent-compute.yaml +++ b/puppet/services/ceilometer-agent-compute.yaml @@ -46,12 +46,20 @@ outputs: map_merge: - get_attr: [CeilometerServiceBase, role_data, config_settings] - ceilometer::agent::compute::instance_discovery_method: {get_param: InstanceDiscoveryMethod} + compute_namespace: true step_config: | - include ::tripleo::profile::base::ceilometer::agent::compute + include ::tripleo::profile::base::ceilometer::agent::polling upgrade_tasks: + - name: Check if ceilometer_agent_compute is deployed + command: systemctl is-enabled openstack-ceilometer-compute + tags: common + ignore_errors: True + register: ceilometer_agent_compute_enabled - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-compute is running" shell: /usr/bin/systemctl show 'openstack-ceilometer-compute' --property ActiveState | grep '\bactive\b' + when: ceilometer_agent_compute_enabled.rc == 0 tags: step0,validation - name: Stop ceilometer_agent_compute service - tags: step2 + tags: step1 + when: ceilometer_agent_compute_enabled.rc == 0 service: name=openstack-ceilometer-compute state=stopped diff --git a/puppet/services/ceilometer-agent-notification.yaml b/puppet/services/ceilometer-agent-notification.yaml index 760acd65..4ee43f49 100644 --- a/puppet/services/ceilometer-agent-notification.yaml +++ b/puppet/services/ceilometer-agent-notification.yaml @@ -50,9 +50,16 @@ outputs: step_config: | include ::tripleo::profile::base::ceilometer::agent::notification upgrade_tasks: + - name: Check if ceilometer_agent_notification is deployed + command: systemctl is-enabled openstack-ceilometer-notification + tags: common + ignore_errors: True + register: ceilometer_agent_notification_enabled - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-notification is running" shell: /usr/bin/systemctl show 'openstack-ceilometer-notification' --property ActiveState | grep '\bactive\b' + when: ceilometer_agent_notification_enabled.rc == 0 tags: step0,validation - name: Stop ceilometer_agent_notification service - tags: step2 + tags: step1 + when: ceilometer_agent_notification_enabled.rc == 0 service: name=openstack-ceilometer-notification state=stopped diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml index 741f8da1..f5ee9d40 100644 --- a/puppet/services/ceilometer-api.yaml +++ b/puppet/services/ceilometer-api.yaml @@ -94,5 +94,5 @@ outputs: get_attr: [ApacheServiceBase, role_data, metadata_settings] upgrade_tasks: - name: Stop ceilometer_api service (running under httpd) - tags: step2 + tags: step1 service: name=httpd state=stopped diff --git a/puppet/services/ceilometer-collector.yaml b/puppet/services/ceilometer-collector.yaml index a219f9eb..b0ec971f 100644 --- a/puppet/services/ceilometer-collector.yaml +++ b/puppet/services/ceilometer-collector.yaml @@ -60,9 +60,16 @@ outputs: step_config: | include ::tripleo::profile::base::ceilometer::collector upgrade_tasks: + - name: Check if ceilometer_collector is deployed + command: systemctl is-enabled openstack-ceilometer-collector + tags: common + ignore_errors: True + register: 
ceilometer_collector_enabled - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-collector is running" shell: /usr/bin/systemctl show 'openstack-ceilometer-collector' --property ActiveState | grep '\bactive\b' + when: ceilometer_collector_enabled.rc == 0 tags: step0,validation - name: Stop ceilometer_collector service - tags: step2 + tags: step1 + when: ceilometer_collector_enabled.rc == 0 service: name=openstack-ceilometer-collector state=stopped diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml index 1ce58335..d589ef89 100644 --- a/puppet/services/ceph-mon.yaml +++ b/puppet/services/ceph-mon.yaml @@ -59,6 +59,14 @@ parameters: } default: {} type: json + CephValidationRetries: + type: number + default: 5 + description: Number of retry attempts for Ceph validation + CephValidationDelay: + type: number + default: 10 + description: Interval (in seconds) in between validation checks MonitoringSubscriptionCephMon: default: 'overcloud-ceph-mon' type: string @@ -119,21 +127,32 @@ outputs: # rolling upgrade of all osd nodes in step1 - name: Check status tags: step0,validation - shell: ceph health | grep -qv HEALTH_ERR - # FIXME(shardy) I suspect we can use heat or ansible facts here instead? - - name: Get hostname + shell: ceph health | egrep -sq "HEALTH_OK|HEALTH_WARN" + - name: Stop CephMon tags: step0 - shell: hostname -s - register: mon_id - - name: Stop Ceph Mon + service: + name: ceph-mon@{{ ansible_hostname }} + state: stopped + - name: Update Ceph packages tags: step0 - service: name=ceph-mon@{{mon_id.stdout}} pattern=ceph-mon state=stopped - - name: Update ceph packages + yum: + name: ceph-mon + state: latest + - name: Start CephMon tags: step0 - yum: name=ceph-mon state=latest - - name: Start ceph-mon service - tags: step0 - service: name=ceph-mon@{{mon_id.stdout}} state=started + service: + name: ceph-mon@{{ ansible_hostname }} + state: started + # ceph-ansible + # https://github.com/ceph/ceph-ansible/blob/master/infrastructure-playbooks/rolling_update.yml#L149-L157 + - name: Wait for the monitor to join the quorum... + tags: step0,ceph_quorum_validation + shell: | + ceph -s | grep monmap | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }} + register: ceph_quorum_nodecheck + until: ceph_quorum_nodecheck.rc == 0 + retries: {get_param: CephValidationRetries} + delay: {get_param: CephValidationDelay} - name: ceph osd crush tunables default tags: step0 shell: ceph osd crush tunables default diff --git a/puppet/services/ceph-osd.yaml b/puppet/services/ceph-osd.yaml index 9bd83aab..a97fa116 100644 --- a/puppet/services/ceph-osd.yaml +++ b/puppet/services/ceph-osd.yaml @@ -21,6 +21,24 @@ parameters: MonitoringSubscriptionCephOsd: default: 'overcloud-ceph-osd' type: string + CephValidationRetries: + type: number + default: 40 + description: Number of retry attempts for Ceph validation + CephValidationDelay: + type: number + default: 30 + description: Interval (in seconds) in between validation checks + IgnoreCephUpgradeWarnings: + type: boolean + default: false + description: If enabled, Ceph upgrade will be forced even though cluster or PGs status is not clean + +parameter_groups: +- label: deprecated + description: Do not use deprecated params, they will be removed. 
+ parameters: + - IgnoreCephUpgradeWarnings resources: CephBase: @@ -66,17 +84,37 @@ outputs: - name: ceph osd set noscrub tags: step1 command: ceph osd set noscrub - - name: Stop Ceph OSD + - name: Stop CephOSD tags: step1 - service: name=ceph-osd@{{ item }} state=stopped + service: + name: ceph-osd@{{ item }} + state: stopped with_items: "{{osd_ids.stdout.strip().split()}}" - - name: Update ceph OSD packages + - name: Update Ceph packages tags: step1 - yum: name=ceph-osd state=latest - - name: Start ceph-osd service + yum: + name: ceph-osd + state: latest + - name: Start CephOSD tags: step1 - service: name=ceph-osd@{{ item }} state=started + service: + name: ceph-osd@{{ item }} + state: started with_items: "{{osd_ids.stdout.strip().split()}}" + # with awk we are meant to check if $2 and $4 are *the same* but it returns 1 when + # they are, so the check is inverted to produce an useful exit code + - name: Wait for clean pgs... + tags: step1,ceph_pgs_clean_validation + vars: + ignore_warnings: {get_param: IgnoreCephUpgradeWarnings} + shell: | + ceph pg stat | awk '{exit($2!=$4)}' && ceph health | egrep -sq "HEALTH_OK|HEALTH_WARN" + register: ceph_pgs_healthcheck + until: ceph_pgs_healthcheck.rc == 0 + retries: {get_param: CephValidationRetries} + delay: {get_param: CephValidationDelay} + when: + - not ignore_warnings - name: ceph osd unset noout tags: step1 command: ceph osd unset noout diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml index d7014e54..01531971 100644 --- a/puppet/services/ceph-rgw.yaml +++ b/puppet/services/ceph-rgw.yaml @@ -79,12 +79,21 @@ outputs: ceph::rgw::keystone::auth::password: {get_param: SwiftPassword} upgrade_tasks: - name: Gather RGW instance ID - tags: step0 + tags: common shell: hiera -c /etc/puppet/hiera.yaml ceph::profile::params::rgw_name radosgw.gateway register: rgw_id + - name: Check if ceph_rgw is deployed + command: systemctl is-enabled ceph-radosgw@{{rgw_id.stdout}} + tags: common + ignore_errors: True + register: ceph_rgw_enabled - name: Check status shell: /usr/bin/systemctl show ceph-radosgw@{{rgw_id.stdout}} --property ActiveState | grep '\bactive\b' + when: ceph_rgw_enabled.rc == 0 tags: step0,validation - name: Stop RGW instance tags: step1 - service: name=ceph-radosgw@{{rgw_id.stdout}} state=stopped + when: ceph_rgw_enabled.rc == 0 + service: + name: ceph-radosgw@{{rgw_id.stdout}} + state: stopped diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml index 8c5a07ac..49a5f613 100644 --- a/puppet/services/cinder-api.yaml +++ b/puppet/services/cinder-api.yaml @@ -86,7 +86,8 @@ outputs: cinder::keystone::authtoken::project_name: 'service' cinder::api::enable_proxy_headers_parsing: true - cinder::api::nova_catalog_info: 'compute:Compute Service:internalURL' + cinder::api::nova_catalog_info: 'compute:nova:internalURL' + cinder::api::nova_catalog_admin_info: 'compute:nova:adminURL' # TODO(emilien) move it to puppet-cinder cinder::config: DEFAULT/swift_catalog_info: @@ -149,18 +150,25 @@ outputs: metadata_settings: get_attr: [ApacheServiceBase, role_data, metadata_settings] upgrade_tasks: + - name: Check if cinder_api is deployed + command: systemctl is-enabled openstack-cinder-api + tags: common + ignore_errors: True + register: cinder_api_enabled - name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running" shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b' + when: cinder_api_enabled.rc == 0 tags: step0,validation - name: check for 
cinder running under apache (post upgrade) - tags: step2 - shell: "apachectl -t -D DUMP_VHOSTS | grep -q cinder" + tags: step1 + shell: "httpd -t -D DUMP_VHOSTS | grep -q cinder" register: cinder_apache ignore_errors: true - name: Stop cinder_api service (running under httpd) - tags: step2 + tags: step1 service: name=httpd state=stopped when: "cinder_apache.rc == 0" - name: Stop and disable cinder_api service (pre-upgrade not under httpd) - tags: step2 + tags: step1 + when: cinder_api_enabled.rc == 0 service: name=openstack-cinder-api state=stopped enabled=no diff --git a/puppet/services/cinder-scheduler.yaml b/puppet/services/cinder-scheduler.yaml index f102810e..f8361f6f 100644 --- a/puppet/services/cinder-scheduler.yaml +++ b/puppet/services/cinder-scheduler.yaml @@ -52,9 +52,16 @@ outputs: step_config: | include ::tripleo::profile::base::cinder::scheduler upgrade_tasks: + - name: Check if cinder_scheduler is deployed + command: systemctl is-enabled openstack-cinder-scheduler + tags: common + ignore_errors: True + register: cinder_scheduler_enabled - name: "PreUpgrade step0,validation: Check service openstack-cinder-scheduler is running" shell: /usr/bin/systemctl show 'openstack-cinder-scheduler' --property ActiveState | grep '\bactive\b' + when: cinder_scheduler_enabled.rc == 0 tags: step0,validation - name: Stop cinder_scheduler service - tags: step2 + tags: step1 + when: cinder_scheduler_enabled.rc == 0 service: name=openstack-cinder-scheduler state=stopped diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml index 3a06afb8..b52955ef 100644 --- a/puppet/services/cinder-volume.yaml +++ b/puppet/services/cinder-volume.yaml @@ -116,9 +116,16 @@ outputs: step_config: | include ::tripleo::profile::base::cinder::volume upgrade_tasks: + - name: Check if cinder_volume is deployed + command: systemctl is-enabled openstack-cinder-volume + tags: common + ignore_errors: True + register: cinder_volume_enabled - name: "PreUpgrade step0,validation: Check service openstack-cinder-volume is running" shell: /usr/bin/systemctl show 'openstack-cinder-volume' --property ActiveState | grep '\bactive\b' + when: cinder_volume_enabled.rc == 0 tags: step0,validation - name: Stop cinder_volume service - tags: step2 + tags: step1 + when: cinder_volume_enabled.rc == 0 service: name=openstack-cinder-volume state=stopped diff --git a/puppet/services/congress.yaml b/puppet/services/congress.yaml index aa8d9a9a..8bc9f2e3 100644 --- a/puppet/services/congress.yaml +++ b/puppet/services/congress.yaml @@ -65,9 +65,6 @@ outputs: - {get_param: [EndpointMap, MysqlInternal, host]} - '/congress' - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' - congress::keystone::auth::tenant: 'service' - congress::keystone::auth::password: {get_param: CongressPassword} - congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} congress::debug: {get_param: Debug} congress::rpc_backend: rabbit congress::rabbit_userid: {get_param: RabbitUserName} @@ -76,6 +73,10 @@ outputs: congress::rabbit_port: {get_param: RabbitClientPort} congress::server::bind_host: {get_param: [ServiceNetMap, CongressApiNetwork]} + congress::keystone::authtoken::project_name: 'service' + congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + congress::db::mysql::password: {get_param: CongressPassword} congress::db::mysql::user: congress 
congress::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} @@ -84,6 +85,32 @@ outputs: - '%' - {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + service_config_settings: + keystone: + congress::keystone::auth::tenant: 'service' + congress::keystone::auth::password: {get_param: CongressPassword} + congress::keystone::auth::public_url: {get_param: [EndpointMap, CongressPublic, uri]} + congress::keystone::auth::internal_url: {get_param: [EndpointMap, CongressInternal, uri]} + congress::keystone::auth::admin_url: {get_param: [EndpointMap, CongressAdmin, uri]} step_config: | include ::tripleo::profile::base::congress + + upgrade_tasks: + - name: Check if congress is deployed + command: systemctl is-enabled openstack-congress-server + tags: common + ignore_errors: True + register: congress_enabled + - name: "PreUpgrade step0,validation: Check service openstack-congress-server is running" + shell: /usr/bin/systemctl show 'openstack-congress-server' --property ActiveState | grep '\bactive\b' + when: congress_enabled.rc == 0 + tags: step0,validation + - name: Stop congress service + tags: step1 + when: congress_enabled.rc == 0 + service: name=openstack-congress-server state=stopped + - name: Install openstack-congress package if it was disabled + tags: step3 + yum: name=openstack-congress state=latest + when: congress_enabled.rc != 0 diff --git a/puppet/services/database/mysql-client.yaml b/puppet/services/database/mysql-client.yaml index 1415391c..78456e28 100644 --- a/puppet/services/database/mysql-client.yaml +++ b/puppet/services/database/mysql-client.yaml @@ -18,6 +18,9 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + EnableInternalTLS: + type: boolean + default: false outputs: role_data: @@ -25,6 +28,7 @@ outputs: value: service_name: mysql_client config_settings: - tripleo::profile::base:database::mysql::client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]} + tripleo::profile::base::database::mysql::client::mysql_client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]} + tripleo::profile::base::database::mysql::client::enable_ssl: {get_param: EnableInternalTLS} step_config: | include ::tripleo::profile::base::database::mysql::client diff --git a/puppet/services/disabled/glance-registry.yaml b/puppet/services/disabled/glance-registry.yaml index 4d22bddc..7bf4a1fd 100644 --- a/puppet/services/disabled/glance-registry.yaml +++ b/puppet/services/disabled/glance-registry.yaml @@ -26,5 +26,5 @@ outputs: service_name: glance_registry upgrade_tasks: - name: Stop and disable glance_registry service on upgrade - tags: step2 + tags: step1 service: name=openstack-glance-registry state=stopped enabled=no diff --git a/puppet/services/docker.yaml b/puppet/services/docker.yaml new file mode 100644 index 00000000..e7da2383 --- /dev/null +++ b/puppet/services/docker.yaml @@ -0,0 +1,43 @@ +heat_template_version: ocata + +description: > + Configures docker on the host + +parameters: + DockerNamespace: + description: namespace + default: tripleoupstream + type: string + DockerNamespaceIsRegistry: + type: boolean + default: false + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. 
This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +outputs: + role_data: + description: Role data for the docker service + value: + service_name: docker + config_settings: + tripleo::profile::base::docker::docker_namespace: {get_param: DockerNamespace} + tripleo::profile::base::docker::insecure_registry: {get_param: DockerNamespaceIsRegistry} + step_config: | + include ::tripleo::profile::base::docker + upgrade_tasks: + - name: Install docker packages on upgrade if missing + tags: step3 + yum: name=docker state=latest + diff --git a/puppet/services/ec2-api.yaml b/puppet/services/ec2-api.yaml index bb10140e..10f6d311 100644 --- a/puppet/services/ec2-api.yaml +++ b/puppet/services/ec2-api.yaml @@ -72,13 +72,13 @@ outputs: ec2api::api::ec2api_listen: str_replace: template: - '"%{::fqdn_$NETWORK}"' + "%{hiera('fqdn_$NETWORK')}" params: $NETWORK: {get_param: [ServiceNetMap, Ec2ApiNetwork]} ec2api::metadata::metadata_listen: str_replace: template: - '"%{::fqdn_$NETWORK}"' + "%{hiera('fqdn_$NETWORK')}" params: $NETWORK: {get_param: [ServiceNetMap, Ec2ApiMetadataNetwork]} ec2api::db::database_connection: @@ -115,3 +115,24 @@ outputs: ec2api::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + upgrade_tasks: + - name: Check if ec2-api is deployed + command: systemctl is-enabled openstack-ec2-api + tags: common + ignore_errors: True + register: ec2_api_enabled + - name: "PreUpgrade step0,validation: Check if openstack-ec2-api is running" + shell: > + /usr/bin/systemctl show 'openstack-ec2-api' --property ActiveState | + grep '\bactive\b' + when: ec2_api_enabled.rc == 0 + tags: step0,validation + - name: Stop openstack-ec2-api service + tags: step1 + when: ec2_api_enabled.rc == 0 + service: name=openstack-ec2-api state=stopped + - name: Install openstack-ec2-api package if it was disabled + tags: step3 + yum: name=openstack-ec2-api state=latest + when: ec2_api_enabled.rc != 0 + diff --git a/puppet/services/etcd.yaml b/puppet/services/etcd.yaml index f96fa723..7cdd8451 100644 --- a/puppet/services/etcd.yaml +++ b/puppet/services/etcd.yaml @@ -36,7 +36,7 @@ outputs: etcd::etcd_name: str_replace: template: - '"%{::fqdn_$NETWORK}"' + "%{hiera('fqdn_$NETWORK')}" params: $NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]} # NOTE: bind IP is found in Heat replacing the network name with the local node IP @@ -56,3 +56,18 @@ outputs: - 2380 step_config: | include ::tripleo::profile::base::etcd + upgrade_tasks: + - name: Check if etcd is deployed + command: systemctl is-enabled etcd + tags: step0,validation + ignore_errors: True + register: etcd_enabled + - name: "PreUpgrade step0,validation: Check if etcd is running" + shell: > + /usr/bin/systemctl show 'etcd' --property ActiveState | + grep '\bactive\b' + when: etcd_enabled.rc == 0 + tags: step0,validation + - name: Stop etcd service + tags: step2 + service: name=etcd state=stopped diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml index d26d96aa..ce389dc1 100644 --- a/puppet/services/glance-api.yaml +++ b/puppet/services/glance-api.yaml @@ -137,12 +137,26 @@ outputs: service_config_settings: get_attr: [GlanceBase, role_data, service_config_settings] upgrade_tasks: + - name: Check if glance_api is deployed + command: systemctl is-enabled openstack-glance-api + tags: common + ignore_errors: True + register: glance_api_enabled + #(TODO) Remove all glance-registry bits in Pike. 
+ - name: Check if glance_registry is deployed + command: systemctl is-enabled openstack-glance-registry + tags: common + ignore_errors: True + register: glance_registry_enabled - name: "PreUpgrade step0,validation: Check service openstack-glance-api is running" shell: /usr/bin/systemctl show 'openstack-glance-api' --property ActiveState | grep '\bactive\b' tags: step0,validation + when: glance_api_enabled.rc == 0 - name: Stop glance_api service - tags: step2 + tags: step1 + when: glance_api_enabled.rc == 0 service: name=openstack-glance-api state=stopped - name: Stop and disable glance registry (removed for Ocata) - tags: step2 + tags: step1 + when: glance_registry_enabled.rc == 0 service: name=openstack-glance-registry state=stopped enabled=no diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml index 22c0967e..08a939a6 100644 --- a/puppet/services/gnocchi-api.yaml +++ b/puppet/services/gnocchi-api.yaml @@ -129,5 +129,5 @@ outputs: get_attr: [ApacheServiceBase, role_data, metadata_settings] upgrade_tasks: - name: Stop gnocchi_api service (running under httpd) - tags: step2 + tags: step1 service: name=httpd state=stopped diff --git a/puppet/services/gnocchi-metricd.yaml b/puppet/services/gnocchi-metricd.yaml index 1337b0cb..9d76c2e7 100644 --- a/puppet/services/gnocchi-metricd.yaml +++ b/puppet/services/gnocchi-metricd.yaml @@ -47,9 +47,16 @@ outputs: step_config: | include ::tripleo::profile::base::gnocchi::metricd upgrade_tasks: + - name: Check if gnocchi_metricd is deployed + command: systemctl is-enabled openstack-gnocchi-metricd + tags: common + ignore_errors: True + register: gnocchi_metricd_enabled - name: "PreUpgrade step0,validation: Check service openstack-gnocchi-metricd is running" shell: /usr/bin/systemctl show 'openstack-gnocchi-metricd' --property ActiveState | grep '\bactive\b' + when: gnocchi_metricd_enabled.rc == 0 tags: step0,validation - name: Stop gnocchi_metricd service - tags: step2 + tags: step1 + when: gnocchi_metricd_enabled.rc == 0 service: name=openstack-gnocchi-metricd state=stopped diff --git a/puppet/services/gnocchi-statsd.yaml b/puppet/services/gnocchi-statsd.yaml index 41222a79..bb8d3bce 100644 --- a/puppet/services/gnocchi-statsd.yaml +++ b/puppet/services/gnocchi-statsd.yaml @@ -46,9 +46,16 @@ outputs: step_config: | include ::tripleo::profile::base::gnocchi::statsd upgrade_tasks: + - name: Check if gnocchi_statsd is deployed + command: systemctl is-enabled openstack-gnocchi-statsd + tags: common + ignore_errors: True + register: gnocchi_statsd_enabled - name: "PreUpgrade step0,validation: Check service openstack-gnocchi-statsd is running" shell: /usr/bin/systemctl show 'openstack-gnocchi-statsd' --property ActiveState | grep '\bactive\b' + when: gnocchi_statsd_enabled.rc == 0 tags: step0,validation - name: Stop gnocchi_statsd service - tags: step2 + tags: step1 + when: gnocchi_statsd_enabled.rc == 0 service: name=openstack-gnocchi-statsd state=stopped diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml index 358698dd..bd5b9ef6 100644 --- a/puppet/services/haproxy.yaml +++ b/puppet/services/haproxy.yaml @@ -78,14 +78,22 @@ outputs: step_config: | include ::tripleo::profile::base::haproxy upgrade_tasks: + - name: Check if haproxy is deployed + command: systemctl is-enabled haproxy + tags: common + ignore_errors: True + register: haproxy_enabled - name: "PreUpgrade step0,validation: Check service haproxy is running" shell: /usr/bin/systemctl show 'haproxy' --property ActiveState | grep '\bactive\b' + when: 
haproxy_enabled.rc == 0 tags: step0,validation - name: Stop haproxy service - tags: step1 + tags: step2 + when: haproxy_enabled.rc == 0 service: name=haproxy state=stopped - name: Start haproxy service tags: step4 # Needed at step 4 for mysql + when: haproxy_enabled.rc == 0 service: name=haproxy state=started metadata_settings: yaql: diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml index 7bd2fcf1..483f0a45 100644 --- a/puppet/services/heat-api-cfn.yaml +++ b/puppet/services/heat-api-cfn.yaml @@ -85,9 +85,16 @@ outputs: heat::keystone::auth_cfn::password: {get_param: HeatPassword} heat::keystone::auth_cfn::region: {get_param: KeystoneRegion} upgrade_tasks: + - name: Check if heat_api_cfn is deployed + command: systemctl is-enabled openstack-heat-api-cfn + tags: common + ignore_errors: True + register: heat_api_cfn_enabled - name: "PreUpgrade step0,validation: Check service openstack-heat-api-cfn is running" - shell: echo o/ #TODO uncomment when /#/c/423302/ : /usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\bactive\b' + shell: /usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\bactive\b' + when: heat_api_cfn_enabled.rc == 0 tags: step0,validation - name: Stop heat_api_cfn service - tags: step2 + tags: step1 + when: heat_api_cfn_enabled.rc == 0 service: name=openstack-heat-api-cfn state=stopped diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml index 0954ad19..8879bcb2 100644 --- a/puppet/services/heat-api-cloudwatch.yaml +++ b/puppet/services/heat-api-cloudwatch.yaml @@ -67,9 +67,16 @@ outputs: step_config: | include ::tripleo::profile::base::heat::api_cloudwatch upgrade_tasks: + - name: Check if heat_api_cloudwatch is deployed + command: systemctl is-enabled openstack-heat-api-cloudwatch + tags: common + ignore_errors: True + register: heat_api_cloudwatch_enabled - name: "PreUpgrade step0,validation: Check service openstack-heat-api-cloudwatch is running" - shell: echo o/ #TODO uncomment when /#/c/423302/ : /usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\bactive\b' + shell: /usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\bactive\b' + when: heat_api_cloudwatch_enabled.rc == 0 tags: step0,validation - name: Stop heat_api_cloudwatch service - tags: step2 + tags: step1 + when: heat_api_cloudwatch_enabled.rc == 0 service: name=openstack-heat-api-cloudwatch state=stopped diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml index ae656b1e..2464011b 100644 --- a/puppet/services/heat-api.yaml +++ b/puppet/services/heat-api.yaml @@ -85,9 +85,16 @@ outputs: heat::keystone::auth::password: {get_param: HeatPassword} heat::keystone::auth::region: {get_param: KeystoneRegion} upgrade_tasks: + - name: Check if heat_api is deployed + command: systemctl is-enabled openstack-heat-api + tags: common + ignore_errors: True + register: heat_api_enabled - name: "PreUpgrade step0,validation: Check service openstack-heat-api is running" - shell: echo o/ #TODO uncomment when /#/c/423302/ : /usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\bactive\b' + shell: /usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\bactive\b' + when: heat_api_enabled.rc == 0 tags: step0,validation - name: Stop heat_api service - tags: step2 + tags: step1 + when: heat_api_enabled.rc == 0 service: name=openstack-heat-api state=stopped diff --git
a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml index b4d314f4..e83a9edd 100644 --- a/puppet/services/heat-base.yaml +++ b/puppet/services/heat-base.yaml @@ -99,6 +99,10 @@ parameters: description: > Cron to purge db entries marked as deleted and older than $age - Log destination default: '/dev/null' + HeatMaxJsonBodySize: + default: 1048576 + description: Maximum raw byte size of the Heat API JSON request body. + type: number outputs: role_data: @@ -142,6 +146,7 @@ outputs: heat::cron::purge_deleted::age: {get_param: HeatCronPurgeDeletedAge} heat::cron::purge_deleted::age_type: {get_param: HeatCronPurgeDeletedAgeType} heat::cron::purge_deleted::destination: {get_param: HeatCronPurgeDeletedDestination} + heat::max_json_body_size: {get_param: HeatMaxJsonBodySize} service_config_settings: keystone: tripleo::profile::base::keystone::heat_admin_domain: 'heat_stack' diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml index f7ec9a41..a166f3a7 100644 --- a/puppet/services/heat-engine.yaml +++ b/puppet/services/heat-engine.yaml @@ -137,9 +137,16 @@ outputs: # This is needed because the keystone profile handles creating the domain tripleo::profile::base::keystone::heat_admin_password: {get_param: HeatStackDomainAdminPassword} upgrade_tasks: + - name: Check if heat_engine is deployed + command: systemctl is-enabled openstack-heat-engine + tags: common + ignore_errors: True + register: heat_engine_enabled - name: "PreUpgrade step0,validation: Check service openstack-heat-engine is running" - shell: echo o/ #TODO uncomment when /#/c/423302/ : /usr/bin/systemctl show 'openstack-heat-engine' --property ActiveState | grep '\bactive\b' + shell: /usr/bin/systemctl show 'openstack-heat-engine' --property ActiveState | grep '\bactive\b' + when: heat_engine_enabled.rc == 0 tags: step0,validation - name: Stop heat_engine service - tags: step2 + tags: step1 + when: heat_engine_enabled.rc == 0 service: name=openstack-heat-engine state=stopped diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml index 2111021b..60b009a8 100644 --- a/puppet/services/horizon.yaml +++ b/puppet/services/horizon.yaml @@ -96,3 +96,20 @@ outputs: - horizon::django_debug: {get_param: Debug} step_config: | include ::tripleo::profile::base::horizon + # Ansible tasks to handle upgrade + upgrade_tasks: + - name: Check if httpd is deployed + command: systemctl is-enabled httpd + tags: common + ignore_errors: True + register: httpd_enabled + - name: "PreUpgrade step0,validation: Check if httpd is running" + shell: > + /usr/bin/systemctl show 'httpd' --property ActiveState | + grep '\bactive\b' + when: httpd_enabled.rc == 0 + tags: step0,validation + - name: Stop Horizon (under httpd) + tags: step1 + when: httpd_enabled.rc == 0 + service: name=httpd state=stopped diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml index a84df538..7aab6f8d 100644 --- a/puppet/services/ironic-api.yaml +++ b/puppet/services/ironic-api.yaml @@ -88,5 +88,5 @@ outputs: - "%{hiera('mysql_bind_host')}" upgrade_tasks: - name: Stop ironic_api service - tags: step2 + tags: step1 service: name=openstack-ironic-api state=stopped diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml index 739db13c..f9547bef 100644 --- a/puppet/services/ironic-conductor.yaml +++ b/puppet/services/ironic-conductor.yaml @@ -109,5 +109,5 @@ outputs: include ::tripleo::profile::base::ironic::conductor upgrade_tasks: - name: Stop ironic_conductor service - tags: step2 
+ tags: step1 service: name=openstack-ironic-conductor state=stopped diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml index 29157959..fec455d1 100644 --- a/puppet/services/kernel.yaml +++ b/puppet/services/kernel.yaml @@ -31,6 +31,7 @@ outputs: config_settings: kernel_modules: nf_conntrack: {} + ip_conntrack_proto_sctp: {} sysctl_settings: net.ipv4.tcp_keepalive_intvl: value: 1 diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml index 9c4cc60f..f40c8d99 100644 --- a/puppet/services/keystone.yaml +++ b/puppet/services/keystone.yaml @@ -226,6 +226,7 @@ outputs: keystone::endpoint::internal_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} keystone::endpoint::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} keystone::endpoint::region: {get_param: KeystoneRegion} + keystone::endpoint::version: '' keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge} keystone::rabbit_heartbeat_timeout_threshold: 60 keystone::cron::token_flush::maxdelay: 3600 @@ -307,7 +308,7 @@ outputs: # Ansible tasks to handle upgrade upgrade_tasks: - name: Stop keystone service (running under httpd) - tags: step2 + tags: step1 service: name=httpd state=stopped metadata_settings: get_attr: [ApacheServiceBase, role_data, metadata_settings] diff --git a/puppet/services/logging/fluentd-client.yaml b/puppet/services/logging/fluentd-client.yaml index 94c63d33..57595b82 100644 --- a/puppet/services/logging/fluentd-client.yaml +++ b/puppet/services/logging/fluentd-client.yaml @@ -63,11 +63,22 @@ outputs: step_config: | include ::tripleo::profile::base::logging::fluentd upgrade_tasks: + - name: Check if fluentd_client is deployed + command: systemctl is-enabled fluentd + tags: common + ignore_errors: True + register: fluentd_client_enabled - name: Check status of fluentd service shell: > /usr/bin/systemctl show fluentd --property ActiveState | grep '\bactive\b' + when: fluentd_client_enabled.rc == 0 tags: step0,validation - name: Stop fluentd service - tags: step2 + tags: step1 + when: fluentd_client_enabled.rc == 0 service: name=fluentd state=stopped + - name: Install fluentd package if it was disabled + tags: step3 + yum: name=fluentd state=latest + when: fluentd_client_enabled.rc != 0 diff --git a/puppet/services/metrics/collectd.yaml b/puppet/services/metrics/collectd.yaml index a3e3b842..49b2d4c2 100644 --- a/puppet/services/metrics/collectd.yaml +++ b/puppet/services/metrics/collectd.yaml @@ -110,11 +110,22 @@ outputs: step_config: | include ::tripleo::profile::base::metrics::collectd upgrade_tasks: + - name: Check if collectd is deployed + command: systemctl is-enabled collectd + tags: common + ignore_errors: True + register: collectd_enabled - name: Check status of collectd service shell: > /usr/bin/systemctl show collectd --property ActiveState | grep '\bactive\b' + when: collectd_enabled.rc == 0 tags: step0,validation - name: Stop collectd service - tags: step2 + tags: step1 + when: collectd_enabled.rc == 0 service: name=collectd state=stopped + - name: Install collectd package if it was disabled + tags: step3 + yum: name=collectd state=latest + when: collectd_enabled.rc != 0 diff --git a/puppet/services/mistral-api.yaml b/puppet/services/mistral-api.yaml index daa1dc7c..1c7d6bd3 100644 --- a/puppet/services/mistral-api.yaml +++ b/puppet/services/mistral-api.yaml @@ -50,3 +50,22 @@ outputs: get_attr: [MistralBase, role_data, service_config_settings] step_config: | include ::tripleo::profile::base::mistral::api + upgrade_tasks: + - name: 
Check if mistral api is deployed + command: systemctl is-enabled openstack-mistral-api + tags: common + ignore_errors: True + register: mistral_api_enabled + - name: "PreUpgrade step0,validation: Check if openstack-mistral-api is running" + shell: > + /usr/bin/systemctl show 'openstack-mistral-api' --property ActiveState | + grep '\bactive\b' + when: mistral_api_enabled.rc == 0 + tags: step0,validation + - name: Stop mistral_api service + tags: step1 + service: name=openstack-mistral-api state=stopped + - name: Install openstack-mistral-api package if it was disabled + tags: step3 + yum: name=openstack-mistral-api state=latest + when: mistral_api_enabled.rc != 0 diff --git a/puppet/services/mistral-engine.yaml b/puppet/services/mistral-engine.yaml index 4a92b863..03a2a55c 100644 --- a/puppet/services/mistral-engine.yaml +++ b/puppet/services/mistral-engine.yaml @@ -36,3 +36,22 @@ outputs: get_attr: [MistralBase, role_data, config_settings] step_config: | include ::tripleo::profile::base::mistral::engine + upgrade_tasks: + - name: Check if mistral engine is deployed + command: systemctl is-enabled openstack-mistral-engine + tags: common + ignore_errors: True + register: mistral_engine_enabled + - name: "PreUpgrade step0,validation: Check if openstack-mistral-engine is running" + shell: > + /usr/bin/systemctl show 'openstack-mistral-engine' --property ActiveState | + grep '\bactive\b' + when: mistral_engine_enabled.rc == 0 + tags: step0,validation + - name: Stop mistral_engine service + tags: step1 + service: name=openstack-mistral-engine state=stopped + - name: Install openstack-mistral-engine package if it was disabled + tags: step3 + yum: name=openstack-mistral-engine state=latest + when: mistral_engine_enabled.rc != 0 diff --git a/puppet/services/mistral-executor.yaml b/puppet/services/mistral-executor.yaml index 6e273b92..0f6adb07 100644 --- a/puppet/services/mistral-executor.yaml +++ b/puppet/services/mistral-executor.yaml @@ -36,3 +36,22 @@ outputs: get_attr: [MistralBase, role_data, config_settings] step_config: | include ::tripleo::profile::base::mistral::executor + upgrade_tasks: + - name: Check if mistral executor is deployed + command: systemctl is-enabled openstack-mistral-executor + tags: common + ignore_errors: True + register: mistral_executor_enabled + - name: "PreUpgrade step0,validation: Check if openstack-mistral-executor is running" + shell: > + /usr/bin/systemctl show 'openstack-mistral-executor' --property ActiveState | + grep '\bactive\b' + when: mistral_executor_enabled.rc == 0 + tags: step0,validation + - name: Stop mistral_executor service + tags: step1 + service: name=openstack-mistral-executor state=stopped + - name: Install openstack-mistral-executor package if it was disabled + tags: step3 + yum: name=openstack-mistral-executor state=latest + when: mistral_executor_enabled.rc != 0 diff --git a/puppet/services/monitoring/sensu-client.yaml b/puppet/services/monitoring/sensu-client.yaml index d74a68a2..aba2b1ed 100644 --- a/puppet/services/monitoring/sensu-client.yaml +++ b/puppet/services/monitoring/sensu-client.yaml @@ -63,11 +63,22 @@ outputs: step_config: | include ::tripleo::profile::base::monitoring::sensu upgrade_tasks: + - name: Check if sensu_client is deployed + command: systemctl is-enabled sensu-client + tags: common + ignore_errors: True + register: sensu_client_enabled - name: Check status of sensu-client service shell: > /usr/bin/systemctl show sensu-client --property ActiveState | grep '\bactive\b' + when: sensu_client_enabled.rc == 0 tags: 
step0,validation - name: Stop sensu-client service - tags: step2 + tags: step1 + when: sensu_client_enabled.rc == 0 service: name=sensu-client state=stopped + - name: Install sensu package if it was disabled + tags: step3 + yum: name=sensu state=latest + when: sensu_client_enabled.rc != 0 diff --git a/puppet/services/neutron-api.yaml b/puppet/services/neutron-api.yaml index 4d671e15..bb191ff0 100644 --- a/puppet/services/neutron-api.yaml +++ b/puppet/services/neutron-api.yaml @@ -189,9 +189,16 @@ outputs: - '%' - "%{hiera('mysql_bind_host')}" upgrade_tasks: + - name: Check if neutron_server is deployed + command: systemctl is-enabled neutron-server + tags: common + ignore_errors: True + register: neutron_server_enabled - name: "PreUpgrade step0,validation: Check service neutron-server is running" shell: /usr/bin/systemctl show 'neutron-server' --property ActiveState | grep '\bactive\b' + when: neutron_server_enabled.rc == 0 tags: step0,validation - name: Stop neutron_api service - tags: step2 + tags: step1 + when: neutron_server_enabled.rc == 0 service: name=neutron-server state=stopped diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml index 43657bd9..55361939 100644 --- a/puppet/services/neutron-base.yaml +++ b/puppet/services/neutron-base.yaml @@ -24,7 +24,7 @@ parameters: type: number NeutronDhcpAgentsPerNetwork: type: number - default: 3 + default: 0 description: The number of neutron dhcp agents to schedule per network NeutronCorePlugin: default: 'ml2' @@ -72,24 +72,31 @@ parameters: via parameter_defaults in the resource registry. type: json +conditions: + dhcp_agents_zero: {equals : [{get_param: NeutronDhcpAgentsPerNetwork}, 0]} + outputs: role_data: description: Role data for the Neutron base service. value: service_name: neutron_base config_settings: - neutron::rabbit_password: {get_param: RabbitPassword} - neutron::rabbit_user: {get_param: RabbitUserName} - neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL} - neutron::rabbit_port: {get_param: RabbitClientPort} - neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} - neutron::core_plugin: {get_param: NeutronCorePlugin} - neutron::service_plugins: {get_param: NeutronServicePlugins} - neutron::debug: {get_param: Debug} - neutron::purge_config: {get_param: EnableConfigPurge} - neutron::allow_overlapping_ips: true - neutron::rabbit_heartbeat_timeout_threshold: 60 - neutron::host: '%{::fqdn}' - neutron::db::database_db_max_retries: -1 - neutron::db::database_max_retries: -1 - neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu} + map_merge: + - neutron::rabbit_password: {get_param: RabbitPassword} + neutron::rabbit_user: {get_param: RabbitUserName} + neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + neutron::rabbit_port: {get_param: RabbitClientPort} + neutron::core_plugin: {get_param: NeutronCorePlugin} + neutron::service_plugins: {get_param: NeutronServicePlugins} + neutron::debug: {get_param: Debug} + neutron::purge_config: {get_param: EnableConfigPurge} + neutron::allow_overlapping_ips: true + neutron::rabbit_heartbeat_timeout_threshold: 60 + neutron::host: '%{::fqdn}' + neutron::db::database_db_max_retries: -1 + neutron::db::database_max_retries: -1 + neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu} + - if: + - dhcp_agents_zero + - {} + - tripleo::profile::base::neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml index
062edaa4..fe7f9f31 100644 --- a/puppet/services/neutron-dhcp.yaml +++ b/puppet/services/neutron-dhcp.yaml @@ -80,9 +80,16 @@ outputs: step_config: | include tripleo::profile::base::neutron::dhcp upgrade_tasks: + - name: Check if neutron_dhcp_agent is deployed + command: systemctl is-enabled neutron-dhcp-agent + tags: common + ignore_errors: True + register: neutron_dhcp_agent_enabled - name: "PreUpgrade step0,validation: Check service neutron-dhcp-agent is running" shell: /usr/bin/systemctl show 'neutron-dhcp-agent' --property ActiveState | grep '\bactive\b' + when: neutron_dhcp_agent_enabled.rc == 0 tags: step0,validation - name: Stop neutron_dhcp service - tags: step2 + tags: step1 + when: neutron_dhcp_agent_enabled.rc == 0 service: name=neutron-dhcp-agent state=stopped diff --git a/puppet/services/neutron-l3-compute-dvr.yaml b/puppet/services/neutron-l3-compute-dvr.yaml index 06927fe0..1d6a2371 100644 --- a/puppet/services/neutron-l3-compute-dvr.yaml +++ b/puppet/services/neutron-l3-compute-dvr.yaml @@ -22,10 +22,6 @@ parameters: Debug: type: string default: '' - NeutronExternalNetworkBridge: - description: Name of bridge used for external network traffic. - type: string - default: 'br-ex' MonitoringSubscriptionNeutronL3Dvr: default: 'overcloud-neutron-l3-dvr' type: string @@ -35,6 +31,19 @@ parameters: tag: openstack.neutron.agent.l3-compute path: /var/log/neutron/l3-agent.log + # DEPRECATED: the following options are deprecated and are currently maintained + # for backwards compatibility. They will be removed in the Pike cycle. + NeutronExternalNetworkBridge: + description: Name of bridge used for external network traffic. Usually L2 + agent handles port wiring into external bridge, and hence the + parameter should be unset. + type: string + default: '' + +conditions: + + external_network_bridge_empty: {equals : [{get_param: NeutronExternalNetworkBridge}, "''"]} + resources: NeutronBase: @@ -56,7 +65,11 @@ outputs: config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] - - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge} - neutron::agents::l3::agent_mode : 'dvr' + - neutron::agents::l3::agent_mode : 'dvr' + - + if: + - external_network_bridge_empty + - {} + - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge} step_config: | include tripleo::profile::base::neutron::l3 diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml index 69803551..cd9870bd 100644 --- a/puppet/services/neutron-l3.yaml +++ b/puppet/services/neutron-l3.yaml @@ -21,10 +21,6 @@ parameters: Debug: type: string default: '' - NeutronExternalNetworkBridge: - description: Name of bridge used for external network traffic. - type: string - default: 'br-ex' NeutronL3AgentMode: description: | Agent mode for L3 agent. Must be one of legacy or dvr_snat. @@ -43,6 +39,15 @@ parameters: tag: openstack.neutron.agent.l3 path: /var/log/neutron/l3-agent.log + # DEPRECATED: the following options are deprecated and are currently maintained + # for backwards compatibility. They will be removed in the Pike cycle. + NeutronExternalNetworkBridge: + description: Name of bridge used for external network traffic. Usually L2 + agent handles port wiring into external bridge, and hence the + parameter should be unset. 
+ type: string + default: '' + conditions: external_network_bridge_empty: {equals : [{get_param: NeutronExternalNetworkBridge}, "''"]} @@ -80,9 +85,16 @@ outputs: step_config: | include tripleo::profile::base::neutron::l3 upgrade_tasks: + - name: Check if neutron_l3_agent is deployed + command: systemctl is-enabled neutron-l3-agent + tags: common + ignore_errors: True + register: neutron_l3_agent_enabled - name: "PreUpgrade step0,validation: Check service neutron-l3-agent is running" shell: /usr/bin/systemctl show 'neutron-l3-agent' --property ActiveState | grep '\bactive\b' + when: neutron_l3_agent_enabled.rc == 0 tags: step0,validation - name: Stop neutron_l3 service - tags: step2 + tags: step1 + when: neutron_l3_agent_enabled.rc == 0 service: name=neutron-l3-agent state=stopped diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml index 6f5debdd..32ef567c 100644 --- a/puppet/services/neutron-metadata.yaml +++ b/puppet/services/neutron-metadata.yaml @@ -76,9 +76,16 @@ outputs: step_config: | include tripleo::profile::base::neutron::metadata upgrade_tasks: + - name: Check if neutron_metadata_agent is deployed + command: systemctl is-enabled neutron-metadata-agent + tags: common + ignore_errors: True + register: neutron_metadata_agent_enabled - name: "PreUpgrade step0,validation: Check service neutron-metadata-agent is running" shell: /usr/bin/systemctl show 'neutron-metadata-agent' --property ActiveState | grep '\bactive\b' + when: neutron_metadata_agent_enabled.rc == 0 tags: step0,validation - name: Stop neutron_metadata service - tags: step2 + tags: step1 + when: neutron_metadata_agent_enabled.rc == 0 service: name=neutron-metadata-agent state=stopped diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml index c27bb909..01471ba2 100644 --- a/puppet/services/neutron-ovs-agent.yaml +++ b/puppet/services/neutron-ovs-agent.yaml @@ -121,9 +121,16 @@ outputs: step_config: | include ::tripleo::profile::base::neutron::ovs upgrade_tasks: + - name: Check if neutron_ovs_agent is deployed + command: systemctl is-enabled neutron-openvswitch-agent + tags: common + ignore_errors: True + register: neutron_ovs_agent_enabled - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running" shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b' + when: neutron_ovs_agent_enabled.rc == 0 tags: step0,validation - name: Stop neutron_ovs_agent service - tags: step2 + tags: step1 + when: neutron_ovs_agent_enabled.rc == 0 service: name=neutron-openvswitch-agent state=stopped diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml index 0adefecd..f27b53f2 100644 --- a/puppet/services/nova-api.yaml +++ b/puppet/services/nova-api.yaml @@ -197,9 +197,6 @@ outputs: - name: Stop and disable nova_api service (pre-upgrade not under httpd) tags: step2 service: name=openstack-nova-api state=stopped enabled=no - - name: update nova api - tags: step2 - yum: name=openstack-nova-api state=latest - name: Create puppet manifest to set transport_url in nova.conf tags: step5 when: is_bootstrap_node diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml index 9923e833..d208bede 100644 --- a/puppet/services/nova-compute.yaml +++ b/puppet/services/nova-compute.yaml @@ -152,7 +152,7 @@ outputs: collectd::plugins::virt::connection: "qemu:///system" upgrade_tasks: - name: Stop nova-compute service - tags: step2 + tags: step1 service: 
name=openstack-nova-compute state=stopped # If not already set by puppet (e.g a pre-ocata version), set the # upgrade_level for compute to "auto" diff --git a/puppet/services/nova-conductor.yaml b/puppet/services/nova-conductor.yaml index 7b086536..4574cae8 100644 --- a/puppet/services/nova-conductor.yaml +++ b/puppet/services/nova-conductor.yaml @@ -67,15 +67,12 @@ outputs: include tripleo::profile::base::nova::conductor upgrade_tasks: - name: Stop nova_conductor service - tags: step2 + tags: step1 service: name=openstack-nova-conductor state=stopped - - name: update nova conductor - tags: step2 - yum: name=openstack-nova-conductor state=latest # If not already set by puppet (e.g a pre-ocata version), set the # upgrade_level for compute to "auto" - name: Set compute upgrade level to auto - tags: step3 + tags: step1 ini_file: str_replace: template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL" diff --git a/puppet/services/nova-consoleauth.yaml b/puppet/services/nova-consoleauth.yaml index b5a1312a..82f329bc 100644 --- a/puppet/services/nova-consoleauth.yaml +++ b/puppet/services/nova-consoleauth.yaml @@ -50,5 +50,5 @@ outputs: include tripleo::profile::base::nova::consoleauth upgrade_tasks: - name: Stop nova_consoleauth service - tags: step2 + tags: step1 service: name=openstack-nova-consoleauth state=stopped diff --git a/puppet/services/nova-placement.yaml b/puppet/services/nova-placement.yaml index 9389c801..b59e2fc6 100644 --- a/puppet/services/nova-placement.yaml +++ b/puppet/services/nova-placement.yaml @@ -79,6 +79,10 @@ outputs: dport: - 8778 - 13778 + nova::keystone::authtoken::project_name: 'service' + nova::keystone::authtoken::password: {get_param: NovaPassword} + nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} nova::wsgi::apache_placement::api_port: '8778' nova::wsgi::apache_placement::ssl: {get_param: EnableInternalTLS} # NOTE: bind IP is found in Heat replacing the network name with the local node IP @@ -120,7 +124,7 @@ outputs: - "%{hiera('mysql_bind_host')}" upgrade_tasks: - name: Stop nova_placement service (running under httpd) - tags: step2 + tags: step1 service: name=httpd state=stopped # The nova placement API isn't installed in newton images, so install # it on upgrade diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml index 0e0b9d1e..e4b6bb43 100644 --- a/puppet/services/nova-scheduler.yaml +++ b/puppet/services/nova-scheduler.yaml @@ -65,8 +65,5 @@ outputs: include tripleo::profile::base::nova::scheduler upgrade_tasks: - name: Stop nova_scheduler service - tags: step2 + tags: step1 service: name=openstack-nova-scheduler state=stopped - - name: update nova scheduler - tags: step2 - yum: name=openstack-nova-scheduler state=latest diff --git a/puppet/services/nova-vnc-proxy.yaml b/puppet/services/nova-vnc-proxy.yaml index f6cf9649..42335ade 100644 --- a/puppet/services/nova-vnc-proxy.yaml +++ b/puppet/services/nova-vnc-proxy.yaml @@ -66,5 +66,5 @@ outputs: include tripleo::profile::base::nova::vncproxy upgrade_tasks: - name: Stop nova_vnc_proxy service - tags: step2 + tags: step1 service: name=openstack-nova-consoleauth state=stopped diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml index 0ed9d206..ceb56a81 100644 --- a/puppet/services/opendaylight-api.yaml +++ b/puppet/services/opendaylight-api.yaml @@ -17,6 +17,10 @@ 
parameters: type: string description: The password for the opendaylight server. hidden: true + OpenDaylightConnectionProtocol: + description: L7 protocol used for REST access + type: string + default: 'http' OpenDaylightEnableDHCP: description: Knob to enable/disable ODL DHCP Server type: boolean @@ -55,6 +59,7 @@ outputs: opendaylight::extra_features: {get_param: OpenDaylightFeatures} opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP} opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]} + opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol} tripleo.opendaylight_api.firewall_rules: '137 opendaylight api': dport: @@ -63,3 +68,26 @@ - 6653 step_config: | include tripleo::profile::base::neutron::opendaylight + upgrade_tasks: + - name: Check if opendaylight is deployed + command: systemctl is-enabled opendaylight + tags: common + ignore_errors: True + register: opendaylight_enabled + - name: "PreUpgrade step0,validation: Check service opendaylight is running" + shell: /usr/bin/systemctl show 'opendaylight' --property ActiveState | grep '\bactive\b' + when: opendaylight_enabled.rc == 0 + tags: step0,validation + - name: Stop opendaylight service + tags: step1 + when: opendaylight_enabled.rc == 0 + service: name=opendaylight state=stopped + - name: Removes ODL snapshots, data, journal directories + file: + state: absent + path: /opt/opendaylight/{{item}} + tags: step2 + with_items: + - snapshots + - data + - journal diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml index cfec3c48..3db0848e 100644 --- a/puppet/services/opendaylight-ovs.yaml +++ b/puppet/services/opendaylight-ovs.yaml @@ -73,3 +73,17 @@ outputs: proto: 'gre' step_config: | include tripleo::profile::base::neutron::plugins::ovs::opendaylight + upgrade_tasks: + - name: Check if openvswitch is deployed + command: systemctl is-enabled openvswitch + tags: common + ignore_errors: True + register: openvswitch_enabled + - name: "PreUpgrade step0,validation: Check service openvswitch is running" + shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b' + when: openvswitch_enabled.rc == 0 + tags: step0,validation + - name: Stop openvswitch service + tags: step1 + when: openvswitch_enabled.rc == 0 + service: name=openvswitch state=stopped diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml index ca21cfbe..5be58c18 100644 --- a/puppet/services/pacemaker.yaml +++ b/puppet/services/pacemaker.yaml @@ -136,12 +136,12 @@ outputs: tags: step0,validation pacemaker_cluster: state=online check_and_fail=true - name: Stop pacemaker cluster - tags: step1 + tags: step2 pacemaker_cluster: state=offline - name: Start pacemaker cluster tags: step4 pacemaker_cluster: state=online - name: Check pacemaker resource tags: step4 - pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=200 + pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=500 with_items: {get_param: PacemakerResources} diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml index 03c2c83f..b018df35 100644 --- a/puppet/services/pacemaker/rabbitmq.yaml +++ b/puppet/services/pacemaker/rabbitmq.yaml @@ -39,3 +39,32 @@ outputs: - rabbitmq::service_manage: false step_config: | include ::tripleo::profile::pacemaker::rabbitmq + upgrade_tasks: + - name: get bootstrap nodeid + tags: common + command: hiera
bootstrap_nodeid + register: bootstrap_node + - name: set is_bootstrap_node fact + tags: common + set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}} + - name: get rabbitmq policy + tags: common + shell: pcs resource show rabbitmq | grep -q -E "Attributes:.*\"ha-mode\":\"all\"" + register: rabbit_ha_mode + when: is_bootstrap_node + ignore_errors: true + - name: set migrate_rabbit_ha_mode fact + tags: common + set_fact: migrate_rabbit_ha_mode={{rabbit_ha_mode.rc == 0}} + when: is_bootstrap_node + - name: Fixup for rabbitmq ha-queues LP#1668600 + tags: step0,pre-upgrade + shell: | + nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1)) + nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2))) + if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then + echo "ERROR: The nr. of HA queues during the rabbit upgrade is out of range: $nr_queues" + exit 1 + fi + pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600 + when: is_bootstrap_node and migrate_rabbit_ha_mode diff --git a/puppet/services/panko-api.yaml b/puppet/services/panko-api.yaml index 4b74ad45..eed98257 100644 --- a/puppet/services/panko-api.yaml +++ b/puppet/services/panko-api.yaml @@ -84,3 +84,22 @@ outputs: include tripleo::profile::base::panko::api metadata_settings: get_attr: [ApacheServiceBase, role_data, metadata_settings] + upgrade_tasks: + - name: Check if httpd is deployed + command: systemctl is-enabled httpd + tags: common + ignore_errors: True + register: httpd_enabled + - name: "PreUpgrade step0,validation: Check if httpd is running" + shell: > + /usr/bin/systemctl show 'httpd' --property ActiveState | + grep '\bactive\b' + when: httpd_enabled.rc == 0 + tags: step0,validation + - name: Stop panko-api service (running under httpd) + tags: step1 + service: name=httpd state=stopped + when: httpd_enabled.rc == 0 + - name: Install openstack-panko-api package if it was not installed + tags: step3 + yum: name=openstack-panko-api state=latest diff --git a/puppet/services/sahara-api.yaml b/puppet/services/sahara-api.yaml index 8573ea81..96b3d6e3 100644 --- a/puppet/services/sahara-api.yaml +++ b/puppet/services/sahara-api.yaml @@ -92,5 +92,5 @@ outputs: - "%{hiera('mysql_bind_host')}" upgrade_tasks: - name: Stop sahara_api service - tags: step2 + tags: step1 service: name=openstack-sahara-api state=stopped diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml index 987fe25b..c0b6b3e6 100644 --- a/puppet/services/sahara-engine.yaml +++ b/puppet/services/sahara-engine.yaml @@ -51,8 +51,5 @@ outputs: include ::tripleo::profile::base::sahara::engine upgrade_tasks: - name: Stop sahara_engine service - tags: step2 + tags: step1 service: name=openstack-sahara-engine state=stopped - - name: Sync sahara_engine DB - tags: step5 - command: sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head diff --git a/puppet/services/snmp.yaml b/puppet/services/snmp.yaml index fd6ed818..80c29f95 100644 --- a/puppet/services/snmp.yaml +++ b/puppet/services/snmp.yaml @@ -45,5 +45,5 @@ outputs: include ::tripleo::profile::base::snmp upgrade_tasks: - name: Stop snmp service - tags: step2 + tags: step1 service: name=snmpd state=stopped diff --git a/puppet/services/sshd.yaml b/puppet/services/sshd.yaml index 41e144a0..12998c33 100644 --- a/puppet/services/sshd.yaml +++ b/puppet/services/sshd.yaml @@ -29,6 +29,6 @@ outputs: value: service_name: sshd config_settings: - BannerText: {get_param: 
BannerText} + tripleo::profile::base::sshd::bannertext: {get_param: BannerText} step_config: | include ::tripleo::profile::base::sshd diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml index 526fa888..9b0d2de1 100644 --- a/puppet/services/swift-proxy.yaml +++ b/puppet/services/swift-proxy.yaml @@ -138,6 +138,7 @@ outputs: - '' - 'proxy-logging' - 'proxy-server' + swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL} swift::proxy::account_autocreate: true # NOTE: bind IP is found in Heat replacing the network name with the # local node IP for the given network; replacement examples @@ -166,5 +167,5 @@ outputs: - ResellerAdmin upgrade_tasks: - name: Stop swift_proxy service - tags: step2 + tags: step1 service: name=openstack-swift-proxy state=stopped diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml index 247b23ff..261aadeb 100644 --- a/puppet/services/swift-storage.yaml +++ b/puppet/services/swift-storage.yaml @@ -103,7 +103,7 @@ outputs: include ::tripleo::profile::base::swift::storage upgrade_tasks: - name: Stop swift storage services - tags: step2 + tags: step1 service: name={{ item }} state=stopped with_items: - openstack-swift-account-auditor diff --git a/puppet/services/tacker.yaml b/puppet/services/tacker.yaml index 6ceb9f19..6f92066e 100644 --- a/puppet/services/tacker.yaml +++ b/puppet/services/tacker.yaml @@ -66,9 +66,6 @@ outputs: - '/tacker' - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' - tacker::keystone::auth::tenant: 'service' - tacker::keystone::auth::password: {get_param: TackerPassword} - tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} tacker::debug: {get_param: Debug} tacker::rpc_backend: rabbit tacker::rabbit_userid: {get_param: RabbitUserName} @@ -77,6 +74,10 @@ outputs: tacker::rabbit_port: {get_param: RabbitClientPort} tacker::server::bind_host: {get_param: [ServiceNetMap, TackerApiNetwork]} + tacker::keystone::authtoken::project_name: 'service' + tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + tacker::db::mysql::password: {get_param: TackerPassword} tacker::db::mysql::user: tacker tacker::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} @@ -85,13 +86,31 @@ outputs: - '%' - {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + service_config_settings: + keystone: + tacker::keystone::auth::tenant: 'service' + tacker::keystone::auth::password: {get_param: TackerPassword} + tacker::keystone::auth::public_url: {get_param: [EndpointMap, TackerPublic, uri]} + tacker::keystone::auth::internal_url: {get_param: [EndpointMap, TackerInternal, uri]} + tacker::keystone::auth::admin_url: {get_param: [EndpointMap, TackerAdmin, uri]} step_config: | include ::tripleo::profile::base::tacker upgrade_tasks: + - name: Check if tacker is deployed + command: systemctl is-enabled openstack-tacker-server + tags: common + ignore_errors: True + register: tacker_enabled - name: "PreUpgrade step0,validation: Check service openstack-tacker-server is running" shell: /usr/bin/systemctl show 'openstack-tacker-server' --property ActiveState | grep '\bactive\b' + when: tacker_enabled.rc == 0 tags: step0,validation - name: Stop tacker service - tags: step2 + tags: step1 + when: tacker_enabled.rc == 0 service: name=openstack-tacker-server state=stopped + - name: Install 
openstack-tacker package if it was disabled + tags: step3 + yum: name=openstack-tacker state=latest + when: tacker_enabled.rc != 0 diff --git a/puppet/services/vpp.yaml b/puppet/services/vpp.yaml new file mode 100644 index 00000000..59866d39 --- /dev/null +++ b/puppet/services/vpp.yaml @@ -0,0 +1,47 @@ +heat_template_version: ocata + +description: > + Vpp service configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + VppCpuMainCore: + default: '' + description: VPP main thread core pinning. + type: string + VppCpuCorelistWorkers: + default: '' + description: List of cores for VPP worker thread pinning + type: string + MonitoringSubscriptionVpp: + default: 'overcloud-vpp' + type: string + +outputs: + role_data: + description: Role data for the Vpp role. + value: + service_name: vpp + monitoring_subscription: {get_param: MonitoringSubscriptionVpp} + config_settings: + fdio::vpp_cpu_main_core: {get_param: VppCpuMainCore} + fdio::vpp_cpu_corelist_workers: {get_param: VppCpuCorelistWorkers} + step_config: | + include ::tripleo::profile::base::vpp + upgrade_tasks: + - name: Stop vpp service + tags: step2 + service: name=vpp state=stopped diff --git a/puppet/services/zaqar.yaml b/puppet/services/zaqar.yaml index cb860fa8..a320f694 100644 --- a/puppet/services/zaqar.yaml +++ b/puppet/services/zaqar.yaml @@ -64,3 +64,23 @@ outputs: step_config: | include ::tripleo::profile::base::zaqar + upgrade_tasks: + - name: Check if zaqar is deployed + command: systemctl is-enabled openstack-zaqar + tags: common + ignore_errors: True + register: zaqar_enabled + - name: "PreUpgrade step0,validation: Check if openstack-zaqar is running" + shell: > + /usr/bin/systemctl show 'openstack-zaqar' --property ActiveState | + grep '\bactive\b' + when: zaqar_enabled.rc == 0 + tags: step0,validation + - name: Stop zaqar service + tags: step1 + when: zaqar_enabled.rc == 0 + service: name=openstack-zaqar state=stopped + - name: Install openstack-zaqar package if it was disabled + tags: step3 + yum: name=openstack-zaqar state=latest + when: zaqar_enabled.rc != 0 diff --git a/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml b/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml index f9afb18d..9343d99e 100644 --- a/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml +++ b/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml @@ -64,6 +64,8 @@ features: - Support for Octavia composable services for LBaaS with Neutron. - Support for Collectd composable services for performance monitoring. - Support for Tacker composable service for VNF management. + - Add the plan-environment.yaml file which will facilitate deployment plan + import and export. upgrade: - Update OpenDaylight deployment to use networking-odl v2 as a mechanism driver. 
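The upgrade_tasks added across the service templates above all follow one recurring shape: first detect whether the service is enabled on the node at all, then gate the step0 validation and the step1 stop on that result, and finally reinstall the package at step3 when it had been absent. A minimal standalone sketch of that pattern is shown below; the service and package name openstack-example is a placeholder for illustration, not something defined by this change.

upgrade_tasks:
  - name: Check if example service is deployed
    command: systemctl is-enabled openstack-example
    tags: common
    ignore_errors: True
    register: example_enabled
  - name: "PreUpgrade step0,validation: Check if openstack-example is running"
    shell: /usr/bin/systemctl show 'openstack-example' --property ActiveState | grep '\bactive\b'
    when: example_enabled.rc == 0
    tags: step0,validation
  - name: Stop example service
    tags: step1
    when: example_enabled.rc == 0
    service: name=openstack-example state=stopped
  - name: Install openstack-example package if it was disabled
    tags: step3
    yum: name=openstack-example state=latest
    when: example_enabled.rc != 0

Keying the checks off systemctl is-enabled keeps every task a no-op on roles where the service is not deployed, which is why nearly all of the stop tasks in this change gained a when: clause alongside the move from step2 to step1.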
diff --git a/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml b/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml new file mode 100644 index 00000000..ec22942a --- /dev/null +++ b/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + NeutronDhcpAgents had a default value of 3 that, even though unused in + practice was a bad default value. Changing the default value to a + sentinel value and making the hiera conditional allows deploy-time + logic in puppet to provide a default value based on the number of dhcp + agents being deployed. diff --git a/releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml b/releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml new file mode 100644 index 00000000..59f1fb99 --- /dev/null +++ b/releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml @@ -0,0 +1,11 @@ +--- +prelude: > + Support for Manila/CephFS with TripleO managed Ceph cluster +features: + - | + It is now possible to configure Manila with CephFS to use a + TripleO managed Ceph cluster. When using the Heat environment + file at environments/manila-cephfsnative-config.yaml Manila + will be configured to use the TripleO managed Ceph cluster + if CephMDS is deployed as well, which can be done using the + file environments/services/ceph-mds.yaml
\ No newline at end of file
diff --git a/releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml b/releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml
new file mode 100644
index 00000000..e9974a20
--- /dev/null
+++ b/releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+  - |
+    Swift rings created or updated on the overcloud nodes will now be
+    stored on the undercloud at the end of each deployment. They will be
+    retrieved before any deployment update, ensuring the Swift rings are
+    in a consistent state across the cluster at all times. This makes it
+    possible to add, remove or replace nodes without manual operator
+    interaction.
diff --git a/releasenotes/notes/vpp-84d35e51ff62a58c.yaml b/releasenotes/notes/vpp-84d35e51ff62a58c.yaml
new file mode 100644
index 00000000..b78df17d
--- /dev/null
+++ b/releasenotes/notes/vpp-84d35e51ff62a58c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - Add the ability to deploy VPP. Vector Packet Processing (VPP) is a
+    high-performance packet processing stack that runs in user space on
+    Linux. VPP is used as an alternative to the kernel networking stack
+    for an accelerated network data path.
diff --git a/roles_data.yaml b/roles_data.yaml
index 9e3b0a18..1fddf72f 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -125,6 +125,8 @@
     - OS::TripleO::Services::OctaviaHealthManager
     - OS::TripleO::Services::OctaviaHousekeeping
     - OS::TripleO::Services::OctaviaWorker
+    - OS::TripleO::Services::Vpp
+    - OS::TripleO::Services::Docker
 
 - name: Compute
   CountDefault: 1
@@ -154,6 +156,8 @@
     - OS::TripleO::Services::FluentdClient
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::Vpp
+    - OS::TripleO::Services::MySQLClient
 
 - name: BlockStorage
   ServicesDefault:
@@ -170,6 +174,7 @@
     - OS::TripleO::Services::FluentdClient
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::MySQLClient
 
 - name: ObjectStorage
   disable_upgrade_deployment: True
@@ -188,6 +193,7 @@
     - OS::TripleO::Services::FluentdClient
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::MySQLClient
 
 - name: CephStorage
   ServicesDefault:
@@ -204,3 +210,4 @@
     - OS::TripleO::Services::FluentdClient
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::MySQLClient
diff --git a/roles_data_undercloud.yaml b/roles_data_undercloud.yaml
index 2759429c..5070ef38 100644
--- a/roles_data_undercloud.yaml
+++ b/roles_data_undercloud.yaml
@@ -26,6 +26,7 @@
     - OS::TripleO::Services::MistralExecutor
     - OS::TripleO::Services::IronicApi
     - OS::TripleO::Services::IronicConductor
+    - OS::TripleO::Services::IronicPxe
     - OS::TripleO::Services::NovaIronic
     - OS::TripleO::Services::Zaqar
     - OS::TripleO::Services::NeutronServer
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index 1d0dba02..32987cb2 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -54,6 +54,21 @@ def validate_endpoint_map(base_map, env_map):
     return sorted(base_map.keys()) == sorted(env_map.keys())
 
 
+def validate_hci_compute_services_default(env_filename, env_tpl):
+    env_services_list = env_tpl['parameter_defaults']['ComputeServices']
+    env_services_list.remove('OS::TripleO::Services::CephOSD')
+    roles_filename = os.path.join(os.path.dirname(env_filename),
+                                  '../roles_data.yaml')
+    roles_tpl = yaml.load(open(roles_filename).read())
+    for role in roles_tpl:
+        if role['name'] == 'Compute':
+            roles_services_list = role['ServicesDefault']
+            if sorted(env_services_list) != sorted(roles_services_list):
+                print('ERROR: ComputeServices in %s is different '
+                      'from ServicesDefault in roles_data.yaml' % env_filename)
+                return 1
+    return 0
+
+
 def validate_mysql_connection(settings):
     no_op = lambda *args: False
     error_status = [0]
@@ -143,6 +158,9 @@ def validate(filename):
                 filename != './puppet/services/services.yaml'):
             retval = validate_service(filename, tpl)
 
+        if filename.endswith('hyperconverged-ceph.yaml'):
+            retval = validate_hci_compute_services_default(filename, tpl)
+
     except Exception:
         print(traceback.format_exc())
         return 1
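The validate_hci_compute_services_default check added above enforces that the
ComputeServices list in environments/hyperconverged-ceph.yaml matches the
Compute role's ServicesDefault from roles_data.yaml, with CephOSD as the only
extra entry. A minimal sketch of the shape it expects (abbreviated and
illustrative; the real list mirrors the full Compute ServicesDefault):

    parameter_defaults:
      ComputeServices:
        - OS::TripleO::Services::NovaCompute
        - OS::TripleO::Services::NovaLibvirt
        - OS::TripleO::Services::Vpp
        - OS::TripleO::Services::MySQLClient
        # ...every other entry from the Compute role's ServicesDefault...
        - OS::TripleO::Services::CephOSD  # the single HCI-specific addition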