-rw-r--r-- | ci/common/net-config-multinode-os-net-config.yaml | 2
-rw-r--r-- | ci/common/net-config-multinode.yaml | 4
-rw-r--r-- | ci/environments/multinode.yaml | 1
-rw-r--r-- | ci/environments/multinode_major_upgrade.yaml | 1
-rw-r--r-- | ci/pingtests/tenantvm_floatingip.yaml | 2
-rw-r--r-- | environments/major-upgrade-all-in-one.yaml | 8
-rw-r--r-- | environments/major-upgrade-composable-steps.yaml | 3
-rw-r--r-- | network/service_net_map.j2.yaml | 1
-rw-r--r-- | overcloud-resource-registry-puppet.j2.yaml | 2
-rw-r--r-- | overcloud.j2.yaml | 20
-rw-r--r-- | puppet/major_upgrade_steps.j2.yaml | 128
-rw-r--r-- | puppet/post-upgrade.j2.yaml | 27
-rw-r--r-- | puppet/post.j2.yaml | 90
-rw-r--r-- | puppet/puppet-steps.j2 | 88
-rw-r--r-- | puppet/services/README.rst | 18
-rw-r--r-- | puppet/services/ceph-osd.yaml | 4
-rw-r--r-- | puppet/services/ironic-conductor.yaml | 9
-rw-r--r-- | puppet/services/keystone.yaml | 3
-rw-r--r-- | puppet/services/nova-libvirt.yaml | 1
-rw-r--r-- | releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml | 10
-rwxr-xr-x | tools/yaml-validate.py | 11
21 files changed, 239 insertions, 194 deletions
diff --git a/ci/common/net-config-multinode-os-net-config.yaml b/ci/common/net-config-multinode-os-net-config.yaml
index 227c5da2..8c50b641 100644
--- a/ci/common/net-config-multinode-os-net-config.yaml
+++ b/ci/common/net-config-multinode-os-net-config.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
 
 description: >
   Software Config to drive os-net-config for a simple bridge configured
diff --git a/ci/common/net-config-multinode.yaml b/ci/common/net-config-multinode.yaml
index bf947d3e..dc31235a 100644
--- a/ci/common/net-config-multinode.yaml
+++ b/ci/common/net-config-multinode.yaml
@@ -47,7 +47,9 @@ resources:
         str_replace:
           template: |
             #!/bin/bash
-            ip addr add CONTROLPLANEIP/CONTROLPLANESUBNETCIDR dev $bridge_name
+            if ! ip addr show dev $bridge_name | grep CONTROLPLANEIP/CONTROLPLANESUBNETCIDR; then
+              ip addr add CONTROLPLANEIP/CONTROLPLANESUBNETCIDR dev $bridge_name
+            fi
           params:
             CONTROLPLANEIP: {get_param: ControlPlaneIp}
             CONTROLPLANESUBNETCIDR: {get_param: ControlPlaneSubnetCidr}
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
index 11243c8a..212f6a23 100644
--- a/ci/environments/multinode.yaml
+++ b/ci/environments/multinode.yaml
@@ -45,3 +45,4 @@ parameter_defaults:
   # Required for Centos 7.3 and Qemu 2.6.0
   nova::compute::libvirt::libvirt_cpu_mode: 'none'
   SwiftCeilometerPipelineEnabled: False
+  Debug: True
diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml
index 520a0c65..56d04de5 100644
--- a/ci/environments/multinode_major_upgrade.yaml
+++ b/ci/environments/multinode_major_upgrade.yaml
@@ -45,3 +45,4 @@ parameter_defaults:
   nova::compute::libvirt::libvirt_cpu_mode: 'none'
   heat::rpc_response_timeout: 600
   SwiftCeilometerPipelineEnabled: False
+  Debug: True
diff --git a/ci/pingtests/tenantvm_floatingip.yaml b/ci/pingtests/tenantvm_floatingip.yaml
index 0f31bc16..b910d6c1 100644
--- a/ci/pingtests/tenantvm_floatingip.yaml
+++ b/ci/pingtests/tenantvm_floatingip.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2013-05-23
+heat_template_version: ocata
 
 description: >
   This template resides in tripleo-ci for Mitaka CI jobs only.
diff --git a/environments/major-upgrade-all-in-one.yaml b/environments/major-upgrade-all-in-one.yaml
index 69d72edd..4283b212 100644
--- a/environments/major-upgrade-all-in-one.yaml
+++ b/environments/major-upgrade-all-in-one.yaml
@@ -1,8 +1,2 @@
-# We run the upgrade steps without disabling the OS::TripleO::PostDeploySteps
-# this means you can do a major upgrade in one pass, which may be useful
-# e.g for all-in-one deployments where we can upgrade the compute services
-# at the same time as the controlplane
-# Note that it will be necessary to pass a mapping of OS::Heat::None again for
-# any subsequent updates, or the upgrade steps will run again.
 resource_registry:
-  OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml
+  OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
diff --git a/environments/major-upgrade-composable-steps.yaml b/environments/major-upgrade-composable-steps.yaml
index 7e10014b..4283b212 100644
--- a/environments/major-upgrade-composable-steps.yaml
+++ b/environments/major-upgrade-composable-steps.yaml
@@ -1,3 +1,2 @@
 resource_registry:
-  OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml
-  OS::TripleO::PostDeploySteps: OS::Heat::None
+  OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
diff --git a/network/service_net_map.j2.yaml b/network/service_net_map.j2.yaml
index 390b18b4..b2562c79 100644
--- a/network/service_net_map.j2.yaml
+++ b/network/service_net_map.j2.yaml
@@ -49,6 +49,7 @@ parameters:
       NovaPlacementNetwork: internal_api
       NovaMetadataNetwork: internal_api
       NovaVncProxyNetwork: internal_api
+      NovaLibvirtNetwork: internal_api
       Ec2ApiNetwork: internal_api
       Ec2ApiMetadataNetwork: internal_api
       SwiftStorageNetwork: storage_mgmt
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 1b9646fe..471a0ff1 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -2,6 +2,7 @@ resource_registry:
 
   OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment
   OS::TripleO::PostDeploySteps: puppet/post.yaml
+  OS::TripleO::PostUpgradeSteps: puppet/post.yaml
   OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
   OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
   OS::TripleO::DefaultPasswords: default_passwords.yaml
@@ -110,7 +111,6 @@
 
   # Upgrade resources
   OS::TripleO::UpgradeConfig: puppet/upgrade_config.yaml
-  OS::TripleO::UpgradeSteps: OS::Heat::None
 
   # services
   OS::TripleO::Services: puppet/services/services.yaml
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index f93c19a3..dea748ed 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -598,9 +598,9 @@ resources:
       {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
 {% endfor %}
 
-  # Upgrade steps for all roles
-  AllNodesUpgradeSteps:
-    type: OS::TripleO::UpgradeSteps
+  # Post deployment steps for all roles
+  AllNodesDeploySteps:
+    type: OS::TripleO::PostDeploySteps
     depends_on:
 {% for role in roles %}
       - {{role.name}}AllNodesDeployment
@@ -615,20 +615,6 @@
       {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
 {% endfor %}
 
-  # Post deployment steps for all roles
-  AllNodesDeploySteps:
-    type: OS::TripleO::PostDeploySteps
-    depends_on: AllNodesUpgradeSteps
-    properties:
-      servers:
-{% for role in roles %}
-        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
-{% endfor %}
-      role_data:
-{% for role in roles %}
-        {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
-{% endfor %}
-
 outputs:
   ManagedEndpoints:
     description: Asserts that the keystone endpoints have been provisioned.
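Example (not part of the patch): with the separate UpgradeSteps resource gone, a major upgrade is driven purely by remapping OS::TripleO::PostDeploySteps. For a hypothetical two-role roles_data (Controller and Compute), the AllNodesDeploySteps block above would render roughly as the sketch below; the role names are assumptions for illustration only.

  AllNodesDeploySteps:
    type: OS::TripleO::PostDeploySteps
    depends_on:
      - ControllerAllNodesDeployment
      - ComputeAllNodesDeployment
    properties:
      servers:
        Controller: {get_attr: [Controller, attributes, nova_server_resource]}
        Compute: {get_attr: [Compute, attributes, nova_server_resource]}
      role_data:
        Controller: {get_attr: [ControllerServiceChain, role_data]}
        Compute: {get_attr: [ComputeServiceChain, role_data]}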
diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml
index eae85991..b879fafa 100644
--- a/puppet/major_upgrade_steps.j2.yaml
+++ b/puppet/major_upgrade_steps.j2.yaml
@@ -1,4 +1,6 @@
-{% set upgrade_steps_max = 8 -%}
+{% set enabled_roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% set batch_upgrade_steps_max = 3 -%}
+{% set upgrade_steps_max = 6 -%}
 heat_template_version: ocata
 
 description: 'Upgrade steps for all roles'
@@ -18,54 +20,53 @@ parameters:
 
 conditions:
   # Conditions to disable any steps where the task list is empty
-{% for step in range(0, upgrade_steps_max) %}
-  {% for role in roles %}
-  UpgradeBatchConfig_Step{{step}}Enabled:
+{%- for role in roles %}
+  {{role.name}}UpgradeBatchConfigEnabled:
     not:
       equals:
         - {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
         - []
-  UpgradeConfig_Step{{step}}Enabled:
+  {{role.name}}UpgradeConfigEnabled:
     not:
       equals:
         - {get_param: [role_data, {{role.name}}, upgrade_tasks]}
         - []
-  {% endfor %}
-{% endfor %}
+{%- endfor %}
 
 resources:
 
 # Upgrade Steps for all roles, batched updates
-# FIXME(shardy): would be nice to make the number of steps configurable
-{% for step in range(0, upgrade_steps_max) %}
-  {% for role in roles %}
-  # Step {{step}} resources
+# The UpgradeConfig resources could actually be created without
+# serialization, but the event output is easier to follow if we
+# do, and there should be minimal performance hit (creating the
+# config is cheap compared to the time to apply the deployment).
+{% for step in range(0, batch_upgrade_steps_max) %}
+  # Batch config resources step {{step}}
+  {%- for role in roles %}
   {{role.name}}UpgradeBatchConfig_Step{{step}}:
     type: OS::TripleO::UpgradeConfig
-    condition: UpgradeBatchConfig_Step{{step}}Enabled
-    # The UpgradeConfig resources could actually be created without
-    # serialization, but the event output is easier to follow if we
-    # do, and there should be minimal performance hit (creating the
-    # config is cheap compared to the time to apply the deployment).
-    {% if step > 0 %}
+    {%- if step > 0 %}
    depends_on:
-    {% for dep in roles %}
+    {%- for dep in enabled_roles %}
      - {{dep.name}}UpgradeBatch_Step{{step -1}}
-    {% endfor %}
-    {% endif %}
+    {%- endfor %}
+    {%- endif %}
    properties:
      UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
      step: {{step}}
+  {%- endfor %}
+  # Batch deployment resources for step {{step}} (only for enabled roles)
+  {%- for role in enabled_roles %}
   {{role.name}}UpgradeBatch_Step{{step}}:
-    type: OS::Heat::StructuredDeploymentGroup
-    condition: UpgradeBatchConfig_Step{{step}}Enabled
-    {% if step > 0 %}
+    type: OS::Heat::SoftwareDeploymentGroup
+    condition: {{role.name}}UpgradeBatchConfigEnabled
+    {%- if step > 0 %}
    depends_on:
-    {% for dep in roles %}
+    {%- for dep in enabled_roles %}
      - {{dep.name}}UpgradeBatch_Step{{step -1}}
-    {% endfor %}
-    {% endif %}
+    {%- endfor %}
+    {%- endif %}
    update_policy:
      batch_create:
        max_batch_size: {{role.upgrade_batch_size|default(1)}}
@@ -78,52 +79,49 @@ resources:
      input_values:
        role: {{role.name}}
        update_identifier: {get_param: UpdateIdentifier}
-  {% endfor %}
-{% endfor %}
+  {%- endfor %}
+{%- endfor %}
 
 # Upgrade Steps for all roles
-# FIXME(shardy): would be nice to make the number of steps configurable
-{% for step in range(0, upgrade_steps_max) %}
-  {% for role in roles %}
-  # Step {{step}} resources
+{%- for step in range(0, upgrade_steps_max) %}
+  # Config resources for step {{step}}
+  {%- for role in roles %}
   {{role.name}}UpgradeConfig_Step{{step}}:
    type: OS::TripleO::UpgradeConfig
-    condition: UpgradeConfig_Step{{step}}Enabled
    # The UpgradeConfig resources could actually be created without
    # serialization, but the event output is easier to follow if we
    # do, and there should be minimal performance hit (creating the
    # config is cheap compared to the time to apply the deployment).
    depends_on:
-    {% if step > 0 %}
-    {% for dep in roles %}
-    {% if not dep.disable_upgrade_deployment|default(false) %}
+    {%- if step > 0 %}
+    {%- for dep in enabled_roles %}
      - {{dep.name}}Upgrade_Step{{step -1}}
-    {% endif %}
-    {% endfor %}
-    {% else %}
-    {% for dep in roles %}
-      - {{dep.name}}UpgradeBatch_Step{{upgrade_steps_max -1}}
-    {% endfor %}
-    {% endif %}
+    {%- endfor %}
+    {%- else %}
+    {%- for dep in enabled_roles %}
+      - {{dep.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+    {%- endfor %}
+    {%- endif %}
    properties:
      UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]}
      step: {{step}}
-  {% if not role.disable_upgrade_deployment|default(false) %}
+  {%- endfor %}
+
+  # Deployment resources for step {{step}} (only for enabled roles)
+  {%- for role in enabled_roles %}
   {{role.name}}Upgrade_Step{{step}}:
-    type: OS::Heat::StructuredDeploymentGroup
-    condition: UpgradeConfig_Step{{step}}Enabled
+    type: OS::Heat::SoftwareDeploymentGroup
+    condition: {{role.name}}UpgradeConfigEnabled
    depends_on:
-    {% if step > 0 %}
-    {% for dep in roles %}
-    {% if not dep.disable_upgrade_deployment|default(false) %}
+    {%- if step > 0 %}
+    {%- for dep in enabled_roles %}
      - {{dep.name}}Upgrade_Step{{step -1}}
-    {% endif %}
-    {% endfor %}
-    {% else %}
-    {% for dep in roles %}
-      - {{dep.name}}UpgradeBatch_Step{{upgrade_steps_max -1}}
-    {% endfor %}
-    {% endif %}
+    {%- endfor %}
+    {%- else %}
+    {%- for dep in enabled_roles %}
+      - {{dep.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+    {%- endfor %}
+    {%- endif %}
    properties:
      name: {{role.name}}Upgrade_Step{{step}}
      servers: {get_param: [servers, {{role.name}}]}
@@ -131,9 +129,21 @@ resources:
      input_values:
        role: {{role.name}}
        update_identifier: {get_param: UpdateIdentifier}
-  {% endif %}
-  {% endfor %}
-{% endfor %}
+  {%- endfor %}
+{%- endfor %}
+
+  # Post upgrade deployment steps for all roles
+  # This runs the normal configuration (e.g puppet) steps unless upgrade
+  # is disabled for the role
+  AllNodesPostUpgradeSteps:
+    type: OS::TripleO::PostUpgradeSteps
+    depends_on:
+{%- for dep in enabled_roles %}
+      - {{dep.name}}Upgrade_Step{{upgrade_steps_max - 1}}
+{%- endfor %}
+    properties:
+      servers: {get_param: servers}
+      role_data: {get_param: role_data}
 
 outputs:
  # Output the config for each role, just use Step1 as the config should be
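Example (not part of the patch): for a single enabled role named Compute, batch step 0 of the template above would render roughly as the sketch below. The role name and the default upgrade_batch_size of 1 are assumptions, and the servers/config properties not visible in the hunk context are filled in as assumptions for illustration.

  ComputeUpgradeBatchConfig_Step0:
    type: OS::TripleO::UpgradeConfig
    properties:
      UpgradeStepConfig: {get_param: [role_data, Compute, upgrade_batch_tasks]}
      step: 0

  ComputeUpgradeBatch_Step0:
    type: OS::Heat::SoftwareDeploymentGroup
    condition: ComputeUpgradeBatchConfigEnabled
    update_policy:
      batch_create:
        max_batch_size: 1
    properties:
      servers: {get_param: [servers, Compute]}
      config: {get_resource: ComputeUpgradeBatchConfig_Step0}
      input_values:
        role: Compute
        update_identifier: {get_param: UpdateIdentifier}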
diff --git a/puppet/post-upgrade.j2.yaml b/puppet/post-upgrade.j2.yaml
new file mode 100644
index 00000000..b84039de
--- /dev/null
+++ b/puppet/post-upgrade.j2.yaml
@@ -0,0 +1,27 @@
+heat_template_version: ocata
+
+description: >
+  Post-upgrade configuration steps via puppet for all roles
+  where upgrade is not disabled as defined in ../roles_data.yaml
+
+parameters:
+  servers:
+    type: json
+    description: Mapping of Role name e.g Controller to a list of servers
+
+  role_data:
+    type: json
+    description: Mapping of Role name e.g Controller to the per-role data
+
+  DeployIdentifier:
+    default: ''
+    type: string
+    description: >
+      Setting this to a unique value will re-run any deployment tasks which
+      perform configuration on a Heat stack-update.
+
+resources:
+# Note the include here is the same as post.j2.yaml but the data used at
+# the time of rendering is different if any roles disable upgrades
+{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% include 'puppet-steps.j2' %}
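Example (not part of the patch): the rejectattr filter above drops any role that sets disable_upgrade_deployment in roles_data.yaml, so that role keeps running the normal post-deploy steps instead of the upgrade steps. A minimal sketch of such a role entry (service list shortened for illustration):

  - name: ObjectStorage
    disable_upgrade_deployment: True
    ServicesDefault:
      - OS::TripleO::Services::SwiftStorage
      - OS::TripleO::Services::Ntp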
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
index 83c32868..39155c36 100644
--- a/puppet/post.j2.yaml
+++ b/puppet/post.j2.yaml
@@ -21,92 +21,4 @@ parameters:
      perform configuration on a Heat stack-update.
 
 resources:
-  # Post deployment steps for all roles
-  # A single config is re-applied with an incrementing step number
-{% for role in roles %}
-  # {{role.name}} Role post deploy steps
-  {{role.name}}ArtifactsConfig:
-    type: deploy-artifacts.yaml
-
-  {{role.name}}ArtifactsDeploy:
-    type: OS::Heat::StructuredDeployments
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}ArtifactsConfig}
-
-  {{role.name}}PreConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PreConfig
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  {{role.name}}Config:
-    type: OS::TripleO::{{role.name}}Config
-    properties:
-      StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
-
-  {% if role.name == 'Controller' %}
-  ControllerPrePuppet:
-    type: OS::TripleO::Tasks::ControllerPrePuppet
-    properties:
-      servers: {get_param: [servers, Controller]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-  {% endif %}
-
-  # Step through a series of configuration steps
-{% for step in range(1, 6) %}
-  {{role.name}}Deployment_Step{{step}}:
-    type: OS::Heat::StructuredDeploymentGroup
-  {% if step == 1 %}
-    depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
-  {% else %}
-    depends_on:
-  {% for dep in roles %}
-      - {{dep.name}}Deployment_Step{{step -1}}
-  {% endfor %}
-  {% endif %}
-    properties:
-      name: {{role.name}}Deployment_Step{{step}}
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}Config}
-      input_values:
-        step: {{step}}
-        update_identifier: {get_param: DeployIdentifier}
-{% endfor %}
-
-  {{role.name}}PostConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PostConfig
-    depends_on:
-  {% for dep in roles %}
-      - {{dep.name}}Deployment_Step5
-  {% endfor %}
-    properties:
-      servers: {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  {{role.name}}ExtraConfigPost:
-    depends_on:
-  {% for dep in roles %}
-      - {{dep.name}}PostConfig
-  {% endfor %}
-    type: OS::TripleO::NodeExtraConfigPost
-    properties:
-        servers: {get_param: [servers, {{role.name}}]}
-
-  {% if role.name == 'Controller' %}
-  ControllerPostPuppet:
-    depends_on:
-      - ControllerExtraConfigPost
-    type: OS::TripleO::Tasks::ControllerPostPuppet
-    properties:
-      servers: {get_param: [servers, Controller]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-  {% endif %}
-
-{% endfor %}
+{% include 'puppet-steps.j2' %}
diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2
new file mode 100644
index 00000000..c3b54ccd
--- /dev/null
+++ b/puppet/puppet-steps.j2
@@ -0,0 +1,88 @@
+  # Post deployment steps for all roles
+  # A single config is re-applied with an incrementing step number
+{% for role in roles %}
+  # {{role.name}} Role post-deploy steps
+  {{role.name}}ArtifactsConfig:
+    type: deploy-artifacts.yaml
+
+  {{role.name}}ArtifactsDeploy:
+    type: OS::Heat::StructuredDeployments
+    properties:
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}ArtifactsConfig}
+
+  {{role.name}}PreConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PreConfig
+    properties:
+      servers: {get_param: [servers, {{role.name}}]}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
+  {{role.name}}Config:
+    type: OS::TripleO::{{role.name}}Config
+    properties:
+      StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
+
+  {% if role.name == 'Controller' %}
+  ControllerPrePuppet:
+    type: OS::TripleO::Tasks::ControllerPrePuppet
+    properties:
+      servers: {get_param: [servers, Controller]}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+  {% endif %}
+
+  # Step through a series of configuration steps
+{% for step in range(1, 6) %}
+  {{role.name}}Deployment_Step{{step}}:
+    type: OS::Heat::StructuredDeploymentGroup
+  {% if step == 1 %}
+    depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+  {% else %}
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}Deployment_Step{{step -1}}
+  {% endfor %}
+  {% endif %}
+    properties:
+      name: {{role.name}}Deployment_Step{{step}}
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}Config}
+      input_values:
+        step: {{step}}
+        update_identifier: {get_param: DeployIdentifier}
+{% endfor %}
+
+  {{role.name}}PostConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PostConfig
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}Deployment_Step5
+  {% endfor %}
+    properties:
+      servers: {get_param: servers}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
+  # Note, this should come last, so use depends_on to ensure
+  # this is created after any other resources.
+  {{role.name}}ExtraConfigPost:
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}PostConfig
+  {% endfor %}
+    type: OS::TripleO::NodeExtraConfigPost
+    properties:
+        servers: {get_param: [servers, {{role.name}}]}
+
+  {% if role.name == 'Controller' %}
+  ControllerPostPuppet:
+    depends_on:
+      - ControllerExtraConfigPost
+    type: OS::TripleO::Tasks::ControllerPostPuppet
+    properties:
+      servers: {get_param: [servers, Controller]}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+  {% endif %}
+{% endfor %}
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index 34cb350b..9c2d8c5c 100644
--- a/puppet/services/README.rst
+++ b/puppet/services/README.rst
@@ -57,10 +57,14 @@ is a list of ansible tasks to be performed during the upgrade process.
 
 Similar to the step_config, we allow a series of steps for the per-service
 upgrade sequence, defined as ansible tasks with a tag e.g "step1" for the first
-step, "step2" for the second, etc. Note that each step is performed in batches,
-then we move on to the next step which is also performed in batches (we don't
-perform all steps on one node, then move on to the next one which means you
-can sequence rolling upgrades of dependent services via the step value).
+step, "step2" for the second, etc (currently only two steps are supported, but
+more may be added when required as additional services get converted to batched
+upgrades).
+
+Note that each step is performed in batches, then we move on to the next step
+which is also performed in batches (we don't perform all steps on one node,
+then move on to the next one which means you can sequence rolling upgrades of
+dependent services via the step value).
 
 The tasks performed at each step is service specific, but note that all batch
 upgrade steps are performed before the `upgrade_tasks` described below. This
@@ -93,9 +97,9 @@ step, "step2" for the second, etc.
 
 5) Perform any migration tasks, e.g DB sync commands
 
-6) Start control-plane services
-
-7) Any additional online migration tasks (e.g data migrations)
+Note that the services are not started in the upgrade tasks - we instead re-run
+puppet which does any reconfiguration required for the new version, then starts
+the services.
 
 Nova Server Metadata Settings
 -----------------------------
diff --git a/puppet/services/ceph-osd.yaml b/puppet/services/ceph-osd.yaml
index 98f83d08..9bd83aab 100644
--- a/puppet/services/ceph-osd.yaml
+++ b/puppet/services/ceph-osd.yaml
@@ -68,14 +68,14 @@
            command: ceph osd set noscrub
          - name: Stop Ceph OSD
            tags: step1
-            service: name=ceph-osd@$item state=stopped
+            service: name=ceph-osd@{{ item }} state=stopped
            with_items: "{{osd_ids.stdout.strip().split()}}"
          - name: Update ceph OSD packages
            tags: step1
            yum: name=ceph-osd state=latest
          - name: Start ceph-osd service
            tags: step1
-            service: name=ceph-osd@$item state=started
+            service: name=ceph-osd@{{ item }} state=started
            with_items: "{{osd_ids.stdout.strip().split()}}"
          - name: ceph osd unset noout
            tags: step1
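Example (not part of the patch): the README change above documents the batched step tags, and the ceph-osd change shows real tasks using them. A generic sketch of how a service template's role_data output could wire this up (the service name and commands are hypothetical):

      upgrade_batch_tasks:
        - name: Stop example service before the rolling package update
          tags: step1
          service: name=example-service state=stopped
        - name: Start example service again once this batch is updated
          tags: step2
          service: name=example-service state=started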
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index a10c03a5..48d87209 100644
--- a/puppet/services/ironic-conductor.yaml
+++ b/puppet/services/ironic-conductor.yaml
@@ -24,6 +24,14 @@
      "full" for full cleaning, "metadata" to clean only disk metadata
      (partition table).
    type: string
+  IronicCleaningNetwork:
+    default: 'provisioning'
+    description: Name or UUID of the *overcloud* network used for cleaning
+                 bare metal nodes. The default value of "provisioning" can be
+                 left during the initial deployment (when no networks are
+                 created yet) and should be changed to an actual UUID in
+                 a post-deployment stack update.
+    type: string
   IronicEnabledDrivers:
    default: ['pxe_ipmitool', 'pxe_drac', 'pxe_ilo']
    description: Enabled Ironic drivers
@@ -61,6 +69,7 @@
      - ironic::api::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
        ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
        ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase}
+        ironic::conductor::cleaning_network: {get_param: IronicCleaningNetwork}
        ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers}
        # We need an endpoint containing a real IP, not a VIP here
        ironic_conductor_http_host: {get_param: [ServiceNetMap, IronicNetwork]}
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index b989d502..7da4a9c2 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -313,8 +313,5 @@
        - name: Sync keystone DB
          tags: step5
          command: keystone-manage db_sync
-        - name: Start keystone service (running under httpd)
-          tags: step6
-          service: name=httpd state=started
      metadata_settings:
        get_attr: [ApacheServiceBase, role_data, metadata_settings]
diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml
index a9b2b3f9..faf1ae48 100644
--- a/puppet/services/nova-libvirt.yaml
+++ b/puppet/services/nova-libvirt.yaml
@@ -62,6 +62,7 @@
        nova::compute::libvirt::qemu::configure_qemu: true
        nova::compute::libvirt::qemu::max_files: 32768
        nova::compute::libvirt::qemu::max_processes: 131072
+        nova::compute::libvirt::vncserver_listen: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
        tripleo.nova_libvirt.firewall_rules:
          '200 nova_libvirt':
            dport:
diff --git a/releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml b/releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml
new file mode 100644
index 00000000..72601f9e
--- /dev/null
+++ b/releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    New parameter "IronicCleaningNetwork" can be used to override the name
+    or UUID of the **overcloud** network Ironic uses for cleaning.
+fixes:
+  - |
+    A default value is now provided for Ironic ``cleaning_network``
+    configuration option. Not providing it on start up was deprecated since
+    Newton, and will result in a failure in the near future.
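Example (not part of the patch): once the overcloud network used for cleaning exists, the new parameter can be overridden in a post-deployment stack update via an ordinary environment file (the value below is a placeholder, not a real UUID):

  parameter_defaults:
    IronicCleaningNetwork: <uuid-of-the-overcloud-cleaning-network>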
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index 19e40d19..2769c152 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -94,10 +94,6 @@ def validate_mysql_connection(settings):
 
 
 def validate_service(filename, tpl):
-    if 'heat_template_version' in tpl and not str(tpl['heat_template_version']).isalpha():
-        print('ERROR: heat_template_version needs to be the release alias not a date: %s'
-              % filename)
-        return 1
     if 'outputs' in tpl and 'role_data' in tpl['outputs']:
        if 'value' not in tpl['outputs']['role_data']:
            print('ERROR: invalid role_data for filename: %s'
@@ -135,6 +131,13 @@ def validate(filename):
    try:
        tpl = yaml.load(open(filename).read())
 
+        # The template alias version should be used instead a date, this validation
+        # will be applied to all templates not just for those in the services folder.
+        if 'heat_template_version' in tpl and not str(tpl['heat_template_version']).isalpha():
+            print('ERROR: heat_template_version needs to be the release alias not a date: %s'
+                  % filename)
+            return 1
+
        if (filename.startswith('./puppet/services/') and
                filename != './puppet/services/services.yaml'):
            retval = validate_service(filename, tpl)
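Example (not part of the patch): with the relocated check, every template header is validated, not just the service templates. A header using the release alias passes, while a date-based one is rejected with the error message shown in the code above:

  # passes: release alias
  heat_template_version: ocata

  # fails: "heat_template_version needs to be the release alias not a date"
  # heat_template_version: 2016-10-14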