Diffstat (limited to 'puppet')
-rw-r--r--  puppet/blockstorage-role.yaml         33
-rw-r--r--  puppet/cephstorage-role.yaml          33
-rw-r--r--  puppet/compute-role.yaml              35
-rw-r--r--  puppet/controller-role.yaml           42
-rw-r--r--  puppet/deploy-artifacts.sh             8
-rw-r--r--  puppet/objectstorage-role.yaml        34
-rw-r--r--  puppet/post.j2.yaml                    7
-rw-r--r--  puppet/puppet-steps.j2                82
-rw-r--r--  puppet/role.role.j2.yaml              43
-rw-r--r--  puppet/services/README.rst            24
-rw-r--r--  puppet/services/haproxy.yaml           5
-rw-r--r--  puppet/services/nova-scheduler.yaml    9
12 files changed, 339 insertions, 16 deletions
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index b20cdbac..612a4a01 100644
--- a/puppet/blockstorage-role.yaml
+++ b/puppet/blockstorage-role.yaml
@@ -142,6 +142,24 @@ parameters:
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
conditions:
@@ -150,6 +168,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
BlockStorage:
@@ -178,6 +202,12 @@ resources:
- {get_param: BlockStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: BlockStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -615,3 +645,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [BlockStorage, os_collect_config]}
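The DeploymentSwiftDataMap parameter introduced above (and repeated in the other role templates below) maps Heat-assigned hostnames to the Swift container/object used for storing deployment data. A minimal sketch of how it might be supplied from a Heat environment file; the hostname and container/object values are illustrative only, not taken from this change:

    parameter_defaults:
      DeploymentSwiftDataMap:
        overcloud-blockstorage-0:
          container: overcloud-blockstorage  # Swift container holding this server's deployment data
          object: 0                          # object name within that container

When the map has no entry for a server's Hostname, the new deployment_swift_data_map_unset condition passes an empty map for deployment_swift_data, preserving the previous behaviour.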
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index f1320bc1..e7afcb40 100644
--- a/puppet/cephstorage-role.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -148,6 +148,24 @@ parameters:
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
conditions:
@@ -156,6 +174,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
CephStorage:
@@ -184,6 +208,12 @@ resources:
- {get_param: CephStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: CephStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -626,3 +656,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [CephStorage, os_collect_config]}
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index 6b0beab2..5a662e86 100644
--- a/puppet/compute-role.yaml
+++ b/puppet/compute-role.yaml
@@ -37,7 +37,7 @@ parameters:
type: string
NeutronPublicInterface:
default: nic1
- description: A port to add to the NeutronPhysicalBridge.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
NodeIndex:
type: number
@@ -160,9 +160,33 @@ parameters:
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
conditions:
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
server_not_blacklisted:
not:
equals:
@@ -198,6 +222,12 @@ resources:
- {get_param: NovaComputeServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: NovaComputeSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -652,3 +682,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
value:
{get_resource: NovaCompute}
condition: server_not_blacklisted
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [NovaCompute, os_collect_config]}
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index b5c38287..09e5b2b9 100644
--- a/puppet/controller-role.yaml
+++ b/puppet/controller-role.yaml
@@ -58,9 +58,13 @@ parameters:
type: string
constraints:
- custom_constraint: nova.keypair
+ NeutronPhysicalBridge:
+ default: 'br-ex'
+ description: An OVS bridge to create for accessing external networks.
+ type: string
NeutronPublicInterface:
default: nic1
- description: What interface to bridge onto br-ex for network nodes.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
ServiceNetMap:
default: {}
@@ -174,6 +178,24 @@ parameters:
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
parameter_groups:
@@ -188,7 +210,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
-
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
@@ -218,6 +245,12 @@ resources:
- {get_param: ControllerServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ControllerSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -432,7 +465,7 @@ resources:
- {get_param: NetworkDeploymentActions}
- []
input_values:
- bridge_name: br-ex
+ bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
# Resource for site-specific injection of root certificate
@@ -698,3 +731,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
tls_cert_modulus_md5:
description: MD5 checksum of the TLS Certificate Modulus
value: {get_attr: [NodeTLSData, cert_modulus_md5]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [Controller, os_collect_config]}
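The controller template now also exposes a NeutronPhysicalBridge parameter (as does role.role.j2.yaml below) instead of hard-coding br-ex as the bridge_name input. A minimal sketch of overriding it from an environment file; the bridge and interface names are illustrative assumptions:

    parameter_defaults:
      NeutronPhysicalBridge: br-provider   # hypothetical bridge name; the default remains 'br-ex'
      NeutronPublicInterface: nic2         # interface added to that bridge (illustrative)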
diff --git a/puppet/deploy-artifacts.sh b/puppet/deploy-artifacts.sh
index 6e7f0ec1..e4d20b49 100644
--- a/puppet/deploy-artifacts.sh
+++ b/puppet/deploy-artifacts.sh
@@ -10,7 +10,9 @@ if [ -n "$artifact_urls" ]; then
for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
curl --globoff -o $TMP_DATA/file_data "$URL"
if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
- yum install -y $TMP_DATA/file_data
+ mv $TMP_DATA/file_data $TMP_DATA/file_data.rpm
+ yum install -y $TMP_DATA/file_data.rpm
+ rm $TMP_DATA/file_data.rpm
elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
pushd /
tar xvzf $TMP_DATA/file_data
@@ -19,7 +21,9 @@ if [ -n "$artifact_urls" ]; then
echo "ERROR: Unsupported file format: $URL"
exit 1
fi
- rm $TMP_DATA/file_data
+ if [ -f $TMP_DATA/file_data ]; then
+ rm $TMP_DATA/file_data
+ fi
done
else
echo "No artifact_urls was set. Skipping..."
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 85567462..4a1670f8 100644
--- a/puppet/objectstorage-role.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -143,6 +143,25 @@ parameters:
type: json
description: Role Specific Parameters
default: {}
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
@@ -150,6 +169,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
@@ -178,6 +203,12 @@ resources:
- {get_param: SwiftStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ObjectStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -614,3 +645,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [SwiftStorage, os_collect_config]}
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
index 3a15cec6..67e1ecfd 100644
--- a/puppet/post.j2.yaml
+++ b/puppet/post.j2.yaml
@@ -8,7 +8,9 @@ parameters:
servers:
type: json
description: Mapping of Role name e.g Controller to a list of servers
-
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
@@ -23,6 +25,7 @@ parameters:
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
+ ctlplane_service_ips:
+ type: json
-resources:
{% include 'puppet-steps.j2' %}
diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2
index 5567d65d..82c6171e 100644
--- a/puppet/puppet-steps.j2
+++ b/puppet/puppet-steps.j2
@@ -1,3 +1,19 @@
+{% set deploy_steps_max = 6 %}
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}_Enabled:
+ or:
+ {% for role in roles %}
+ - not:
+ equals:
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+ - ''
+ - False
+ {% endfor %}
+{% endfor %}
+
+resources:
# Post deployment steps for all roles
# A single config is re-applied with an incrementing step number
{% for role in roles %}
@@ -24,17 +40,26 @@
StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
# Step through a series of configuration steps
-{% for step in range(1, 6) %}
+{% for step in range(1, deploy_steps_max) %}
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
depends_on:
+ - WorkflowTasks_Step{{step}}_Execution
+ # TODO(gfidente): the following if/else condition
+ # replicates what is already defined for the
+ # WorkflowTasks_StepX resource and can be removed
+ # if https://bugs.launchpad.net/heat/+bug/1700569
+ # is fixed.
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
{% for dep in roles %}
- {{dep.name}}Deployment_Step{{step -1}}
{% endfor %}
- {% endif %}
+ {% endif %}
properties:
name: {{role.name}}Deployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
@@ -72,3 +97,50 @@
{% endfor %}
+
+# BEGIN service_workflow_tasks handling
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}:
+ type: OS::Mistral::Workflow
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on:
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
+ properties:
+ name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+ type: direct
+ tasks:
+ yaql:
+ expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+ data:
+ {% for role in roles %}
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+ {% endfor %}
+
+ WorkflowTasks_Step{{step}}_Execution:
+ type: OS::Mistral::ExternalResource
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on: WorkflowTasks_Step{{step}}
+ properties:
+ actions:
+ CREATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ UPDATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ always_update: true
+{% endfor %}
+# END service_workflow_tasks handling
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 0a47d50b..b45736c1 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -28,9 +28,13 @@ parameters:
constraints:
- custom_constraint: nova.keypair
{% endif %}
+ NeutronPhysicalBridge:
+ default: 'br-ex'
+ description: An OVS bridge to create for accessing tenant networks.
+ type: string
NeutronPublicInterface:
default: nic1
- description: What interface to bridge onto br-ex for network nodes.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
ServiceNetMap:
default: {}
@@ -164,6 +168,24 @@ parameters:
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
conditions:
@@ -172,10 +194,16 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
{{role}}:
- type: OS::TripleO::{{role.name}}Server
+ type: OS::TripleO::{{role}}Server
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
@@ -200,6 +228,12 @@ resources:
- {get_param: {{role}}ServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: {{role}}SchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -410,7 +444,7 @@ resources:
server: {get_resource: {{role}}}
actions: {get_param: NetworkDeploymentActions}
input_values:
- bridge_name: br-ex
+ bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
actions:
if:
@@ -649,3 +683,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [{{role}}, os_collect_config]}
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index 7a18ef0c..d55414b7 100644
--- a/puppet/services/README.rst
+++ b/puppet/services/README.rst
@@ -95,6 +95,30 @@ are re-asserted when applying latter ones.
5) Service activation (Pacemaker)
+It is also possible to use Mistral actions or workflows together with
+a deployment step; these are executed before the main configuration run.
+To describe actions or workflows from within a service, use:
+
+ * service_workflow_tasks: One or more workflow task properties
+
+which expects a map where the key is the step and the value is a list of
+dictionaries, each describing a workflow task, for example::
+
+ service_workflow_tasks:
+ step2:
+ - name: echo
+ action: std.echo output=Hello
+ step3:
+ - name: external
+ workflow: my-pre-existing-workflow-name
+ input:
+ workflow_param1: value
+ workflow_param2: value
+
+The Heat guide for the `OS::Mistral::Workflow task property
+<https://docs.openstack.org/developer/heat/template_guide/openstack.html#OS::Mistral::Workflow-prop-tasks>`_
+has more details about the expected dictionary.
+
Batch Upgrade Steps
-------------------
diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml
index 619cf131..5bdc3b88 100644
--- a/puppet/services/haproxy.yaml
+++ b/puppet/services/haproxy.yaml
@@ -38,6 +38,10 @@ parameters:
default: /dev/log
description: Syslog address where HAproxy will send its log
type: string
+ HAProxyStatsEnabled:
+ default: true
+ description: Whether or not to enable the HAProxy stats interface.
+ type: boolean
RedisPassword:
description: The password for Redis
type: string
@@ -95,6 +99,7 @@ outputs:
tripleo::haproxy::redis_password: {get_param: RedisPassword}
tripleo::haproxy::ca_bundle: {get_param: InternalTLSCAFile}
tripleo::haproxy::crl_file: {get_param: InternalTLSCRLPEMFile}
+ tripleo::haproxy::haproxy_stats: {get_param: HAProxyStatsEnabled}
tripleo::profile::base::haproxy::certificates_specs:
map_merge:
- get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
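The new HAProxyStatsEnabled flag is passed through to tripleo::haproxy::haproxy_stats and defaults to true. A minimal sketch of disabling the stats interface from an environment file:

    parameter_defaults:
      HAProxyStatsEnabled: false   # turn off the HAProxy stats interface (default: true)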
diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml
index 5da6d43e..72a1fce7 100644
--- a/puppet/services/nova-scheduler.yaml
+++ b/puppet/services/nova-scheduler.yaml
@@ -45,6 +45,14 @@ parameters:
default:
tag: openstack.nova.scheduler
path: /var/log/nova/nova-scheduler.log
+ NovaSchedulerDiscoverHostsInCellsInterval:
+ type: number
+ default: -1
+ description: >
+ This value controls how often (in seconds) the scheduler should
+ attempt to discover new hosts that have been added to cells.
+ The default value of -1 disables the periodic task completely.
+ It is recommended to set this parameter for deployments using Ironic.
resources:
NovaBase:
@@ -71,6 +79,7 @@ outputs:
- nova::ram_allocation_ratio: '1.0'
nova::scheduler::filter::scheduler_available_filters: {get_param: NovaSchedulerAvailableFilters}
nova::scheduler::filter::scheduler_default_filters: {get_param: NovaSchedulerDefaultFilters}
+ nova::scheduler::discover_hosts_in_cells_interval: {get_param: NovaSchedulerDiscoverHostsInCellsInterval}
step_config: |
include tripleo::profile::base::nova::scheduler
upgrade_tasks:
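The new NovaSchedulerDiscoverHostsInCellsInterval parameter is wired to nova::scheduler::discover_hosts_in_cells_interval; the default of -1 keeps the periodic discovery task disabled. A minimal sketch of enabling it from an environment file; the 120-second interval is an illustrative value, not a recommendation from this change:

    parameter_defaults:
      NovaSchedulerDiscoverHostsInCellsInterval: 120   # scheduler looks for newly added hosts every 120 seconds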