-rw-r--r--  deployed-server/deployed-server.yaml | 6
-rw-r--r--  docker/docker-steps.j2 | 86
-rw-r--r--  environments/neutron-opendaylight-dpdk.yaml | 37
-rw-r--r--  environments/neutron-ovs-dpdk.yaml | 37
-rw-r--r--  environments/overcloud-baremetal.j2.yaml | 19
-rw-r--r--  environments/overcloud-services.yaml | 7
-rw-r--r--  extraconfig/pre_network/ansible_host_config.yaml | 2
-rw-r--r--  extraconfig/pre_network/host_config_and_reboot.yaml | 167
-rw-r--r--  net-config-bond.yaml | 13
-rw-r--r--  network/config/bond-with-vlans/ceph-storage.yaml | 10
-rw-r--r--  network/config/bond-with-vlans/cinder-storage.yaml | 10
-rw-r--r--  network/config/bond-with-vlans/compute-dpdk.yaml | 5
-rw-r--r--  network/config/bond-with-vlans/compute.yaml | 10
-rw-r--r--  network/config/bond-with-vlans/controller-no-external.yaml | 10
-rw-r--r--  network/config/bond-with-vlans/controller-v6.yaml | 10
-rw-r--r--  network/config/bond-with-vlans/controller.yaml | 10
-rw-r--r--  network/config/bond-with-vlans/swift-storage.yaml | 10
-rw-r--r--  network/ports/ctlplane_vip.yaml | 2
-rw-r--r--  network/ports/net_ip_list_map.yaml | 14
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml | 2
-rw-r--r--  overcloud.j2.yaml | 46
-rw-r--r--  puppet/blockstorage-role.yaml | 34
-rw-r--r--  puppet/cephstorage-role.yaml | 34
-rw-r--r--  puppet/compute-role.yaml | 34
-rw-r--r--  puppet/controller-role.yaml | 35
-rw-r--r--  puppet/objectstorage-role.yaml | 35
-rw-r--r--  puppet/post.j2.yaml | 7
-rw-r--r--  puppet/puppet-steps.j2 | 82
-rw-r--r--  puppet/role.role.j2.yaml | 34
-rw-r--r--  puppet/services/README.rst | 24
-rw-r--r--  puppet/services/haproxy.yaml | 5
-rw-r--r--  puppet/services/neutron-ovs-agent.yaml | 10
-rw-r--r--  puppet/services/neutron-ovs-dpdk-agent.yaml | 51
-rw-r--r--  puppet/services/opendaylight-ovs.yaml | 40
-rw-r--r--  puppet/services/openvswitch-upgrade.yaml | 50
-rw-r--r--  puppet/services/openvswitch.yaml | 178
-rw-r--r--  releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml | 4
-rw-r--r--  releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml | 6
-rw-r--r--  releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml | 6
-rw-r--r--  releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml | 8
-rw-r--r--  releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml | 23
-rw-r--r--  releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml | 8
-rw-r--r--  releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml | 7
-rw-r--r--  releasenotes/notes/vipmap-output-4a9ce99930960346.yaml | 5
-rw-r--r--  services.yaml | 4
45 files changed, 1038 insertions, 199 deletions
diff --git a/deployed-server/deployed-server.yaml b/deployed-server/deployed-server.yaml
index 0847bfbf..16deb7d6 100644
--- a/deployed-server/deployed-server.yaml
+++ b/deployed-server/deployed-server.yaml
@@ -44,6 +44,9 @@ parameters:
Command or script snippet to run on all overcloud nodes to
initialize the upgrade process. E.g. a repository switch.
default: ''
+ deployment_swift_data:
+ type: json
+ default: {}
resources:
deployed-server:
@@ -51,6 +54,7 @@ resources:
properties:
name: {get_param: name}
software_config_transport: {get_param: software_config_transport}
+ deployment_swift_data: {get_param: deployment_swift_data}
UpgradeInitConfig:
type: OS::Heat::SoftwareConfig
@@ -133,3 +137,5 @@ outputs:
- {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]}
name:
value: {get_attr: [HostsEntryDeployment, hostname]}
+ os_collect_config:
+ value: {get_attr: [deployed-server, os_collect_config]}
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
index 3dd963b9..83772028 100644
--- a/docker/docker-steps.j2
+++ b/docker/docker-steps.j2
@@ -21,6 +21,9 @@ parameters:
servers:
type: json
description: Mapping of Role name e.g Controller to a list of servers
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
@@ -35,6 +38,21 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ ctlplane_service_ips:
+ type: json
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}_Enabled:
+ or:
+ {% for role in roles %}
+ - not:
+ equals:
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+ - ''
+ - False
+ {% endfor %}
+{% endfor %}
resources:
@@ -68,6 +86,53 @@ resources:
- name: bootstrap_server_id
config: {get_file: deploy-steps-playbook.yaml}
+{%- for step in range(1, deploy_steps_max) %}
+# BEGIN service_workflow_tasks handling
+ WorkflowTasks_Step{{step}}:
+ type: OS::Mistral::Workflow
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on:
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
+ properties:
+ name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+ type: direct
+ tasks:
+ yaql:
+ expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+ data:
+ {% for role in roles %}
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+ {% endfor %}
+
+ WorkflowTasks_Step{{step}}_Execution:
+ type: OS::Mistral::ExternalResource
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on: WorkflowTasks_Step{{step}}
+ properties:
+ actions:
+ CREATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ UPDATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ always_update: true
+# END service_workflow_tasks handling
+{% endfor %}
+
{% for role in roles %}
# Post deployment steps for all roles
# A single config is re-applied with an incrementing step number
@@ -195,14 +260,23 @@ resources:
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
depends_on:
- {% for dep in roles %}
+ - WorkflowTasks_Step{{step}}_Execution
+ # TODO(gfidente): the following if/else condition
+ # replicates what is already defined for the
+ # WorkflowTasks_StepX resource and can be removed
+ # if https://bugs.launchpad.net/heat/+bug/1700569
+ # is fixed.
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
- {{dep.name}}Deployment_Step{{step -1}}
- {% endfor %}
- {% endif %}
+ {% endfor %}
+ {% endif %}
properties:
name: {{role.name}}Deployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
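
The yaql "tasks" expression used by the WorkflowTasks_Step resources above filters the per-role service_workflow_tasks maps down to the tasks defined for the current step. A rough illustration for step2, assuming a role with no workflow tasks resolves to an empty string here:

    # data (one entry per role):
    #   - step2:
    #       - name: echo
    #         action: std.echo output=Hello
    #   - ''
    # $.data.where($ != '').select($.get('step2')).where($ != null).flatten()
    # evaluates to:
    #   - name: echo
    #     action: std.echo output=Hello
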
diff --git a/environments/neutron-opendaylight-dpdk.yaml b/environments/neutron-opendaylight-dpdk.yaml
new file mode 100644
index 00000000..9ee4eb7e
--- /dev/null
+++ b/environments/neutron-opendaylight-dpdk.yaml
@@ -0,0 +1,37 @@
+# A Heat environment that can be used to deploy OpenDaylight with L3 DVR and DPDK
+resource_registry:
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+ OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
+ OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+
+parameter_defaults:
+ NeutronEnableForceMetadata: true
+ NeutronMechanismDrivers: 'opendaylight_v2'
+ NeutronServicePlugins: 'odl-router_v2'
+ NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+ ## Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+ ## It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+ ## This can be done using ComputeKernelArgs as shown below.
+ ComputeParameters:
+ #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+ ## Attempting to deploy DPDK without appropriate values for the below parameters may lead to unstable deployments
+ ## due to CPU contention of DPDK PMD threads.
+ OvsEnableDpdk: True
+ ## It is highly recommended to enable isolcpus (via ComputeKernelArgs) on compute overcloud nodes and set the following parameters:
+ #OvsDpdkSocketMemory: "" # Sets the amount of hugepage memory to assign per NUMA node.
+ # It is recommended to use the socket closest to the PCIe slot used for the
+ # desired DPDK NIC. Format should be comma separated per socket string such as:
+ # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+ #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+ #OvsPmdCoreList: "" # List or range of CPU cores for PMD threads to be pinned to. Note, NIC
+ # location to cores on socket, number of hyper-threaded logical cores, and
+ # desired number of PMD threads can all play a role in configuring this setting.
+ # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+ # If using hyperthreading then specify both logical cores that would equal the
+ # physical core. Also, specifying more than one core will trigger multiple PMD
+ # threads to be spawned, which may improve dataplane performance.
+ #NovaVcpuPinSet: "" # Cores to pin Nova instances to. For maximum performance, select cores
+ # on the same NUMA node(s) selected for previous settings.
diff --git a/environments/neutron-ovs-dpdk.yaml b/environments/neutron-ovs-dpdk.yaml
index 6706bccc..ecfd0fea 100644
--- a/environments/neutron-ovs-dpdk.yaml
+++ b/environments/neutron-ovs-dpdk.yaml
@@ -1,18 +1,31 @@
-## A Heat environment that can be used to deploy DPDK with OVS
+# A Heat environment that can be used to deploy DPDK with OVS
+# Deploying DPDK requires enabling hugepages for the overcloud nodes
resource_registry:
OS::TripleO::Services::ComputeNeutronOvsAgent: ../puppet/services/neutron-ovs-dpdk-agent.yaml
parameter_defaults:
- ## NeutronDpdkCoreList and NeutronDpdkMemoryChannels are REQUIRED settings.
- ## Attempting to deploy DPDK without appropriate values will cause deployment to fail or lead to unstable deployments.
- #NeutronDpdkCoreList: ""
- #NeutronDpdkMemoryChannels: ""
-
NeutronDatapathType: "netdev"
NeutronVhostuserSocketDir: "/var/lib/vhost_sockets"
-
- #NeutronDpdkSocketMemory: ""
- #NeutronDpdkDriverType: "vfio-pci"
- #NovaReservedHostMemory: 4096
- #NovaVcpuPinSet: ""
-
+ NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+ ## Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+ ## It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+ ## This can be done using ComputeKernelArgs as shown below.
+ #ComputeParameters:
+ #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+ ## Attempting to deploy DPDK without appropriate values for the below parameters may lead to unstable deployments
+ ## due to CPU contention of DPDK PMD threads.
+ ## It is highly recommended to enable isolcpus (via ComputeKernelArgs) on compute overcloud nodes and set the following parameters:
+ #OvsDpdkSocketMemory: "" # Sets the amount of hugepage memory to assign per NUMA node.
+ # It is recommended to use the socket closest to the PCIe slot used for the
+ # desired DPDK NIC. Format should be comma separated per socket string such as:
+ # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+ #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+ #OvsPmdCoreList: "" # List or range of CPU cores for PMD threads to be pinned to. Note, NIC
+ # location to cores on socket, number of hyper-threaded logical cores, and
+ # desired number of PMD threads can all play a role in configuring this setting.
+ # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+ # If using hyperthreading then specify both logical cores that would equal the
+ # physical core. Also, specifying more than one core will trigger multiple PMD
+ # threads to be spawned, which may improve dataplane performance.
+ #NovaVcpuPinSet: "" # Cores to pin Nova instances to. For maximum performance, select cores
+ # on the same NUMA node(s) selected for previous settings.
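
For reference, a filled-in variant of the commented settings above might look as follows; the core numbers, socket memory and pin set are placeholders only and must be derived from the NUMA topology of the actual compute hosts, together with matching hugepage and IOMMU kernel arguments as described in the comments:

    parameter_defaults:
      OvsDpdkSocketMemory: "1024,0"
      OvsDpdkDriverType: "vfio-pci"
      OvsPmdCoreList: "2,3,22,23"
      NovaVcpuPinSet: "4-21,24-43"
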
diff --git a/environments/overcloud-baremetal.j2.yaml b/environments/overcloud-baremetal.j2.yaml
new file mode 100644
index 00000000..668e28de
--- /dev/null
+++ b/environments/overcloud-baremetal.j2.yaml
@@ -0,0 +1,19 @@
+resource_registry:
+ OS::TripleO::AllNodes::SoftwareConfig: OS::Heat::None
+ OS::TripleO::PostDeploySteps: OS::Heat::None
+ OS::TripleO::DefaultPasswords: OS::Heat::None
+ OS::TripleO::RandomString: OS::Heat::None
+ OS::TripleO::AllNodesDeployment: OS::Heat::None
+
+parameter_defaults:
+ # Deploy no services
+{% for role in roles %}
+ {{role.name}}Services: []
+{% endfor %}
+
+ # Consistent Hostname format
+ ControllerHostnameFormat: overcloud-controller-%index%
+ ComputeHostnameFormat: overcloud-novacompute-%index%
+ ObjectStorageHostnameFormat: overcloud-objectstorage-%index%
+ CephStorageHostnameFormat: overcloud-cephstorage-%index%
+ BlockStorageHostnameFormat: overcloud-blockstorage-%index%
diff --git a/environments/overcloud-services.yaml b/environments/overcloud-services.yaml
new file mode 100644
index 00000000..c409b899
--- /dev/null
+++ b/environments/overcloud-services.yaml
@@ -0,0 +1,7 @@
+parameter_defaults:
+ # Consistent Hostname format
+ ControllerDeployedServerHostnameFormat: overcloud-controller-%index%
+ ComputeDeployedServerHostnameFormat: overcloud-novacompute-%index%
+ ObjectStorageDeployedServerHostnameFormat: overcloud-objectstorage-%index%
+ CephStorageDeployedServerHostnameFormat: overcloud-cephstorage-%index%
+ BlockStorageDeployedServerHostnameFormat: overcloud-blockstorage-%index%
diff --git a/extraconfig/pre_network/ansible_host_config.yaml b/extraconfig/pre_network/ansible_host_config.yaml
index f4f1a14a..2d862613 100644
--- a/extraconfig/pre_network/ansible_host_config.yaml
+++ b/extraconfig/pre_network/ansible_host_config.yaml
@@ -31,7 +31,7 @@
line: 'isolated_cores={{ _TUNED_CORES_ }}'
when: _TUNED_CORES_|default("") != ""
- - name: Tune-d provile activation
+ - name: Tune-d profile activation
shell: tuned-adm profile {{ _TUNED_PROFILE_NAME_ }}
become: true
when: _TUNED_PROFILE_NAME_|default("") != ""
diff --git a/extraconfig/pre_network/host_config_and_reboot.yaml b/extraconfig/pre_network/host_config_and_reboot.yaml
index 74e716ad..009a0879 100644
--- a/extraconfig/pre_network/host_config_and_reboot.yaml
+++ b/extraconfig/pre_network/host_config_and_reboot.yaml
@@ -11,11 +11,136 @@ parameters:
type: json
description: Role Specific parameters
default: {}
+ ServiceNames:
+ type: comma_delimited_list
+ default: []
+ IsolCpusList:
+ default: "0"
+ description: List of cores to be isolated by tuned
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]+"
+ OvsEnableDpdk:
+ default: false
+ description: Whether or not to enable DPDK in OVS
+ type: boolean
+ OvsDpdkCoreList:
+ description: >
+ List of cores to be used for DPDK lcore threads. Note, these threads
+ are used by the OVS control path for validation and handling functions.
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ""
+ OvsDpdkMemoryChannels:
+ description: Number of memory channels per socket to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ""
+ OvsDpdkSocketMemory:
+ default: ""
+ description: >
+ Sets the amount of hugepage memory to assign per NUMA node. It is
+ recommended to use the socket closest to the PCIe slot used for the
+ desired DPDK NIC. The format should be in "<socket 0 mem>, <socket 1
+ mem>, <socket n mem>", where the value is specified in MB. For example:
+ "1024,0".
+ type: string
+ OvsDpdkDriverType:
+ default: "vfio-pci"
+ description: >
+ DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+ this UIO/PMD driver.
+ type: string
+ OvsPmdCoreList:
+ description: >
+ A list or range of CPU cores for PMD threads to be pinned to. Note, NIC
+ location to cores on socket, number of hyper-threaded logical cores, and
+ desired number of PMD threads can all play a role in configuring this
+ setting. These cores should be on the same socket where
+ OvsDpdkSocketMemory is assigned. If using hyperthreading then specify
+ both logical cores that would equal the physical core. Also, specifying
+ more than one core will trigger multiple PMD threads to be spawned which
+ may improve dataplane performance.
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ type: string
+ default: ""
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Queens cycle.
+ HostCpusList:
+ description: List of cores to be used for host process
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]+"
+ default: '0'
+ NeutronDpdkCoreList:
+ description: List of cores to be used for DPDK Poll Mode Driver
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkMemoryChannels:
+ description: Number of memory channels to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ''
+ NeutronDpdkSocketMemory:
+ default: ''
+ description: Memory allocated for each socket
+ type: string
+ NeutronDpdkDriverType:
+ default: "vfio-pci"
+ description: DPDK Driver type
+ type: string
conditions:
is_host_config_required: {not: {equals: [{get_param: [RoleParameters, KernelArgs]}, ""]}}
+ # YAQL is enabled in conditions with https://review.openstack.org/#/c/467506/
+ is_dpdk_config_required:
+ or:
+ - yaql:
+ expression: $.data.service_names.contains('neutron_ovs_dpdk_agent')
+ data:
+ service_names: {get_param: ServiceNames}
+ - {get_param: OvsEnableDpdk}
+ - {get_param: [RoleParameters, OvsEnableDpdk]}
+ is_reboot_config_required:
+ or:
+ - is_host_config_required
+ - is_dpdk_config_required
+ l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+ pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+ mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+ socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+ driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+ isol_cpus_empty: {equals: [{get_param: IsolCpusList}, '0']}
resources:
+ RoleParametersValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_replace:
+ - map_replace:
+ - IsolCpusList: IsolCpusList
+ OvsDpdkCoreList: OvsDpdkCoreList
+ OvsDpdkMemoryChannels: OvsDpdkMemoryChannels
+ OvsDpdkSocketMemory: OvsDpdkSocketMemory
+ OvsDpdkDriverType: OvsDpdkDriverType
+ OvsPmdCoreList: OvsDpdkCoreList
+ - values: {get_param: [RoleParameters]}
+ - values:
+ IsolCpusList: {if: [isol_cpus_empty, {get_param: HostCpusList}, {get_param: IsolCpusList}]}
+ OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+ OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+ OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+ OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+ OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
HostParametersConfig:
type: OS::Heat::SoftwareConfig
condition: is_host_config_required
@@ -41,11 +166,47 @@ resources:
input_values:
_KERNEL_ARGS_: {get_param: [RoleParameters, KernelArgs]}
_TUNED_PROFILE_NAME_: {get_param: [RoleParameters, TunedProfileName]}
- _TUNED_CORES_: {get_param: [RoleParameters, HostIsolatedCoreList]}
+ _TUNED_CORES_: {get_param: [RoleParameters, IsolCpusList]}
+
+ EnableDpdkConfig:
+ type: OS::Heat::SoftwareConfig
+ condition: is_dpdk_config_required
+ properties:
+ group: script
+ config:
+ str_replace:
+ template: |
+ #!/bin/bash
+ set -x
+ # DO NOT use --detailed-exitcodes
+ puppet apply --logdest console \
+ --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
+ -e '
+ class {"vswitch::dpdk":
+ host_core_list => "$HOST_CORES",
+ pmd_core_list => "$PMD_CORES",
+ memory_channels => "$MEMORY_CHANNELS",
+ socket_mem => "$SOCKET_MEMORY",
+ }
+ '
+ params:
+ $HOST_CORES: {get_attr: [RoleParametersValue, value, OvsDpdkCoreList]}
+ $PMD_CORES: {get_attr: [RoleParametersValue, value, OvsPmdCoreList]}
+ $MEMORY_CHANNELS: {get_attr: [RoleParametersValue, value, OvsDpdkMemoryChannels]}
+ $SOCKET_MEMORY: {get_attr: [RoleParametersValue, value, OvsDpdkSocketMemory]}
+
+ EnableDpdkDeployment:
+ type: OS::Heat::SoftwareDeployment
+ condition: is_dpdk_config_required
+ properties:
+ name: EnableDpdkDeployment
+ server: {get_param: server}
+ config: {get_resource: EnableDpdkConfig}
+ actions: ['CREATE'] # Only do this on CREATE
RebootConfig:
type: OS::Heat::SoftwareConfig
- condition: is_host_config_required
+ condition: is_reboot_config_required
properties:
group: script
config: |
@@ -58,7 +219,7 @@ resources:
RebootDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: HostParametersDeployment
- condition: is_host_config_required
+ condition: is_reboot_config_required
properties:
name: RebootDeployment
server: {get_param: server}
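
The RoleParametersValue mapping above resolves each setting with a simple precedence: a role-specific value passed in via RoleParameters wins, otherwise the new top-level Ovs* parameter is used, and the deprecated parameter only acts as a fallback when the new one is left empty. A small illustration, assuming a deployment that still sets only the deprecated parameters:

    # parameter_defaults:
    #   NeutronDpdkMemoryChannels: "4"      # deprecated
    #   NeutronDpdkSocketMemory: "1024,0"   # deprecated
    # With OvsDpdkMemoryChannels and OvsDpdkSocketMemory left at their empty
    # defaults and no role-specific override, the values handed to the
    # vswitch::dpdk class resolve to:
    #   OvsDpdkMemoryChannels: "4"
    #   OvsDpdkSocketMemory: "1024,0"
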
diff --git a/net-config-bond.yaml b/net-config-bond.yaml
index f92f9a13..95b47455 100644
--- a/net-config-bond.yaml
+++ b/net-config-bond.yaml
@@ -4,19 +4,14 @@ description: >
parameters:
BondInterfaceOvsOptions:
default: ''
- description: 'The ovs_options string for the bond interface. Set things like
-
- lacp=active and/or bond_mode=balance-slb using this option.
-
- '
+ description: The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
- description: 'The balance-tcp bond mode is known to cause packet loss and
-
+ description: The balance-tcp bond mode is known to cause packet loss and
should not be used in BondInterfaceOvsOptions.
-
- '
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
diff --git a/network/config/bond-with-vlans/ceph-storage.yaml b/network/config/bond-with-vlans/ceph-storage.yaml
index 97177c41..9683456a 100644
--- a/network/config/bond-with-vlans/ceph-storage.yaml
+++ b/network/config/bond-with-vlans/ceph-storage.yaml
@@ -32,16 +32,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/cinder-storage.yaml b/network/config/bond-with-vlans/cinder-storage.yaml
index 5456c2cc..3ad6d653 100644
--- a/network/config/bond-with-vlans/cinder-storage.yaml
+++ b/network/config/bond-with-vlans/cinder-storage.yaml
@@ -32,16 +32,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/compute-dpdk.yaml b/network/config/bond-with-vlans/compute-dpdk.yaml
index 607d346f..095c4973 100644
--- a/network/config/bond-with-vlans/compute-dpdk.yaml
+++ b/network/config/bond-with-vlans/compute-dpdk.yaml
@@ -32,8 +32,9 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
ExternalNetworkVlanID:
default: 10
diff --git a/network/config/bond-with-vlans/compute.yaml b/network/config/bond-with-vlans/compute.yaml
index 448d4e2a..8fff1378 100644
--- a/network/config/bond-with-vlans/compute.yaml
+++ b/network/config/bond-with-vlans/compute.yaml
@@ -32,16 +32,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/controller-no-external.yaml b/network/config/bond-with-vlans/controller-no-external.yaml
index 8ac5cda7..4901f94d 100644
--- a/network/config/bond-with-vlans/controller-no-external.yaml
+++ b/network/config/bond-with-vlans/controller-no-external.yaml
@@ -32,16 +32,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/controller-v6.yaml b/network/config/bond-with-vlans/controller-v6.yaml
index 25796484..33c6fa65 100644
--- a/network/config/bond-with-vlans/controller-v6.yaml
+++ b/network/config/bond-with-vlans/controller-v6.yaml
@@ -34,16 +34,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: bond_mode=active-backup
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/controller.yaml b/network/config/bond-with-vlans/controller.yaml
index e4b30120..100821b7 100644
--- a/network/config/bond-with-vlans/controller.yaml
+++ b/network/config/bond-with-vlans/controller.yaml
@@ -32,16 +32,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: bond_mode=active-backup
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/swift-storage.yaml b/network/config/bond-with-vlans/swift-storage.yaml
index 6371ceb5..0ede081f 100644
--- a/network/config/bond-with-vlans/swift-storage.yaml
+++ b/network/config/bond-with-vlans/swift-storage.yaml
@@ -32,16 +32,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
- description: 'The balance-tcp bond mode is known to cause packet loss and
-
+ description: The balance-tcp bond mode is known to cause packet loss and
should not be used in BondInterfaceOvsOptions.
-
- '
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/ports/ctlplane_vip.yaml b/network/ports/ctlplane_vip.yaml
index 386520cf..bb54ca62 100644
--- a/network/ports/ctlplane_vip.yaml
+++ b/network/ports/ctlplane_vip.yaml
@@ -34,7 +34,7 @@ parameters:
resources:
VipPort:
- type: OS::Neutron::Port
+ type: OS::TripleO::Network::Ports::ControlPlaneVipPort
properties:
network: {get_param: ControlPlaneNetwork}
name: {get_param: PortName}
diff --git a/network/ports/net_ip_list_map.yaml b/network/ports/net_ip_list_map.yaml
index c3734afe..a9111ed9 100644
--- a/network/ports/net_ip_list_map.yaml
+++ b/network/ports/net_ip_list_map.yaml
@@ -133,6 +133,20 @@ outputs:
SERVICE: {get_attr: [EnabledServicesValue, value]}
- values: {get_param: ServiceNetMap}
- values: {get_attr: [NetIpMapValue, value]}
+ ctlplane_service_ips:
+ description: >
+ Map of enabled services to a list of their ctlplane IP addresses
+ value:
+ yaql:
+ expression: dict($.data.map.items().where(len($[1]) > 0))
+ data:
+ map:
+ map_merge:
+ repeat:
+ template:
+ SERVICE_ctlplane_node_ips: {get_param: ControlPlaneIpList}
+ for_each:
+ SERVICE: {get_attr: [EnabledServicesValue, value]}
service_hostnames:
description: >
Map of enabled services to a list of hostnames where they're running
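
The yaql expression behind the new ctlplane_service_ips output only drops entries whose IP list is empty, which happens for roles deployed with zero nodes. For example:

    # With ControlPlaneIpList = [192.0.2.10, 192.0.2.11] and enabled services
    # ntp and haproxy, the repeat/map_merge yields:
    #   ntp_ctlplane_node_ips: [192.0.2.10, 192.0.2.11]
    #   haproxy_ctlplane_node_ips: [192.0.2.10, 192.0.2.11]
    # and both entries are kept. With ControlPlaneIpList = [] the expression
    # returns {} rather than a map of empty lists.
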
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 4aee571e..b1a35293 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -4,10 +4,12 @@ resource_registry:
OS::TripleO::PostDeploySteps: puppet/post.yaml
OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
+ OS::TripleO::AllNodesDeployment: OS::Heat::StructuredDeployments
OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
OS::TripleO::Ssh::HostPubKey: extraconfig/tasks/ssh/host_public_key.yaml
OS::TripleO::Ssh::KnownHostsConfig: extraconfig/tasks/ssh/known_hosts_config.yaml
OS::TripleO::DefaultPasswords: default_passwords.yaml
+ OS::TripleO::RandomString: OS::Heat::RandomString
# Tasks (for internal TripleO usage)
OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index e4c04b4e..1848e09a 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -242,15 +242,15 @@ resources:
HOST: {get_param: CloudNameStorageManagement}
HeatAuthEncryptionKey:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
PcsdPassword:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 16
HorizonSecret:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 10
@@ -334,7 +334,7 @@ resources:
servers: {get_attr: [{{role.name}}Servers, value]}
{{role.name}}AllNodesDeployment:
- type: OS::Heat::StructuredDeployments
+ type: OS::TripleO::AllNodesDeployment
depends_on:
{% for role_inner in roles %}
- {{role_inner.name}}HostsDeployment
@@ -575,12 +575,12 @@ resources:
UpdateIdentifier: {get_param: UpdateIdentifier}
MysqlRootPassword:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 10
RabbitCookie:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 20
salt: {get_param: RabbitCookieSalt}
@@ -735,12 +735,34 @@ resources:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}Servers, value]}
{% endfor %}
+ stack_name: {get_param: 'OS::stack_name'}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+ ctlplane_service_ips:
+ # Note (shardy) this somewhat complex yaql may be replaced
+ # with a map_deep_merge function in ocata. It merges the
+ # list of maps, but appends to colliding lists when a service
+ # is deployed on more than one role
+ yaql:
+ expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
+ data:
+ l:
+{% for role in roles %}
+ - {get_attr: [{{role.name}}IpListMap, ctlplane_service_ips]}
+{% endfor %}
role_data:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]}
{% endfor %}
+ ServerOsCollectConfigData:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
+{% endfor %}
+
outputs:
ManagedEndpoints:
description: Asserts that the keystone endpoints have been provisioned.
@@ -791,3 +813,15 @@ outputs:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
{% endfor %}
+ ServerOsCollectConfigData:
+ description: The os-collect-config configuration associated with each server resource
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
+{% endfor %}
+ VipMap:
+ description: Mapping of each network to VIP addresses. Also includes the Redis VIP.
+ value:
+ map_merge:
+ - {get_attr: [VipMap, net_ip_map]}
+ - redis: {get_attr: [RedisVirtualIP, ip_address]}
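
The groupBy expression feeding ctlplane_service_ips merges the per-role maps and, as the inline note says, concatenates the lists for services deployed on more than one role. A small illustration with two roles:

    # Controller map:
    #   ntp_ctlplane_node_ips: [192.0.2.10]
    #   haproxy_ctlplane_node_ips: [192.0.2.10]
    # Compute map:
    #   ntp_ctlplane_node_ips: [192.0.2.20]
    # merged result passed to the post-deploy steps:
    #   ntp_ctlplane_node_ips: [192.0.2.10, 192.0.2.20]
    #   haproxy_ctlplane_node_ips: [192.0.2.10]
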
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index 551a88ca..612a4a01 100644
--- a/puppet/blockstorage-role.yaml
+++ b/puppet/blockstorage-role.yaml
@@ -142,6 +142,24 @@ parameters:
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
conditions:
@@ -150,6 +168,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
BlockStorage:
@@ -178,6 +202,12 @@ resources:
- {get_param: BlockStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: BlockStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -377,6 +407,7 @@ resources:
properties:
server: {get_resource: BlockStorage}
RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
@@ -614,3 +645,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [BlockStorage, os_collect_config]}
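
The new DeploymentSwiftDataMap parameter is meant to be supplied via parameter_defaults; a minimal sketch reusing the example values from the parameter description above:

    parameter_defaults:
      DeploymentSwiftDataMap:
        overcloud-controller-0:
          container: overcloud-controller
          object: 0
        overcloud-novacompute-0:
          container: overcloud-compute
          object: 0
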
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index 4336f3e7..e7afcb40 100644
--- a/puppet/cephstorage-role.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -148,6 +148,24 @@ parameters:
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
conditions:
@@ -156,6 +174,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
CephStorage:
@@ -184,6 +208,12 @@ resources:
- {get_param: CephStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: CephStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -383,6 +413,7 @@ resources:
properties:
server: {get_resource: CephStorage}
RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
@@ -625,3 +656,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [CephStorage, os_collect_config]}
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index e2cce5fb..5a662e86 100644
--- a/puppet/compute-role.yaml
+++ b/puppet/compute-role.yaml
@@ -160,9 +160,33 @@ parameters:
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
conditions:
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
server_not_blacklisted:
not:
equals:
@@ -198,6 +222,12 @@ resources:
- {get_param: NovaComputeServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: NovaComputeSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -386,6 +416,7 @@ resources:
properties:
server: {get_resource: NovaCompute}
RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkConfig:
type: OS::TripleO::Compute::Net::SoftwareConfig
@@ -651,3 +682,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
value:
{get_resource: NovaCompute}
condition: server_not_blacklisted
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [NovaCompute, os_collect_config]}
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index 10cfac79..09e5b2b9 100644
--- a/puppet/controller-role.yaml
+++ b/puppet/controller-role.yaml
@@ -178,6 +178,24 @@ parameters:
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
parameter_groups:
@@ -192,7 +210,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
-
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
@@ -222,6 +245,12 @@ resources:
- {get_param: ControllerServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ControllerSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -410,6 +439,7 @@ resources:
properties:
server: {get_resource: Controller}
RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkConfig:
type: OS::TripleO::Controller::Net::SoftwareConfig
@@ -701,3 +731,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
tls_cert_modulus_md5:
description: MD5 checksum of the TLS Certificate Modulus
value: {get_attr: [NodeTLSData, cert_modulus_md5]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [Controller, os_collect_config]}
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 40a5d441..4a1670f8 100644
--- a/puppet/objectstorage-role.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -143,6 +143,25 @@ parameters:
type: json
description: Role Specific Parameters
default: {}
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
@@ -150,6 +169,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
@@ -178,6 +203,12 @@ resources:
- {get_param: SwiftStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ObjectStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -377,6 +408,7 @@ resources:
properties:
server: {get_resource: SwiftStorage}
RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
@@ -613,3 +645,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [SwiftStorage, os_collect_config]}
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
index 3a15cec6..67e1ecfd 100644
--- a/puppet/post.j2.yaml
+++ b/puppet/post.j2.yaml
@@ -8,7 +8,9 @@ parameters:
servers:
type: json
description: Mapping of Role name e.g Controller to a list of servers
-
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
@@ -23,6 +25,7 @@ parameters:
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
+ ctlplane_service_ips:
+ type: json
-resources:
{% include 'puppet-steps.j2' %}
diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2
index 5567d65d..82c6171e 100644
--- a/puppet/puppet-steps.j2
+++ b/puppet/puppet-steps.j2
@@ -1,3 +1,19 @@
+{% set deploy_steps_max = 6 %}
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}_Enabled:
+ or:
+ {% for role in roles %}
+ - not:
+ equals:
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+ - ''
+ - False
+ {% endfor %}
+{% endfor %}
+
+resources:
# Post deployment steps for all roles
# A single config is re-applied with an incrementing step number
{% for role in roles %}
@@ -24,17 +40,26 @@
StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
# Step through a series of configuration steps
-{% for step in range(1, 6) %}
+{% for step in range(1, deploy_steps_max) %}
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
depends_on:
+ - WorkflowTasks_Step{{step}}_Execution
+ # TODO(gfidente): the following if/else condition
+ # replicates what is already defined for the
+ # WorkflowTasks_StepX resource and can be removed
+ # if https://bugs.launchpad.net/heat/+bug/1700569
+ # is fixed.
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
{% for dep in roles %}
- {{dep.name}}Deployment_Step{{step -1}}
{% endfor %}
- {% endif %}
+ {% endif %}
properties:
name: {{role.name}}Deployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
@@ -72,3 +97,50 @@
{% endfor %}
+
+# BEGIN service_workflow_tasks handling
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}:
+ type: OS::Mistral::Workflow
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on:
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
+ properties:
+ name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+ type: direct
+ tasks:
+ yaql:
+ expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+ data:
+ {% for role in roles %}
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+ {% endfor %}
+
+ WorkflowTasks_Step{{step}}_Execution:
+ type: OS::Mistral::ExternalResource
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on: WorkflowTasks_Step{{step}}
+ properties:
+ actions:
+ CREATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ UPDATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ always_update: true
+{% endfor %}
+# END service_workflow_tasks handling
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 5ab763ba..b45736c1 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -168,6 +168,24 @@ parameters:
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
conditions:
@@ -176,6 +194,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
{{role}}:
@@ -204,6 +228,12 @@ resources:
- {get_param: {{role}}ServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: {{role}}SchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -403,6 +433,7 @@ resources:
properties:
server: {get_resource: {{role}}}
RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
@@ -652,3 +683,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [{{role}}, os_collect_config]}
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index 7a18ef0c..d55414b7 100644
--- a/puppet/services/README.rst
+++ b/puppet/services/README.rst
@@ -95,6 +95,30 @@ are re-asserted when applying latter ones.
5) Service activation (Pacemaker)
+It is also possible to use Mistral actions or workflows together with
+a deployment step; these are executed before the main configuration run.
+To describe actions or workflows from within a service use:
+
+ * service_workflow_tasks: One or more workflow task properties
+
+which expects a map where the key is the step and the value a list of
+dictionaries, each describing a workflow task, for example::
+
+ service_workflow_tasks:
+ step2:
+ - name: echo
+ action: std.echo output=Hello
+ step3:
+ - name: external
+ workflow: my-pre-existing-workflow-name
+ input:
+ workflow_param1: value
+ workflow_param2: value
+
+The Heat guide for the `OS::Mistral::Workflow task property
+<https://docs.openstack.org/developer/heat/template_guide/openstack.html#OS::Mistral::Workflow-prop-tasks>`_
+has more details about the expected dictionary.
+
Batch Upgrade Steps
-------------------
diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml
index 619cf131..5bdc3b88 100644
--- a/puppet/services/haproxy.yaml
+++ b/puppet/services/haproxy.yaml
@@ -38,6 +38,10 @@ parameters:
default: /dev/log
description: Syslog address where HAproxy will send its log
type: string
+ HAProxyStatsEnabled:
+ default: true
+ description: Whether or not to enable the HAProxy stats interface.
+ type: boolean
RedisPassword:
description: The password for Redis
type: string
@@ -95,6 +99,7 @@ outputs:
tripleo::haproxy::redis_password: {get_param: RedisPassword}
tripleo::haproxy::ca_bundle: {get_param: InternalTLSCAFile}
tripleo::haproxy::crl_file: {get_param: InternalTLSCRLPEMFile}
+ tripleo::haproxy::haproxy_stats: {get_param: HAProxyStatsEnabled}
tripleo::profile::base::haproxy::certificates_specs:
map_merge:
- get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
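
The new HAProxyStatsEnabled flag defaults to true, preserving the previous behaviour; the stats interface can be disabled from an environment file:

    parameter_defaults:
      HAProxyStatsEnabled: false
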
diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml
index 76d5c269..4493721c 100644
--- a/puppet/services/neutron-ovs-agent.yaml
+++ b/puppet/services/neutron-ovs-agent.yaml
@@ -92,8 +92,12 @@ resources:
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
@@ -138,7 +142,7 @@ outputs:
expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
data:
ovs_upgrade:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
neutron_ovs_upgrade:
- name: Check if neutron_ovs_agent is deployed
command: systemctl is-enabled neutron-openvswitch-agent
diff --git a/puppet/services/neutron-ovs-dpdk-agent.yaml b/puppet/services/neutron-ovs-dpdk-agent.yaml
index 29c10469..da7a4d68 100644
--- a/puppet/services/neutron-ovs-dpdk-agent.yaml
+++ b/puppet/services/neutron-ovs-dpdk-agent.yaml
@@ -26,32 +26,6 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- HostCpusList:
- default: "0"
- description: List of cores to be used for host process
- type: string
- constraints:
- - allowed_pattern: "[0-9,-]+"
- NeutronDpdkCoreList:
- default: ""
- description: List of cores to be used for DPDK Poll Mode Driver
- type: string
- constraints:
- - allowed_pattern: "[0-9,-]*"
- NeutronDpdkMemoryChannels:
- default: ""
- description: Number of memory channels to be used for DPDK
- type: string
- constraints:
- - allowed_pattern: "[0-9]*"
- NeutronDpdkSocketMemory:
- default: ""
- description: Memory allocated for each socket
- type: string
- NeutronDpdkDriverType:
- default: "vfio-pci"
- description: DPDK Driver type
- type: string
# below parameters has to be set in neutron agent only for compute nodes.
# as of now there is no other usecase for these parameters except dpdk.
# should be moved to compute only ovs agent in case of any other usecases.
@@ -75,9 +49,6 @@ resources:
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
-
# Merging role-specific parameters (RoleParameters) with the default parameters.
# RoleParameters will have the precedence over the default parameters.
RoleParametersValue:
@@ -89,20 +60,19 @@ resources:
- map_replace:
- neutron::agents::ml2::ovs::datapath_type: NeutronDatapathType
neutron::agents::ml2::ovs::vhostuser_socket_dir: NeutronVhostuserSocketDir
- vswitch::dpdk::driver_type: NeutronDpdkDriverType
- vswitch::dpdk::host_core_list: HostCpusList
- vswitch::dpdk::pmd_core_list: NeutronDpdkCoreList
- vswitch::dpdk::memory_channels: NeutronDpdkMemoryChannels
- vswitch::dpdk::socket_mem: NeutronDpdkSocketMemory
- values: {get_param: [RoleParameters]}
- values:
NeutronDatapathType: {get_param: NeutronDatapathType}
NeutronVhostuserSocketDir: {get_param: NeutronVhostuserSocketDir}
- NeutronDpdkDriverType: {get_param: NeutronDpdkDriverType}
- HostCpusList: {get_param: HostCpusList}
- NeutronDpdkCoreList: {get_param: NeutronDpdkCoreList}
- NeutronDpdkMemoryChannels: {get_param: NeutronDpdkMemoryChannels}
- NeutronDpdkSocketMemory: {get_param: NeutronDpdkSocketMemory}
+
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
@@ -116,7 +86,8 @@ outputs:
- keys:
tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
- neutron::agents::ml2::ovs::enable_dpdk: true
+ - get_attr: [Ovs, role_data, config_settings]
- get_attr: [RoleParametersValue, value]
step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]}
upgrade_tasks:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index 0d859be1..1a8754a5 100644
--- a/puppet/services/opendaylight-ovs.yaml
+++ b/puppet/services/opendaylight-ovs.yaml
@@ -57,8 +57,14 @@ parameters:
type: json
resources:
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
@@ -66,19 +72,21 @@ outputs:
value:
service_name: opendaylight_ovs
config_settings:
- opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
- opendaylight::username: {get_param: OpenDaylightUsername}
- opendaylight::password: {get_param: OpenDaylightPassword}
- opendaylight_check_url: {get_param: OpenDaylightCheckURL}
- opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
- neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
- neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
- tripleo.opendaylight_ovs.firewall_rules:
- '118 neutron vxlan networks':
- proto: 'udp'
- dport: 4789
- '136 neutron gre networks':
- proto: 'gre'
+ map_merge:
+ - opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
+ opendaylight::username: {get_param: OpenDaylightUsername}
+ opendaylight::password: {get_param: OpenDaylightPassword}
+ opendaylight_check_url: {get_param: OpenDaylightCheckURL}
+ opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
+ neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
+ neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
+ tripleo.opendaylight_ovs.firewall_rules:
+ '118 neutron vxlan networks':
+ proto: 'udp'
+ dport: 4789
+ '136 neutron gre networks':
+ proto: 'gre'
+ - get_attr: [Ovs, role_data, config_settings]
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
upgrade_tasks:
@@ -86,7 +94,7 @@ outputs:
expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade
data:
ovs_upgrade:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
opendaylight_upgrade:
- name: Check if openvswitch is deployed
command: systemctl is-enabled openvswitch
diff --git a/puppet/services/openvswitch-upgrade.yaml b/puppet/services/openvswitch-upgrade.yaml
deleted file mode 100644
index f6e78462..00000000
--- a/puppet/services/openvswitch-upgrade.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-heat_template_version: pike
-
-description: >
- Openvswitch package special handling for upgrade.
-
-outputs:
- role_data:
- description: Upgrade task for special handling of Openvswitch (OVS) upgrade.
- value:
- service_name: openvswitch_upgrade
- upgrade_tasks:
- - name: Check openvswitch version.
- tags: step2
- register: ovs_version
- ignore_errors: true
- shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
- - name: Check openvswitch packaging.
- tags: step2
- shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
- register: ovs_packaging_issue
- ignore_errors: true
- - block:
- - name: "Ensure empty directory: emptying."
- file:
- state: absent
- path: /root/OVS_UPGRADE
- - name: "Ensure empty directory: creating."
- file:
- state: directory
- path: /root/OVS_UPGRADE
- owner: root
- group: root
- mode: 0750
- - name: Download OVS packages.
- command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
- - name: Get rpm list for manual upgrade of OVS.
- shell: ls -1 /root/OVS_UPGRADE/*.rpm
- register: ovs_list_of_rpms
- - name: Manual upgrade of OVS
- shell: |
- rpm -U --test {{item}} 2>&1 | grep "already installed" || \
- rpm -U --replacepkgs --notriggerun --nopostun {{item}};
- args:
- chdir: /root/OVS_UPGRADE
- with_items:
- - "{{ovs_list_of_rpms.stdout_lines}}"
- tags: step2
- when: "'2.5.0-14' in '{{ovs_version.stdout}}'
- or
- ovs_packaging_issue|succeeded"
diff --git a/puppet/services/openvswitch.yaml b/puppet/services/openvswitch.yaml
new file mode 100644
index 00000000..36aa5db7
--- /dev/null
+++ b/puppet/services/openvswitch.yaml
@@ -0,0 +1,178 @@
+heat_template_version: pike
+
+description: >
+ Open vSwitch Configuration
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OvsDpdkCoreList:
+ description: >
+      List of cores to be used for the DPDK lcore threads. Note that these
+      threads are used by the OVS control path for validation and handling
+      functions.
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ""
+ OvsDpdkMemoryChannels:
+ description: Number of memory channels per socket to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ""
+ OvsDpdkSocketMemory:
+ default: ""
+ description: >
+ Sets the amount of hugepage memory to assign per NUMA node. It is
+ recommended to use the socket closest to the PCIe slot used for the
+ desired DPDK NIC. The format should be in "<socket 0 mem>, <socket 1
+      mem>, <socket n mem>", where each value is specified in MB. For example:
+ "1024,0".
+ type: string
+ OvsDpdkDriverType:
+ default: "vfio-pci"
+ description: >
+ DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+ this UIO/PMD driver.
+ type: string
+ OvsPmdCoreList:
+ description: >
+      A list or range of CPU cores for the PMD threads to be pinned to. Note
+      that the NIC's location relative to the socket, the number of
+      hyper-threaded logical cores, and the desired number of PMD threads can
+      all play a role in choosing this setting. These cores should be on the
+      same socket to which OvsDpdkSocketMemory is assigned. If hyper-threading
+      is used, specify both sibling logical cores of each physical core.
+      Specifying more than one core spawns multiple PMD threads, which may
+      improve dataplane performance.
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ type: string
+ default: ""
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Queens cycle.
+ HostCpusList:
+ description: List of cores to be used for host process
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkCoreList:
+ description: List of cores to be used for DPDK Poll Mode Driver
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkMemoryChannels:
+ description: Number of memory channels to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ''
+ NeutronDpdkSocketMemory:
+ default: ''
+ description: Memory allocated for each socket
+ type: string
+ NeutronDpdkDriverType:
+ default: "vfio-pci"
+ description: DPDK Driver type
+ type: string
+
+parameter_groups:
+- label: deprecated
+  description: Do not use deprecated params; they will be removed.
+ parameters:
+ - HostCpusList
+ - NeutronDpdkCoreList
+ - NeutronDpdkMemoryChannels
+ - NeutronDpdkSocketMemory
+ - NeutronDpdkDriverType
+
+conditions:
+ l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+ pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+ mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+ socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+ driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+
+outputs:
+ role_data:
+ description: Role data for the Open vSwitch service.
+ value:
+ service_name: openvswitch
+ config_settings:
+ map_replace:
+ - map_replace:
+ - vswitch::dpdk::driver_type: OvsDpdkDriverType
+ vswitch::dpdk::host_core_list: OvsDpdkCoreList
+ vswitch::dpdk::pmd_core_list: OvsPmdCoreList
+ vswitch::dpdk::memory_channels: OvsDpdkMemoryChannels
+ vswitch::dpdk::socket_mem: OvsDpdkSocketMemory
+ - values: {get_param: [RoleParameters]}
+ - values:
+ OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+ OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+ OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+ OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+ OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
+ upgrade_tasks:
+ - name: Check openvswitch version.
+ tags: step2
+ register: ovs_version
+ ignore_errors: true
+ shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
+ - name: Check openvswitch packaging.
+ tags: step2
+ shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
+ register: ovs_packaging_issue
+ ignore_errors: true
+ - block:
+ - name: "Ensure empty directory: emptying."
+ file:
+ state: absent
+ path: /root/OVS_UPGRADE
+ - name: "Ensure empty directory: creating."
+ file:
+ state: directory
+ path: /root/OVS_UPGRADE
+ owner: root
+ group: root
+ mode: 0750
+ - name: Download OVS packages.
+ command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
+ - name: Get rpm list for manual upgrade of OVS.
+ shell: ls -1 /root/OVS_UPGRADE/*.rpm
+ register: ovs_list_of_rpms
+ - name: Manual upgrade of OVS
+ shell: |
+ rpm -U --test {{item}} 2>&1 | grep "already installed" || \
+ rpm -U --replacepkgs --notriggerun --nopostun {{item}};
+ args:
+ chdir: /root/OVS_UPGRADE
+ with_items:
+ - "{{ovs_list_of_rpms.stdout_lines}}"
+ tags: step2
+ when: "'2.5.0-14' in '{{ovs_version.stdout}}'
+ or
+ ovs_packaging_issue|succeeded"
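
Since the DPDK settings above are resolved with map_replace against RoleParameters, role-specific values take precedence over global parameter_defaults. A minimal sketch of a per-role override, assuming the usual <RoleName>Parameters convention and a role named ComputeOvsDpdk (the role name and values are illustrative only):

    parameter_defaults:
      # Role-specific map; these keys win over the global defaults because the
      # template applies map_replace with RoleParameters.
      ComputeOvsDpdkParameters:
        OvsPmdCoreList: "2,4,26,28"
        OvsDpdkSocketMemory: "2048,2048"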
diff --git a/releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml b/releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml
new file mode 100644
index 00000000..193154d0
--- /dev/null
+++ b/releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - The HAProxy stats interface can now be enabled/disabled with the
+ HAProxyStatsEnabled flag. Note that it's still enabled by default.
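
For illustration, the stats interface could be turned off with a small environment file (the flag defaults to true):

    parameter_defaults:
      # Disable the HAProxy stats listener.
      HAProxyStatsEnabled: false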
diff --git a/releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml b/releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml
new file mode 100644
index 00000000..67a55cd8
--- /dev/null
+++ b/releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - Added a new DeploymentSwiftDataMap parameter, which is used to set the
+    deployment_swift_data property on the Server resources. The parameter is a
+    map where the keys are the Heat-assigned hostnames and the values are maps
+    giving the container and object name in Swift.
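
An illustrative parameter_defaults entry (hostnames, container and object names are placeholders):

    parameter_defaults:
      DeploymentSwiftDataMap:
        overcloud-controller-0:
          container: overcloud-controller-0
          object: 0
        overcloud-novacompute-0:
          container: overcloud-novacompute-0
          object: 0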
diff --git a/releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml b/releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml
new file mode 100644
index 00000000..cd352ac1
--- /dev/null
+++ b/releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - Adds a new output, ServerOsCollectConfigData, which is the
+ os-collect-config configuration associated with each server resource.
+ This can be used to [pre]configure the os-collect-config agents on
+    deployed-servers.
diff --git a/releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml b/releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml
new file mode 100644
index 00000000..4cb9b801
--- /dev/null
+++ b/releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - DPDK is enabled in OVS before the NetworkDeployment runs, to ensure DPDK
+    is ready to handle new port additions.
+upgrade:
+  - A new parameter, ServiceNames, is added to the PreNetworkConfig resource.
+    All templates associated with PreNetworkConfig should add this new
+    parameter during the upgrade.
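
A hedged sketch of the resulting wiring in a role template (property names follow the existing PreNetworkConfig convention; the server resource name depends on the role):

    PreNetworkConfig:
      type: OS::TripleO::PreNetworkConfig
      properties:
        server: {get_resource: NovaCompute}
        RoleParameters: {get_param: RoleParameters}
        ServiceNames: {get_param: ServiceNames}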
diff --git a/releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml b/releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml
new file mode 100644
index 00000000..1e44d926
--- /dev/null
+++ b/releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml
@@ -0,0 +1,23 @@
+---
+features:
+  - Adds a common openvswitch service template to be
+    inherited by other services.
+  - Adds an environment file to be used for deploying
+    OpenDaylight + OVS DPDK.
+  - Adds first-boot and OVS configuration scripts.
+deprecations:
+ - The ``HostCpusList`` parameter is deprecated in
+ favor of ``OvsDpdkCoreList`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkCoreList`` parameter is deprecated in
+ favor of ``OvsPmdCoreList`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkMemoryChannels`` parameter is deprecated in
+ favor of ``OvsDpdkMemoryChannels`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkSocketMemory`` parameter is deprecated in
+ favor of ``OvsDpdkSocketMemory`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkDriverType`` parameter is deprecated in
+ favor of ``OvsDpdkDriverType`` and will be removed
+ in a future release.
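
Taken together, an environment file migrated off the deprecated names would set only the new parameters (values below are placeholders and must be sized for the actual host):

    parameter_defaults:
      OvsDpdkCoreList: "0,22"        # was HostCpusList
      OvsPmdCoreList: "2,4,26,28"    # was NeutronDpdkCoreList
      OvsDpdkMemoryChannels: "4"     # was NeutronDpdkMemoryChannels
      OvsDpdkSocketMemory: "1024,0"  # was NeutronDpdkSocketMemory
      OvsDpdkDriverType: "vfio-pci"  # was NeutronDpdkDriverType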
diff --git a/releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml b/releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml
new file mode 100644
index 00000000..cf99ec5d
--- /dev/null
+++ b/releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+    It is now possible to trigger Mistral workflows or workflow actions
+    before a deployment step is applied. This can be defined within the
+    scope of a service template and is described as a task property
+    for the Heat OS::Mistral::Workflow resource. For more details, see
+    the puppet/services/README.rst file.
\ No newline at end of file
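
A minimal sketch of how a service template might express such a task in its role_data output (the step key and workflow action are illustrative; see puppet/services/README.rst in this change for the authoritative format):

    service_workflow_tasks:
      step2:
        - name: echo
          action: std.echo output=Hello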
diff --git a/releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml b/releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml
new file mode 100644
index 00000000..1bc99371
--- /dev/null
+++ b/releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml
@@ -0,0 +1,7 @@
+---
+features:
+  - Add two new example environments to facilitate deploying split-stack,
+    environments/overcloud-baremetal.j2.yaml and
+    environments/overcloud-services.yaml. The environments are used to deploy
+    two separate Heat stacks, one for just the baremetal+network configuration
+    and one for the service configuration.
diff --git a/releasenotes/notes/vipmap-output-4a9ce99930960346.yaml b/releasenotes/notes/vipmap-output-4a9ce99930960346.yaml
new file mode 100644
index 00000000..1f49bacd
--- /dev/null
+++ b/releasenotes/notes/vipmap-output-4a9ce99930960346.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - Add a VipMap output to the top-level stack outputs. VipMap is a mapping
+    from each network to the VIP address on that network, and also includes
+    the Redis VIP.
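
For illustration, the new output is a simple network-to-VIP map of roughly this shape (network names depend on the deployed networks; addresses are placeholders):

    VipMap:
      ctlplane: 192.0.2.10
      external: 10.0.0.4
      internal_api: 172.16.2.5
      storage: 172.16.1.6
      storage_mgmt: 172.16.3.7
      redis: 172.16.2.8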
diff --git a/services.yaml b/services.yaml
index 724727bb..4d3ca8d1 100644
--- a/services.yaml
+++ b/services.yaml
@@ -116,6 +116,10 @@ outputs:
yaql:
expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
data: {role_data: {get_attr: [ServiceChain, role_data]}}
+ service_workflow_tasks:
+ yaql:
+ expression: $.data.role_data.where($ != null).select($.get('service_workflow_tasks')).where($ != null).reduce($1.mergeWith($2), {})
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
step_config: {get_attr: [ServiceChain, role_data, step_config]}
upgrade_tasks:
yaql: