Diffstat (limited to 'puppet')
-rw-r--r--  puppet/all-nodes-config.yaml | 18
-rw-r--r--  puppet/blockstorage-role.yaml | 69
-rw-r--r--  puppet/cephstorage-role.yaml | 69
-rw-r--r--  puppet/compute-role.yaml | 71
-rw-r--r--  puppet/controller-role.yaml | 78
-rw-r--r--  puppet/deploy-artifacts.sh | 10
-rw-r--r--  puppet/major_upgrade_steps.j2.yaml | 9
-rw-r--r--  puppet/objectstorage-role.yaml | 71
-rw-r--r--  puppet/post.j2.yaml | 7
-rw-r--r--  puppet/puppet-steps.j2 | 105
-rw-r--r--  puppet/role.role.j2.yaml | 78
-rw-r--r--  puppet/services/README.rst | 24
-rw-r--r--  puppet/services/certmonger-user.yaml | 17
-rw-r--r--  puppet/services/cinder-backend-dellsc.yaml | 4
-rw-r--r--  puppet/services/cinder-backend-netapp.yaml | 8
-rw-r--r--  puppet/services/cinder-volume.yaml | 16
-rw-r--r--  puppet/services/database/mysql.yaml | 13
-rw-r--r--  puppet/services/database/redis.yaml | 20
-rw-r--r--  puppet/services/disabled/ceilometer-expirer-disabled.yaml | 20
-rw-r--r--  puppet/services/gnocchi-base.yaml | 10
-rw-r--r--  puppet/services/haproxy.yaml | 11
-rw-r--r--  puppet/services/horizon.yaml | 3
-rw-r--r--  puppet/services/ironic-api.yaml | 31
-rw-r--r--  puppet/services/ironic-conductor.yaml | 6
-rw-r--r--  puppet/services/ironic-inspector.yaml | 151
-rw-r--r--  puppet/services/keystone.yaml | 41
-rw-r--r--  puppet/services/neutron-ovs-agent.yaml | 10
-rw-r--r--  puppet/services/neutron-ovs-dpdk-agent.yaml | 51
-rw-r--r--  puppet/services/neutron-sriov-agent.yaml | 22
-rw-r--r--  puppet/services/nova-api.yaml | 220
-rw-r--r--  puppet/services/nova-compute.yaml | 26
-rw-r--r--  puppet/services/nova-scheduler.yaml | 9
-rw-r--r--  puppet/services/opendaylight-ovs.yaml | 40
-rw-r--r--  puppet/services/openvswitch-upgrade.yaml | 50
-rw-r--r--  puppet/services/openvswitch.yaml | 178
-rw-r--r--  puppet/services/ovn-dbs.yaml | 1
-rw-r--r--  puppet/services/pacemaker/database/mysql.yaml | 7
-rw-r--r--  puppet/services/pacemaker/ovn-dbs.yaml | 61
-rw-r--r--  puppet/services/pacemaker_remote.yaml | 38
-rw-r--r--  puppet/services/panko-api.yaml | 4
-rw-r--r--  puppet/services/services.yaml | 129
-rw-r--r--  puppet/services/swift-proxy.yaml | 29
42 files changed, 1357 insertions, 478 deletions
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index baafe03d..b1284452 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -12,10 +12,8 @@ parameters:
type: string
cloud_name_ctlplane:
type: string
- # FIXME(shardy) this can be comma_delimited_list when
- # https://bugs.launchpad.net/heat/+bug/1617019 is fixed
enabled_services:
- type: string
+ type: comma_delimited_list
controller_ips:
type: comma_delimited_list
logging_groups:
@@ -118,7 +116,10 @@ resources:
map_merge:
- tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: logging_sources}
- tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: logging_groups}
- - enabled_services: {get_param: enabled_services}
+ - enabled_services:
+ yaql:
+ expression: $.data.distinct()
+ data: {get_param: enabled_services}
# This writes out a mapping of service_name_enabled: 'true'
# For any services not enabled, hiera foo_enabled will
# return nil, as it's undefined
@@ -129,8 +130,7 @@ resources:
# https://bugs.launchpad.net/heat/+bug/1617203
SERVICE_enabled: 'true'
for_each:
- SERVICE:
- str_split: [',', {get_param: enabled_services}]
+ SERVICE: {get_param: enabled_services}
# Dynamically generate per-service network data
# This works as follows (outer->inner functions)
# yaql - filters services where no mapping exists in ServiceNetMap
@@ -150,8 +150,7 @@ resources:
template:
SERVICE_network: SERVICE_network
for_each:
- SERVICE:
- str_split: [',', {get_param: enabled_services}]
+ SERVICE: {get_param: enabled_services}
- values: {get_param: ServiceNetMap}
# Keystone doesn't provide separate entries for the public
# and admin endpoints, so we need to add them here manually
@@ -203,8 +202,7 @@ resources:
template:
SERVICE_vip: SERVICE_network
for_each:
- SERVICE:
- str_split: [',', {get_param: enabled_services}]
+ SERVICE: {get_param: enabled_services}
- values: {get_param: ServiceNetMap}
- values: {get_param: NetVipMap}
- keystone_admin_api_vip:
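Note on the all-nodes-config.yaml change: with enabled_services now typed as comma_delimited_list, the str_split calls are dropped and yaql's distinct() strips duplicate service names before the hieradata is generated. A minimal standalone sketch of the same dedup pattern (illustrative only, not part of this patch):

    heat_template_version: ocata
    parameters:
      enabled_services:
        type: comma_delimited_list
        default: ['keystone', 'nova_api', 'keystone']
    outputs:
      unique_services:
        description: enabled_services with duplicates removed
        value:
          yaql:
            expression: $.data.distinct()
            data: {get_param: enabled_services}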
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index 7b6fbb71..612a4a01 100644
--- a/puppet/blockstorage-role.yaml
+++ b/puppet/blockstorage-role.yaml
@@ -69,8 +69,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
BlockStorageServerMetadata:
default: {}
description: >
@@ -139,6 +139,28 @@ parameters:
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
@@ -146,6 +168,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
BlockStorage:
@@ -174,6 +202,12 @@ resources:
- {get_param: BlockStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: BlockStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -372,16 +406,21 @@ resources:
type: OS::TripleO::BlockStorage::PreNetworkConfig
properties:
server: {get_resource: BlockStorage}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: PreNetworkConfig
- condition: server_not_blacklisted
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: BlockStorage}
- actions: {get_param: NetworkDeploymentActions}
+ actions:
+ if:
+ - server_not_blacklisted
+ - {get_param: NetworkDeploymentActions}
+ - []
BlockStorageUpgradeInitConfig:
type: OS::Heat::SoftwareConfig
@@ -400,22 +439,30 @@ resources:
BlockStorageUpgradeInitDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
- condition: server_not_blacklisted
properties:
name: BlockStorageUpgradeInitDeployment
server: {get_resource: BlockStorage}
config: {get_resource: BlockStorageUpgradeInitConfig}
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
BlockStorageDeployment:
type: OS::Heat::StructuredDeployment
depends_on: BlockStorageUpgradeInitDeployment
- condition: server_not_blacklisted
properties:
name: BlockStorageDeployment
server: {get_resource: BlockStorage}
config: {get_resource: BlockStorageConfig}
input_values:
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
# Map heat metadata into hiera datafiles
BlockStorageConfig:
@@ -455,6 +502,7 @@ resources:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -477,7 +525,6 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
- condition: server_not_blacklisted
properties:
name: UpdateDeployment
config: {get_resource: UpdateConfig}
@@ -485,6 +532,11 @@ resources:
input_values:
update_identifier:
get_param: UpdateIdentifier
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
SshHostPubKey:
type: OS::TripleO::Ssh::HostPubKey
@@ -593,3 +645,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [BlockStorage, os_collect_config]}
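Note on the role templates: the per-deployment condition is replaced by an actions list that collapses to [] for blacklisted servers, so the deployment resources remain in the stack but are never triggered on CREATE or UPDATE. A sketch of the resulting shape (resource and config names here are placeholders, not taken from the templates):

    ExampleDeployment:
      type: OS::Heat::SoftwareDeployment
      properties:
        name: ExampleDeployment
        server: {get_resource: ExampleServer}
        config: {get_resource: ExampleConfig}
        actions:
          if:
            - server_not_blacklisted
            - ['CREATE', 'UPDATE']
            - []

The same pattern repeats in the Ceph, Compute, Controller, ObjectStorage and generic role templates below.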
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index 8047e3dc..e7afcb40 100644
--- a/puppet/cephstorage-role.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -75,8 +75,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
CephStorageServerMetadata:
default: {}
description: >
@@ -145,6 +145,28 @@ parameters:
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
@@ -152,6 +174,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
CephStorage:
@@ -180,6 +208,12 @@ resources:
- {get_param: CephStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: CephStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -378,16 +412,21 @@ resources:
type: OS::TripleO::CephStorage::PreNetworkConfig
properties:
server: {get_resource: CephStorage}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: PreNetworkConfig
- condition: server_not_blacklisted
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: CephStorage}
- actions: {get_param: NetworkDeploymentActions}
+ actions:
+ if:
+ - server_not_blacklisted
+ - {get_param: NetworkDeploymentActions}
+ - []
CephStorageUpgradeInitConfig:
type: OS::Heat::SoftwareConfig
@@ -406,22 +445,30 @@ resources:
CephStorageUpgradeInitDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
- condition: server_not_blacklisted
properties:
name: CephStorageUpgradeInitDeployment
server: {get_resource: CephStorage}
config: {get_resource: CephStorageUpgradeInitConfig}
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
CephStorageDeployment:
type: OS::Heat::StructuredDeployment
depends_on: CephStorageUpgradeInitDeployment
- condition: server_not_blacklisted
properties:
name: CephStorageDeployment
config: {get_resource: CephStorageConfig}
server: {get_resource: CephStorage}
input_values:
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
CephStorageConfig:
type: OS::Heat::StructuredConfig
@@ -460,6 +507,7 @@ resources:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -489,13 +537,17 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
- condition: server_not_blacklisted
properties:
config: {get_resource: UpdateConfig}
server: {get_resource: CephStorage}
input_values:
update_identifier:
get_param: UpdateIdentifier
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
SshHostPubKey:
type: OS::TripleO::Ssh::HostPubKey
@@ -604,3 +656,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [CephStorage, os_collect_config]}
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index e453508a..5a662e86 100644
--- a/puppet/compute-role.yaml
+++ b/puppet/compute-role.yaml
@@ -37,7 +37,7 @@ parameters:
type: string
NeutronPublicInterface:
default: nic1
- description: A port to add to the NeutronPhysicalBridge.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
NodeIndex:
type: number
@@ -90,8 +90,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
NovaComputeServerMetadata:
default: {}
description: >
@@ -157,8 +157,36 @@ parameters:
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
server_not_blacklisted:
not:
equals:
@@ -194,6 +222,12 @@ resources:
- {get_param: NovaComputeServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: NovaComputeSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -381,6 +415,8 @@ resources:
type: OS::TripleO::Compute::PreNetworkConfig
properties:
server: {get_resource: NovaCompute}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkConfig:
type: OS::TripleO::Compute::Net::SoftwareConfig
@@ -396,12 +432,15 @@ resources:
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: PreNetworkConfig
- condition: server_not_blacklisted
properties:
name: NetworkDeployment
+ actions:
+ if:
+ - server_not_blacklisted
+ - {get_param: NetworkDeploymentActions}
+ - []
config: {get_resource: NetworkConfig}
server: {get_resource: NovaCompute}
- actions: {get_param: NetworkDeploymentActions}
input_values:
bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
@@ -423,9 +462,13 @@ resources:
NovaComputeUpgradeInitDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
- condition: server_not_blacklisted
properties:
name: NovaComputeUpgradeInitDeployment
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
server: {get_resource: NovaCompute}
config: {get_resource: NovaComputeUpgradeInitConfig}
@@ -472,13 +515,18 @@ resources:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
NovaComputeDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: NovaComputeUpgradeInitDeployment
- condition: server_not_blacklisted
properties:
name: NovaComputeDeployment
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
config: {get_resource: NovaComputeConfig}
server: {get_resource: NovaCompute}
input_values:
@@ -512,9 +560,13 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
- condition: server_not_blacklisted
properties:
name: UpdateDeployment
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
config: {get_resource: UpdateConfig}
server: {get_resource: NovaCompute}
input_values:
@@ -630,3 +682,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
value:
{get_resource: NovaCompute}
condition: server_not_blacklisted
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [NovaCompute, os_collect_config]}
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index 4c0a70f6..09e5b2b9 100644
--- a/puppet/controller-role.yaml
+++ b/puppet/controller-role.yaml
@@ -58,9 +58,13 @@ parameters:
type: string
constraints:
- custom_constraint: nova.keypair
+ NeutronPhysicalBridge:
+ default: 'br-ex'
+ description: An OVS bridge to create for accessing external networks.
+ type: string
NeutronPublicInterface:
default: nic1
- description: What interface to bridge onto br-ex for network nodes.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
ServiceNetMap:
default: {}
@@ -104,8 +108,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
ControllerServerMetadata:
default: {}
description: >
@@ -171,6 +175,28 @@ parameters:
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
parameter_groups:
- label: deprecated
@@ -184,7 +210,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
-
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
@@ -214,6 +245,12 @@ resources:
- {get_param: ControllerServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ControllerSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -401,6 +438,8 @@ resources:
type: OS::TripleO::Controller::PreNetworkConfig
properties:
server: {get_resource: Controller}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkConfig:
type: OS::TripleO::Controller::Net::SoftwareConfig
@@ -415,15 +454,18 @@ resources:
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
- condition: server_not_blacklisted
depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: Controller}
- actions: {get_param: NetworkDeploymentActions}
+ actions:
+ if:
+ - server_not_blacklisted
+ - {get_param: NetworkDeploymentActions}
+ - []
input_values:
- bridge_name: br-ex
+ bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
# Resource for site-specific injection of root certificate
@@ -457,19 +499,27 @@ resources:
# but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
ControllerUpgradeInitDeployment:
type: OS::Heat::SoftwareDeployment
- condition: server_not_blacklisted
depends_on: NetworkDeployment
properties:
name: ControllerUpgradeInitDeployment
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
server: {get_resource: Controller}
config: {get_resource: ControllerUpgradeInitConfig}
ControllerDeployment:
type: OS::TripleO::SoftwareDeployment
- condition: server_not_blacklisted
depends_on: ControllerUpgradeInitDeployment
properties:
name: ControllerDeployment
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
config: {get_resource: ControllerConfig}
server: {get_resource: Controller}
input_values:
@@ -530,6 +580,7 @@ resources:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
# Hook for site-specific additional pre-deployment config, e.g extra hieradata
ControllerExtraConfigPre:
@@ -551,10 +602,14 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
- condition: server_not_blacklisted
depends_on: NetworkDeployment
properties:
name: UpdateDeployment
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
config: {get_resource: UpdateConfig}
server: {get_resource: Controller}
input_values:
@@ -676,3 +731,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
tls_cert_modulus_md5:
description: MD5 checksum of the TLS Certificate Modulus
value: {get_attr: [NodeTLSData, cert_modulus_md5]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [Controller, os_collect_config]}
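Note on controller-role.yaml: the previously hard-coded br-ex bridge becomes the NeutronPhysicalBridge parameter, so the bridge name can be overridden per deployment. A hypothetical environment file using it (values are examples only):

    parameter_defaults:
      NeutronPhysicalBridge: br-ex
      NeutronPublicInterface: nic2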
diff --git a/puppet/deploy-artifacts.sh b/puppet/deploy-artifacts.sh
index 4e1ad89f..e4d20b49 100644
--- a/puppet/deploy-artifacts.sh
+++ b/puppet/deploy-artifacts.sh
@@ -10,16 +10,20 @@ if [ -n "$artifact_urls" ]; then
for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
curl --globoff -o $TMP_DATA/file_data "$URL"
if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
- yum install -y $TMP_DATA/file_data
+ mv $TMP_DATA/file_data $TMP_DATA/file_data.rpm
+ yum install -y $TMP_DATA/file_data.rpm
+ rm $TMP_DATA/file_data.rpm
elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
pushd /
tar xvzf $TMP_DATA/file_data
popd
else
- echo "ERROR: Unsupported file format."
+ echo "ERROR: Unsupported file format: $URL"
exit 1
fi
- rm $TMP_DATA/file_data
+ if [ -f $TMP_DATA/file_data ]; then
+ rm $TMP_DATA/file_data
+ fi
done
else
echo "No artifact_urls was set. Skipping..."
diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml
index b44095bd..574c41b0 100644
--- a/puppet/major_upgrade_steps.j2.yaml
+++ b/puppet/major_upgrade_steps.j2.yaml
@@ -8,11 +8,14 @@ description: 'Upgrade steps for all roles'
parameters:
servers:
type: json
-
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
-
+ ctlplane_service_ips:
+ type: json
UpdateIdentifier:
type: string
description: >
@@ -206,7 +209,9 @@ resources:
{%- endfor %}
properties:
servers: {get_param: servers}
+ stack_name: {get_param: stack_name}
role_data: {get_param: role_data}
+ ctlplane_service_ips: {get_param: ctlplane_service_ips}
outputs:
# Output the config for each role, just use Step1 as the config should be
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 5ab6669f..4a1670f8 100644
--- a/puppet/objectstorage-role.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -69,8 +69,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
SwiftStorageServerMetadata:
default: {}
description: >
@@ -139,6 +139,29 @@ parameters:
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ default: {}
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
@@ -146,6 +169,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
@@ -174,6 +203,12 @@ resources:
- {get_param: SwiftStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ObjectStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -372,16 +407,22 @@ resources:
type: OS::TripleO::ObjectStorage::PreNetworkConfig
properties:
server: {get_resource: SwiftStorage}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: PreNetworkConfig
- condition: server_not_blacklisted
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: SwiftStorage}
- actions: {get_param: NetworkDeploymentActions}
+ actions:
+ if:
+ - server_not_blacklisted
+ - {get_param: NetworkDeploymentActions}
+ - []
+
SwiftStorageUpgradeInitConfig:
type: OS::Heat::SoftwareConfig
@@ -400,11 +441,15 @@ resources:
SwiftStorageUpgradeInitDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
- condition: server_not_blacklisted
properties:
name: SwiftStorageUpgradeInitDeployment
server: {get_resource: SwiftStorage}
config: {get_resource: SwiftStorageUpgradeInitConfig}
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
SwiftStorageHieraConfig:
type: OS::Heat::StructuredConfig
@@ -443,17 +488,22 @@ resources:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
SwiftStorageHieraDeploy:
type: OS::Heat::StructuredDeployment
depends_on: SwiftStorageUpgradeInitDeployment
- condition: server_not_blacklisted
properties:
name: SwiftStorageHieraDeploy
server: {get_resource: SwiftStorage}
config: {get_resource: SwiftStorageHieraConfig}
input_values:
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -476,13 +526,17 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
- condition: server_not_blacklisted
properties:
config: {get_resource: UpdateConfig}
server: {get_resource: SwiftStorage}
input_values:
update_identifier:
get_param: UpdateIdentifier
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
SshHostPubKey:
type: OS::TripleO::Ssh::HostPubKey
@@ -591,3 +645,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [SwiftStorage, os_collect_config]}
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
index 3a15cec6..67e1ecfd 100644
--- a/puppet/post.j2.yaml
+++ b/puppet/post.j2.yaml
@@ -8,7 +8,9 @@ parameters:
servers:
type: json
description: Mapping of Role name e.g Controller to a list of servers
-
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
@@ -23,6 +25,7 @@ parameters:
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
+ ctlplane_service_ips:
+ type: json
-resources:
{% include 'puppet-steps.j2' %}
diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2
index 360c633a..82c6171e 100644
--- a/puppet/puppet-steps.j2
+++ b/puppet/puppet-steps.j2
@@ -1,3 +1,19 @@
+{% set deploy_steps_max = 6 %}
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}_Enabled:
+ or:
+ {% for role in roles %}
+ - not:
+ equals:
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+ - ''
+ - False
+ {% endfor %}
+{% endfor %}
+
+resources:
# Post deployment steps for all roles
# A single config is re-applied with an incrementing step number
{% for role in roles %}
@@ -24,17 +40,26 @@
StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
# Step through a series of configuration steps
-{% for step in range(1, 6) %}
+{% for step in range(1, deploy_steps_max) %}
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
depends_on:
+ - WorkflowTasks_Step{{step}}_Execution
+ # TODO(gfidente): the following if/else condition
+ # replicates what is already defined for the
+ # WorkflowTasks_StepX resource and can be removed
+ # if https://bugs.launchpad.net/heat/+bug/1700569
+ # is fixed.
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
{% for dep in roles %}
- {{dep.name}}Deployment_Step{{step -1}}
{% endfor %}
- {% endif %}
+ {% endif %}
properties:
name: {{role.name}}Deployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
@@ -44,26 +69,78 @@
update_identifier: {get_param: DeployIdentifier}
{% endfor %}
+ # Note, this should be the last step to execute configuration changes.
+ # Ensure that all {{role.name}}ExtraConfigPost steps are executed
+ # after all the previous deployment steps.
+ {{role.name}}ExtraConfigPost:
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ {% endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+
+ # The {{role.name}}PostConfig steps are in charge of
+ # quiescing all services, i.e. in the Controller case,
+ # we should run a full service reload.
{{role.name}}PostConfig:
type: OS::TripleO::Tasks::{{role.name}}PostConfig
depends_on:
{% for dep in roles %}
- - {{dep.name}}Deployment_Step5
+ - {{dep.name}}ExtraConfigPost
{% endfor %}
properties:
servers: {get_param: servers}
input_values:
update_identifier: {get_param: DeployIdentifier}
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- {{role.name}}ExtraConfigPost:
+
+{% endfor %}
+
+# BEGIN service_workflow_tasks handling
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}:
+ type: OS::Mistral::Workflow
+ condition: WorkflowTasks_Step{{step}}_Enabled
depends_on:
- {% for dep in roles %}
- - {{dep.name}}PostConfig
- {% endfor %}
- type: OS::TripleO::NodeExtraConfigPost
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
properties:
- servers: {get_param: [servers, {{role.name}}]}
+ name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+ type: direct
+ tasks:
+ yaql:
+ expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+ data:
+ {% for role in roles %}
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+ {% endfor %}
+ WorkflowTasks_Step{{step}}_Execution:
+ type: OS::Mistral::ExternalResource
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on: WorkflowTasks_Step{{step}}
+ properties:
+ actions:
+ CREATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ UPDATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ always_update: true
{% endfor %}
+# END service_workflow_tasks handling
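Note on puppet-steps.j2: each WorkflowTasks_Step{{step}} workflow and its Execution resource are guarded by the generated WorkflowTasks_Step{{step}}_Enabled condition, which is true only when at least one role defines service_workflow_tasks for that step. Rendered for a deployment with a single Controller role, the step 2 condition would look roughly like this (an illustrative rendering, not output copied from a stack):

    conditions:
      WorkflowTasks_Step2_Enabled:
        or:
          - not:
              equals:
                - get_param: [role_data, Controller, service_workflow_tasks, step2]
                - ''
          - False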
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 570efb3a..b45736c1 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -28,9 +28,13 @@ parameters:
constraints:
- custom_constraint: nova.keypair
{% endif %}
+ NeutronPhysicalBridge:
+ default: 'br-ex'
+ description: An OVS bridge to create for accessing tenant networks.
+ type: string
NeutronPublicInterface:
default: nic1
- description: What interface to bridge onto br-ex for network nodes.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
ServiceNetMap:
default: {}
@@ -85,8 +89,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
{{role}}ServerMetadata:
default: {}
description: >
@@ -161,6 +165,28 @@ parameters:
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
@@ -168,10 +194,16 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
{{role}}:
- type: OS::TripleO::{{role.name}}Server
+ type: OS::TripleO::{{role}}Server
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
@@ -196,6 +228,12 @@ resources:
- {get_param: {{role}}ServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: {{role}}SchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -394,19 +432,25 @@ resources:
type: OS::TripleO::{{role}}::PreNetworkConfig
properties:
server: {get_resource: {{role}}}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: PreNetworkConfig
- condition: server_not_blacklisted
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: {{role}}}
actions: {get_param: NetworkDeploymentActions}
input_values:
- bridge_name: br-ex
+ bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
+ actions:
+ if:
+ - server_not_blacklisted
+ - {get_param: NetworkDeploymentActions}
+ - []
{{role}}UpgradeInitConfig:
type: OS::Heat::SoftwareConfig
@@ -425,22 +469,30 @@ resources:
{{role}}UpgradeInitDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
- condition: server_not_blacklisted
properties:
name: {{role}}UpgradeInitDeployment
server: {get_resource: {{role}}}
config: {get_resource: {{role}}UpgradeInitConfig}
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
{{role}}Deployment:
type: OS::Heat::StructuredDeployment
depends_on: {{role}}UpgradeInitDeployment
- condition: server_not_blacklisted
properties:
name: {{role}}Deployment
config: {get_resource: {{role}}Config}
server: {get_resource: {{role}}}
input_values:
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
{{role}}Config:
type: OS::Heat::StructuredConfig
@@ -481,6 +533,7 @@ resources:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -510,7 +563,6 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
- condition: server_not_blacklisted
properties:
name: UpdateDeployment
config: {get_resource: UpdateConfig}
@@ -518,6 +570,11 @@ resources:
input_values:
update_identifier:
get_param: UpdateIdentifier
+ actions:
+ if:
+ - server_not_blacklisted
+ - ['CREATE', 'UPDATE']
+ - []
SshHostPubKey:
type: OS::TripleO::Ssh::HostPubKey
@@ -626,3 +683,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [{{role}}, os_collect_config]}
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index 7a18ef0c..d55414b7 100644
--- a/puppet/services/README.rst
+++ b/puppet/services/README.rst
@@ -95,6 +95,30 @@ are re-asserted when applying latter ones.
5) Service activation (Pacemaker)
+It is also possible to use Mistral actions or workflows together with
+a deployment step; these are executed before the main configuration run.
+To describe actions or workflows from within a service use:
+
+ * service_workflow_tasks: One or more workflow task properties
+
+which expects a map where the key is the step and the value a list of
+dictionaries, each describing a workflow task, for example::
+
+ service_workflow_tasks:
+ step2:
+ - name: echo
+ action: std.echo output=Hello
+ step3:
+ - name: external
+ workflow: my-pre-existing-workflow-name
+ input:
+ workflow_param1: value
+ workflow_param2: value
+
+The Heat guide for the `OS::Mistral::Workflow task property
+<https://docs.openstack.org/developer/heat/template_guide/openstack.html#OS::Mistral::Workflow-prop-tasks>`_
+has more details about the expected dictionary.
+
Batch Upgrade Steps
-------------------
diff --git a/puppet/services/certmonger-user.yaml b/puppet/services/certmonger-user.yaml
index 6ad451a8..0508c557 100644
--- a/puppet/services/certmonger-user.yaml
+++ b/puppet/services/certmonger-user.yaml
@@ -26,11 +26,28 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ DefaultCRLURL:
+ default: 'http://ipa-ca/ipa/crl/MasterCRL.bin'
+ description: URI from which to fetch the CRL to be configured on the nodes.
+ type: string
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
outputs:
role_data:
description: Role data for the certmonger-user service
value:
service_name: certmonger_user
+ config_settings:
+ tripleo::certmonger::ca::crl::crl_source:
+ if:
+ - internal_tls_enabled
+ - {get_param: DefaultCRLURL}
+ - null
step_config: |
include ::tripleo::profile::base::certmonger_user
diff --git a/puppet/services/cinder-backend-dellsc.yaml b/puppet/services/cinder-backend-dellsc.yaml
index a201134c..c0bffb18 100644
--- a/puppet/services/cinder-backend-dellsc.yaml
+++ b/puppet/services/cinder-backend-dellsc.yaml
@@ -61,6 +61,9 @@ parameters:
CinderDellScSecondaryScApiPort:
type: number
default: 3033
+ CinderDellScExcludedDomainIp:
+ type: string
+ default: ''
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -105,5 +108,6 @@ outputs:
cinder::backend::dellsc_iscsi::secondary_san_login: {get_param: CinderDellScSecondarySanLogin}
cinder::backend::dellsc_iscsi::secondary_san_password: {get_param: CinderDellScSecondarySanPassword}
cinder::backend::dellsc_iscsi::secondary_sc_api_port: {get_param: CinderDellScSecondaryScApiPort}
+ cinder::backend::dellsc_iscsi::excluded_domain_ip: {get_param: CinderDellScExcludedDomainIp}
step_config: |
include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/cinder-backend-netapp.yaml b/puppet/services/cinder-backend-netapp.yaml
index bddc8e1a..fbde4c0a 100644
--- a/puppet/services/cinder-backend-netapp.yaml
+++ b/puppet/services/cinder-backend-netapp.yaml
@@ -93,6 +93,12 @@ parameters:
CinderNetappWebservicePath:
type: string
default: '/devmgr/v2'
+ CinderNetappNasSecureFileOperations:
+ type: string
+ default: 'false'
+ CinderNetappNasSecureFilePermissions:
+ type: string
+ default: 'false'
# DEPRECATED options for compatibility with older versions
CinderNetappEseriesHostType:
type: string
@@ -133,5 +139,7 @@ outputs:
cinder::backend::netapp::netapp_storage_pools: {get_param: CinderNetappStoragePools}
cinder::backend::netapp::netapp_host_type: {get_param: CinderNetappHostType}
cinder::backend::netapp::netapp_webservice_path: {get_param: CinderNetappWebservicePath}
+ cinder::backend::netapp::nas_secure_file_operations: {get_param: CinderNetappNasSecureFileOperations}
+ cinder::backend::netapp::nas_secure_file_permissions: {get_param: CinderNetappNasSecureFilePermissions}
step_config: |
include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml
index fe95222b..1f8c345d 100644
--- a/puppet/services/cinder-volume.yaml
+++ b/puppet/services/cinder-volume.yaml
@@ -40,6 +40,20 @@ parameters:
NFS servers used by Cinder NFS backend. Effective when
CinderEnableNfsBackend is true.
type: comma_delimited_list
+ CinderNasSecureFileOperations:
+ default: false
+ description: >
+ Controls whether security enhanced NFS file operations are enabled.
+ Valid values are 'auto', 'true' or 'false'. Effective when
+ CinderEnableNfsBackend is true.
+ type: string
+ CinderNasSecureFilePermissions:
+ default: false
+ description: >
+ Controls whether security enhanced NFS file permissions are enabled.
+ Valid values are 'auto', 'true' or 'false'. Effective when
+ CinderEnableNfsBackend is true.
+ type: string
CinderRbdPoolName:
default: volumes
type: string
@@ -105,6 +119,8 @@ outputs:
tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: {get_param: CinderNfsServers}
+ tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_operations: {get_param: CinderNasSecureFileOperations}
+ tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_permissions: {get_param: CinderNasSecureFilePermissions}
tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml
index 2bde9033..882ba299 100644
--- a/puppet/services/database/mysql.yaml
+++ b/puppet/services/database/mysql.yaml
@@ -118,6 +118,16 @@ outputs:
template: "%{hiera('cloud_name_NETWORK')}"
params:
NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ dnsnames:
+ - str_replace:
+ template: "%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ - str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
principal:
str_replace:
template: "mysql/%{hiera('cloud_name_NETWORK')}"
@@ -132,6 +142,9 @@ outputs:
- service: mysql
network: {get_param: [ServiceNetMap, MysqlNetwork]}
type: vip
+ - service: mysql
+ network: {get_param: [ServiceNetMap, MysqlNetwork]}
+ type: node
- null
upgrade_tasks:
- name: Check for galera root password
diff --git a/puppet/services/database/redis.yaml b/puppet/services/database/redis.yaml
index df406a8c..9567a73f 100644
--- a/puppet/services/database/redis.yaml
+++ b/puppet/services/database/redis.yaml
@@ -52,3 +52,23 @@ outputs:
- 26379
step_config: |
include ::tripleo::profile::base::database::redis
+ upgrade_tasks:
+ - name: Check if redis is deployed
+ command: systemctl is-enabled redis
+ tags: common
+ ignore_errors: True
+ register: redis_enabled
+ - name: "PreUpgrade step0,validation: Check if redis is running"
+ shell: >
+ /usr/bin/systemctl show 'redis' --property ActiveState |
+ grep '\bactive\b'
+ when: redis_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop redis service
+ tags: step1
+ when: redis_enabled.rc == 0
+ service: name=redis state=stopped
+ - name: Install redis package if it was disabled
+ tags: step3
+ yum: name=redis state=latest
+ when: redis_enabled.rc != 0
diff --git a/puppet/services/disabled/ceilometer-expirer-disabled.yaml b/puppet/services/disabled/ceilometer-expirer-disabled.yaml
index 9b7b47ef..7be394b6 100644
--- a/puppet/services/disabled/ceilometer-expirer-disabled.yaml
+++ b/puppet/services/disabled/ceilometer-expirer-disabled.yaml
@@ -27,24 +27,12 @@ parameters:
via parameter_defaults in the resource registry.
type: json
-resources:
- CeilometerServiceBase:
- type: ../ceilometer-base.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
- RoleName: {get_param: RoleName}
- RoleParameters: {get_param: RoleParameters}
-
outputs:
role_data:
description: Role data for the disabling Ceilometer Expirer role.
value:
service_name: ceilometer_expirer_disabled
- config_settings:
- map_merge:
- - get_attr: [CeilometerServiceBase, role_data, config_settings]
- - ceilometer::expirer::enable_cron: false
- step_config: |
- include ::tripleo::profile::base::ceilometer::expirer
+ upgrade_tasks:
+ - name: Remove ceilometer expirer cron tab on upgrade
+ tags: step1
+ shell: '/usr/bin/crontab -u ceilometer -r'
diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml
index f4067ef6..b4af7e85 100644
--- a/puppet/services/gnocchi-base.yaml
+++ b/puppet/services/gnocchi-base.yaml
@@ -34,6 +34,10 @@ parameters:
default: 30
description: Delay between processing metrics.
type: number
+ NumberOfStorageSacks:
+ default: 128
+ description: Number of storage sacks to create.
+ type: number
GnocchiPassword:
description: The password for the gnocchi service and db account.
type: string
@@ -87,7 +91,11 @@ outputs:
query:
read_default_file: /etc/my.cnf.d/tripleo.cnf
read_default_group: tripleo
- gnocchi::db::sync::extra_opts: ''
+ gnocchi::db::sync::extra_opts:
+ str_replace:
+ template: " --sacks-number NUM_SACKS"
+ params:
+ NUM_SACKS: {get_param: NumberOfStorageSacks}
gnocchi::storage::metric_processing_delay: {get_param: MetricProcessingDelay}
gnocchi::storage::swift::swift_user: 'service:gnocchi'
gnocchi::storage::swift::swift_auth_version: 3
diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml
index a71491c0..5bdc3b88 100644
--- a/puppet/services/haproxy.yaml
+++ b/puppet/services/haproxy.yaml
@@ -38,6 +38,10 @@ parameters:
default: /dev/log
description: Syslog address where HAproxy will send its log
type: string
+ HAProxyStatsEnabled:
+ default: true
+ description: Whether or not to enable the HAProxy stats interface.
+ type: boolean
RedisPassword:
description: The password for Redis
type: string
@@ -50,6 +54,11 @@ parameters:
type: string
description: Specifies the default CA cert to use if TLS is used for
services in the internal network.
+ InternalTLSCRLPEMFile:
+ default: '/etc/pki/CA/crl/overcloud-crl.pem'
+ type: string
+ description: Specifies the default CRL PEM file to use for revocation if
+ TLS is used for services in the internal network.
resources:
@@ -89,6 +98,8 @@ outputs:
tripleo::haproxy::haproxy_stats_password: {get_param: HAProxyStatsPassword}
tripleo::haproxy::redis_password: {get_param: RedisPassword}
tripleo::haproxy::ca_bundle: {get_param: InternalTLSCAFile}
+ tripleo::haproxy::crl_file: {get_param: InternalTLSCRLPEMFile}
+ tripleo::haproxy::haproxy_stats: {get_param: HAProxyStatsEnabled}
tripleo::profile::base::haproxy::certificates_specs:
map_merge:
- get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index 93bced8b..1f97b8ba 100644
--- a/puppet/services/horizon.yaml
+++ b/puppet/services/horizon.yaml
@@ -55,7 +55,7 @@ parameters:
HorizonSecureCookies:
description: Set CSRF_COOKIE_SECURE / SESSION_COOKIE_SECURE in Horizon
type: boolean
- default: true
+ default: false
MemcachedIPv6:
default: false
description: Enable IPv6 features in Memcached.
@@ -89,7 +89,6 @@ outputs:
horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
horizon::vhost_extra_params:
- add_listen: false
priority: 10
access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
options: ['FollowSymLinks','MultiViews']
diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml
index 945033a1..0e8eacf1 100644
--- a/puppet/services/ironic-api.yaml
+++ b/puppet/services/ironic-api.yaml
@@ -43,8 +43,21 @@ parameters:
e.g. { ironic-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
default: {}
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
IronicBase:
type: ./ironic-base.yaml
properties:
@@ -63,6 +76,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [IronicBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
- ironic::api::authtoken::password: {get_param: IronicPassword}
ironic::api::authtoken::project_name: 'service'
ironic::api::authtoken::user_domain_name: 'Default'
@@ -80,7 +94,17 @@ outputs:
ironic::api::port: {get_param: [EndpointMap, IronicInternal, port]}
# This is used to build links in responses
ironic::api::public_endpoint: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
+ ironic::api::service_name: 'httpd'
ironic::policy::policies: {get_param: IronicApiPolicies}
+ ironic::wsgi::apache::bind_host: {get_param: [ServiceNetMap, IronicApiNetwork]}
+ ironic::wsgi::apache::port: {get_param: [EndpointMap, IronicInternal, port]}
+ ironic::wsgi::apache::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, IronicApiNetwork]}
+ ironic::wsgi::apache::ssl: {get_param: EnableInternalTLS}
tripleo.ironic_api.firewall_rules:
'133 ironic api':
dport:
@@ -106,6 +130,9 @@ outputs:
- '%'
- "%{hiera('mysql_bind_host')}"
upgrade_tasks:
- - name: Stop ironic_api service
+ - name: Stop ironic_api service (before httpd support)
+ tags: step1
+ service: name=openstack-ironic-api state=stopped enabled=no
+ - name: Stop ironic_api service (running under httpd)
tags: step1
- service: name=openstack-ironic-api state=stopped
+ service: name=httpd state=stopped
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index b1676715..0e8c8e12 100644
--- a/puppet/services/ironic-conductor.yaml
+++ b/puppet/services/ironic-conductor.yaml
@@ -164,6 +164,12 @@ outputs:
ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
# Credentials to access other services
+ ironic::cinder::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::cinder::username: 'ironic'
+ ironic::cinder::password: {get_param: IronicPassword}
+ ironic::cinder::project_name: 'service'
+ ironic::cinder::user_domain_name: 'Default'
+ ironic::cinder::project_domain_name: 'Default'
ironic::glance::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
ironic::glance::username: 'ironic'
ironic::glance::password: {get_param: IronicPassword}
diff --git a/puppet/services/ironic-inspector.yaml b/puppet/services/ironic-inspector.yaml
new file mode 100644
index 00000000..e8537a29
--- /dev/null
+++ b/puppet/services/ironic-inspector.yaml
@@ -0,0 +1,151 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Ironic Inspector configured with Puppet (EXPERIMENTAL)
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MonitoringSubscriptionIronicInspector:
+ default: 'overcloud-ironic-inspector'
+ type: string
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ IronicInspectorInterface:
+ default: br-ex
+ description: |
+ Network interface on which inspection dnsmasq will listen. Should allow
+ access to untagged traffic from nodes booted for inspection. The default
+ value only makes sense if you don't modify any networking configuration.
+ type: string
+ IronicInspectorIPXEEnabled:
+ default: true
+ description: Whether to use iPXE for inspection.
+ type: boolean
+ IronicInspectorIpRange:
+ description: |
+ Temporary IP range that will be given to nodes during the inspection
+ process. This should not overlap with any range that Neutron's DHCP
+      gives away, but it has to be routable back to the ironic-inspector API.
+      This option has no meaningful default, and thus is required.
+ type: string
+ IronicInspectorUseSwift:
+ default: true
+ description: Whether to use Swift for storing introspection data.
+ type: boolean
+ IronicIPXEPort:
+ default: 8088
+ description: Port to use for serving images when iPXE is used.
+ type: string
+ IronicPassword:
+ description: The password for the Ironic service and db account, used by the Ironic services
+ type: string
+ hidden: true
+
+conditions:
+ enable_ipxe: {equals : [{get_param: IronicInspectorIPXEEnabled}, true]}
+ use_swift: {equals : [{get_param: IronicInspectorUseSwift}, true]}
+
+outputs:
+ role_data:
+ description: Role data for the Ironic Inspector role.
+ value:
+ service_name: ironic_inspector
+ monitoring_subscription: {get_param: MonitoringSubscriptionIronicInspector}
+ config_settings:
+ map_merge:
+ - ironic::inspector::listen_address: {get_param: [ServiceNetMap, IronicInspectorNetwork]}
+ ironic::inspector::dnsmasq_local_ip: {get_param: [ServiceNetMap, IronicInspectorNetwork]}
+ ironic::inspector::dnsmasq_ip_range: {get_param: IronicInspectorIpRange}
+ ironic::inspector::dnsmasq_interface: {get_param: IronicInspectorInterface}
+ ironic::inspector::debug: {get_param: Debug}
+ ironic::inspector::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ironic::inspector::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::inspector::authtoken::username: 'ironic'
+ ironic::inspector::authtoken::password: {get_param: IronicPassword}
+ ironic::inspector::authtoken::project_name: 'service'
+ ironic::inspector::authtoken::user_domain_name: 'Default'
+ ironic::inspector::authtoken::project_domain_name: 'Default'
+ tripleo.ironic_inspector.firewall_rules:
+ '137 ironic-inspector':
+ dport:
+ - 5050
+ ironic::inspector::ironic_username: 'ironic'
+ ironic::inspector::ironic_password: {get_param: IronicPassword}
+ ironic::inspector::ironic_tenant_name: 'service'
+ ironic::inspector::ironic_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::inspector::ironic_max_retries: 6
+ ironic::inspector::ironic_retry_interval: 10
+ ironic::inspector::ironic_user_domain_name: 'Default'
+ ironic::inspector::ironic_project_domain_name: 'Default'
+ ironic::inspector::http_port: {get_param: IronicIPXEPort}
+ ironic::inspector::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://ironic-inspector:'
+ - {get_param: IronicPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/ironic-inspector'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ -
+ if:
+ - enable_ipxe
+ - ironic::inspector::pxe_transfer_protocol: 'http'
+ - {}
+ -
+ if:
+ - use_swift
+ - ironic::inspector::store_data: 'swift'
+ ironic::inspector::swift_username: 'ironic'
+ ironic::inspector::swift_password: {get_param: IronicPassword}
+ ironic::inspector::swift_tenant_name: 'service'
+ ironic::inspector::swift_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::inspector::swift_user_domain_name: 'Default'
+ ironic::inspector::swift_project_domain_name: 'Default'
+ - {}
+ step_config: |
+ include ::tripleo::profile::base::ironic_inspector
+ service_config_settings:
+ keystone:
+ ironic::keystone::auth_inspector::tenant: 'service'
+ ironic::keystone::auth_inspector::public_url: {get_param: [EndpointMap, IronicInspectorPublic, uri]}
+ ironic::keystone::auth_inspector::internal_url: {get_param: [EndpointMap, IronicInspectorInternal, uri]}
+ ironic::keystone::auth_inspector::admin_url: {get_param: [EndpointMap, IronicInspectorAdmin, uri]}
+ ironic::keystone::auth_inspector::password: {get_param: IronicPassword}
+ ironic::keystone::auth_inspector::region: {get_param: KeystoneRegion}
+ mysql:
+ ironic::inspector::db::mysql::password: {get_param: IronicPassword}
+ ironic::inspector::db::mysql::user: ironic-inspector
+ ironic::inspector::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ ironic::inspector::db::mysql::dbname: ironic-inspector
+ ironic::inspector::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
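The new ironic-inspector template has no default for IronicInspectorIpRange, so it must be supplied when the service is enabled. A hedged environment sketch; the registry key name follows the usual OS::TripleO::Services convention and is an assumption here, and the addresses and interface name are placeholders:

    resource_registry:
      # key name assumed; point it at the template added above
      OS::TripleO::Services::IronicInspector: ../puppet/services/ironic-inspector.yaml

    parameter_defaults:
      # placeholder range; must not overlap Neutron's DHCP ranges and must be
      # routable back to the ironic-inspector API
      IronicInspectorIpRange: '192.168.24.100,192.168.24.120'
      # interface that sees untagged traffic from nodes booted for inspection
      IronicInspectorInterface: br-baremetal
      IronicInspectorUseSwift: true
      IronicInspectorIPXEEnabled: true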
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index f3a9cbc4..60d194bc 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -113,10 +113,27 @@ parameters:
description: The second Keystone credential key. Must be a valid key.
KeystoneFernetKey0:
type: string
- description: The first Keystone fernet key. Must be a valid key.
+ default: ''
+ description: (DEPRECATED) The first Keystone fernet key. Must be a valid key.
KeystoneFernetKey1:
type: string
- description: The second Keystone fernet key. Must be a valid key.
+ default: ''
+ description: (DEPRECATED) The second Keystone fernet key. Must be a valid key.
+ KeystoneFernetKeys:
+ type: json
+ description: Mapping containing keystone's fernet keys and their paths.
+ KeystoneFernetMaxActiveKeys:
+ type: number
+ description: The maximum active keys in the keystone fernet key repository.
+ default: 5
+ ManageKeystoneFernetKeys:
+ type: boolean
+ default: true
+ description: Whether TripleO should manage the keystone fernet keys or not.
+ If set to true, the fernet keys will get the values from the
+ saved keys repository in mistral (the KeystoneFernetKeys
+ variable). If set to false, only the stack creation
+ initializes the keys, but subsequent updates won't touch them.
KeystoneLoggingSource:
type: json
default:
@@ -187,6 +204,17 @@ parameters:
default: {}
hidden: true
+parameter_groups:
+- label: deprecated
+ description: |
+ The following parameters are deprecated and will be removed. They should not
+ be relied on for new deployments. If you have concerns regarding deprecated
+ parameters, please contact the TripleO development team on IRC or the
+ OpenStack mailing list.
+ parameters:
+ - KeystoneFernetKey0
+ - KeystoneFernetKey1
+
resources:
ApacheServiceBase:
@@ -234,6 +262,7 @@ outputs:
keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
keystone::token_provider: {get_param: KeystoneTokenProvider}
keystone::enable_fernet_setup: {if: [keystone_fernet_tokens, true, false]}
+ keystone::fernet_max_active_keys: {get_param: KeystoneFernetMaxActiveKeys}
keystone::enable_proxy_headers_parsing: true
keystone::enable_credential_setup: true
keystone::credential_keys:
@@ -241,12 +270,8 @@ outputs:
content: {get_param: KeystoneCredential0}
'/etc/keystone/credential-keys/1':
content: {get_param: KeystoneCredential1}
- keystone::fernet_keys:
- '/etc/keystone/fernet-keys/0':
- content: {get_param: KeystoneFernetKey0}
- '/etc/keystone/fernet-keys/1':
- content: {get_param: KeystoneFernetKey1}
- keystone::fernet_replace_keys: false
+ keystone::fernet_keys: {get_param: KeystoneFernetKeys}
+ keystone::fernet_replace_keys: {get_param: ManageKeystoneFernetKeys}
keystone::debug:
if:
- service_debug_unset
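KeystoneFernetKeys replaces the two fixed KeystoneFernetKey0/1 parameters with a json map of key path to key content, sized by KeystoneFernetMaxActiveKeys. The shape below is a sketch inferred from the per-key structure removed above; the key material is a placeholder, and in practice the map is populated from the mistral-managed key repository:

    parameter_defaults:
      KeystoneFernetMaxActiveKeys: 5
      ManageKeystoneFernetKeys: true
      KeystoneFernetKeys:
        '/etc/keystone/fernet-keys/0':
          content: '<fernet key material>'
        '/etc/keystone/fernet-keys/1':
          content: '<fernet key material>'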
diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml
index 76d5c269..4493721c 100644
--- a/puppet/services/neutron-ovs-agent.yaml
+++ b/puppet/services/neutron-ovs-agent.yaml
@@ -92,8 +92,12 @@ resources:
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
@@ -138,7 +142,7 @@ outputs:
expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
data:
ovs_upgrade:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
neutron_ovs_upgrade:
- name: Check if neutron_ovs_agent is deployed
command: systemctl is-enabled neutron-openvswitch-agent
diff --git a/puppet/services/neutron-ovs-dpdk-agent.yaml b/puppet/services/neutron-ovs-dpdk-agent.yaml
index 29c10469..da7a4d68 100644
--- a/puppet/services/neutron-ovs-dpdk-agent.yaml
+++ b/puppet/services/neutron-ovs-dpdk-agent.yaml
@@ -26,32 +26,6 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- HostCpusList:
- default: "0"
- description: List of cores to be used for host process
- type: string
- constraints:
- - allowed_pattern: "[0-9,-]+"
- NeutronDpdkCoreList:
- default: ""
- description: List of cores to be used for DPDK Poll Mode Driver
- type: string
- constraints:
- - allowed_pattern: "[0-9,-]*"
- NeutronDpdkMemoryChannels:
- default: ""
- description: Number of memory channels to be used for DPDK
- type: string
- constraints:
- - allowed_pattern: "[0-9]*"
- NeutronDpdkSocketMemory:
- default: ""
- description: Memory allocated for each socket
- type: string
- NeutronDpdkDriverType:
- default: "vfio-pci"
- description: DPDK Driver type
- type: string
# below parameters has to be set in neutron agent only for compute nodes.
# as of now there is no other usecase for these parameters except dpdk.
# should be moved to compute only ovs agent in case of any other usecases.
@@ -75,9 +49,6 @@ resources:
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
-
# Merging role-specific parameters (RoleParameters) with the default parameters.
# RoleParameters will have the precedence over the default parameters.
RoleParametersValue:
@@ -89,20 +60,19 @@ resources:
- map_replace:
- neutron::agents::ml2::ovs::datapath_type: NeutronDatapathType
neutron::agents::ml2::ovs::vhostuser_socket_dir: NeutronVhostuserSocketDir
- vswitch::dpdk::driver_type: NeutronDpdkDriverType
- vswitch::dpdk::host_core_list: HostCpusList
- vswitch::dpdk::pmd_core_list: NeutronDpdkCoreList
- vswitch::dpdk::memory_channels: NeutronDpdkMemoryChannels
- vswitch::dpdk::socket_mem: NeutronDpdkSocketMemory
- values: {get_param: [RoleParameters]}
- values:
NeutronDatapathType: {get_param: NeutronDatapathType}
NeutronVhostuserSocketDir: {get_param: NeutronVhostuserSocketDir}
- NeutronDpdkDriverType: {get_param: NeutronDpdkDriverType}
- HostCpusList: {get_param: HostCpusList}
- NeutronDpdkCoreList: {get_param: NeutronDpdkCoreList}
- NeutronDpdkMemoryChannels: {get_param: NeutronDpdkMemoryChannels}
- NeutronDpdkSocketMemory: {get_param: NeutronDpdkSocketMemory}
+
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
@@ -116,7 +86,8 @@ outputs:
- keys:
tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
- neutron::agents::ml2::ovs::enable_dpdk: true
+ - get_attr: [Ovs, role_data, config_settings]
- get_attr: [RoleParametersValue, value]
step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]}
upgrade_tasks:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
diff --git a/puppet/services/neutron-sriov-agent.yaml b/puppet/services/neutron-sriov-agent.yaml
index c124d1e6..090640ed 100644
--- a/puppet/services/neutron-sriov-agent.yaml
+++ b/puppet/services/neutron-sriov-agent.yaml
@@ -65,6 +65,24 @@ resources:
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
+ # Merging role-specific parameters (RoleParameters) with the default parameters.
+ # RoleParameters will have the precedence over the default parameters.
+ RoleParametersValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_replace:
+ - map_replace:
+ - neutron::agents::ml2::sriov::physical_device_mappings: NeutronPhysicalDevMappings
+ neutron::agents::ml2::sriov::exclude_devices: NeutronExcludeDevices
+ tripleo::host::sriov::number_of_vfs: NeutronSriovNumVFs
+ - values: {get_param: [RoleParameters]}
+ - values:
+ NeutronPhysicalDevMappings: {get_param: NeutronPhysicalDevMappings}
+ NeutronExcludeDevices: {get_param: NeutronExcludeDevices}
+ NeutronSriovNumVFs: {get_param: NeutronSriovNumVFs}
+
outputs:
role_data:
description: Role data for the Neutron SR-IOV nic agent service.
@@ -73,8 +91,6 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- - neutron::agents::ml2::sriov::physical_device_mappings: {get_param: NeutronPhysicalDevMappings}
- neutron::agents::ml2::sriov::exclude_devices: {get_param: NeutronExcludeDevices}
- tripleo::host::sriov::number_of_vfs: {get_param: NeutronSriovNumVFs}
+ - get_attr: [RoleParametersValue, value]
step_config: |
include ::tripleo::profile::base::neutron::sriov
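The nested map_replace introduced here (and reused by the DPDK and nova-compute templates below) gives role-specific parameters precedence over the global defaults: the inner map_replace swaps in any matching value found in RoleParameters, and the outer one fills the placeholders that remain with the global parameter values. A worked sketch with illustrative values:

    # Inputs for the sketch:
    #   RoleParameters:      {NeutronSriovNumVFs: ['ens2f0:16']}
    #   global parameters:   NeutronPhysicalDevMappings: ['datacentre:ens2f0']
    #                        NeutronExcludeDevices: []
    #                        NeutronSriovNumVFs: []
    #
    # After the inner map_replace (values present in RoleParameters are substituted):
    #   tripleo::host::sriov::number_of_vfs: ['ens2f0:16']
    #   neutron::agents::ml2::sriov::physical_device_mappings: NeutronPhysicalDevMappings
    #   neutron::agents::ml2::sriov::exclude_devices: NeutronExcludeDevices
    #
    # After the outer map_replace (remaining placeholders take the global values):
    #   tripleo::host::sriov::number_of_vfs: ['ens2f0:16']
    #   neutron::agents::ml2::sriov::physical_device_mappings: ['datacentre:ens2f0']
    #   neutron::agents::ml2::sriov::exclude_devices: []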
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index 835edf0a..7e741d8c 100644
--- a/puppet/services/nova-api.yaml
+++ b/puppet/services/nova-api.yaml
@@ -81,17 +81,15 @@ conditions:
nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
resources:
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- # ApacheServiceBase:
- # type: ./apache.yaml
- # properties:
- # ServiceNetMap: {get_param: ServiceNetMap}
- # DefaultPasswords: {get_param: DefaultPasswords}
- # EndpointMap: {get_param: EndpointMap}
- # RoleName: {get_param: RoleName}
- # RoleParameters: {get_param: RoleParameters}
- # EnableInternalTLS: {get_param: EnableInternalTLS}
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
NovaBase:
type: ./nova-base.yaml
@@ -114,9 +112,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- # - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
- nova::cron::archive_deleted_rows::hour: '*/12'
nova::cron::archive_deleted_rows::destination: '/dev/null'
tripleo.nova_api.firewall_rules:
@@ -143,23 +139,21 @@ outputs:
"%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- nova_wsgi_enabled: false
- # nova::api::service_name: 'httpd'
- # nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
+ nova_wsgi_enabled: true
+ nova::api::service_name: 'httpd'
+ nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- # nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
- # nova::wsgi::apache_api::servername:
- # str_replace:
- # template:
- # "%{hiera('fqdn_$NETWORK')}"
- # params:
- # $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::wsgi::apache_api::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
nova::api::instance_name_template: {get_param: InstanceNameTemplate}
nova_enable_db_purge: {get_param: NovaEnableDBPurge}
@@ -169,9 +163,7 @@ outputs:
- nova_workers_zero
- {}
- nova::api::osapi_compute_workers: {get_param: NovaWorkers}
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- # nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
+ nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
step_config: |
include tripleo::profile::base::nova::api
service_config_settings:
@@ -199,87 +191,91 @@ outputs:
nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
nova::keystone::auth::password: {get_param: NovaPassword}
nova::keystone::auth::region: {get_param: KeystoneRegion}
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- # metadata_settings:
- # get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: get bootstrap nodeid
- tags: common
- command: hiera bootstrap_nodeid
- register: bootstrap_node
- - name: set is_bootstrap_node fact
- tags: common
- set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}}
- - name: Extra migration for nova tripleo/+bug/1656791
- tags: step0,pre-upgrade
- when: is_bootstrap_node
- command: nova-manage db online_data_migrations
- - name: Stop and disable nova_api service (pre-upgrade not under httpd)
- tags: step2
- service: name=openstack-nova-api state=stopped enabled=no
- - name: Create puppet manifest to set transport_url in nova.conf
- tags: step5
- when: is_bootstrap_node
- copy:
- dest: /root/nova-api_upgrade_manifest.pp
- mode: 0600
- content: >
- $transport_url = os_transport_url({
- 'transport' => hiera('messaging_service_name', 'rabbit'),
- 'hosts' => any2array(hiera('rabbitmq_node_names', undef)),
- 'port' => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
- 'username' => hiera('nova::rabbit_userid', 'guest'),
- 'password' => hiera('nova::rabbit_password'),
- 'ssl' => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
- })
- oslo::messaging::default { 'nova_config':
- transport_url => $transport_url
- }
- - name: Run puppet apply to set tranport_url in nova.conf
- tags: step5
- when: is_bootstrap_node
- command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
- register: puppet_apply_nova_api_upgrade
- failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
- changed_when: puppet_apply_nova_api_upgrade.rc == 2
- - name: Setup cell_v2 (map cell0)
- tags: step5
- when: is_bootstrap_node
- shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
- - name: Setup cell_v2 (create default cell)
- tags: step5
- when: is_bootstrap_node
- # (owalsh) puppet-nova expects the cell name 'default'
- # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
- shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
- register: nova_api_create_cell
- failed_when: nova_api_create_cell.rc not in [0,2]
- changed_when: nova_api_create_cell.rc == 0
- - name: Setup cell_v2 (sync nova/cell DB)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage db sync
- async: {get_param: NovaDbSyncTimeout}
- poll: 10
- - name: Setup cell_v2 (get cell uuid)
- tags: step5
- when: is_bootstrap_node
- shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
- register: nova_api_cell_uuid
- - name: Setup cell_v2 (migrate hosts)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
- - name: Setup cell_v2 (migrate instances)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
- - name: Sync nova_api DB
- tags: step5
- command: nova-manage api_db sync
- when: is_bootstrap_node
- - name: Online data migration for nova
- tags: step5
- when: is_bootstrap_node
- command: nova-manage db online_data_migrations
+ yaql:
+ expression: $.data.apache_upgrade + $.data.nova_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ nova_api_upgrade:
+ - name: get bootstrap nodeid
+ tags: common
+ command: hiera bootstrap_nodeid
+ register: bootstrap_node
+ - name: set is_bootstrap_node fact
+ tags: common
+ set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
+ - name: Extra migration for nova tripleo/+bug/1656791
+ tags: step0,pre-upgrade
+ when: is_bootstrap_node
+ command: nova-manage db online_data_migrations
+ - name: Stop and disable nova_api service (pre-upgrade not under httpd)
+ tags: step2
+ service: name=openstack-nova-api state=stopped enabled=no
+ - name: Create puppet manifest to set transport_url in nova.conf
+ tags: step5
+ when: is_bootstrap_node
+ copy:
+ dest: /root/nova-api_upgrade_manifest.pp
+ mode: 0600
+ content: >
+ $transport_url = os_transport_url({
+ 'transport' => hiera('messaging_service_name', 'rabbit'),
+ 'hosts' => any2array(hiera('rabbitmq_node_names', undef)),
+ 'port' => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
+ 'username' => hiera('nova::rabbit_userid', 'guest'),
+ 'password' => hiera('nova::rabbit_password'),
+ 'ssl' => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
+ })
+ oslo::messaging::default { 'nova_config':
+ transport_url => $transport_url
+ }
+        - name: Run puppet apply to set transport_url in nova.conf
+ tags: step5
+ when: is_bootstrap_node
+ command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+ register: puppet_apply_nova_api_upgrade
+ failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
+ changed_when: puppet_apply_nova_api_upgrade.rc == 2
+ - name: Setup cell_v2 (map cell0)
+ tags: step5
+ when: is_bootstrap_node
+ shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
+ - name: Setup cell_v2 (create default cell)
+ tags: step5
+ when: is_bootstrap_node
+ # (owalsh) puppet-nova expects the cell name 'default'
+ # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
+ shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
+ register: nova_api_create_cell
+ failed_when: nova_api_create_cell.rc not in [0,2]
+ changed_when: nova_api_create_cell.rc == 0
+ - name: Setup cell_v2 (sync nova/cell DB)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage db sync
+ async: {get_param: NovaDbSyncTimeout}
+ poll: 10
+ - name: Setup cell_v2 (get cell uuid)
+ tags: step5
+ when: is_bootstrap_node
+ shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
+ register: nova_api_cell_uuid
+ - name: Setup cell_v2 (migrate hosts)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
+ - name: Setup cell_v2 (migrate instances)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
+ - name: Sync nova_api DB
+ tags: step5
+ command: nova-manage api_db sync
+ when: is_bootstrap_node
+ - name: Online data migration for nova
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage db online_data_migrations
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index e39e997a..68a71e42 100644
--- a/puppet/services/nova-compute.yaml
+++ b/puppet/services/nova-compute.yaml
@@ -105,6 +105,22 @@ resources:
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
+ # Merging role-specific parameters (RoleParameters) with the default parameters.
+ # RoleParameters will have the precedence over the default parameters.
+ RoleParametersValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_replace:
+ - map_replace:
+ - nova::compute::vcpu_pin_set: NovaVcpuPinSet
+ nova::compute::reserved_host_memory: NovaReservedHostMemory
+ - values: {get_param: [RoleParameters]}
+ - values:
+ NovaVcpuPinSet: {get_param: NovaVcpuPinSet}
+ NovaReservedHostMemory: {get_param: NovaReservedHostMemory}
+
outputs:
role_data:
description: Role data for the Nova Compute service.
@@ -117,14 +133,18 @@ outputs:
config_settings:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
+ - get_attr: [RoleParametersValue, value]
- nova::compute::libvirt::manage_libvirt_services: false
nova::compute::pci_passthrough:
str_replace:
template: "JSON_PARAM"
params:
- JSON_PARAM: {get_param: NovaPCIPassthrough}
- nova::compute::vcpu_pin_set: {get_param: NovaVcpuPinSet}
- nova::compute::reserved_host_memory: {get_param: NovaReservedHostMemory}
+ map_replace:
+ - map_replace:
+ - JSON_PARAM: NovaPCIPassthrough
+ - values: {get_param: [RoleParameters]}
+ - values:
+ NovaPCIPassthrough: {get_param: NovaPCIPassthrough}
# we manage migration in nova common puppet profile
nova::compute::libvirt::migration_support: false
tripleo::profile::base::nova::manage_migration: true
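On the operator side, the values merged by RoleParametersValue above are normally supplied through a per-role parameter map, with the plain parameter acting as the global fallback. A hedged sketch; the role name is illustrative and the <RoleName>Parameters mapping is an assumption of this example:

    parameter_defaults:
      NovaReservedHostMemory: 4096            # global value, applies to every role
      ComputeOvsDpdkParameters:               # illustrative <RoleName>Parameters map
        NovaVcpuPinSet: '4-23'                # overrides the global value for this role only
        NovaReservedHostMemory: 8192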
diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml
index 5da6d43e..72a1fce7 100644
--- a/puppet/services/nova-scheduler.yaml
+++ b/puppet/services/nova-scheduler.yaml
@@ -45,6 +45,14 @@ parameters:
default:
tag: openstack.nova.scheduler
path: /var/log/nova/nova-scheduler.log
+ NovaSchedulerDiscoverHostsInCellsInterval:
+ type: number
+ default: -1
+ description: >
+ This value controls how often (in seconds) the scheduler should
+ attempt to discover new hosts that have been added to cells.
+ The default value of -1 disables the periodic task completely.
+ It is recommended to set this parameter for deployments using Ironic.
resources:
NovaBase:
@@ -71,6 +79,7 @@ outputs:
- nova::ram_allocation_ratio: '1.0'
nova::scheduler::filter::scheduler_available_filters: {get_param: NovaSchedulerAvailableFilters}
nova::scheduler::filter::scheduler_default_filters: {get_param: NovaSchedulerDefaultFilters}
+ nova::scheduler::discover_hosts_in_cells_interval: {get_param: NovaSchedulerDiscoverHostsInCellsInterval}
step_config: |
include tripleo::profile::base::nova::scheduler
upgrade_tasks:
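For Ironic-based deployments the new periodic discovery can be enabled by setting the parameter to a positive interval; a minimal sketch with an illustrative value:

    parameter_defaults:
      # any positive number of seconds enables the periodic task;
      # -1 (the default) leaves it disabled
      NovaSchedulerDiscoverHostsInCellsInterval: 120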
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index 0d859be1..1a8754a5 100644
--- a/puppet/services/opendaylight-ovs.yaml
+++ b/puppet/services/opendaylight-ovs.yaml
@@ -57,8 +57,14 @@ parameters:
type: json
resources:
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
@@ -66,19 +72,21 @@ outputs:
value:
service_name: opendaylight_ovs
config_settings:
- opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
- opendaylight::username: {get_param: OpenDaylightUsername}
- opendaylight::password: {get_param: OpenDaylightPassword}
- opendaylight_check_url: {get_param: OpenDaylightCheckURL}
- opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
- neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
- neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
- tripleo.opendaylight_ovs.firewall_rules:
- '118 neutron vxlan networks':
- proto: 'udp'
- dport: 4789
- '136 neutron gre networks':
- proto: 'gre'
+ map_merge:
+ - opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
+ opendaylight::username: {get_param: OpenDaylightUsername}
+ opendaylight::password: {get_param: OpenDaylightPassword}
+ opendaylight_check_url: {get_param: OpenDaylightCheckURL}
+ opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
+ neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
+ neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
+ tripleo.opendaylight_ovs.firewall_rules:
+ '118 neutron vxlan networks':
+ proto: 'udp'
+ dport: 4789
+ '136 neutron gre networks':
+ proto: 'gre'
+ - get_attr: [Ovs, role_data, config_settings]
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
upgrade_tasks:
@@ -86,7 +94,7 @@ outputs:
expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade
data:
ovs_upgrade:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
opendaylight_upgrade:
- name: Check if openvswitch is deployed
command: systemctl is-enabled openvswitch
diff --git a/puppet/services/openvswitch-upgrade.yaml b/puppet/services/openvswitch-upgrade.yaml
deleted file mode 100644
index f6e78462..00000000
--- a/puppet/services/openvswitch-upgrade.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-heat_template_version: pike
-
-description: >
- Openvswitch package special handling for upgrade.
-
-outputs:
- role_data:
- description: Upgrade task for special handling of Openvswitch (OVS) upgrade.
- value:
- service_name: openvswitch_upgrade
- upgrade_tasks:
- - name: Check openvswitch version.
- tags: step2
- register: ovs_version
- ignore_errors: true
- shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
- - name: Check openvswitch packaging.
- tags: step2
- shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
- register: ovs_packaging_issue
- ignore_errors: true
- - block:
- - name: "Ensure empty directory: emptying."
- file:
- state: absent
- path: /root/OVS_UPGRADE
- - name: "Ensure empty directory: creating."
- file:
- state: directory
- path: /root/OVS_UPGRADE
- owner: root
- group: root
- mode: 0750
- - name: Download OVS packages.
- command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
- - name: Get rpm list for manual upgrade of OVS.
- shell: ls -1 /root/OVS_UPGRADE/*.rpm
- register: ovs_list_of_rpms
- - name: Manual upgrade of OVS
- shell: |
- rpm -U --test {{item}} 2>&1 | grep "already installed" || \
- rpm -U --replacepkgs --notriggerun --nopostun {{item}};
- args:
- chdir: /root/OVS_UPGRADE
- with_items:
- - "{{ovs_list_of_rpms.stdout_lines}}"
- tags: step2
- when: "'2.5.0-14' in '{{ovs_version.stdout}}'
- or
- ovs_packaging_issue|succeeded"
diff --git a/puppet/services/openvswitch.yaml b/puppet/services/openvswitch.yaml
new file mode 100644
index 00000000..36aa5db7
--- /dev/null
+++ b/puppet/services/openvswitch.yaml
@@ -0,0 +1,178 @@
+heat_template_version: pike
+
+description: >
+ Open vSwitch Configuration
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OvsDpdkCoreList:
+ description: >
+      List of cores to be used for DPDK lcore threads. Note, these threads
+      are used by the OVS control path for validation and handling functions.
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ""
+ OvsDpdkMemoryChannels:
+ description: Number of memory channels per socket to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ""
+ OvsDpdkSocketMemory:
+ default: ""
+ description: >
+ Sets the amount of hugepage memory to assign per NUMA node. It is
+ recommended to use the socket closest to the PCIe slot used for the
+ desired DPDK NIC. The format should be in "<socket 0 mem>, <socket 1
+ mem>, <socket n mem>", where the value is specified in MB. For example:
+ "1024,0".
+ type: string
+ OvsDpdkDriverType:
+ default: "vfio-pci"
+ description: >
+ DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+ this UIO/PMD driver.
+ type: string
+ OvsPmdCoreList:
+ description: >
+      A list or range of CPU cores for PMD threads to be pinned to. Note, the
+      NIC's location relative to the cores on its socket, the number of
+      hyper-threaded logical cores, and the desired number of PMD threads can
+      all play a role in configuring this setting. These cores should be on
+      the same socket where OvsDpdkSocketMemory is assigned. If using
+      hyper-threading, specify both logical cores that make up each physical
+      core. Also, specifying more than one core will spawn multiple PMD
+      threads, which may improve dataplane performance.
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ type: string
+ default: ""
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Queens cycle.
+ HostCpusList:
+ description: List of cores to be used for host process
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkCoreList:
+ description: List of cores to be used for DPDK Poll Mode Driver
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkMemoryChannels:
+ description: Number of memory channels to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ''
+ NeutronDpdkSocketMemory:
+ default: ''
+ description: Memory allocated for each socket
+ type: string
+ NeutronDpdkDriverType:
+ default: "vfio-pci"
+ description: DPDK Driver type
+ type: string
+
+parameter_groups:
+- label: deprecated
+  description: Do not use deprecated params; they will be removed.
+ parameters:
+ - HostCpusList
+ - NeutronDpdkCoreList
+ - NeutronDpdkMemoryChannels
+ - NeutronDpdkSocketMemory
+ - NeutronDpdkDriverType
+
+conditions:
+ l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+ pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+ mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+ socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+ driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+
+outputs:
+ role_data:
+ description: Role data for the Open vSwitch service.
+ value:
+ service_name: openvswitch
+ config_settings:
+ map_replace:
+ - map_replace:
+ - vswitch::dpdk::driver_type: OvsDpdkDriverType
+ vswitch::dpdk::host_core_list: OvsDpdkCoreList
+ vswitch::dpdk::pmd_core_list: OvsPmdCoreList
+ vswitch::dpdk::memory_channels: OvsDpdkMemoryChannels
+ vswitch::dpdk::socket_mem: OvsDpdkSocketMemory
+ - values: {get_param: [RoleParameters]}
+ - values:
+ OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+ OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+ OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+ OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+ OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
+ upgrade_tasks:
+ - name: Check openvswitch version.
+ tags: step2
+ register: ovs_version
+ ignore_errors: true
+ shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
+ - name: Check openvswitch packaging.
+ tags: step2
+ shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
+ register: ovs_packaging_issue
+ ignore_errors: true
+ - block:
+ - name: "Ensure empty directory: emptying."
+ file:
+ state: absent
+ path: /root/OVS_UPGRADE
+ - name: "Ensure empty directory: creating."
+ file:
+ state: directory
+ path: /root/OVS_UPGRADE
+ owner: root
+ group: root
+ mode: 0750
+ - name: Download OVS packages.
+ command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
+ - name: Get rpm list for manual upgrade of OVS.
+ shell: ls -1 /root/OVS_UPGRADE/*.rpm
+ register: ovs_list_of_rpms
+ - name: Manual upgrade of OVS
+ shell: |
+ rpm -U --test {{item}} 2>&1 | grep "already installed" || \
+ rpm -U --replacepkgs --notriggerun --nopostun {{item}};
+ args:
+ chdir: /root/OVS_UPGRADE
+ with_items:
+ - "{{ovs_list_of_rpms.stdout_lines}}"
+ tags: step2
+ when: "'2.5.0-14' in '{{ovs_version.stdout}}'
+ or
+ ovs_packaging_issue|succeeded"
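The conditions block above keeps the deprecated HostCpusList/NeutronDpdk* parameters working as fallbacks, but new environments should set the Ovs* names directly. A hedged sketch with illustrative values only:

    parameter_defaults:
      OvsDpdkCoreList: '0,22'           # replaces HostCpusList
      OvsPmdCoreList: '2,3,24,25'       # replaces NeutronDpdkCoreList
      OvsDpdkMemoryChannels: '4'        # replaces NeutronDpdkMemoryChannels
      OvsDpdkSocketMemory: '1024,0'     # replaces NeutronDpdkSocketMemory; MB per NUMA node
      OvsDpdkDriverType: vfio-pci       # replaces NeutronDpdkDriverType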
diff --git a/puppet/services/ovn-dbs.yaml b/puppet/services/ovn-dbs.yaml
index 20c38d8a..df234c77 100644
--- a/puppet/services/ovn-dbs.yaml
+++ b/puppet/services/ovn-dbs.yaml
@@ -44,6 +44,7 @@ outputs:
ovn::northbound::port: {get_param: OVNNorthboundServerPort}
ovn::southbound::port: {get_param: OVNSouthboundServerPort}
ovn::northd::dbs_listen_ip: {get_param: [ServiceNetMap, OvnDbsNetwork]}
+ tripleo::haproxy::ovn_dbs_manage_lb: true
tripleo.ovn_dbs.firewall_rules:
'121 OVN DB server ports':
proto: 'tcp'
diff --git a/puppet/services/pacemaker/database/mysql.yaml b/puppet/services/pacemaker/database/mysql.yaml
index d8e942d0..0a7659e0 100644
--- a/puppet/services/pacemaker/database/mysql.yaml
+++ b/puppet/services/pacemaker/database/mysql.yaml
@@ -27,6 +27,11 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
resources:
@@ -61,6 +66,8 @@ outputs:
# internal_api_subnet - > IP/CIDR
tripleo::profile::pacemaker::database::mysql::gmcast_listen_addr:
get_param: [ServiceNetMap, MysqlNetwork]
+ tripleo::profile::pacemaker::database::mysql::ca_file:
+ get_param: InternalTLSCAFile
step_config: |
include ::tripleo::profile::pacemaker::database::mysql
metadata_settings:
diff --git a/puppet/services/pacemaker/ovn-dbs.yaml b/puppet/services/pacemaker/ovn-dbs.yaml
new file mode 100644
index 00000000..1cbb4763
--- /dev/null
+++ b/puppet/services/pacemaker/ovn-dbs.yaml
@@ -0,0 +1,61 @@
+heat_template_version: ocata
+
+description: >
+ OVN databases configured with puppet in HA mode
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OVNNorthboundServerPort:
+ description: Port of the OVN Northbound DB server
+ type: number
+ default: 6641
+ OVNSouthboundServerPort:
+ description: Port of the OVN Southbound DB server
+ type: number
+ default: 6642
+
+resources:
+
+ OVNDBsBase:
+ type: ../ovn-dbs.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the OVN northd service
+ value:
+ service_name: ovn_dbs
+ config_settings:
+ map_merge:
+ - get_attr: [OVNDBsBase, role_data, config_settings]
+ - tripleo::haproxy::ovn_dbs_manage_lb: false
+ tripleo::profile::pacemaker::ovn_northd::nb_db_port: {get_param: OVNNorthboundServerPort}
+ tripleo::profile::pacemaker::ovn_northd::sb_db_port: {get_param: OVNSouthboundServerPort}
+ step_config: |
+ include ::tripleo::profile::pacemaker::ovn_northd
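To use this HA variant, the OVN DBs service in the resource registry is pointed at the pacemaker template instead of the base one; it reuses the base config_settings but sets tripleo::haproxy::ovn_dbs_manage_lb to false, leaving the database load balancing to the pacemaker-managed resource. A sketch, assuming the usual registry key name:

    resource_registry:
      # key name assumed; the HA template wraps puppet/services/ovn-dbs.yaml
      OS::TripleO::Services::OVNDBs: ../puppet/services/pacemaker/ovn-dbs.yaml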
diff --git a/puppet/services/pacemaker_remote.yaml b/puppet/services/pacemaker_remote.yaml
index 74aaf599..c49b0848 100644
--- a/puppet/services/pacemaker_remote.yaml
+++ b/puppet/services/pacemaker_remote.yaml
@@ -34,6 +34,42 @@ parameters:
MonitoringSubscriptionPacemakerRemote:
default: 'overcloud-pacemaker_remote'
type: string
+ EnableFencing:
+ default: false
+ description: Whether to enable fencing in Pacemaker or not.
+ type: boolean
+ FencingConfig:
+ default: {}
+ description: |
+ Pacemaker fencing configuration. The JSON should have
+ the following structure:
+ {
+ "devices": [
+ {
+ "agent": "AGENT_NAME",
+ "host_mac": "HOST_MAC_ADDRESS",
+ "params": {"PARAM_NAME": "PARAM_VALUE"}
+ }
+ ]
+ }
+ For instance:
+ {
+ "devices": [
+ {
+ "agent": "fence_xvm",
+ "host_mac": "52:54:00:aa:bb:cc",
+ "params": {
+ "multicast_address": "225.0.0.12",
+ "port": "baremetal_0",
+ "manage_fw": true,
+ "manage_key_file": true,
+ "key_file": "/etc/fence_xvm.key",
+ "key_file_password": "abcdef"
+ }
+ }
+ ]
+ }
+ type: json
PacemakerRemoteLoggingSource:
type: json
default:
@@ -60,6 +96,8 @@ outputs:
proto: 'tcp'
dport:
- 3121
+ tripleo::fencing::config: {get_param: FencingConfig}
+ enable_fencing: {get_param: EnableFencing}
tripleo::profile::base::pacemaker_remote::remote_authkey: {get_param: PacemakerRemoteAuthkey}
step_config: |
include ::tripleo::profile::base::pacemaker_remote
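The fencing parameters added here for pacemaker-remote nodes take the structure shown in the FencingConfig description; a brief environment sketch reusing that fence_xvm example (all values are placeholders):

    parameter_defaults:
      EnableFencing: true
      FencingConfig:
        devices:
          - agent: fence_xvm
            host_mac: '52:54:00:aa:bb:cc'
            params:
              multicast_address: 225.0.0.12
              port: baremetal_0
              manage_fw: true
              manage_key_file: true
              key_file: /etc/fence_xvm.key
              key_file_password: 'abcdef'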
diff --git a/puppet/services/panko-api.yaml b/puppet/services/panko-api.yaml
index a41e34f7..0289b7a7 100644
--- a/puppet/services/panko-api.yaml
+++ b/puppet/services/panko-api.yaml
@@ -84,8 +84,8 @@ outputs:
tripleo.panko_api.firewall_rules:
'140 panko-api':
dport:
- - 8779
- - 13779
+ - 8977
+ - 13977
panko::api::host:
str_replace:
template:
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
deleted file mode 100644
index 0e7b6d2b..00000000
--- a/puppet/services/services.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-heat_template_version: pike
-
-description: >
- Utility stack to convert an array of services into a set of combined
- role configs.
-
-parameters:
- Services:
- default: []
- description: |
- List nested stack service templates.
- type: comma_delimited_list
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- DefaultPasswords:
- default: {}
- description: Mapping of service -> default password. Used to help
- pass top level passwords managed by Heat into services.
- type: json
- RoleName:
- default: ''
- description: Role name on which the service is applied
- type: string
- RoleParameters:
- description: Role Specific parameters to be provided to service
- default: {}
- type: json
-
-resources:
-
- ServiceChain:
- type: OS::Heat::ResourceChain
- properties:
- resources: {get_param: Services}
- concurrent: true
- resource_properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- EndpointMap: {get_param: EndpointMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- RoleName: {get_param: RoleName}
- RoleParameters: {get_param: RoleParameters}
-
- LoggingConfiguration:
- type: OS::TripleO::LoggingConfiguration
-
- ServiceServerMetadataHook:
- type: OS::TripleO::ServiceServerMetadataHook
- properties:
- RoleData: {get_attr: [ServiceChain, role_data]}
-
-outputs:
- role_data:
- description: Combined Role data for this set of services.
- value:
- service_names:
- {get_attr: [ServiceChain, role_data, service_name]}
- monitoring_subscriptions:
- yaql:
- expression: list($.data.role_data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
- data: {role_data: {get_attr: [ServiceChain, role_data]}}
- logging_sources:
- # Transform the individual logging_source configuration from
- # each service in the chain into a global list, adding some
- # default configuration at the same time.
- yaql:
- expression: >
- let(
- default_format => $.data.default_format,
- pos_file_path => $.data.pos_file_path,
- sources => $.data.sources.flatten()
- ) ->
- $sources.where($ != null).select({
- 'type' => 'tail',
- 'tag' => $.tag,
- 'path' => $.path,
- 'format' => $.get('format', $default_format),
- 'pos_file' => $.get('pos_file', $pos_file_path + '/' + $.tag + '.pos')
- })
- data:
- sources:
- - {get_attr: [LoggingConfiguration, LoggingDefaultSources]}
- - yaql:
- expression: list($.data.role_data.where($ != null).select($.get('logging_source')).where($ != null))
- data: {role_data: {get_attr: [ServiceChain, role_data]}}
-
- - {get_attr: [LoggingConfiguration, LoggingExtraSources]}
- default_format: {get_attr: [LoggingConfiguration, LoggingDefaultFormat]}
- pos_file_path: {get_attr: [LoggingConfiguration, LoggingPosFilePath]}
- logging_groups:
- # Build a list of unique groups to which we should add the
- # fluentd user.
- yaql:
- expression: >
- set(($.data.default + $.data.extra + $.data.role_data.where($ != null).select($.get('logging_groups'))).flatten()).where($)
- data:
- default: {get_attr: [LoggingConfiguration, LoggingDefaultGroups]}
- extra: {get_attr: [LoggingConfiguration, LoggingExtraGroups]}
- role_data: {get_attr: [ServiceChain, role_data]}
- config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
- global_config_settings:
- map_merge:
- yaql:
- expression: list($.data.role_data.where($ != null).select($.get('global_config_settings')).where($ != null))
- data: {role_data: {get_attr: [ServiceChain, role_data]}}
- service_config_settings:
- yaql:
- expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
- data: {role_data: {get_attr: [ServiceChain, role_data]}}
- step_config: {get_attr: [ServiceChain, role_data, step_config]}
- upgrade_tasks:
- yaql:
- # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
- expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
- data: {get_attr: [ServiceChain, role_data]}
- upgrade_batch_tasks:
- yaql:
- # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
- expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
- data: {get_attr: [ServiceChain, role_data]}
- service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index 9a304edb..c707efb1 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -59,10 +59,10 @@ parameters:
type: string
SwiftCeilometerPipelineEnabled:
description: Set to False to disable the swift proxy ceilometer pipeline.
- default: True
+ default: false
type: boolean
SwiftCeilometerIgnoreProjects:
- default: ['services']
+ default: ['service']
       description: Comma-separated list of project names to ignore.
type: comma_delimited_list
RabbitClientPort:
@@ -81,7 +81,7 @@ parameters:
conditions:
- ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, True]}
+ ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, true]}
use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
resources:
@@ -118,14 +118,20 @@ outputs:
swift::proxy::authtoken::project_name: 'service'
swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
swift::proxy::workers: {get_param: SwiftWorkers}
- swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
- swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
- swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
- swift::proxy::ceilometer::password: {get_param: SwiftPassword}
- swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
- swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
- swift::proxy::ceilometer::nonblocking_notify: true
+ -
+ if:
+ - ceilometer_pipeline_enabled
+ -
+ swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
+ swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
+ swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ swift::proxy::ceilometer::password: {get_param: SwiftPassword}
+ swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
+ swift::proxy::ceilometer::nonblocking_notify: true
+ swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ - {}
+ - swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
tripleo::profile::base::swift::proxy::rabbit_port: {get_param: RabbitClientPort}
tripleo::profile::base::swift::proxy::ceilometer_messaging_use_ssl: {get_param: RabbitClientUseSSL}
tripleo::profile::base::swift::proxy::ceilometer_enabled: {get_param: SwiftCeilometerPipelineEnabled}
@@ -168,7 +174,6 @@ outputs:
- ''
- 'proxy-logging'
- 'proxy-server'
- swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
swift::proxy::account_autocreate: true
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples