Diffstat:
 -rw-r--r--  docker/compute-post.yaml | 1
 -rw-r--r--  environments/enable-tls.yaml | 6
 -rw-r--r--  environments/network-environment.yaml | 2
 -rw-r--r--  environments/neutron-midonet.yaml | 2
 -rw-r--r--  environments/neutron-nuage-config.yaml | 9
 -rw-r--r--  environments/neutron-opencontrail.yaml | 6
 -rwxr-xr-x  environments/neutron-plumgrid.yaml | 9
 -rw-r--r--  environments/puppet-pacemaker.yaml | 10
 -rw-r--r--  environments/storage-environment.yaml | 2
 -rw-r--r--  extraconfig/post_deploy/example_run_on_update.yaml | 39
 -rw-r--r--  net-config-static-bridge.yaml | 4
 -rw-r--r--  net-config-static.yaml | 82
 -rw-r--r--  network/endpoints/endpoint_data.yaml | 30
 -rw-r--r--  network/endpoints/endpoint_map.yaml | 240
 -rw-r--r--  network/ports/management_from_pool.yaml | 2
 -rw-r--r--  network/ports/net_ip_map.yaml | 41
 -rw-r--r--  network/ports/net_ip_subnet_map.yaml | 47
 -rw-r--r--  overcloud-resource-registry-puppet.yaml | 11
 -rw-r--r--  overcloud.yaml | 97
 -rw-r--r--  puppet/ceph-storage-post.yaml | 32
 -rw-r--r--  puppet/ceph-storage.yaml | 33
 -rw-r--r--  puppet/cinder-storage.yaml | 12
 -rw-r--r--  puppet/compute.yaml | 12
 -rw-r--r--  puppet/controller-post.yaml | 26
 -rw-r--r--  puppet/controller.yaml | 250
 -rw-r--r--  puppet/hieradata/compute.yaml | 2
 -rw-r--r--  puppet/hieradata/controller.yaml | 17
 -rw-r--r--  puppet/manifests/overcloud_cephstorage.pp | 67
 -rw-r--r--  puppet/manifests/overcloud_compute.pp | 13
 -rw-r--r--  puppet/manifests/overcloud_controller.pp | 91
 -rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp | 499
 -rw-r--r--  puppet/services/glance-api.yaml | 4
 -rw-r--r--  puppet/services/heat-api-cfn.yaml | 46
 -rw-r--r--  puppet/services/heat-api-cloudwatch.yaml | 33
 -rw-r--r--  puppet/services/heat-api.yaml | 46
 -rw-r--r--  puppet/services/heat-base.yaml | 40
 -rw-r--r--  puppet/services/heat-engine.yaml | 62
 -rw-r--r--  puppet/services/loadbalancer.yaml | 21
 -rw-r--r--  puppet/services/memcached.yaml | 23
 -rw-r--r--  puppet/services/neutron-base.yaml | 44
 -rw-r--r--  puppet/services/neutron-dhcp.yaml | 56
 -rw-r--r--  puppet/services/neutron-l3.yaml | 37
 -rw-r--r--  puppet/services/neutron-metadata.yaml | 45
 -rw-r--r--  puppet/services/pacemaker/glance-api.yaml | 2
 -rw-r--r--  puppet/services/pacemaker/glance-registry.yaml | 5
 -rw-r--r--  puppet/services/pacemaker/heat-api-cfn.yaml | 35
 -rw-r--r--  puppet/services/pacemaker/heat-api-cloudwatch.yaml | 35
 -rw-r--r--  puppet/services/pacemaker/heat-api.yaml | 33
 -rw-r--r--  puppet/services/pacemaker/heat-engine.yaml | 36
 -rw-r--r--  puppet/services/pacemaker/keystone.yaml | 4
 -rw-r--r--  puppet/services/pacemaker/loadbalancer.yaml | 34
 -rw-r--r--  puppet/services/pacemaker/memcached.yaml | 31
 -rw-r--r--  puppet/services/pacemaker/neutron-dhcp.yaml | 35
 -rw-r--r--  puppet/services/pacemaker/neutron-l3.yaml | 33
 -rw-r--r--  puppet/services/pacemaker/neutron-metadata.yaml | 33
 -rw-r--r--  puppet/services/pacemaker/rabbitmq.yaml | 32
 -rw-r--r--  puppet/services/rabbitmq.yaml | 42
 -rw-r--r--  puppet/swift-storage-post.yaml | 5
 -rw-r--r--  puppet/swift-storage.yaml | 12
 -rwxr-xr-x  validation-scripts/all-nodes.sh | 2
 60 files changed, 1533 insertions(+), 1027 deletions(-)
diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml
index 17000ac3..3c4a9413 100644
--- a/docker/compute-post.yaml
+++ b/docker/compute-post.yaml
@@ -200,6 +200,7 @@ resources:
volumes:
- /run:/run
- /var/lib/etc-data/json-config/ovsdb-server.json:/var/lib/kolla/config_files/config.json
+ - /etc/openvswitchd:/etc/openvswitchd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/environments/enable-tls.yaml b/environments/enable-tls.yaml
index e708688f..266bbc78 100644
--- a/environments/enable-tls.yaml
+++ b/environments/enable-tls.yaml
@@ -24,6 +24,9 @@ parameter_defaults:
HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+ HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
@@ -34,9 +37,6 @@ parameter_defaults:
NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
- NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
- NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
- NovaEC2Public: {protocol: 'https', port: '13773', host: 'CLOUDNAME'}
NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
diff --git a/environments/network-environment.yaml b/environments/network-environment.yaml
index 9f2c667b..062c7bee 100644
--- a/environments/network-environment.yaml
+++ b/environments/network-environment.yaml
@@ -47,4 +47,4 @@ parameter_defaults:
# Set to empty string to enable multiple external networks or VLANs
NeutronExternalNetworkBridge: "''"
# Customize bonding options, e.g. "mode=4 lacp_rate=1 updelay=1000 miimon=100"
- BondInterfaceOvsOptions: "mode=active-backup"
+ BondInterfaceOvsOptions: "bond_mode=active-backup"
diff --git a/environments/neutron-midonet.yaml b/environments/neutron-midonet.yaml
index 726852a0..463c1874 100644
--- a/environments/neutron-midonet.yaml
+++ b/environments/neutron-midonet.yaml
@@ -2,13 +2,13 @@
resource_registry:
OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
OS::TripleO::Controller::Net::SoftwareConfig: ../net-config-linux-bridge.yaml # We have to avoid any ovs bridge. MidoNet is incompatible with its datapath
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
parameter_defaults:
EnableZookeeperOnController: true
EnableCassandraOnController: true
NeutronCorePlugin: 'midonet.neutron.plugin_v1.MidonetPluginV2' # Overriding default core_plugin in Neutron. Don't touch it
NeutronEnableIsolatedMetadata: true # MidoNet 1.9 needs this one to work. Don't change it
- NeutronEnableL3Agent: false
NeutronEnableOVSAgent: false
# Other available options for MidoNet Services
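The hunk above trades the old NeutronEnableL3Agent boolean for the composable-service pattern: mapping a service's resource_registry entry to OS::Heat::None drops that nested stack from the role altogether. A minimal sketch of the same technique in a custom environment (the registry key is the one introduced by this series; everything else is illustrative):

    resource_registry:
      # Mapping a composable service to OS::Heat::None removes it from the role
      OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
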
diff --git a/environments/neutron-nuage-config.yaml b/environments/neutron-nuage-config.yaml
index 50ba8f53..0cd49a20 100644
--- a/environments/neutron-nuage-config.yaml
+++ b/environments/neutron-nuage-config.yaml
@@ -2,6 +2,8 @@
# a Neutron Nuage backend on the controller, configured via puppet
resource_registry:
OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
parameter_defaults:
NeutronNuageOSControllerIp: '0.0.0.0'
@@ -13,3 +15,10 @@ parameter_defaults:
NeutronNuageBaseURIVersion: 'default_uri_version'
NeutronNuageCMSId: ''
UseForwardedFor: true
+ NeutronCorePlugin: 'neutron.plugins.nuage.plugin.NuagePlugin'
+ NeutronEnableDHCPAgent: false
+ NeutronEnableOVSAgent: false
+ NeutronServicePlugins: []
+ NovaOVSBridge: 'alubr0'
+ controllerExtraConfig:
+ neutron::api_extensions_path: '/usr/lib/python2.7/site-packages/neutron/plugins/nuage/'
diff --git a/environments/neutron-opencontrail.yaml b/environments/neutron-opencontrail.yaml
index 4704dbc8..ceccd132 100644
--- a/environments/neutron-opencontrail.yaml
+++ b/environments/neutron-opencontrail.yaml
@@ -3,13 +3,13 @@
resource_registry:
OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml
OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
+ OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
parameter_defaults:
NeutronCorePlugin: neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2
NeutronServicePlugins: neutron_plugin_contrail.plugins.opencontrail.loadbalancer.plugin.LoadBalancerPlugin
- NeutronEnableDHCPAgent: false
- NeutronEnableL3Agent: false
- NeutronEnableMetadataAgent: false
NeutronEnableOVSAgent: false
NeutronEnableTunnelling: false
diff --git a/environments/neutron-plumgrid.yaml b/environments/neutron-plumgrid.yaml
index b8d66015..19f51cfc 100755
--- a/environments/neutron-plumgrid.yaml
+++ b/environments/neutron-plumgrid.yaml
@@ -2,6 +2,10 @@
# extensions, configured via puppet
resource_registry:
OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml
+ # PLUMgrid doesn't require dhcp, l3, and metadata agents
+ OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
parameter_defaults:
NeutronCorePlugin: networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2
@@ -22,8 +26,5 @@ parameter_defaults:
#PLUMgridNeutronPluginVersion: present
#PLUMgridPlumlibVersion: present
- # PLUMgrid doesn't require dhcp, l3, ovs and metadata agents
- NeutronEnableDHCPAgent: false
- NeutronEnableL3Agent: false
- NeutronEnableMetadataAgent: false
+ # PLUMgrid doesn't require the ovs agent
NeutronEnableOVSAgent: false
diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml
index 2e6e5ec9..679b79cb 100644
--- a/environments/puppet-pacemaker.yaml
+++ b/environments/puppet-pacemaker.yaml
@@ -12,3 +12,13 @@ resource_registry:
OS::TripleO::Services::Keystone: ../puppet/services/pacemaker/keystone.yaml
OS::TripleO::Services::GlanceApi: ../puppet/services/pacemaker/glance-api.yaml
OS::TripleO::Services::GlanceRegistry: ../puppet/services/pacemaker/glance-registry.yaml
+ OS::TripleO::Services::HeatApi: ../puppet/services/pacemaker/heat-api.yaml
+ OS::TripleO::Services::HeatApiCfn: ../puppet/services/pacemaker/heat-api-cfn.yaml
+ OS::TripleO::Services::HeatApiCloudwatch: ../puppet/services/pacemaker/heat-api-cloudwatch.yaml
+ OS::TripleO::Services::HeatEngine: ../puppet/services/pacemaker/heat-engine.yaml
+ OS::TripleO::Services::NeutronDhcpAgent: ../puppet/services/pacemaker/neutron-dhcp.yaml
+ OS::TripleO::Services::NeutronL3Agent: ../puppet/services/pacemaker/neutron-l3.yaml
+ OS::TripleO::Services::NeutronMetadataAgent: ../puppet/services/pacemaker/neutron-metadata.yaml
+ OS::TripleO::Services::RabbitMQ: ../puppet/services/pacemaker/rabbitmq.yaml
+ OS::TripleO::Services::Loadbalancer: ../puppet/services/pacemaker/loadbalancer.yaml
+ OS::TripleO::Services::Memcached: ../puppet/services/pacemaker/memcached.yaml
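These additions only remap each composable service to its pacemaker-managed variant. Because Heat merges resource_registry entries with later environment files taking precedence, an operator could selectively revert one mapping in a follow-up environment; a sketch under that assumption (path style as used in this file):

    resource_registry:
      # Illustrative override: run RabbitMQ with the non-pacemaker template
      OS::TripleO::Services::RabbitMQ: ../puppet/services/rabbitmq.yaml
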
diff --git a/environments/storage-environment.yaml b/environments/storage-environment.yaml
index e1cafd2b..da33acfd 100644
--- a/environments/storage-environment.yaml
+++ b/environments/storage-environment.yaml
@@ -34,6 +34,8 @@ parameter_defaults:
## File system type of the mount
# GlanceFilePcmkFstype: nfs
## Pacemaker mount point, e.g. '192.168.122.1:/export/glance' for NFS
+ ## (If using IPv6, use both double- and single-quotes,
+ ## e.g. "'[fdd0::1]:/export/glance'")
# GlanceFilePcmkDevice: ''
## Options for the mount managed by Pacemaker
# GlanceFilePcmkOptions: ''
diff --git a/extraconfig/post_deploy/example_run_on_update.yaml b/extraconfig/post_deploy/example_run_on_update.yaml
new file mode 100644
index 00000000..234488af
--- /dev/null
+++ b/extraconfig/post_deploy/example_run_on_update.yaml
@@ -0,0 +1,39 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Example extra config for post-deployment, this re-runs every update
+
+# Note extra parameters can be defined, then passed data via the
+# environment parameter_defaults, without modifying the parent template
+parameters:
+ servers:
+ type: json
+ # This is provided via parameter_defaults from tripleoclient
+ # it changes to a new timestamp every update, so we can use it to
+ # trigger the deployment to run even though it and the config are
+ # otherwise unchanged
+ DeployIdentifier:
+ type: string
+
+resources:
+
+ ExtraConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: deploy_identifier
+ config: |
+ #!/bin/sh
+ echo "extra_update $deploy_identifier" >> /root/extra_update
+
+ ExtraDeployments:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ name: ExtraDeployments
+ servers: {get_param: servers}
+ config: {get_resource: ExtraConfig}
+ # Do this on CREATE/UPDATE (which is actually the default)
+ actions: ['CREATE', 'UPDATE']
+ input_values:
+ deploy_identifier: {get_param: DeployIdentifier}
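This example leans on two existing hooks: DeployIdentifier, which tripleoclient refreshes through parameter_defaults on every stack update, and the OS::TripleO::NodeExtraConfigPost interface that the *-post.yaml templates invoke. Wiring the example in could look like the following minimal environment (the registry key is the standard post-deploy hook; the path is this new file):

    resource_registry:
      OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/example_run_on_update.yaml
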
diff --git a/net-config-static-bridge.yaml b/net-config-static-bridge.yaml
index 52c8f895..a3d6d8b5 100644
--- a/net-config-static-bridge.yaml
+++ b/net-config-static-bridge.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet:
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
diff --git a/net-config-static.yaml b/net-config-static.yaml
new file mode 100644
index 00000000..9de16cd8
--- /dev/null
+++ b/net-config-static.yaml
@@ -0,0 +1,82 @@
+heat_template_version: 2015-04-30
+
+description: >
+  Software Config to drive os-net-config for a single statically-configured interface.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet:
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: interface
+ name: {get_input: interface_name}
+ use_dhcp: false
+ dns_servers: {get_param: DnsServers}
+ addresses:
+ -
+ ip_netmask:
+ list_join:
+ - '/'
+ - - {get_param: ControlPlaneIp}
+ - {get_param: ControlPlaneSubnetCidr}
+ routes:
+ -
+ ip_netmask: 169.254.169.254/32
+ next_hop: {get_param: EC2MetadataIp}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
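The new template configures only the provisioning interface with a static ctlplane address, the metadata route and the default route; the per-network *IpSubnet parameters are accepted for interface compatibility but left unused. A rough sketch of selecting it for the controller role, assuming the environment file sits under environments/ and using placeholder addresses:

    resource_registry:
      OS::TripleO::Controller::Net::SoftwareConfig: ../net-config-static.yaml

    parameter_defaults:
      ControlPlaneDefaultRoute: 192.0.2.1    # placeholder gateway
      EC2MetadataIp: 192.0.2.1               # placeholder undercloud address
      DnsServers: ['192.0.2.1']
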
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
index f88a68f4..40b26f26 100644
--- a/network/endpoints/endpoint_data.yaml
+++ b/network/endpoints/endpoint_data.yaml
@@ -79,6 +79,21 @@ Heat:
'': /v1/%(tenant_id)s
port: 8004
+HeatCfn:
+ Internal:
+ vip_param: HeatApi
+ uri_suffixes:
+ '': /v1
+ Public:
+ vip_param: Public
+ uri_suffixes:
+ '': /v1
+ Admin:
+ vip_param: HeatApi
+ uri_suffixes:
+ '': /v1
+ port: 8000
+
Horizon:
Public:
vip_param: Public
@@ -134,21 +149,6 @@ Nova:
'': /v2.1/%(tenant_id)s
port: 8774
-NovaEC2:
- Internal:
- vip_param: NovaApi
- uri_suffixes:
- '': /services/Cloud
- Public:
- vip_param: Public
- uri_suffixes:
- '': /services/Cloud
- Admin:
- vip_param: NovaApi
- uri_suffixes:
- '': /services/Admin
- port: 8773
-
NovaVNCProxy:
Internal:
vip_param: NovaApi
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index 1bd35a7c..f4c17985 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -45,6 +45,9 @@ parameters:
HeatAdmin: {protocol: http, port: '8004', host: IP_ADDRESS}
HeatInternal: {protocol: http, port: '8004', host: IP_ADDRESS}
HeatPublic: {protocol: http, port: '8004', host: IP_ADDRESS}
+ HeatCfnAdmin: {protocol: http, port: '8000', host: IP_ADDRESS}
+ HeatCfnInternal: {protocol: http, port: '8000', host: IP_ADDRESS}
+ HeatCfnPublic: {protocol: http, port: '8000', host: IP_ADDRESS}
HorizonPublic: {protocol: http, port: '80', host: IP_ADDRESS}
KeystoneAdmin: {protocol: http, port: '35357', host: IP_ADDRESS}
KeystoneInternal: {protocol: http, port: '5000', host: IP_ADDRESS}
@@ -55,9 +58,6 @@ parameters:
NovaAdmin: {protocol: http, port: '8774', host: IP_ADDRESS}
NovaInternal: {protocol: http, port: '8774', host: IP_ADDRESS}
NovaPublic: {protocol: http, port: '8774', host: IP_ADDRESS}
- NovaEC2Admin: {protocol: http, port: '8773', host: IP_ADDRESS}
- NovaEC2Internal: {protocol: http, port: '8773', host: IP_ADDRESS}
- NovaEC2Public: {protocol: http, port: '8773', host: IP_ADDRESS}
NovaVNCProxyAdmin: {protocol: http, port: '6080', host: IP_ADDRESS}
NovaVNCProxyInternal: {protocol: http, port: '6080', host: IP_ADDRESS}
NovaVNCProxyPublic: {protocol: http, port: '6080', host: IP_ADDRESS}
@@ -919,6 +919,123 @@ outputs:
IP_ADDRESS: {get_param: PublicVirtualIP}
- ':'
- get_param: [EndpointMap, HeatPublic, port]
+ HeatCfnAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, HeatCfnAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: HeatApiVirtualIP}
+ port:
+ get_param: [EndpointMap, HeatCfnAdmin, port]
+ protocol:
+ get_param: [EndpointMap, HeatCfnAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HeatCfnAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HeatCfnAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: HeatApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HeatCfnAdmin, port]
+ - /v1
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HeatCfnAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HeatCfnAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: HeatApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HeatCfnAdmin, port]
+ HeatCfnInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, HeatCfnInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: HeatApiVirtualIP}
+ port:
+ get_param: [EndpointMap, HeatCfnInternal, port]
+ protocol:
+ get_param: [EndpointMap, HeatCfnInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HeatCfnInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HeatCfnInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: HeatApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HeatCfnInternal, port]
+ - /v1
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HeatCfnInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HeatCfnInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: HeatApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HeatCfnInternal, port]
+ HeatCfnPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, HeatCfnPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, HeatCfnPublic, port]
+ protocol:
+ get_param: [EndpointMap, HeatCfnPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HeatCfnPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HeatCfnPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HeatCfnPublic, port]
+ - /v1
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HeatCfnPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HeatCfnPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HeatCfnPublic, port]
HorizonPublic:
host:
str_replace:
@@ -1462,123 +1579,6 @@ outputs:
IP_ADDRESS: {get_param: PublicVirtualIP}
- ':'
- get_param: [EndpointMap, NovaPublic, port]
- NovaEC2Admin:
- host:
- str_replace:
- template:
- get_param: [EndpointMap, NovaEC2Admin, host]
- params:
- CLOUDNAME: {get_param: CloudName}
- IP_ADDRESS: {get_param: NovaApiVirtualIP}
- port:
- get_param: [EndpointMap, NovaEC2Admin, port]
- protocol:
- get_param: [EndpointMap, NovaEC2Admin, protocol]
- uri:
- list_join:
- - ''
- - - get_param: [EndpointMap, NovaEC2Admin, protocol]
- - ://
- - str_replace:
- template:
- get_param: [EndpointMap, NovaEC2Admin, host]
- params:
- CLOUDNAME: {get_param: CloudName}
- IP_ADDRESS: {get_param: NovaApiVirtualIP}
- - ':'
- - get_param: [EndpointMap, NovaEC2Admin, port]
- - /services/Admin
- uri_no_suffix:
- list_join:
- - ''
- - - get_param: [EndpointMap, NovaEC2Admin, protocol]
- - ://
- - str_replace:
- template:
- get_param: [EndpointMap, NovaEC2Admin, host]
- params:
- CLOUDNAME: {get_param: CloudName}
- IP_ADDRESS: {get_param: NovaApiVirtualIP}
- - ':'
- - get_param: [EndpointMap, NovaEC2Admin, port]
- NovaEC2Internal:
- host:
- str_replace:
- template:
- get_param: [EndpointMap, NovaEC2Internal, host]
- params:
- CLOUDNAME: {get_param: CloudName}
- IP_ADDRESS: {get_param: NovaApiVirtualIP}
- port:
- get_param: [EndpointMap, NovaEC2Internal, port]
- protocol:
- get_param: [EndpointMap, NovaEC2Internal, protocol]
- uri:
- list_join:
- - ''
- - - get_param: [EndpointMap, NovaEC2Internal, protocol]
- - ://
- - str_replace:
- template:
- get_param: [EndpointMap, NovaEC2Internal, host]
- params:
- CLOUDNAME: {get_param: CloudName}
- IP_ADDRESS: {get_param: NovaApiVirtualIP}
- - ':'
- - get_param: [EndpointMap, NovaEC2Internal, port]
- - /services/Cloud
- uri_no_suffix:
- list_join:
- - ''
- - - get_param: [EndpointMap, NovaEC2Internal, protocol]
- - ://
- - str_replace:
- template:
- get_param: [EndpointMap, NovaEC2Internal, host]
- params:
- CLOUDNAME: {get_param: CloudName}
- IP_ADDRESS: {get_param: NovaApiVirtualIP}
- - ':'
- - get_param: [EndpointMap, NovaEC2Internal, port]
- NovaEC2Public:
- host:
- str_replace:
- template:
- get_param: [EndpointMap, NovaEC2Public, host]
- params:
- CLOUDNAME: {get_param: CloudName}
- IP_ADDRESS: {get_param: PublicVirtualIP}
- port:
- get_param: [EndpointMap, NovaEC2Public, port]
- protocol:
- get_param: [EndpointMap, NovaEC2Public, protocol]
- uri:
- list_join:
- - ''
- - - get_param: [EndpointMap, NovaEC2Public, protocol]
- - ://
- - str_replace:
- template:
- get_param: [EndpointMap, NovaEC2Public, host]
- params:
- CLOUDNAME: {get_param: CloudName}
- IP_ADDRESS: {get_param: PublicVirtualIP}
- - ':'
- - get_param: [EndpointMap, NovaEC2Public, port]
- - /services/Cloud
- uri_no_suffix:
- list_join:
- - ''
- - - get_param: [EndpointMap, NovaEC2Public, protocol]
- - ://
- - str_replace:
- template:
- get_param: [EndpointMap, NovaEC2Public, host]
- params:
- CLOUDNAME: {get_param: CloudName}
- IP_ADDRESS: {get_param: PublicVirtualIP}
- - ':'
- - get_param: [EndpointMap, NovaEC2Public, port]
NovaVNCProxyAdmin:
host:
str_replace:
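Each added HeatCfn* block expands the same way as the rest of the map: the host template from the EndpointMap parameter gets CLOUDNAME or IP_ADDRESS substituted, and uri appends the /v1 suffix defined in endpoint_data.yaml. With the enable-tls.yaml values earlier in this series, the resolved public entry would look roughly like this (the cloud name is a placeholder):

    HeatCfnPublic:
      host: overcloud.example.com            # CLOUDNAME placeholder
      port: '13005'
      protocol: https
      uri: https://overcloud.example.com:13005/v1
      uri_no_suffix: https://overcloud.example.com:13005
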
diff --git a/network/ports/management_from_pool.yaml b/network/ports/management_from_pool.yaml
index fc87e39a..451677b2 100644
--- a/network/ports/management_from_pool.yaml
+++ b/network/ports/management_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/net_ip_map.yaml b/network/ports/net_ip_map.yaml
index 6bb4557b..78c7c32c 100644
--- a/network/ports/net_ip_map.yaml
+++ b/network/ports/net_ip_map.yaml
@@ -4,9 +4,16 @@ parameters:
ControlPlaneIp:
default: ''
type: string
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
ExternalIp:
default: ''
type: string
+ ExternalIpSubnet:
+ default: ''
+ type: string
ExternalIpUri:
default: ''
type: string
@@ -14,6 +21,9 @@ parameters:
InternalApiIp:
default: ''
type: string
+ InternalApiIpSubnet:
+ default: ''
+ type: string
InternalApiIpUri:
default: ''
type: string
@@ -21,6 +31,9 @@ parameters:
StorageIp:
default: ''
type: string
+ StorageIpSubnet:
+ default: ''
+ type: string
StorageIpUri:
default: ''
type: string
@@ -28,6 +41,9 @@ parameters:
StorageMgmtIp:
default: ''
type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ type: string
StorageMgmtIpUri:
default: ''
type: string
@@ -35,6 +51,9 @@ parameters:
TenantIp:
default: ''
type: string
+ TenantIpSubnet:
+ default: ''
+ type: string
TenantIpUri:
default: ''
type: string
@@ -42,6 +61,9 @@ parameters:
ManagementIp:
default: ''
type: string
+ ManagementIpSubnet:
+ default: ''
+ type: string
ManagementIpUri:
default: ''
type: string
@@ -60,9 +82,26 @@ outputs:
storage_mgmt: {get_param: StorageMgmtIp}
tenant: {get_param: TenantIp}
management: {get_param: ManagementIp}
+ net_ip_subnet_map:
+ description: >
+ A Hash containing a mapping of network names to assigned IPs/CIDR
+ for a specific machine.
+ value:
+ ctlplane:
+ list_join:
+ - ''
+ - - {get_param: ControlPlaneIp}
+ - '/'
+ - {get_param: ControlPlaneSubnetCidr}
+ external: {get_param: ExternalIpSubnet}
+ internal_api: {get_param: InternalApiIpSubnet}
+ storage: {get_param: StorageIpSubnet}
+ storage_mgmt: {get_param: StorageMgmtIpSubnet}
+ tenant: {get_param: TenantIpSubnet}
+ management: {get_param: ManagementIpSubnet}
net_ip_uri_map:
description: >
- A Hash containing a mapping of netowrk names to assigned IPs for a
+ A Hash containing a mapping of network names to assigned IPs for a
specific machine with brackets around IPv6 addresses for use in URLs.
value:
ctlplane: {get_param: ControlPlaneIp}
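With net_ip_subnet_map.yaml removed below, its output now lives here: ctlplane is joined from ControlPlaneIp and ControlPlaneSubnetCidr, while the isolated networks pass their *IpSubnet values straight through. For a single node the hash would look something like this (addresses are placeholders):

    net_ip_subnet_map:
      ctlplane: 192.0.2.12/24      # ControlPlaneIp + '/' + ControlPlaneSubnetCidr
      external: 10.0.0.12/24
      internal_api: 172.16.2.12/24
      storage: 172.16.1.12/24
      storage_mgmt: 172.16.3.12/24
      tenant: 172.16.0.12/24
      management: 172.16.4.12/24
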
diff --git a/network/ports/net_ip_subnet_map.yaml b/network/ports/net_ip_subnet_map.yaml
deleted file mode 100644
index 2f933eaa..00000000
--- a/network/ports/net_ip_subnet_map.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-heat_template_version: 2015-04-30
-
-parameters:
- ControlPlaneIp:
- default: ''
- type: string
- ExternalIpSubnet:
- default: ''
- type: string
- InternalApiIpSubnet:
- default: ''
- type: string
- StorageIpSubnet:
- default: ''
- type: string
- StorageMgmtIpSubnet:
- default: ''
- type: string
- TenantIpSubnet:
- default: ''
- type: string
- ManagementIpSubnet:
- default: ''
- type: string
- ControlPlaneSubnetCidr: # Override this via parameter_defaults
- default: '24'
- description: The subnet CIDR of the control plane network.
- type: string
-
-outputs:
- net_ip_subnet_map:
- description: >
- A Hash containing a mapping of network names to assigned
- IP/subnet mappings.
- value:
- ctlplane:
- list_join:
- - ''
- - - {get_param: ControlPlaneIp}
- - '/'
- - {get_param: ControlPlaneSubnetCidr}
- external: {get_param: ExternalIpSubnet}
- internal_api: {get_param: InternalApiIpSubnet}
- storage: {get_param: StorageIpSubnet}
- storage_mgmt: {get_param: StorageMgmtIpSubnet}
- tenant: {get_param: TenantIpSubnet}
- management: {get_param: ManagementIpSubnet}
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index 4f79e516..b207a1fe 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.yaml
@@ -65,7 +65,6 @@ resource_registry:
OS::TripleO::Network::Ports::NetVipMap: network/ports/net_ip_map.yaml
OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml
- OS::TripleO::Network::Ports::NetIpSubnetMap: network/ports/net_ip_subnet_map.yaml
OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml
# Port assignments for the VIPs
@@ -126,6 +125,16 @@ resource_registry:
OS::TripleO::Services::Keystone: puppet/services/keystone.yaml
OS::TripleO::Services::GlanceApi: puppet/services/glance-api.yaml
OS::TripleO::Services::GlanceRegistry: puppet/services/glance-registry.yaml
+ OS::TripleO::Services::HeatApi: puppet/services/heat-api.yaml
+ OS::TripleO::Services::HeatApiCfn: puppet/services/heat-api-cfn.yaml
+ OS::TripleO::Services::HeatApiCloudwatch: puppet/services/heat-api-cloudwatch.yaml
+ OS::TripleO::Services::HeatEngine: puppet/services/heat-engine.yaml
+ OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml
+ OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml
+ OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml
+ OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml
+ OS::TripleO::Services::Loadbalancer: puppet/services/loadbalancer.yaml
+ OS::TripleO::Services::Memcached: puppet/services/memcached.yaml
parameter_defaults:
EnablePackageInstall: false
diff --git a/overcloud.yaml b/overcloud.yaml
index 2c34ad1c..6a8081af 100644
--- a/overcloud.yaml
+++ b/overcloud.yaml
@@ -118,10 +118,6 @@ parameters:
default: false
description: Enable IPv6 features in Memcached.
type: boolean
- NeutronExternalNetworkBridge:
- description: Name of bridge used for external network traffic.
- type: string
- default: 'br-ex'
NeutronBridgeMappings:
description: >
The OVS logical->physical bridge mappings to use. See the Neutron
@@ -136,10 +132,6 @@ parameters:
default: 'ctlplane'
type: string
description: Neutron ID or name for ctlplane network.
- NeutronEnableIsolatedMetadata:
- default: 'False'
- description: If True, DHCP provide metadata route to VM.
- type: string
NeutronEnableTunnelling:
type: string
default: "True"
@@ -258,10 +250,6 @@ parameters:
default: 'False'
description: Whether to enable l3-agent HA
type: string
- NeutronDhcpAgentsPerNetwork:
- type: number
- default: 1
- description: The number of neutron dhcp agents to schedule per network
NovaIPv6:
default: false
description: Enable IPv6 features in Nova
@@ -310,15 +298,6 @@ parameters:
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
- # We need to set this as string because 'unlimited' is a valid setting
- RabbitFDLimit:
- default: 16384
- description: Configures RabbitMQ FD limit
- type: string
- RabbitIPv6:
- default: false
- description: Enable IPv6 in RabbitMQ
- type: boolean
RedisPassword:
description: The password for Redis
type: string
@@ -484,14 +463,6 @@ parameters:
description: The password for the gnocchi service account.
type: string
hidden: true
- HeatPassword:
- description: The password for the Heat service account, used by the Heat services.
- type: string
- hidden: true
- HeatStackDomainAdminPassword:
- description: Password for heat_stack_domain_admin user.
- type: string
- hidden: true
InstanceNameTemplate:
default: 'instance-%08x'
description: Template string to be used to generate instance names
@@ -515,13 +486,6 @@ parameters:
description: Configures MySQL max_connections config setting
type: number
default: 4096
- NeutronDnsmasqOptions:
- default: 'dhcp-option-force=26,%MTU%'
- description: >
- Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU
- to be set to the value of NeutronTenantMtu, which should be set to account
- for tunnel overhead.
- type: string
NeutronPublicInterfaceDefaultRoute:
default: ''
description: A custom default route for the NeutronPublicInterface.
@@ -682,6 +646,16 @@ parameters:
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::GlanceRegistry
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::Loadbalancer
+ - OS::TripleO::Services::Memcached
description: A list of service resources (configured in the Heat
resource_registry) which represent nested stacks
for each service that should get installed on the Controllers.
@@ -759,7 +733,12 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
-
+ CephStorageServices:
+ default: []
+ description: A list of service resources (configured in the Heat
+ resource_registry) which represent nested stacks
+ for each service that should get installed on the CephStorage nodes.
+ type: comma_delimited_list
# Hostname format for each role
# Note %index% is translated into the index of the node, e.g 0/1/2 etc
@@ -924,8 +903,6 @@ resources:
GnocchiBackend: {get_param: GnocchiBackend}
GnocchiIndexerBackend: {get_param: GnocchiIndexerBackend}
HAProxySyslogAddress: {get_param: HAProxySyslogAddress}
- HeatPassword: {get_param: HeatPassword}
- HeatStackDomainAdminPassword: {get_param: HeatStackDomainAdminPassword}
HeatAuthEncryptionKey: {get_resource: HeatAuthEncryptionKey}
HorizonAllowedHosts: {get_param: HorizonAllowedHosts}
HorizonSecret: {get_resource: HorizonSecret}
@@ -942,8 +919,6 @@ resources:
NeutronFlatNetworks: {get_param: NeutronFlatNetworks}
NeutronBridgeMappings: {get_param: NeutronBridgeMappings}
NeutronTenantMtu: {get_param: NeutronTenantMtu}
- NeutronExternalNetworkBridge: {get_param: NeutronExternalNetworkBridge}
- NeutronEnableIsolatedMetadata: {get_param: NeutronEnableIsolatedMetadata}
NeutronEnableTunnelling: {get_param: NeutronEnableTunnelling}
NeutronEnableL2Pop: {get_param: NeutronEnableL2Pop}
NeutronNetworkVLANRanges: {get_param: NeutronNetworkVLANRanges}
@@ -951,11 +926,6 @@ resources:
NeutronPublicInterfaceDefaultRoute: {get_param: NeutronPublicInterfaceDefaultRoute}
NeutronPublicInterfaceRawDevice: {get_param: NeutronPublicInterfaceRawDevice}
NeutronPassword: {get_param: NeutronPassword}
- NeutronDnsmasqOptions:
- str_replace:
- template: {get_param: NeutronDnsmasqOptions}
- params:
- '%MTU%': {get_param: NeutronTenantMtu}
NeutronDVR: {get_param: NeutronDVR}
NeutronMetadataProxySharedSecret: {get_param: NeutronMetadataProxySharedSecret}
NeutronAgentMode: {get_param: NeutronAgentMode}
@@ -967,7 +937,6 @@ resources:
NeutronAgentExtensions: {get_param: NeutronAgentExtensions}
NeutronAllowL3AgentFailover: {get_param: NeutronAllowL3AgentFailover}
NeutronL3HA: {get_param: NeutronL3HA}
- NeutronDhcpAgentsPerNetwork: {get_param: NeutronDhcpAgentsPerNetwork}
NeutronNetworkType: {get_param: NeutronNetworkType}
NeutronTunnelTypes: {get_param: NeutronTunnelTypes}
NovaIPv6: {get_param: NovaIPv6}
@@ -982,8 +951,6 @@ resources:
RabbitCookie: {get_attr: [RabbitCookie, value]}
RabbitClientUseSSL: {get_param: RabbitClientUseSSL}
RabbitClientPort: {get_param: RabbitClientPort}
- RabbitFDLimit: {get_param: RabbitFDLimit}
- RabbitIPv6: {get_param: RabbitIPv6}
RedisPassword: {get_param: RedisPassword}
SaharaPassword: {get_param: SaharaPassword}
SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName}
@@ -1181,6 +1148,13 @@ resources:
SchedulerHints: {get_param: ObjectStorageSchedulerHints}
NodeIndex: '%index%'
+ CephStorageServiceChain:
+ type: OS::TripleO::Services
+ properties:
+ Services: {get_param: CephStorageServices}
+ EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+ MysqlVirtualIPUri: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
+
CephStorage:
type: OS::Heat::ResourceGroup
depends_on: Networks
@@ -1208,6 +1182,7 @@ resources:
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: CephStorageSchedulerHints}
NodeIndex: '%index%'
+ ServiceConfigSettings: {get_attr: [CephStorageServiceChain, config_settings]}
ControllerIpListMap:
type: OS::TripleO::Network::Ports::NetIpListMap
@@ -1443,29 +1418,6 @@ resources:
ceph_mon_names: {get_attr: [Controller, hostname]}
ceph_mon_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
- ControllerClusterConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- corosync:
- nodes: {get_attr: [Controller, corosync_node]}
- horizon:
- caches:
- memcached:
- nodes: {get_attr: [Controller, hostname]}
- mysql:
- nodes: {get_attr: [Controller, corosync_node]}
- haproxy:
- nodes: {get_attr: [Controller, corosync_node]}
-
- ControllerClusterDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- name: ControllerClusterDeployment
- config: {get_resource: ControllerClusterConfig}
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
-
ControllerAllNodesDeployment:
type: OS::Heat::StructuredDeployments
properties:
@@ -1635,6 +1587,7 @@ resources:
allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
cephstorage_config: {get_attr: [CephStorage, attributes, config_identifier]}
deployment_identifier: {get_param: DeployIdentifier}
+ StepConfig: {get_attr: [CephStorageServiceChain, step_config]}
outputs:
KeystoneURL:
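The CephStorage role now gets the same composable-service plumbing as the controllers: CephStorageServices defaults to an empty list, and CephStorageServiceChain turns whatever is listed there into the config_settings and step_config consumed by the ceph-storage templates below. Opting a service in would be a parameter_defaults override along these lines (the service name is purely a placeholder):

    parameter_defaults:
      CephStorageServices:
        - OS::TripleO::Services::ExampleService   # placeholder registry name
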
diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml
index e90710c7..2b9ae751 100644
--- a/puppet/ceph-storage-post.yaml
+++ b/puppet/ceph-storage-post.yaml
@@ -13,6 +13,10 @@ parameters:
NodeConfigIdentifiers:
type: json
description: Value which changes if the node configuration may need to be re-applied
+ StepConfig:
+ type: string
+ description: Config manifests that will be used to step through the deployment.
+ default: ''
resources:
@@ -33,26 +37,44 @@ resources:
group: puppet
options:
enable_debug: {get_param: ConfigDebug}
+ enable_hiera: True
+ enable_facter: False
+ inputs:
+ - name: step
outputs:
- name: result
config:
- get_file: manifests/overcloud_cephstorage.pp
+ list_join:
+ - ''
+ - - get_file: manifests/overcloud_cephstorage.pp
+ - {get_param: StepConfig}
- CephStorageDeployment_Step1:
+ CephStorageDeployment_Step2:
type: OS::Heat::StructuredDeployments
depends_on: CephStorageArtifactsDeploy
properties:
- name: CephStorageDeployment_Step1
+ name: CephStorageDeployment_Step2
servers: {get_param: servers}
config: {get_resource: CephStoragePuppetConfig}
input_values:
+ step: 2
+ update_identifier: {get_param: NodeConfigIdentifiers}
+
+ CephStorageDeployment_Step3:
+ type: OS::Heat::StructuredDeployments
+ depends_on: CephStorageDeployment_Step2
+ properties:
+ name: CephStorageDeployment_Step3
+ servers: {get_param: servers}
+ config: {get_resource: CephStoragePuppetConfig}
+ input_values:
+ step: 3
update_identifier: {get_param: NodeConfigIdentifiers}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
ExtraConfig:
- depends_on: CephStorageDeployment_Step1
+ depends_on: CephStorageDeployment_Step3
type: OS::TripleO::NodeExtraConfigPost
properties:
servers: {get_param: servers}
-
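The puppet config now appends StepConfig, the concatenated step_config of the role's services, to overcloud_cephstorage.pp and applies it at steps 2 and 3. A minimal sketch of a service template that could feed this chain, assuming the top-level config_settings/step_config output convention used by the puppet/services templates in this series (all names and keys are illustrative):

    heat_template_version: 2015-04-30
    description: Illustrative composable service for the CephStorage role
    parameters:
      EndpointMap:
        default: {}
        type: json
      MysqlVirtualIPUri:
        default: ''
        type: string
    outputs:
      config_settings:
        description: Hiera data merged into the node's service_configs datafile
        value:
          example::setting: true             # placeholder hiera key
      step_config:
        description: Puppet snippet appended to the role manifest
        value: |
          include ::tripleo::profile::base::example   # placeholder profile class
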
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index f0eb71e4..eedb35e4 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -96,6 +96,9 @@ parameters:
NodeIndex:
type: number
default: 0
+ ServiceConfigSettings:
+ type: json
+ default: {}
resources:
CephStorage:
@@ -195,28 +198,23 @@ resources:
properties:
ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
ManagementIp: {get_attr: [ManagementPort, ip_address]}
- ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
-
- NetIpSubnetMap:
- type: OS::TripleO::Network::Ports::NetIpSubnetMap
- properties:
- ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
- ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
- InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
- StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
- StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
- TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
+ ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
@@ -238,8 +236,8 @@ resources:
timezone: {get_param: TimeZone}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
- ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
- ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
+ ceph_cluster_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
+ ceph_public_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
CephStorageConfig:
type: OS::Heat::StructuredConfig
@@ -252,14 +250,23 @@ resources:
- heat_config_%{::deploy_config_name}
- ceph_extraconfig
- extraconfig
+ - service_configs
- ceph_cluster # provided by CephClusterConfig
- ceph
- '"%{::osfamily}"'
- common
+ - network
merge_behavior: deeper
datafiles:
+ service_configs:
+ mapped_data: {get_param: ServiceConfigSettings}
common:
raw_data: {get_file: hieradata/common.yaml}
+ network:
+ mapped_data:
+ net_ip_map: {get_attr: [NetIpMap, net_ip_map]}
+ net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]}
+ net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]}
ceph_extraconfig:
mapped_data: {get_param: CephStorageExtraConfig}
extraconfig:
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index c1a04e24..13914878 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -246,16 +246,22 @@ resources:
properties:
ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
ManagementIp: {get_attr: [ManagementPort, ip_address]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
NetworkDeployment:
@@ -316,10 +322,16 @@ resources:
- all_nodes # provided by allNodesConfig
- '"%{::osfamily}"'
- common
+ - network
merge_behavior: deeper
datafiles:
common:
raw_data: {get_file: hieradata/common.yaml}
+ network:
+ mapped_data:
+ net_ip_map: {get_attr: [NetIpMap, net_ip_map]}
+ net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]}
+ net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]}
volume_extraconfig:
mapped_data: {get_param: BlockStorageExtraConfig}
extraconfig:
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 4c18067a..e56deefd 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -430,16 +430,22 @@ resources:
properties:
ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
ManagementIp: {get_attr: [ManagementPort, ip_address]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
NetworkConfig:
@@ -481,6 +487,7 @@ resources:
- all_nodes # provided by allNodesConfig
- '"%{::osfamily}"'
- common
+ - network
- neutron_bigswitch_data # Optionally provided by ComputeExtraConfigPre
- cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre
- nova_nuage_data # Optionally provided by ComputeExtraConfigPre
@@ -494,6 +501,11 @@ resources:
mapped_data: {get_param: ExtraConfig}
common:
raw_data: {get_file: hieradata/common.yaml}
+ network:
+ mapped_data:
+ net_ip_map: {get_attr: [NetIpMap, net_ip_map]}
+ net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]}
+ net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]}
ceph:
raw_data: {get_file: hieradata/ceph.yaml}
compute:
diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml
index 80b08a06..36f9b4f8 100644
--- a/puppet/controller-post.yaml
+++ b/puppet/controller-post.yaml
@@ -55,7 +55,6 @@ resources:
input_values:
step: 1
update_identifier: {get_param: NodeConfigIdentifiers}
- actions: ['CREATE'] # no need for two passes on an UPDATE
ControllerServicesBaseDeployment_Step2:
type: OS::Heat::StructuredDeployments
@@ -67,7 +66,6 @@ resources:
input_values:
step: 2
update_identifier: {get_param: NodeConfigIdentifiers}
- actions: ['CREATE'] # no need for two passes on an UPDATE
ControllerOvercloudServicesDeployment_Step3:
type: OS::Heat::StructuredDeployments
@@ -102,31 +100,9 @@ resources:
step: 5
update_identifier: {get_param: NodeConfigIdentifiers}
- ControllerOvercloudServicesDeployment_Step6:
- type: OS::Heat::StructuredDeployments
- depends_on: ControllerOvercloudServicesDeployment_Step5
- properties:
- name: ControllerOvercloudServicesDeployment_Step6
- servers: {get_param: servers}
- config: {get_resource: ControllerPuppetConfig}
- input_values:
- step: 6
- update_identifier: {get_param: NodeConfigIdentifiers}
-
- ControllerOvercloudServicesDeployment_Step7:
- type: OS::Heat::StructuredDeployments
- depends_on: ControllerOvercloudServicesDeployment_Step6
- properties:
- name: ControllerOvercloudServicesDeployment_Step7
- servers: {get_param: servers}
- config: {get_resource: ControllerPuppetConfig}
- input_values:
- step: 7
- update_identifier: {get_param: NodeConfigIdentifiers}
-
ControllerPostPuppet:
type: OS::TripleO::Tasks::ControllerPostPuppet
- depends_on: ControllerOvercloudServicesDeployment_Step7
+ depends_on: ControllerOvercloudServicesDeployment_Step5
properties:
servers: {get_param: servers}
input_values:
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index bf196d24..3aa0df14 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -220,14 +220,6 @@ parameters:
default: /dev/log
description: Syslog address where HAproxy will send its log
type: string
- HeatPassword:
- description: The password for the Heat service and db account, used by the Heat services.
- type: string
- hidden: true
- HeatStackDomainAdminPassword:
- description: Password for heat_stack_domain_admin user.
- type: string
- hidden: true
HeatAuthEncryptionKey:
description: Auth encryption key for heat-engine
type: string
@@ -236,15 +228,6 @@ parameters:
default: '*'
description: A list of IP/Hostname allowed to connect to horizon
type: comma_delimited_list
- HeatWorkers:
- default: 0
- description: Number of workers for Heat service.
- type: number
- HeatEnableDBPurge:
- type: boolean
- default: true
- description: |
- Whether to create cron job for purging soft deleted rows in the Heat database.
HorizonSecret:
description: Secret key for Django
type: string
@@ -310,14 +293,13 @@ parameters:
description: Configures MySQL max_connections config setting
type: number
default: 4096
+ MysqlClustercheckPassword:
+ type: string
+ hidden: true
MysqlRootPassword:
type: string
hidden: true
default: '' # Has to be here because of the ignored empty value bug
- NeutronExternalNetworkBridge:
- description: Name of bridge used for external network traffic.
- type: string
- default: 'br-ex'
NeutronBridgeMappings:
description: >
The OVS logical->physical bridge mappings to use. See the Neutron
@@ -328,22 +310,6 @@ parameters:
scripts or be sure to keep 'datacentre' as a mapping network name.
type: comma_delimited_list
default: "datacentre:br-ex"
- NeutronDnsmasqOptions:
- default: 'dhcp-option-force=26,1400'
- description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the gre tunnel overhead.
- type: string
- NeutronEnableDHCPAgent:
- description: Knob to enable/disable DHCP Agent
- type: boolean
- default: true
- NeutronEnableL3Agent:
- description: Knob to enable/disable L3 agent
- type: boolean
- default: true
- NeutronEnableMetadataAgent:
- description: Knob to enable/disable Metadata agent
- type: boolean
- default: true
NeutronEnableOVSAgent:
description: Knob to enable/disable OVS Agent
type: boolean
@@ -356,10 +322,6 @@ parameters:
default: 'False'
description: Whether to enable l3-agent HA
type: string
- NeutronDhcpAgentsPerNetwork:
- type: number
- default: 3
- description: The number of neutron dhcp agents to schedule per network
NeutronDVR:
default: 'False'
description: Whether to configure Neutron Distributed Virtual Routers
@@ -394,10 +356,6 @@ parameters:
default: 'True'
description: Allow automatic l3-agent failover
type: string
- NeutronEnableIsolatedMetadata:
- default: 'False'
- description: If True, DHCP provide metadata route to VM.
- type: string
NeutronEnableTunnelling:
type: string
default: "True"
@@ -562,14 +520,6 @@ parameters:
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
- RabbitFDLimit:
- default: 16384
- description: Configures RabbitMQ FD limit
- type: string
- RabbitIPv6:
- default: false
- description: Enable IPv6 in RabbitMQ
- type: boolean
RedisPassword:
type: string
description: The password to access the Redis service
@@ -808,28 +758,23 @@ resources:
properties:
ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]}
ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
ManagementIp: {get_attr: [ManagementPort, ip_address]}
- ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
-
- NetIpSubnetMap:
- type: OS::TripleO::Network::Ports::NetIpSubnetMap
- properties:
- ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]}
- ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
- InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
- StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
- StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
- TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
+ ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
NetworkConfig:
type: OS::TripleO::Controller::Net::SoftwareConfig
@@ -880,36 +825,15 @@ resources:
bootstack_nodeid: {get_attr: [Controller, name]}
ceilometer_workers: {get_param: CeilometerWorkers}
cinder_workers: {get_param: CinderWorkers}
- heat_workers: {get_param: HeatWorkers}
nova_workers: {get_param: NovaWorkers}
neutron_workers: {get_param: NeutronWorkers}
swift_workers: {get_param: SwiftWorkers}
neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
- neutron_enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
haproxy_log_address: {get_param: HAProxySyslogAddress}
haproxy_stats_password: {get_param: HAProxyStatsPassword}
haproxy_stats_user: {get_param: HAProxyStatsUser}
- heat.watch_server_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: HeatApiVirtualIPUri}
- - ':8003'
- heat.metadata_server_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: HeatApiVirtualIPUri}
- - ':8000'
- heat.waitcondition_server_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: HeatApiVirtualIPUri}
- - ':8000/v1/waitcondition'
heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey}
- heat_enable_db_purge: {get_param: HeatEnableDBPurge}
horizon_allowed_hosts: {get_param: HorizonAllowedHosts}
horizon_secret: {get_param: HorizonSecret}
admin_password: {get_param: AdminPassword}
@@ -937,16 +861,12 @@ resources:
- '@'
- {get_param: MysqlVirtualIPUri}
- '/cinder'
- heat_password: {get_param: HeatPassword}
- heat_stack_domain_admin_password: {get_param: HeatStackDomainAdminPassword}
- heat_dsn:
- list_join:
- - ''
- - - 'mysql+pymysql://heat:'
- - {get_param: HeatPassword}
- - '@'
- - {get_param: MysqlVirtualIPUri}
- - '/heat'
+ cinder_public_url: {get_param: [EndpointMap, CinderPublic, uri]}
+ cinder_internal_url: {get_param: [EndpointMap, CinderInternal, uri]}
+ cinder_admin_url: {get_param: [EndpointMap, CinderAdmin, uri]}
+ cinder_public_url_v2: {get_param: [EndpointMap, CinderV2Public, uri]}
+ cinder_internal_url_v2: {get_param: [EndpointMap, CinderV2Internal, uri]}
+ cinder_admin_url_v2: {get_param: [EndpointMap, CinderV2Admin, uri]}
keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
keystone_ec2_uri: { get_param: [EndpointMap, KeystoneEC2, uri] }
@@ -960,6 +880,7 @@ resources:
mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize}
mysql_max_connections: {get_param: MysqlMaxConnections}
mysql_root_password: {get_param: MysqlRootPassword}
+ mysql_clustercheck_password: {get_param: MysqlClustercheckPassword}
mysql_cluster_name:
str_replace:
template: tripleo-CLUSTER
@@ -984,9 +905,6 @@ resources:
template: DRIVERS
params:
DRIVERS: {get_param: NeutronTypeDrivers}
- neutron_enable_dhcp_agent: {get_param: NeutronEnableDHCPAgent}
- neutron_enable_l3_agent: {get_param: NeutronEnableL3Agent}
- neutron_enable_metadata_agent: {get_param: NeutronEnableMetadataAgent}
neutron_enable_ovs_agent: {get_param: NeutronEnableOVSAgent}
neutron_mechanism_drivers:
str_replace:
@@ -995,7 +913,6 @@ resources:
MECHANISMS: {get_param: NeutronMechanismDrivers}
neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
neutron_l3_ha: {get_param: NeutronL3HA}
- neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
neutron_network_vlan_ranges:
str_replace:
template: RANGES
@@ -1006,7 +923,6 @@ resources:
template: MAPPINGS
params:
MAPPINGS: {get_param: NeutronBridgeMappings}
- neutron_external_network_bridge: {get_param: NeutronExternalNetworkBridge}
neutron_public_interface: {get_param: NeutronPublicInterface}
neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
neutron_public_interface_default_route: {get_param: NeutronPublicInterfaceDefaultRoute}
@@ -1043,7 +959,6 @@ resources:
AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions}
neutron_password: {get_param: NeutronPassword}
neutron_tenant_mtu: {get_param: NeutronTenantMtu}
- neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions}
neutron_dsn:
list_join:
- ''
@@ -1062,6 +977,9 @@ resources:
ceilometer_password: {get_param: CeilometerPassword}
ceilometer_store_events: {get_param: CeilometerStoreEvents}
aodh_password: {get_param: AodhPassword}
+ aodh_internal_url: { get_param: [ EndpointMap, AodhInternal, uri ] }
+ aodh_public_url: { get_param: [ EndpointMap, AodhPublic, uri ] }
+ aodh_admin_url: { get_param: [ EndpointMap, AodhAdmin, uri ] }
ceilometer_meter_dispatcher: {get_param: CeilometerMeterDispatcher}
gnocchi_password: {get_param: GnocchiPassword}
gnocchi_backend: {get_param: GnocchiBackend}
@@ -1091,6 +1009,11 @@ resources:
- {get_param: MysqlVirtualIPUri}
- '/gnocchi'
gnocchi_internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]}
+ gnocchi_public_url: { get_param: [ EndpointMap, GnocchiPublic, uri ] }
+ gnocchi_admin_url: { get_param: [ EndpointMap, GnocchiAdmin, uri ] }
+ ceilometer_public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
+ ceilometer_internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
+ ceilometer_admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
nova_enable_db_purge: {get_param: NovaEnableDBPurge}
@@ -1116,6 +1039,9 @@ resources:
- '/nova_api'
upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute}
instance_name_template: {get_param: InstanceNameTemplate}
+ nova_public_url: {get_param: [EndpointMap, NovaPublic, uri]}
+ nova_internal_url: {get_param: [EndpointMap, NovaInternal, uri]}
+ nova_admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
fencing_config: {get_param: FencingConfig}
pcsd_password: {get_param: PcsdPassword}
rabbit_username: {get_param: RabbitUserName}
@@ -1123,8 +1049,6 @@ resources:
rabbit_cookie: {get_param: RabbitCookie}
rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
rabbit_client_port: {get_param: RabbitClientPort}
- rabbit_ipv6: {get_param: RabbitIPv6}
- rabbit_fd_limit: {get_param: RabbitFDLimit}
mongodb_no_journal: {get_param: MongoDbNoJournal}
mongodb_ipv6: {get_param: MongoDbIPv6}
ntp_servers: {get_param: NtpServer}
@@ -1138,9 +1062,18 @@ resources:
swift_replicas: {get_param: SwiftReplicas}
swift_min_part_hours: {get_param: SwiftMinPartHours}
swift_mount_check: {get_param: SwiftMountCheck}
+ swift_public_url: {get_param: [EndpointMap, SwiftPublic, uri]}
+ swift_internal_url: {get_param: [EndpointMap, SwiftInternal, uri]}
+ swift_admin_url: {get_param: [EndpointMap, SwiftAdmin, uri]}
+ swift_public_url_s3: {get_param: [EndpointMap, SwiftS3Public, uri]}
+ swift_internal_url_s3: {get_param: [EndpointMap, SwiftS3Internal, uri]}
+ swift_admin_url_s3: {get_param: [EndpointMap, SwiftS3Admin, uri]}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
sahara_password: {get_param: SaharaPassword}
+ sahara_public_url: {get_param: [EndpointMap, SaharaPublic, uri]}
+ sahara_internal_url: {get_param: [EndpointMap, SaharaInternal, uri]}
+ sahara_admin_url: {get_param: [EndpointMap, SaharaAdmin, uri]}
sahara_dsn:
list_join:
- ''
@@ -1177,7 +1110,7 @@ resources:
str_replace:
template: "['SUBNET']"
params:
- SUBNET: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
+ SUBNET: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
rabbitmq_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
redis_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RedisNetwork]}]}
redis_password: {get_param: RedisPassword}
@@ -1186,8 +1119,8 @@ resources:
memcached_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
mysql_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
mysql_virtual_ip: {get_param: MysqlVirtualIP}
- ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
- ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
+ ceph_cluster_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
+ ceph_public_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
ceph_public_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
# Map heat metadata into hiera datafiles
@@ -1214,6 +1147,7 @@ resources:
- vip_data # provided by vip-config
- '"%{::osfamily}"'
- common
+ - network
- cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre
- cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
- cinder_eqlx_data # Optionally provided by ControllerExtraConfigPre
@@ -1237,6 +1171,11 @@ resources:
mapped_data: {get_param: ExtraConfig}
common:
raw_data: {get_file: hieradata/common.yaml}
+ network:
+ mapped_data:
+ net_ip_map: {get_attr: [NetIpMap, net_ip_map]}
+ net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]}
+ net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]}
ceph:
raw_data: {get_file: hieradata/ceph.yaml}
mapped_data:
@@ -1272,6 +1211,14 @@ resources:
tripleo::ringbuilder::replicas: {get_input: swift_replicas}
tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours}
swift_mount_check: {get_input: swift_mount_check}
+ swift::keystone::auth::public_url: {get_input: swift_public_url }
+ swift::keystone::auth::internal_url: {get_input: swift_internal_url }
+ swift::keystone::auth::admin_url: {get_input: swift_admin_url }
+            swift::keystone::auth::public_url_s3: {get_input: swift_public_url_s3 }
+            swift::keystone::auth::internal_url_s3: {get_input: swift_internal_url_s3 }
+            swift::keystone::auth::admin_url_s3: {get_input: swift_admin_url_s3 }
+ swift::keystone::auth::password: {get_input: swift_password }
+ swift::keystone::auth::region: {get_input: keystone_region}
# Cinder
cinder_enable_db_purge: {get_input: cinder_enable_db_purge}
@@ -1296,36 +1243,26 @@ resources:
cinder::glance::glance_api_servers: {get_input: glance_api_servers}
cinder_backend_config: {get_input: CinderBackendConfig}
cinder::db::mysql::password: {get_input: cinder_password}
+ cinder::keystone::auth::public_url: {get_input: cinder_public_url }
+ cinder::keystone::auth::internal_url: {get_input: cinder_internal_url }
+ cinder::keystone::auth::admin_url: {get_input: cinder_admin_url }
+ cinder::keystone::auth::public_url_v2: {get_input: cinder_public_url_v2 }
+ cinder::keystone::auth::internal_url_v2: {get_input: cinder_internal_url_v2 }
+ cinder::keystone::auth::admin_url_v2: {get_input: cinder_admin_url_v2 }
+ cinder::keystone::auth::password: {get_input: cinder_password }
+ cinder::keystone::auth::region: {get_input: keystone_region}
# Glance
glance::api::bind_host: {get_input: glance_api_network}
glance::registry::bind_host: {get_input: glance_registry_network}
+ glance::keystone::auth::region: {get_input: keystone_region}
+
# Heat
- heat_stack_domain_admin_password: {get_input: heat_stack_domain_admin_password}
- heat::engine::heat_watch_server_url: {get_input: heat.watch_server_url}
- heat::engine::heat_metadata_server_url: {get_input: heat.metadata_server_url}
- heat::engine::heat_waitcondition_server_url: {get_input: heat.waitcondition_server_url}
- heat::engine::auth_encryption_key: {get_input: heat_auth_encryption_key}
- heat::rabbit_userid: {get_input: rabbit_username}
- heat::rabbit_password: {get_input: rabbit_password}
- heat::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
- heat::rabbit_port: {get_input: rabbit_client_port}
- heat::auth_uri: {get_input: keystone_auth_uri}
- heat::keystone_ec2_uri: {get_input: keystone_ec2_uri}
- heat::identity_uri: {get_input: keystone_identity_uri}
- heat::keystone_password: {get_input: heat_password}
heat::api::bind_host: {get_input: heat_api_network}
- heat::api::workers: {get_input: heat_workers}
heat::api_cloudwatch::bind_host: {get_input: heat_api_network}
- heat::api_cloudwatch::workers: {get_input: heat_workers}
heat::api_cfn::bind_host: {get_input: heat_api_network}
- heat::api_cfn::workers: {get_input: heat_workers}
- heat::engine::num_engine_workers: {get_input: heat_workers}
- heat::database_connection: {get_input: heat_dsn}
- heat::debug: {get_input: debug}
- heat::db::mysql::password: {get_input: heat_password}
- heat_enable_db_purge: {get_input: heat_enable_db_purge}
- heat::keystone::domain::domain_password: {get_input: heat_stack_domain_admin_password}
+ heat::engine::auth_encryption_key: {get_input: heat_auth_encryption_key}
+
# Keystone
keystone::admin_bind_host: {get_input: keystone_admin_api_network}
keystone::public_bind_host: {get_input: keystone_public_api_network}
@@ -1343,45 +1280,33 @@ resources:
mysql_innodb_buffer_pool_size: {get_input: mysql_innodb_buffer_pool_size}
mysql_max_connections: {get_input: mysql_max_connections}
mysql::server::root_password: {get_input: mysql_root_password}
+ mysql_clustercheck_password: {get_input: mysql_clustercheck_password}
mysql_cluster_name: {get_input: mysql_cluster_name}
mysql_bind_host: {get_input: mysql_network}
mysql_virtual_ip: {get_input: mysql_virtual_ip}
# Neutron
neutron::bind_host: {get_input: neutron_api_network}
- neutron::rabbit_password: {get_input: rabbit_password}
- neutron::rabbit_user: {get_input: rabbit_username}
- neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
- neutron::rabbit_port: {get_input: rabbit_client_port}
- neutron::debug: {get_input: debug}
neutron::server::auth_uri: {get_input: keystone_auth_uri}
neutron::server::identity_uri: {get_input: keystone_identity_uri}
neutron::server::database_connection: {get_input: neutron_dsn}
neutron::server::api_workers: {get_input: neutron_workers}
- neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge}
neutron::network_device_mtu: {get_input: neutron_tenant_mtu}
neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling}
neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
- neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata}
neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks}
- neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network}
- neutron::agents::metadata::metadata_workers: {get_input: neutron_workers}
neutron_agent_mode: {get_input: neutron_agent_mode}
neutron_router_distributed: {get_input: neutron_router_distributed}
neutron::core_plugin: {get_input: neutron_core_plugin}
neutron::service_plugins: {get_input: neutron_service_plugins}
- neutron::enable_dhcp_agent: {get_input: neutron_enable_dhcp_agent}
- neutron::enable_l3_agent: {get_input: neutron_enable_l3_agent}
- neutron::enable_metadata_agent: {get_input: neutron_enable_metadata_agent}
neutron::enable_ovs_agent: {get_input: neutron_enable_ovs_agent}
neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers}
neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers}
neutron::plugins::ml2::extension_drivers: {get_input: neutron_plugin_extensions}
neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover}
neutron::server::l3_ha: {get_input: neutron_l3_ha}
- neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network}
neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
@@ -1394,10 +1319,7 @@ resources:
neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions}
neutron::server::auth_password: {get_input: neutron_password}
- neutron::agents::metadata::auth_password: {get_input: neutron_password}
- neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options}
neutron_dsn: {get_input: neutron_dsn}
- neutron::agents::metadata::auth_url: {get_input: keystone_identity_uri}
neutron::db::mysql::password: {get_input: neutron_password}
neutron::keystone::auth::public_url: {get_input: neutron_public_url }
neutron::keystone::auth::internal_url: {get_input: neutron_internal_url }
@@ -1433,6 +1355,11 @@ resources:
ceilometer::dispatcher::gnocchi::filter_project: 'service'
ceilometer::dispatcher::gnocchi::archive_policy: 'low'
ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml'
+ ceilometer::keystone::auth::public_url: {get_input: ceilometer_public_url }
+ ceilometer::keystone::auth::internal_url: {get_input: ceilometer_internal_url }
+ ceilometer::keystone::auth::admin_url: {get_input: ceilometer_admin_url }
+ ceilometer::keystone::auth::password: {get_input: ceilometer_password }
+ ceilometer::keystone::auth::region: {get_input: keystone_region}
snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
@@ -1453,6 +1380,11 @@ resources:
aodh::db::mysql::password: {get_input: aodh_password}
# for a migration path from ceilometer-alarm to aodh, we use the same database & coordination
aodh::evaluator::coordination_url: {get_input: ceilometer_coordination_url}
+ aodh::keystone::auth::public_url: {get_input: aodh_public_url }
+ aodh::keystone::auth::internal_url: {get_input: aodh_internal_url }
+ aodh::keystone::auth::admin_url: {get_input: aodh_admin_url }
+ aodh::keystone::auth::password: {get_input: aodh_password }
+ aodh::keystone::auth::region: {get_input: keystone_region}
# Gnocchi
gnocchi_backend: {get_input: gnocchi_backend}
@@ -1469,6 +1401,11 @@ resources:
gnocchi::db::mysql::password: {get_input: gnocchi_password}
gnocchi::storage::swift::swift_authurl: {get_input: keystone_auth_uri}
gnocchi::storage::swift::swift_key: {get_input: gnocchi_password}
+ gnocchi::keystone::auth::public_url: {get_input: gnocchi_public_url }
+ gnocchi::keystone::auth::internal_url: {get_input: gnocchi_internal_url }
+ gnocchi::keystone::auth::admin_url: {get_input: gnocchi_admin_url }
+ gnocchi::keystone::auth::password: {get_input: gnocchi_password }
+ gnocchi::keystone::auth::region: {get_input: keystone_region}
# Nova
nova::rabbit_userid: {get_input: rabbit_username}
@@ -1484,7 +1421,6 @@ resources:
nova::api::metadata_listen: {get_input: nova_metadata_network}
nova::api::admin_password: {get_input: nova_password}
nova::api::osapi_compute_workers: {get_input: nova_workers}
- nova::api::ec2_workers: {get_input: nova_workers}
nova::api::metadata_workers: {get_input: nova_workers}
nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu}
nova::database_connection: {get_input: nova_dsn}
@@ -1499,6 +1435,11 @@ resources:
nova::db::mysql::password: {get_input: nova_password}
nova::db::mysql_api::password: {get_input: nova_password}
nova_enable_db_purge: {get_input: nova_enable_db_purge}
+ nova::keystone::auth::public_url: {get_input: nova_public_url}
+ nova::keystone::auth::internal_url: {get_input: nova_internal_url}
+ nova::keystone::auth::admin_url: {get_input: nova_admin_url}
+ nova::keystone::auth::password: {get_input: nova_password }
+ nova::keystone::auth::region: {get_input: keystone_region}
# Horizon
apache::mod::remoteip::proxy_ips: {get_input: horizon_subnet}
@@ -1531,14 +1472,14 @@ resources:
sahara::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
sahara::rabbit_port: {get_input: rabbit_client_port}
sahara::db::mysql::password: {get_input: sahara_password}
-
- # Rabbit
+ sahara::keystone::auth::public_url: {get_input: sahara_public_url }
+ sahara::keystone::auth::internal_url: {get_input: sahara_internal_url }
+ sahara::keystone::auth::admin_url: {get_input: sahara_admin_url }
+ sahara::keystone::auth::password: {get_input: sahara_password }
+ sahara::keystone::auth::region: {get_input: keystone_region}
+ # RabbitMQ
rabbitmq::node_ip_address: {get_input: rabbitmq_network}
rabbitmq::erlang_cookie: {get_input: rabbit_cookie}
- rabbitmq::file_limit: {get_input: rabbit_fd_limit}
- rabbitmq::default_user: {get_input: rabbit_username}
- rabbitmq::default_pass: {get_input: rabbit_password}
- rabbit_ipv6: {get_input: rabbit_ipv6}
# Redis
redis::bind: {get_input: redis_network}
redis::requirepass: {get_input: redis_password}
@@ -1619,13 +1560,6 @@ outputs:
hostname:
description: Hostname of the server
value: {get_attr: [Controller, name]}
- corosync_node:
- description: >
- Node object in the format {ip: ..., name: ...} format that the corosync
- element expects
- value:
- ip: {get_attr: [Controller, networks, ctlplane, 0]}
- name: {get_attr: [Controller, name]}
hosts_entry:
description: >
Server's IP address and hostname in the /etc/hosts format
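The new network hieradata datafile added above exposes the per-network IP maps directly to hiera on the controller. A minimal sketch of how that rendered datafile might look on a node, with hypothetical addresses (the real values come from the NetIpMap resource attributes):

    net_ip_map:
      ctlplane: 192.0.2.12
      internal_api: 172.16.2.12
      storage: 172.16.1.12
    net_ip_subnet_map:
      internal_api: 172.16.2.12/24
      storage: 172.16.1.12/24
    net_ip_uri_map:
      internal_api: 172.16.2.12
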
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index 865210c9..1e888f39 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -11,6 +11,8 @@ nova::compute::libvirt::migration_support: true
nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
+nova::network::neutron::neutron_auth_type: 'v3password'
+
# Changing the default from 512MB. The current templates can not deploy
# overclouds with swap. On an idle compute node, we see ~1024MB of RAM
# used. 2048 is suggested to account for other possible operations for
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index 9316cf17..7a446b50 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -50,13 +50,22 @@ glance::registry::keystone_tenant: 'service'
neutron::server::auth_tenant: 'service'
neutron::agents::metadata::auth_tenant: 'service'
neutron::agents::l3::router_delete_namespaces: True
-neutron::agents::dhcp::dhcp_delete_namespaces: True
cinder::api::keystone_tenant: 'service'
swift::proxy::authtoken::admin_tenant_name: 'service'
ceilometer::api::keystone_tenant: 'service'
gnocchi::api::keystone_tenant: 'service'
heat::keystone_tenant: 'service'
sahara::admin_tenant_name: 'service'
+aodh::keystone::auth::tenant: 'service'
+ceilometer::keystone::auth::tenant: 'service'
+cinder::keystone::auth::tenant: 'service'
+glance::keystone::auth::tenant: 'service'
+gnocchi::keystone::auth::tenant: 'service'
+heat::keystone::auth::tenant: 'service'
+neutron::keystone::auth::tenant: 'service'
+nova::keystone::auth::tenant: 'service'
+sahara::keystone::auth::tenant: 'service'
+swift::keystone::auth::tenant: 'service'
# keystone
keystone::cron::token_flush::maxdelay: 3600
@@ -86,6 +95,10 @@ swift::proxy::pipeline:
- 'proxy-server'
swift::proxy::account_autocreate: true
+swift::keystone::auth::configure_s3_endpoint: false
+swift::keystone::auth::operator_roles:
+ - admin
+ - swiftoperator
# glance
glance::api::pipeline: 'keystone'
@@ -96,7 +109,6 @@ glance_file_pcmk_directory: '/var/lib/glance/images'
# neutron
neutron::server::sync_db: true
-neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf
# nova
nova::notify_on_state_change: 'vm_and_task_state'
@@ -153,7 +165,6 @@ tripleo::loadbalancer::neutron: true
tripleo::loadbalancer::cinder: true
tripleo::loadbalancer::glance_api: true
tripleo::loadbalancer::glance_registry: true
-tripleo::loadbalancer::nova_ec2: true
tripleo::loadbalancer::nova_osapi: true
tripleo::loadbalancer::nova_metadata: true
tripleo::loadbalancer::nova_novncproxy: true
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index fd7faff1..4add2f02 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -16,41 +16,46 @@
include ::tripleo::packages
include ::tripleo::firewall
-create_resources(kmod::load, hiera('kernel_modules'), {})
-create_resources(sysctl::value, hiera('sysctl_settings'), {})
-Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
+if hiera('step') >= 1 {
-if count(hiera('ntp::servers')) > 0 {
- include ::ntp
-}
+ create_resources(kmod::load, hiera('kernel_modules'), {})
+ create_resources(sysctl::value, hiera('sysctl_settings'), {})
+ Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-include ::timezone
+ include ::timezone
-if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
- exec { 'set selinux to permissive on boot':
- command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
- onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
- path => ['/usr/bin', '/usr/sbin'],
+ if count(hiera('ntp::servers')) > 0 {
+ include ::ntp
}
-
- exec { 'set selinux to permissive':
- command => 'setenforce 0',
- onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
- path => ['/usr/bin', '/usr/sbin'],
- } -> Class['ceph::profile::osd']
}
-if str2bool(hiera('ceph_ipv6', false)) {
- $mon_host = hiera('ceph_mon_host_v6')
-} else {
- $mon_host = hiera('ceph_mon_host')
-}
-class { '::ceph::profile::params':
- mon_host => $mon_host,
-}
-include ::ceph::conf
-include ::ceph::profile::client
-include ::ceph::profile::osd
+if hiera('step') >= 3 {
+ if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
+ exec { 'set selinux to permissive on boot':
+ command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
+ onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
+ path => ['/usr/bin', '/usr/sbin'],
+ }
+
+ exec { 'set selinux to permissive':
+ command => 'setenforce 0',
+ onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
+ path => ['/usr/bin', '/usr/sbin'],
+ } -> Class['ceph::profile::osd']
+ }
-hiera_include('ceph_classes')
-package_manifest{'/var/lib/tripleo/installed-packages/overcloud_ceph': ensure => present}
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
+ include ::ceph::conf
+ include ::ceph::profile::client
+ include ::ceph::profile::osd
+
+ hiera_include('ceph_classes')
+ package_manifest{'/var/lib/tripleo/installed-packages/overcloud_ceph': ensure => present}
+}
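overcloud_cephstorage.pp is now gated on hiera('step') like the other role manifests. A rough sketch of the hiera keys it reads, with made-up values, treating this purely as an illustration (the step value is supplied by the post-deployment configuration):

    step: 3
    kernel_modules: {}
    sysctl_settings: {}
    ceph_osd_selinux_permissive: true
    ceph_ipv6: false
    ceph_mon_host: 192.0.2.20,192.0.2.21,192.0.2.22
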
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index cc58cb14..43e87789 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -90,8 +90,17 @@ if str2bool(hiera('nova::use_ipv6', false)) {
} else {
$vncserver_listen = '0.0.0.0'
}
-class { '::nova::compute::libvirt' :
- vncserver_listen => $vncserver_listen,
+
+if $rbd_ephemeral_storage {
+ class { '::nova::compute::libvirt':
+ libvirt_disk_cachemodes => ['network=writeback'],
+ libvirt_hw_disk_discard => 'unmap',
+ vncserver_listen => $vncserver_listen,
+ }
+} else {
+ class { '::nova::compute::libvirt' :
+ vncserver_listen => $vncserver_listen,
+ }
}
nova_config {
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index ef330e29..536c680f 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -24,15 +24,6 @@ if hiera('step') >= 1 {
create_resources(sysctl::value, hiera('sysctl_settings'), {})
Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
- $controller_node_ips = split(hiera('controller_node_ips'), ',')
-
- if $enable_load_balancer {
- class { '::tripleo::loadbalancer' :
- controller_hosts => $controller_node_ips,
- manage_vip => true,
- }
- }
-
}
if hiera('step') >= 2 {
@@ -117,7 +108,6 @@ if hiera('step') >= 2 {
include ::nova::db::mysql_api
include ::neutron::db::mysql
include ::cinder::db::mysql
- include ::heat::db::mysql
include ::sahara::db::mysql
if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
include ::gnocchi::db::mysql
@@ -127,36 +117,6 @@ if hiera('step') >= 2 {
include ::aodh::db::mysql
}
- $rabbit_nodes = hiera('rabbit_node_ips')
- if count($rabbit_nodes) > 1 {
-
- $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
- if $rabbit_ipv6 {
- $rabbit_env = merge(hiera('rabbitmq_environment'), {
- 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
- })
- } else {
- $rabbit_env = hiera('rabbitmq_environment')
- }
-
- class { '::rabbitmq':
- config_cluster => true,
- cluster_nodes => $rabbit_nodes,
- tcp_keepalive => false,
- config_kernel_variables => hiera('rabbitmq_kernel_variables'),
- config_variables => hiera('rabbitmq_config_variables'),
- environment_variables => $rabbit_env,
- }
- rabbitmq_policy { 'ha-all@/':
- pattern => '^(?!amq\.).*',
- definition => {
- 'ha-mode' => 'all',
- },
- }
- } else {
- include ::rabbitmq
- }
-
# pre-install swift here so we can build rings
include ::swift
@@ -305,17 +265,6 @@ if hiera('step') >= 4 {
metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'),
}
} else {
- include ::neutron::agents::l3
- include ::neutron::agents::dhcp
- include ::neutron::agents::metadata
-
- file { '/etc/neutron/dnsmasq-neutron.conf':
- content => hiera('neutron_dnsmasq_options'),
- owner => 'neutron',
- group => 'neutron',
- notify => Service['neutron-dhcp-service'],
- require => Package['neutron'],
- }
# If the value of core plugin is set to 'midonet',
# skip all the ML2 configuration
@@ -358,23 +307,14 @@ if hiera('step') >= 4 {
include ::neutron::plugins::ml2::bigswitch::restproxy
include ::neutron::agents::bigswitch
}
- neutron_l3_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
- }
- neutron_dhcp_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
- }
Service['neutron-server'] -> Service['neutron-ovs-agent-service']
}
- Service['neutron-server'] -> Service['neutron-dhcp-service']
- Service['neutron-server'] -> Service['neutron-l3']
Service['neutron-server'] -> Service['neutron-metadata']
}
include ::cinder
include ::cinder::config
- include ::tripleo::ssl::cinder_config
include ::cinder::api
include ::cinder::glance
include ::cinder::scheduler
@@ -510,7 +450,6 @@ if hiera('step') >= 4 {
}
# swift proxy
- include ::memcached
include ::swift::proxy
include ::swift::proxy::proxy_logging
include ::swift::proxy::healthcheck
@@ -581,16 +520,6 @@ if hiera('step') >= 4 {
include ::aodh::listener
include ::aodh::client
- # Heat
- class { '::heat' :
- notification_driver => 'messaging',
- }
- include ::heat::config
- include ::heat::api
- include ::heat::api_cfn
- include ::heat::api_cloudwatch
- include ::heat::engine
-
# Sahara
include ::sahara
include ::sahara::service::api
@@ -654,7 +583,6 @@ if hiera('step') >= 4 {
if hiera('step') >= 5 {
$nova_enable_db_purge = hiera('nova_enable_db_purge', true)
$cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
- $heat_enable_db_purge = hiera('heat_enable_db_purge', true)
if $nova_enable_db_purge {
include ::nova::cron::archive_deleted_rows
@@ -662,25 +590,6 @@ if hiera('step') >= 5 {
if $cinder_enable_db_purge {
include ::cinder::cron::db_purge
}
- if $heat_enable_db_purge {
- include ::heat::cron::purge_deleted
- }
-
- if downcase(hiera('bootstrap_nodeid')) == $::hostname {
- # Class ::heat::keystone::domain has to run on bootstrap node
- # because it creates DB entities via API calls.
- include ::heat::keystone::domain
-
- Class['::keystone::roles::admin'] -> Class['::heat::keystone::domain']
- } else {
- # On non-bootstrap node we don't need to create Keystone resources again
- class { '::heat::keystone::domain':
- manage_domain => false,
- manage_user => false,
- manage_role => false,
- }
- }
-
} #END STEP 5
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')])
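With this change the Heat services are no longer wired up by the monolithic controller manifest; they move to the composable service templates added further down under puppet/services/. A minimal sketch of plugging such a template in through the overcloud resource registry, using hypothetical OS::TripleO::Services::* names for illustration:

    resource_registry:
      OS::TripleO::Services::HeatApi: puppet/services/heat-api.yaml
      OS::TripleO::Services::HeatApiCfn: puppet/services/heat-api-cfn.yaml
      OS::TripleO::Services::HeatApiCloudwatch: puppet/services/heat-api-cloudwatch.yaml
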
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 0652a1c6..30345694 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -24,7 +24,6 @@ Service <|
tag == 'cinder-service' or
tag == 'ceilometer-service' or
tag == 'gnocchi-service' or
- tag == 'heat-service' or
tag == 'neutron-service' or
tag == 'nova-service' or
tag == 'sahara-service'
@@ -46,7 +45,7 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) {
$sync_db = false
}
-$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 6
+$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
$enable_load_balancer = hiera('enable_load_balancer', true)
# When to start and enable services which haven't been Pacemakerized
@@ -66,18 +65,6 @@ if hiera('step') >= 1 {
include ::ntp
}
- $controller_node_ips = split(hiera('controller_node_ips'), ',')
- $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
- if $enable_load_balancer {
- class { '::tripleo::loadbalancer' :
- controller_hosts => $controller_node_ips,
- controller_hosts_names => $controller_node_names,
- manage_vip => false,
- mysql_clustercheck => true,
- haproxy_service_manage => false,
- }
- }
-
$pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
$corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
if $corosync_ipv6 {
@@ -99,6 +86,10 @@ if hiera('step') >= 1 {
if $enable_fencing {
include ::tripleo::fencing
+ # enable stonith after all Pacemaker resources have been created
+ Pcmk_resource<||> -> Class['tripleo::fencing']
+ Pcmk_constraint<||> -> Class['tripleo::fencing']
+ Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing']
# enable stonith after all fencing devices have been created
Class['tripleo::fencing'] -> Class['pacemaker::stonith']
}
@@ -110,35 +101,6 @@ if hiera('step') >= 1 {
op_params => 'start timeout=200s stop timeout=200s',
}
- # Only configure RabbitMQ in this step, don't start it yet to
- # avoid races where non-master nodes attempt to start without
- # config (eg. binding on 0.0.0.0)
- # The module ignores erlang_cookie if cluster_config is false
- $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
- if $rabbit_ipv6 {
- $rabbit_env = merge(hiera('rabbitmq_environment'), {
- 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
- })
- } else {
- $rabbit_env = hiera('rabbitmq_environment')
- }
-
- class { '::rabbitmq':
- service_manage => false,
- tcp_keepalive => false,
- config_kernel_variables => hiera('rabbitmq_kernel_variables'),
- config_variables => hiera('rabbitmq_config_variables'),
- environment_variables => $rabbit_env,
- } ->
- file { '/var/lib/rabbitmq/.erlang.cookie':
- ensure => file,
- owner => 'rabbitmq',
- group => 'rabbitmq',
- mode => '0400',
- content => hiera('rabbitmq::erlang_cookie'),
- replace => true,
- }
-
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
include ::mongodb::globals
include ::mongodb::client
@@ -147,11 +109,6 @@ if hiera('step') >= 1 {
}
}
- # Memcached
- class {'::memcached' :
- service_manage => false,
- }
-
# Redis
class { '::redis' :
service_manage => false,
@@ -235,77 +192,12 @@ if hiera('step') >= 2 {
if $pacemaker_master {
- if $enable_load_balancer {
-
- include ::pacemaker::resource_defaults
-
- # Create an openstack-core dummy resource. See RHBZ 1290121
- pacemaker::resource::ocf { 'openstack-core':
- ocf_agent_name => 'heartbeat:Dummy',
- clone_params => true,
- }
- # FIXME: we should not have to access tripleo::loadbalancer class
- # parameters here to configure pacemaker VIPs. The configuration
- # of pacemaker VIPs could move into puppet-tripleo or we should
- # make use of less specific hiera parameters here for the settings.
- pacemaker::resource::service { 'haproxy':
- clone_params => true,
- }
-
- $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
- tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_control_vip':
- vip_name => 'control',
- ip_address => $control_vip,
- }
-
- $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
- tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_public_vip':
- ensure => $public_vip and $public_vip != $control_vip,
- vip_name => 'public',
- ip_address => $public_vip,
- }
-
- $redis_vip = hiera('redis_vip')
- tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_redis_vip':
- ensure => $redis_vip and $redis_vip != $control_vip,
- vip_name => 'redis',
- ip_address => $redis_vip,
- }
+ include ::pacemaker::resource_defaults
-
- $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
- tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_internal_api_vip':
- ensure => $internal_api_vip and $internal_api_vip != $control_vip,
- vip_name => 'internal_api',
- ip_address => $internal_api_vip,
- }
-
- $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
- tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_vip':
- ensure => $storage_vip and $storage_vip != $control_vip,
- vip_name => 'storage',
- ip_address => $storage_vip,
- }
-
- $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
- tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_mgmt_vip':
- ensure => $storage_mgmt_vip and $storage_mgmt_vip != $control_vip,
- vip_name => 'storage_mgmt',
- ip_address => $storage_mgmt_vip,
- }
- }
-
- pacemaker::resource::service { $::memcached::params::service_name :
- clone_params => 'interleave=true',
- require => Class['::memcached'],
- }
-
- pacemaker::resource::ocf { 'rabbitmq':
- ocf_agent_name => 'heartbeat:rabbitmq-cluster',
- resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
- clone_params => 'ordered=true interleave=true',
- meta_params => 'notify=true',
- require => Class['::rabbitmq'],
+ # Create an openstack-core dummy resource. See RHBZ 1290121
+ pacemaker::resource::ocf { 'openstack-core':
+ ocf_agent_name => 'heartbeat:Dummy',
+ clone_params => true,
}
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
@@ -345,6 +237,16 @@ if hiera('step') >= 2 {
}
}
+ $mysql_root_password = hiera('mysql::server::root_password')
+ $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
+  # This step creates a sysconfig clustercheck file with the root user and an empty
+  # password on the first install only (later on, the clustercheck db user is used).
+  # We use exec rather than file to avoid duplicate definition errors in puppet
+  # when we later set the file to contain the clustercheck data.
+ exec { 'create-root-sysconfig-clustercheck':
+ command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
+ unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
+ }
exec { 'galera-ready' :
command => '/usr/bin/clustercheck >/dev/null',
@@ -352,14 +254,7 @@ if hiera('step') >= 2 {
tries => 180,
try_sleep => 10,
environment => ['AVAILABLE_WHEN_READONLY=0'],
- require => File['/etc/sysconfig/clustercheck'],
- }
-
- file { '/etc/sysconfig/clustercheck' :
- ensure => file,
- content => "MYSQL_USERNAME=root\n
-MYSQL_PASSWORD=''\n
-MYSQL_HOST=localhost\n",
+ require => Exec['create-root-sysconfig-clustercheck'],
}
xinetd::service { 'galera-monitor' :
@@ -372,7 +267,24 @@ MYSQL_HOST=localhost\n",
service_type => 'UNLISTED',
user => 'root',
group => 'root',
- require => File['/etc/sysconfig/clustercheck'],
+ require => Exec['create-root-sysconfig-clustercheck'],
+ }
+  # We add a clustercheck db user and will switch /etc/sysconfig/clustercheck
+  # over to it in a later step. We do this only on one node as it will replicate to
+  # the other members. We also make sure that the permissions are the minimum necessary.
+ if $pacemaker_master {
+ mysql_user { 'clustercheck@localhost':
+ ensure => 'present',
+ password_hash => mysql_password($mysql_clustercheck_password),
+ require => Exec['galera-ready'],
+ }
+ mysql_grant { 'clustercheck@localhost/*.*':
+ ensure => 'present',
+ options => ['GRANT'],
+ privileges => ['PROCESS'],
+ table => '*.*',
+ user => 'clustercheck@localhost',
+ }
}
# Create all the database schemas
@@ -389,9 +301,6 @@ MYSQL_HOST=localhost\n",
class { '::cinder::db::mysql':
require => Exec['galera-ready'],
}
- class { '::heat::db::mysql':
- require => Exec['galera-ready'],
- }
if downcase(hiera('ceilometer_backend')) == 'mysql' {
class { '::ceilometer::db::mysql':
@@ -466,6 +375,17 @@ MYSQL_HOST=localhost\n",
} #END STEP 2
if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
+ # At this stage we are guaranteed that the clustercheck db user exists
+ # so we switch the resource agent to use it.
+ file { '/etc/sysconfig/clustercheck' :
+ ensure => file,
+ mode => '0600',
+ owner => 'root',
+ group => 'root',
+ content => "MYSQL_USERNAME=clustercheck\n
+MYSQL_PASSWORD='${mysql_clustercheck_password}'\n
+MYSQL_HOST=localhost\n",
+ }
$nova_ipv6 = hiera('nova::use_ipv6', false)
if $nova_ipv6 {
@@ -588,31 +508,6 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'),
}
}
- if hiera('neutron::enable_dhcp_agent',true) {
- class { '::neutron::agents::dhcp' :
- manage_service => false,
- enabled => false,
- }
- file { '/etc/neutron/dnsmasq-neutron.conf':
- content => hiera('neutron_dnsmasq_options'),
- owner => 'neutron',
- group => 'neutron',
- notify => Service['neutron-dhcp-service'],
- require => Package['neutron'],
- }
- }
- if hiera('neutron::enable_l3_agent',true) {
- class { '::neutron::agents::l3' :
- manage_service => false,
- enabled => false,
- }
- }
- if hiera('neutron::enable_metadata_agent',true) {
- class { '::neutron::agents::metadata':
- manage_service => false,
- enabled => false,
- }
- }
include ::neutron::plugins::ml2
class { '::neutron::agents::ml2::ovs':
manage_service => false,
@@ -644,19 +539,9 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
include ::neutron::plugins::ml2::bigswitch::restproxy
include ::neutron::agents::bigswitch
}
- neutron_l3_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
- }
- neutron_dhcp_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
- }
- neutron_config {
- 'DEFAULT/notification_driver': value => 'messaging';
- }
include ::cinder
include ::cinder::config
- include ::tripleo::ssl::cinder_config
class { '::cinder::api':
sync_db => $sync_db,
manage_service => false,
@@ -897,29 +782,6 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
- # Heat
- include ::heat::config
- class { '::heat' :
- sync_db => $sync_db,
- notification_driver => 'messaging',
- }
- class { '::heat::api' :
- manage_service => false,
- enabled => false,
- }
- class { '::heat::api_cfn' :
- manage_service => false,
- enabled => false,
- }
- class { '::heat::api_cloudwatch' :
- manage_service => false,
- enabled => false,
- }
- class { '::heat::engine' :
- manage_service => false,
- enabled => false,
- }
-
# httpd/apache and horizon
# NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
class { '::apache' :
@@ -1022,9 +884,31 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
} #END STEP 4
if hiera('step') >= 5 {
+  # We now make sure that the root db password is set to a random one.
+  # At first installation /root/.my.cnf will be empty and we connect without a root
+  # password. On subsequent runs or updates /root/.my.cnf will already be populated
+  # with proper credentials. This step happens on every node because this SQL
+  # statement does not automatically replicate across nodes.
+ exec { 'galera-set-root-password':
+ command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
+ }
+ file { '/root/.my.cnf' :
+ ensure => file,
+ mode => '0600',
+ owner => 'root',
+ group => 'root',
+ content => "[client]
+user=root
+password=\"${mysql_root_password}\"
+
+[mysql]
+user=root
+password=\"${mysql_root_password}\"",
+ require => Exec['galera-set-root-password'],
+ }
+
$nova_enable_db_purge = hiera('nova_enable_db_purge', true)
$cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
- $heat_enable_db_purge = hiera('heat_enable_db_purge', true)
if $nova_enable_db_purge {
include ::nova::cron::archive_deleted_rows
@@ -1032,9 +916,6 @@ if hiera('step') >= 5 {
if $cinder_enable_db_purge {
include ::cinder::cron::db_purge
}
- if $heat_enable_db_purge {
- include ::heat::cron::purge_deleted
- }
if $pacemaker_master {
@@ -1047,15 +928,6 @@ if hiera('step') >= 5 {
require => [Pacemaker::Resource::Service[$::apache::params::service_name],
Pacemaker::Resource::Ocf['openstack-core']],
}
- pacemaker::constraint::base { 'memcached-then-openstack-core-constraint':
- constraint_type => 'order',
- first_resource => 'memcached-clone',
- second_resource => 'openstack-core-clone',
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service['memcached'],
- Pacemaker::Resource::Ocf['openstack-core']],
- }
pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
constraint_type => 'order',
first_resource => 'galera-master',
@@ -1145,43 +1017,6 @@ if hiera('step') >= 5 {
Pacemaker::Resource::Service[$::sahara::params::engine_service_name]],
}
- if hiera('step') == 5 {
- # Neutron
- # NOTE(gfidente): Neutron will try to populate the database with some data
- # as soon as neutron-server is started; to avoid races we want to make this
- # happen only on one node, before normal Pacemaker initialization
- # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
- # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec
- # will try to start the service while it's already started by Pacemaker
- # It would result to a deployment failure since systemd would return 1 to Puppet
- # and the overcloud would fail to deploy (6 would be returned).
- # This conditional prevents from a race condition during the deployment.
- # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
- exec { 'neutron-server-systemd-start-sleep' :
- command => 'systemctl start neutron-server && /usr/bin/sleep 5',
- path => '/usr/bin',
- unless => '/sbin/pcs resource show neutron-server',
- } ->
- pacemaker::resource::service { $::neutron::params::server_service:
- clone_params => 'interleave=true',
- require => Pacemaker::Resource::Ocf['openstack-core']
- }
- } else {
- pacemaker::resource::service { $::neutron::params::server_service:
- clone_params => 'interleave=true',
- require => Pacemaker::Resource::Ocf['openstack-core']
- }
- }
- if hiera('neutron::enable_l3_agent', true) {
- pacemaker::resource::service { $::neutron::params::l3_agent_service:
- clone_params => 'interleave=true',
- }
- }
- if hiera('neutron::enable_dhcp_agent', true) {
- pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
- clone_params => 'interleave=true',
- }
- }
if hiera('neutron::enable_ovs_agent', true) {
pacemaker::resource::service { $::neutron::params::ovs_agent_service:
clone_params => 'interleave=true',
@@ -1192,11 +1027,6 @@ if hiera('step') >= 5 {
clone_params => 'interleave=true',
}
}
- if hiera('neutron::enable_metadata_agent', true) {
- pacemaker::resource::service { $::neutron::params::metadata_agent_service:
- clone_params => 'interleave=true',
- }
- }
if hiera('neutron::enable_ovs_agent', true) {
pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
ocf_agent_name => 'neutron:OVSCleanup',
@@ -1241,81 +1071,6 @@ if hiera('step') >= 5 {
Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
}
}
- pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
- constraint_type => 'order',
- first_resource => 'openstack-core-clone',
- second_resource => "${::neutron::params::server_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Ocf['openstack-core'],
- Pacemaker::Resource::Service[$::neutron::params::server_service]],
- }
- if hiera('neutron::enable_ovs_agent',true) {
- pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::ovs_agent_service}-clone",
- second_resource => "${::neutron::params::dhcp_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
- }
- }
- if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) {
- pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::server_service}-clone",
- second_resource => "${::neutron::params::ovs_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
- }
-
- pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
- source => "${::neutron::params::dhcp_agent_service}-clone",
- target => "${::neutron::params::ovs_agent_service}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
- }
- }
- if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_l3_agent',true) {
- pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::dhcp_agent_service}-clone",
- second_resource => "${::neutron::params::l3_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
- }
- pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
- source => "${::neutron::params::l3_agent_service}-clone",
- target => "${::neutron::params::dhcp_agent_service}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
- }
- }
- if hiera('neutron::enable_l3_agent',true) and hiera('neutron::enable_metadata_agent',true) {
- pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::l3_agent_service}-clone",
- second_resource => "${::neutron::params::metadata_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
- }
- pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
- source => "${::neutron::params::metadata_agent_service}-clone",
- target => "${::neutron::params::l3_agent_service}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
- }
- }
if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
#midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
@@ -1471,11 +1226,6 @@ if hiera('step') >= 5 {
pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
clone_params => 'interleave=true',
}
- pacemaker::resource::ocf { 'delay' :
- ocf_agent_name => 'heartbeat:Delay',
- clone_params => 'interleave=true',
- resource_params => 'startdelay=10',
- }
# Fedora doesn't know `require-all` parameter for constraints yet
if $::operatingsystem == 'Fedora' {
$redis_ceilometer_constraint_params = undef
@@ -1547,22 +1297,6 @@ if hiera('step') >= 5 {
require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
}
- pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
- constraint_type => 'order',
- first_resource => "${::ceilometer::params::api_service_name}-clone",
- second_resource => 'delay-clone',
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
- Pacemaker::Resource::Ocf['delay']],
- }
- pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
- source => 'delay-clone',
- target => "${::ceilometer::params::api_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
- Pacemaker::Resource::Ocf['delay']],
- }
# Aodh
pacemaker::resource::service { $::aodh::params::evaluator_service_name :
clone_params => 'interleave=true',
@@ -1573,22 +1307,6 @@ if hiera('step') >= 5 {
pacemaker::resource::service { $::aodh::params::listener_service_name :
clone_params => 'interleave=true',
}
- pacemaker::constraint::base { 'aodh-delay-then-aodh-evaluator-constraint':
- constraint_type => 'order',
- first_resource => 'delay-clone',
- second_resource => "${::aodh::params::evaluator_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
- Pacemaker::Resource::Ocf['delay']],
- }
- pacemaker::constraint::colocation { 'aodh-evaluator-with-aodh-delay-colocation':
- source => "${::aodh::params::evaluator_service_name}-clone",
- target => 'delay-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
- Pacemaker::Resource::Ocf['delay']],
- }
pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
constraint_type => 'order',
first_resource => "${::aodh::params::evaluator_service_name}-clone",
@@ -1657,77 +1375,6 @@ if hiera('step') >= 5 {
Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
}
- # Heat
- pacemaker::resource::service { $::heat::params::api_service_name :
- clone_params => 'interleave=true',
- }
- pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name :
- clone_params => 'interleave=true',
- }
- pacemaker::resource::service { $::heat::params::api_cfn_service_name :
- clone_params => 'interleave=true',
- }
- pacemaker::resource::service { $::heat::params::engine_service_name :
- clone_params => 'interleave=true',
- }
- pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
- constraint_type => 'order',
- first_resource => "${::heat::params::api_service_name}-clone",
- second_resource => "${::heat::params::api_cfn_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
- Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
- }
- pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
- source => "${::heat::params::api_cfn_service_name}-clone",
- target => "${::heat::params::api_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
- Pacemaker::Resource::Service[$::heat::params::api_service_name]],
- }
- pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
- constraint_type => 'order',
- first_resource => "${::heat::params::api_cfn_service_name}-clone",
- second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
- Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
- }
- pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
- source => "${::heat::params::api_cloudwatch_service_name}-clone",
- target => "${::heat::params::api_cfn_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
- Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
- }
- pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
- constraint_type => 'order',
- first_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
- second_resource => "${::heat::params::engine_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
- Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
- }
- pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
- source => "${::heat::params::engine_service_name}-clone",
- target => "${::heat::params::api_cloudwatch_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
- Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
- }
- pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
- constraint_type => 'order',
- first_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
- second_resource => "${::heat::params::api_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
- Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
- }
-
# Horizon and Keystone
pacemaker::resource::service { $::apache::params::service_name:
clone_params => 'interleave=true',
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index 3e8784b7..ca50d91d 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -94,5 +94,9 @@ outputs:
glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
+ glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
+ glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
+ glance::keystone::auth::password: {get_param: GlancePassword }
step_config: |
include ::tripleo::profile::base::glance::api
diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml
new file mode 100644
index 00000000..99eb1074
--- /dev/null
+++ b/puppet/services/heat-api-cfn.yaml
@@ -0,0 +1,46 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Heat CloudFormation API service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+ HeatWorkers:
+ default: 0
+ description: Number of workers for Heat service.
+ type: number
+ HeatPassword:
+ description: The password for the Heat service and db account, used by the Heat services.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+
+resources:
+ HeatBase:
+ type: ./heat-base.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Heat CloudFormation API role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [HeatBase, role_data, config_settings]
+ - heat::api_cfn::workers: {get_param: HeatWorkers}
+ heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
+ heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
+ heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
+ heat::keystone::auth_cfn::password: {get_param: HeatPassword}
+ heat::keystone::auth::region: {get_param: KeystoneRegion}
+ step_config: |
+ include ::tripleo::profile::base::heat::api_cfn
diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml
new file mode 100644
index 00000000..f3d68042
--- /dev/null
+++ b/puppet/services/heat-api-cloudwatch.yaml
@@ -0,0 +1,33 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Heat CloudWatch API service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+ HeatWorkers:
+ default: 0
+ description: Number of workers for Heat service.
+ type: number
+
+resources:
+ HeatBase:
+ type: ./heat-base.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Heat Cloudwatch API role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [HeatBase, role_data, config_settings]
+ - heat::api_cloudwatch::workers: {get_param: HeatWorkers}
+ step_config: |
+ include ::tripleo::profile::base::heat::api_cloudwatch
diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml
new file mode 100644
index 00000000..4fc259ac
--- /dev/null
+++ b/puppet/services/heat-api.yaml
@@ -0,0 +1,46 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Heat API service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+ HeatWorkers:
+ default: 0
+ description: Number of workers for Heat service.
+ type: number
+ HeatPassword:
+ description: The password for the Heat service and db account, used by the Heat services.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+
+resources:
+ HeatBase:
+ type: ./heat-base.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Heat API role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [HeatBase, role_data, config_settings]
+ - heat::api::workers: {get_param: HeatWorkers}
+ heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
+ heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
+ heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
+ heat::keystone::auth::password: {get_param: HeatPassword}
+ heat::keystone::auth::region: {get_param: KeystoneRegion}
+ step_config: |
+ include ::tripleo::profile::base::heat::api
diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml
new file mode 100644
index 00000000..8617df27
--- /dev/null
+++ b/puppet/services/heat-base.yaml
@@ -0,0 +1,40 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Heat base service. Shared for all Heat services.
+
+parameters:
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+
+outputs:
+ role_data:
+ description: Shared role data for the Heat services.
+ value:
+ config_settings:
+ heat::rabbit_userid: {get_param: RabbitUserName}
+ heat::rabbit_password: {get_param: RabbitPassword}
+ heat::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ heat::rabbit_port: {get_param: RabbitClientPort}
+ heat::debug: {get_param: Debug}
+ heat::enable_proxy_headers_parsing: true
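As a rough sketch of what this shared template contributes, with the defaults above (guest user, port 5672, SSL off, empty Debug) and a deployment-supplied RabbitPassword, the merged hiera for any Heat service that includes it would look roughly like:

    # Sketch with default parameter values; RabbitPassword has no default
    heat::rabbit_userid: 'guest'
    heat::rabbit_password: '<RabbitPassword>'
    heat::rabbit_use_ssl: false
    heat::rabbit_port: 5672
    heat::debug: ''
    heat::enable_proxy_headers_parsing: true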
diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml
new file mode 100644
index 00000000..143d24bb
--- /dev/null
+++ b/puppet/services/heat-engine.yaml
@@ -0,0 +1,62 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Heat Engine service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+ HeatEnableDBPurge:
+ type: boolean
+ default: true
+ description: |
+ Whether to create cron job for purging soft deleted rows in the Heat database.
+ HeatWorkers:
+ default: 0
+ description: Number of workers for Heat service.
+ type: number
+ HeatPassword:
+ description: The password for the Heat service and db account, used by the Heat services.
+ type: string
+ hidden: true
+ HeatStackDomainAdminPassword:
+ description: Password for heat_stack_domain_admin user.
+ type: string
+ hidden: true
+
+resources:
+ HeatBase:
+ type: ./heat-base.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Heat Engine role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [HeatBase, role_data, config_settings]
+ - heat::engine::num_engine_workers: {get_param: HeatWorkers}
+ tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge}
+ heat_dsn: &heat_dsn
+ list_join:
+ - ''
+ - - 'mysql+pymysql://heat:'
+ - {get_param: HeatPassword}
+ - '@'
+ - {get_param: MysqlVirtualIPUri}
+ - '/heat'
+ heat::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
+ heat::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ heat::keystone_password: {get_param: HeatPassword}
+ heat::database_connection: *heat_dsn
+ heat::db::mysql::password: {get_param: HeatPassword}
+ heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
+ step_config: |
+ include ::tripleo::profile::base::heat::engine
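For example, with placeholder values HeatPassword 'example-pass' and MysqlVirtualIPUri '192.0.2.6', the list_join above produces a DSN of the form:

    # Illustrative result of the heat_dsn list_join
    heat::database_connection: 'mysql+pymysql://heat:example-pass@192.0.2.6/heat'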
diff --git a/puppet/services/loadbalancer.yaml b/puppet/services/loadbalancer.yaml
new file mode 100644
index 00000000..0c1757bf
--- /dev/null
+++ b/puppet/services/loadbalancer.yaml
@@ -0,0 +1,21 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Loadbalancer service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+outputs:
+ role_data:
+ description: Role data for the Loadbalancer role.
+ value:
+ step_config: |
+ include ::tripleo::profile::base::loadbalancer
diff --git a/puppet/services/memcached.yaml b/puppet/services/memcached.yaml
new file mode 100644
index 00000000..1833fbff
--- /dev/null
+++ b/puppet/services/memcached.yaml
@@ -0,0 +1,23 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Memcached service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+outputs:
+ role_data:
+ description: Role data for the Memcached role.
+ value:
+ config_settings:
+ step_config: |
+ include ::tripleo::profile::base::memcached
+
diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml
new file mode 100644
index 00000000..b34bdd22
--- /dev/null
+++ b/puppet/services/neutron-base.yaml
@@ -0,0 +1,44 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron base service. Shared for all Neutron agents.
+
+parameters:
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+ NeutronDhcpAgentsPerNetwork:
+ type: number
+ default: 3
+ description: The number of neutron dhcp agents to schedule per network
+ Debug:
+ type: string
+ default: ''
+ description: Set to True to enable debugging on all services.
+
+outputs:
+ role_data:
+ description: Role data for the Neutron base service.
+ value:
+ config_settings:
+ neutron::rabbit_password: {get_param: RabbitPassword}
+ neutron::rabbit_user: {get_param: RabbitUserName}
+ neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ neutron::rabbit_port: {get_param: RabbitClientPort}
+ neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
+ neutron::debug: {get_param: Debug}
diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml
new file mode 100644
index 00000000..548b4ba0
--- /dev/null
+++ b/puppet/services/neutron-dhcp.yaml
@@ -0,0 +1,56 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron DHCP agent configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+ NeutronEnableIsolatedMetadata:
+ default: 'False'
+    description: If True, the DHCP agent provides a metadata route to the VM.
+ type: string
+ NeutronDnsmasqOptions:
+ default: 'dhcp-option-force=26,%MTU%'
+ description: >
+ Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU
+ to be set to the value of NeutronTenantMtu, which should be set to account
+ for tunnel overhead.
+ type: string
+ NeutronTenantMtu:
+ description: >
+ The default MTU for tenant networks. For VXLAN/GRE tunneling, this should
+ be at least 50 bytes smaller than the MTU on the physical network. This
+ value will be used to set the MTU on the virtual Ethernet device.
+ This value will be used to construct the NeutronDnsmasqOptions, since that
+ will determine the MTU that is assigned to the VM host through DHCP.
+ default: "1400"
+ type: string
+
+resources:
+
+ NeutronBase:
+ type: ./neutron-base.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Neutron DHCP agent service.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ - neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf
+ tripleo::profile::base::neutron::dhcp:
+ str_replace:
+ template: {get_param: NeutronDnsmasqOptions}
+ params:
+ '%MTU%': {get_param: NeutronTenantMtu}
+ neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
+ step_config: |
+ include tripleo::profile::base::neutron::dhcp
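To illustrate the str_replace: with the default NeutronDnsmasqOptions and a NeutronTenantMtu of '1400', the DHCP profile receives:

    # Resolved dnsmasq options with the defaults above
    tripleo::profile::base::neutron::dhcp: 'dhcp-option-force=26,1400'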
diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml
new file mode 100644
index 00000000..2ea1b19d
--- /dev/null
+++ b/puppet/services/neutron-l3.yaml
@@ -0,0 +1,37 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron L3 agent configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+ Debug:
+ type: string
+ default: ''
+ NeutronExternalNetworkBridge:
+ description: Name of bridge used for external network traffic.
+ type: string
+ default: 'br-ex'
+
+resources:
+
+ NeutronBase:
+ type: ./neutron-base.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Neutron L3 agent service.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
+ step_config: |
+ include tripleo::profile::base::neutron::l3
diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml
new file mode 100644
index 00000000..1fe139f3
--- /dev/null
+++ b/puppet/services/neutron-metadata.yaml
@@ -0,0 +1,45 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Metadata agent configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+ NeutronMetadataProxySharedSecret:
+ description: Shared secret to prevent spoofing
+ type: string
+ hidden: true
+ NeutronWorkers:
+ default: 0
+ description: Number of workers for Neutron service.
+ type: number
+ NeutronPassword:
+ description: The password for the neutron service and db account, used by neutron agents.
+ type: string
+ hidden: true
+
+resources:
+
+ NeutronBase:
+ type: ./neutron-base.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Metadata agent service.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ - neutron::agents::metadata::shared_secret: {get_param: NeutronMetadataProxySharedSecret}
+ neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers}
+ neutron::agents::metadata::auth_password: {get_param: NeutronPassword}
+ neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ step_config: |
+ include tripleo::profile::base::neutron::metadata
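These composable service templates are intended to be selected through the resource registry. A minimal sketch, with hypothetical alias names (the authoritative mappings live in overcloud-resource-registry-puppet.yaml), might look like:

    resource_registry:
      # Alias names below are illustrative only
      OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml
      OS::TripleO::Services::NeutronMetadataAgent: puppet/services/pacemaker/neutron-metadata.yaml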
diff --git a/puppet/services/pacemaker/glance-api.yaml b/puppet/services/pacemaker/glance-api.yaml
index 815eb5bf..ad964216 100644
--- a/puppet/services/pacemaker/glance-api.yaml
+++ b/puppet/services/pacemaker/glance-api.yaml
@@ -56,5 +56,7 @@ outputs:
glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype}
glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage}
glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions}
+ glance::api::manage_service: false
+ glance::api::enabled: false
step_config: |
include ::tripleo::profile::pacemaker::glance
diff --git a/puppet/services/pacemaker/glance-registry.yaml b/puppet/services/pacemaker/glance-registry.yaml
index 56353459..393fbaaf 100644
--- a/puppet/services/pacemaker/glance-registry.yaml
+++ b/puppet/services/pacemaker/glance-registry.yaml
@@ -26,7 +26,10 @@ outputs:
description: Role data for the Glance role.
value:
config_settings:
- get_attr: [GlanceRegistryBase, role_data, config_settings]
+ map_merge:
+ - get_attr: [GlanceRegistryBase, role_data, config_settings]
+ - glance::registry::manage_service: false
+ glance::registry::enabled: false
# No puppet manifests since glance-registry is included in
# ::tripleo::profile::pacemaker::glance which is maintained alongside of
# pacemaker/glance-api.yaml.
diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml
new file mode 100644
index 00000000..ba620f89
--- /dev/null
+++ b/puppet/services/pacemaker/heat-api-cfn.yaml
@@ -0,0 +1,35 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Heat CloudFormation API service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+resources:
+ HeatApiCfnBase:
+ type: ../heat-api-cfn.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+ role_data:
+ description: Role data for the Heat CloudFormation API role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [HeatApiCfnBase, role_data, config_settings]
+ - heat::api_cfn::manage_service: false
+ heat::api_cfn::enabled: false
+ step_config:
+ # No puppet manifests since heat-api-cfn is included in
+ # ::tripleo::profile::pacemaker::heat which is maintained alongside of
+ # pacemaker/heat-api.yaml.
diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
new file mode 100644
index 00000000..db71891c
--- /dev/null
+++ b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
@@ -0,0 +1,35 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Heat CloudWatch API service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+resources:
+ HeatApiCloudwatchBase:
+ type: ../heat-api-cloudwatch.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+ role_data:
+ description: Role data for the Heat Cloudwatch API role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [HeatApiCloudwatchBase, role_data, config_settings]
+ - heat::api_cloudwatch::manage_service: false
+ heat::api_cloudwatch::enabled: false
+ step_config:
+ # No puppet manifests since heat-api-cloudwatch is included in
+ # ::tripleo::profile::pacemaker::heat which is maintained alongside of
+ # pacemaker/heat-api.yaml.
diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml
new file mode 100644
index 00000000..b1c37d41
--- /dev/null
+++ b/puppet/services/pacemaker/heat-api.yaml
@@ -0,0 +1,33 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Heat API service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+resources:
+ HeatApiBase:
+ type: ../heat-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+ role_data:
+ description: Role data for the Heat API role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [HeatApiBase, role_data, config_settings]
+ - heat::api::manage_service: false
+ heat::api::enabled: false
+ step_config: |
+ include ::tripleo::profile::pacemaker::heat
diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml
new file mode 100644
index 00000000..1e39b363
--- /dev/null
+++ b/puppet/services/pacemaker/heat-engine.yaml
@@ -0,0 +1,36 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Heat Engine service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+resources:
+ HeatEngineBase:
+ type: ../heat-engine.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+
+outputs:
+ role_data:
+ description: Role data for the Heat engine role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [HeatEngineBase, role_data, config_settings]
+ - heat::engine::manage_service: false
+ heat::engine::enabled: false
+ step_config:
+ # No puppet manifests since heat-engine is included in
+ # ::tripleo::profile::pacemaker::heat which is maintained alongside of
+ # pacemaker/heat-api.yaml.
diff --git a/puppet/services/pacemaker/keystone.yaml b/puppet/services/pacemaker/keystone.yaml
index 8fcab15f..db52cae7 100644
--- a/puppet/services/pacemaker/keystone.yaml
+++ b/puppet/services/pacemaker/keystone.yaml
@@ -28,7 +28,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [KeystoneServiceBase, role_data, config_settings]
- #-
- # custom keystone hiera goes here if we need it!?
+ - keystone::manage_service: false
+ keystone::enabled: false
step_config: |
include ::tripleo::profile::pacemaker::keystone
diff --git a/puppet/services/pacemaker/loadbalancer.yaml b/puppet/services/pacemaker/loadbalancer.yaml
new file mode 100644
index 00000000..771b3d9b
--- /dev/null
+++ b/puppet/services/pacemaker/loadbalancer.yaml
@@ -0,0 +1,34 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Loadbalancer service with Pacemaker configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+resources:
+ LoadbalancerServiceBase:
+ type: ../loadbalancer.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+ role_data:
+ description: Role data for the Loadbalancer pacemaker role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [LoadbalancerServiceBase, role_data, config_settings]
+ - tripleo::loadbalancer::haproxy_service_manage: false
+ tripleo::loadbalancer::mysql_clustercheck: true
+ tripleo::loadbalancer::manage_vip: false
+ step_config: |
+ include ::tripleo::profile::pacemaker::loadbalancer
diff --git a/puppet/services/pacemaker/memcached.yaml b/puppet/services/pacemaker/memcached.yaml
new file mode 100644
index 00000000..306f805e
--- /dev/null
+++ b/puppet/services/pacemaker/memcached.yaml
@@ -0,0 +1,31 @@
+heat_template_version: 2016-04-08
+
+description: >
+  Memcached service with Pacemaker configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+resources:
+
+ MemcachedServiceBase:
+ type: ../memcached.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Memcached pacemaker role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [MemcachedServiceBase, role_data, config_settings]
+ - memcached::service_manage: false
+ step_config: |
+ include ::tripleo::profile::pacemaker::memcached
+
diff --git a/puppet/services/pacemaker/neutron-dhcp.yaml b/puppet/services/pacemaker/neutron-dhcp.yaml
new file mode 100644
index 00000000..0e972b28
--- /dev/null
+++ b/puppet/services/pacemaker/neutron-dhcp.yaml
@@ -0,0 +1,35 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron DHCP service with Pacemaker configured with Puppet.
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+resources:
+
+ NeutronDhcpBase:
+ type: ../neutron-dhcp.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron DHCP role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronDhcpBase, role_data, config_settings]
+ - tripleo::profile::pacemaker::neutron::enable_dhcp: True
+ neutron::agents::dhcp::enabled: false
+ neutron::agents::dhcp::manage_service: false
+ step_config: |
+ include ::tripleo::profile::pacemaker::neutron::dhcp
diff --git a/puppet/services/pacemaker/neutron-l3.yaml b/puppet/services/pacemaker/neutron-l3.yaml
new file mode 100644
index 00000000..84bff808
--- /dev/null
+++ b/puppet/services/pacemaker/neutron-l3.yaml
@@ -0,0 +1,33 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron L3 service with Pacemaker configured with Puppet.
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+resources:
+
+ NeutronL3Base:
+ type: ../neutron-l3.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron L3 role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronL3Base, role_data, config_settings]
+ - tripleo::profile::pacemaker::neutron::enable_l3: True
+ step_config: |
+ include ::tripleo::profile::pacemaker::neutron::l3
diff --git a/puppet/services/pacemaker/neutron-metadata.yaml b/puppet/services/pacemaker/neutron-metadata.yaml
new file mode 100644
index 00000000..79baf1ea
--- /dev/null
+++ b/puppet/services/pacemaker/neutron-metadata.yaml
@@ -0,0 +1,33 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Metadata service with Pacemaker configured with Puppet.
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+resources:
+
+ NeutronMetadataBase:
+ type: ../neutron-metadata.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Metadata role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronMetadataBase, role_data, config_settings]
+ - tripleo::profile::pacemaker::neutron::enable_metadata: True
+ step_config: |
+ include ::tripleo::profile::pacemaker::neutron::metadata
diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml
new file mode 100644
index 00000000..613db449
--- /dev/null
+++ b/puppet/services/pacemaker/rabbitmq.yaml
@@ -0,0 +1,32 @@
+heat_template_version: 2016-04-08
+
+description: >
+ RabbitMQ service with Pacemaker configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+resources:
+ RabbitMQServiceBase:
+ type: ../rabbitmq.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+ role_data:
+ description: Role data for the RabbitMQ pacemaker role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [RabbitMQServiceBase, role_data, config_settings]
+ - rabbitmq::service_manage: false
+ step_config: |
+ include ::tripleo::profile::pacemaker::rabbitmq
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
new file mode 100644
index 00000000..ae5678a3
--- /dev/null
+++ b/puppet/services/rabbitmq.yaml
@@ -0,0 +1,42 @@
+heat_template_version: 2016-04-08
+
+description: >
+ RabbitMQ service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitFDLimit:
+ default: 16384
+ description: Configures RabbitMQ FD limit
+ type: string
+ RabbitIPv6:
+ default: false
+ description: Enable IPv6 in RabbitMQ
+ type: boolean
+
+outputs:
+ role_data:
+ description: Role data for the RabbitMQ role.
+ value:
+ config_settings:
+ rabbitmq::file_limit: {get_param: RabbitFDLimit}
+ rabbitmq::default_user: {get_param: RabbitUserName}
+ rabbitmq::default_pass: {get_param: RabbitPassword}
+ rabbit_ipv6: {get_param: RabbitIPv6}
+ step_config: |
+ include ::tripleo::profile::base::rabbitmq
diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml
index eb06b241..b262f947 100644
--- a/puppet/swift-storage-post.yaml
+++ b/puppet/swift-storage-post.yaml
@@ -52,6 +52,10 @@ resources:
group: puppet
options:
enable_debug: {get_param: ConfigDebug}
+ enable_hiera: True
+ enable_facter: False
+ inputs:
+ - name: step
outputs:
- name: result
config:
@@ -65,6 +69,7 @@ resources:
servers: {get_param: servers}
config: {get_resource: StorageRingbuilderPuppetConfig}
input_values:
+ step: 3 # Note ringbuilder.pp expects >=3
update_identifier: {get_param: NodeConfigIdentifiers}
# Note, this should come last, so use depends_on to ensure
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 296428db..3f6f4733 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -220,16 +220,22 @@ resources:
properties:
ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
ManagementIp: {get_attr: [ManagementPort, ip_address]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
NetworkDeployment:
@@ -256,10 +262,16 @@ resources:
- all_nodes # provided by allNodesConfig
- '"%{::osfamily}"'
- common
+ - network
merge_behavior: deeper
datafiles:
common:
raw_data: {get_file: hieradata/common.yaml}
+ network:
+ mapped_data:
+ net_ip_map: {get_attr: [NetIpMap, net_ip_map]}
+ net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]}
+ net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]}
object_extraconfig:
mapped_data: {get_param: ObjectStorageExtraConfig}
extraconfig:
diff --git a/validation-scripts/all-nodes.sh b/validation-scripts/all-nodes.sh
index 1c834e76..0b8b3523 100644
--- a/validation-scripts/all-nodes.sh
+++ b/validation-scripts/all-nodes.sh
@@ -10,7 +10,7 @@ function ping_retry() {
PING_CMD=ping6
fi
until [ $COUNT -ge $TIMES ]; do
- if $PING_CMD -W 300 -c 1 $IP_ADDR &> /dev/null; then
+ if $PING_CMD -w 300 -c 1 $IP_ADDR &> /dev/null; then
echo "Ping to $IP_ADDR succeeded."
return 0
fi
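In iputils ping, -W sets how long to wait for each individual reply while -w sets a deadline for the whole run, so the change above appears intended to bound the entire ping attempt at 300 seconds rather than the wait for a single response.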