-rw-r--r--  docker/compute-post.yaml | 11
-rw-r--r--  environments/neutron-midonet.yaml | 2
-rw-r--r--  environments/puppet-pacemaker.yaml | 1
-rw-r--r--  extraconfig/all_nodes/default.yaml | 27
-rw-r--r--  extraconfig/all_nodes/mac_hostname.yaml | 7
-rw-r--r--  extraconfig/all_nodes/random_string.yaml | 6
-rw-r--r--  extraconfig/all_nodes/swap-partition.yaml | 4
-rw-r--r--  extraconfig/all_nodes/swap.yaml | 4
-rw-r--r--  network/endpoints/endpoint_data.yaml | 6
-rw-r--r--  network/endpoints/endpoint_map.yaml | 41
-rw-r--r--  overcloud-resource-registry-puppet.yaml | 5
-rw-r--r--  overcloud.yaml | 6
-rw-r--r--  puppet/ceph-storage.yaml | 7
-rw-r--r--  puppet/cinder-storage.yaml | 7
-rw-r--r--  puppet/compute.yaml | 7
-rw-r--r--  puppet/controller.yaml | 7
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml | 8
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml | 8
-rw-r--r--  puppet/hieradata/controller.yaml | 3
-rw-r--r--  puppet/hieradata/database.yaml | 56
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp | 8
-rw-r--r--  puppet/manifests/overcloud_compute.pp | 4
-rw-r--r--  puppet/manifests/overcloud_controller.pp | 70
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp | 188
-rw-r--r--  puppet/manifests/overcloud_object.pp | 6
-rw-r--r--  puppet/manifests/overcloud_volume.pp | 8
-rw-r--r--  puppet/services/cinder-base.yaml | 6
-rw-r--r--  puppet/services/glance-registry.yaml | 7
-rw-r--r--  puppet/services/heat-engine.yaml | 6
-rw-r--r--  puppet/services/ironic-api.yaml | 1
-rw-r--r--  puppet/services/ironic-base.yaml | 11
-rw-r--r--  puppet/services/ironic-conductor.yaml | 1
-rw-r--r--  puppet/services/kernel.yaml | 18
-rw-r--r--  puppet/services/keystone.yaml | 7
-rw-r--r--  puppet/services/neutron-midonet.yaml | 48
-rw-r--r--  puppet/services/neutron-server.yaml | 9
-rw-r--r--  puppet/services/pacemaker/heat-api-cfn.yaml | 6
-rw-r--r--  puppet/services/pacemaker/heat-api-cloudwatch.yaml | 6
-rw-r--r--  puppet/services/pacemaker/heat-api.yaml | 2
-rw-r--r--  puppet/services/pacemaker/heat-engine.yaml | 6
-rw-r--r--  puppet/services/pacemaker/neutron-midonet.yaml | 28
-rw-r--r--  puppet/services/sahara-engine.yaml | 6
-rw-r--r--  puppet/services/swift-proxy.yaml | 5
-rw-r--r--  puppet/services/time/ntp.yaml | 6
-rw-r--r--  puppet/swift-storage.yaml | 7
45 files changed, 304 insertions(+), 389 deletions(-)
diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml
index 1e330015..3fc07561 100644
--- a/docker/compute-post.yaml
+++ b/docker/compute-post.yaml
@@ -38,6 +38,11 @@ parameters:
NeutronOpenvswitchAgentOvsVolume:
type: string
default: " "
+ StepConfig:
+ type: string
+ description: Config manifests that will be used to step through the deployment.
+ default: ''
+
resources:
@@ -56,7 +61,11 @@ resources:
outputs:
- name: result
config:
- get_file: ../puppet/manifests/overcloud_compute.pp
+ list_join:
+ - ''
+ - - get_file: ../puppet/manifests/overcloud_compute.pp
+ - {get_param: StepConfig}
+
ComputePuppetDeployment:
type: OS::Heat::SoftwareDeployments
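
The new StepConfig parameter lets additional per-service manifest snippets be appended to overcloud_compute.pp when the docker compute post-deploy runs. A minimal environment sketch, assuming it is fed in through parameter_defaults (the include line is only illustrative, borrowed from the kernel service added later in this change):

parameter_defaults:
  StepConfig: |
    include ::tripleo::profile::base::kernel
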
diff --git a/environments/neutron-midonet.yaml b/environments/neutron-midonet.yaml
index 7f50f15b..90b98ae8 100644
--- a/environments/neutron-midonet.yaml
+++ b/environments/neutron-midonet.yaml
@@ -4,6 +4,8 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../net-config-linux-bridge.yaml # We have to avoid any ovs bridge. MidoNet is incompatible with its datapath
OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ # Override the NeutronCorePlugin to use MidoNet
+ OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginMidonet
parameter_defaults:
EnableZookeeperOnController: true
diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml
index d1df6c41..8043ccbf 100644
--- a/environments/puppet-pacemaker.yaml
+++ b/environments/puppet-pacemaker.yaml
@@ -28,6 +28,7 @@ resource_registry:
OS::TripleO::Services::NeutronCorePluginPlumgrid: ../puppet/services/pacemaker/neutron-plugin-plumgrid.yaml
OS::TripleO::Services::NeutronCorePluginNuage: ../puppet/services/pacemaker/neutron-plugin-nuage.yaml
OS::TripleO::Services::NeutronCorePluginOpencontrail: ../puppet/services/pacemaker/neutron-plugin-opencontrail.yaml
+ OS::TripleO::Services::NeutronCorePluginMidonet: ../puppet/services/pacemaker/neutron-midonet.yaml
OS::TripleO::Services::NeutronOvsAgent: ../puppet/services/pacemaker/neutron-ovs-agent.yaml
OS::TripleO::Services::RabbitMQ: ../puppet/services/pacemaker/rabbitmq.yaml
OS::TripleO::Services::HAproxy: ../puppet/services/pacemaker/haproxy.yaml
diff --git a/extraconfig/all_nodes/default.yaml b/extraconfig/all_nodes/default.yaml
deleted file mode 100644
index 68f9eadd..00000000
--- a/extraconfig/all_nodes/default.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
- Noop extra config for allnodes extra cluster config
-
-# Parameters passed from the parent template - note if you maintain
-# out-of-tree templates they may require additional parameters if the
-# in-tree templates add a new role.
-parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
- type: json
-# Note extra parameters can be defined, then passed data via the
-# environment parameter_defaults, without modifying the parent template
-
-outputs:
- # This value should change if the configuration data has changed
- # It is used to e.g re-apply puppet after hieradata values change.
- config_identifier:
- value: none
diff --git a/extraconfig/all_nodes/mac_hostname.yaml b/extraconfig/all_nodes/mac_hostname.yaml
index 5883e06a..7d8704e3 100644
--- a/extraconfig/all_nodes/mac_hostname.yaml
+++ b/extraconfig/all_nodes/mac_hostname.yaml
@@ -113,10 +113,3 @@ resources:
objectstorage_mappings: {get_attr: [CollectMacDeploymentsObjectStorage, deploy_stdouts]}
cephstorage_mappings: {get_attr: [CollectMacDeploymentsCephStorage, deploy_stdouts]}
actions: ['CREATE'] # Only do this on CREATE
-
-outputs:
- # This value should change if the configuration data has changed
- # It is used to e.g re-apply puppet after hieradata values change.
- config_identifier:
- value: {get_attr: [DistributeMacDeploymentsController, deploy_stdouts]}
-
diff --git a/extraconfig/all_nodes/random_string.yaml b/extraconfig/all_nodes/random_string.yaml
index 49d2d8b6..d38701e2 100644
--- a/extraconfig/all_nodes/random_string.yaml
+++ b/extraconfig/all_nodes/random_string.yaml
@@ -57,9 +57,3 @@ resources:
actions: ['CREATE'] # Only do this on CREATE
input_values:
random_value: {get_attr: [Random, value]}
-
-outputs:
- # This value should change if the configuration data has changed
- # It is used to e.g re-apply puppet after hieradata values change.
- config_identifier:
- value: {get_attr: [Random, value]}
diff --git a/extraconfig/all_nodes/swap-partition.yaml b/extraconfig/all_nodes/swap-partition.yaml
index 89a2adb0..e6fa9eca 100644
--- a/extraconfig/all_nodes/swap-partition.yaml
+++ b/extraconfig/all_nodes/swap-partition.yaml
@@ -84,7 +84,3 @@ resources:
input_values:
swap_partition_label: {get_param: swap_partition_label}
actions: ["CREATE"]
-
-outputs:
- config_identifier:
- value: none
diff --git a/extraconfig/all_nodes/swap.yaml b/extraconfig/all_nodes/swap.yaml
index 374b1e5d..5383ffc9 100644
--- a/extraconfig/all_nodes/swap.yaml
+++ b/extraconfig/all_nodes/swap.yaml
@@ -102,7 +102,3 @@ resources:
swap_size_megabytes: {get_param: swap_size_megabytes}
swap_path: {get_param: swap_path}
actions: ["CREATE"]
-
-outputs:
- config_identifier:
- value: none
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
index 53b474de..f1dee045 100644
--- a/network/endpoints/endpoint_data.yaml
+++ b/network/endpoints/endpoint_data.yaml
@@ -66,6 +66,12 @@ Mysql:
protocol: mysql+pymysql
port: 3306
+MysqlNoBrackets:
+ Internal:
+ vip_param: MysqlNoBrackets
+ protocol: mysql+pymysql
+ port: 3306
+
Heat:
Internal:
vip_param: HeatApi
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index 51ff375b..43b9921e 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -18,6 +18,7 @@ parameters:
IronicApiVirtualIP: {type: string, default: ''}
KeystoneAdminApiVirtualIP: {type: string, default: ''}
KeystonePublicApiVirtualIP: {type: string, default: ''}
+ MysqlNoBracketsVirtualIP: {type: string, default: ''}
MysqlVirtualIP: {type: string, default: ''}
NeutronApiVirtualIP: {type: string, default: ''}
NovaApiVirtualIP: {type: string, default: ''}
@@ -57,6 +58,8 @@ parameters:
KeystoneInternal: {protocol: http, port: '5000', host: IP_ADDRESS}
KeystonePublic: {protocol: http, port: '5000', host: IP_ADDRESS}
MysqlInternal: {protocol: mysql+pymysql, port: '3306', host: IP_ADDRESS}
+ MysqlNoBracketsInternal: {protocol: mysql+pymysql, port: '3306',
+ host: IP_ADDRESS}
NeutronAdmin: {protocol: http, port: '9696', host: IP_ADDRESS}
NeutronInternal: {protocol: http, port: '9696', host: IP_ADDRESS}
NeutronPublic: {protocol: http, port: '9696', host: IP_ADDRESS}
@@ -1508,6 +1511,44 @@ outputs:
IP_ADDRESS: {get_param: MysqlVirtualIP}
- ':'
- get_param: [EndpointMap, MysqlInternal, port]
+ MysqlNoBracketsInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, MysqlNoBracketsInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: MysqlNoBracketsVirtualIP}
+ port:
+ get_param: [EndpointMap, MysqlNoBracketsInternal, port]
+ protocol:
+ get_param: [EndpointMap, MysqlNoBracketsInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, MysqlNoBracketsInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, MysqlNoBracketsInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: MysqlNoBracketsVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, MysqlNoBracketsInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, MysqlNoBracketsInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, MysqlNoBracketsInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: MysqlNoBracketsVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, MysqlNoBracketsInternal, port]
NeutronAdmin:
host:
str_replace:
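
Like the existing MysqlInternal entry, the new MysqlNoBracketsInternal output resolves to a map with host, port, protocol, uri and uri_no_suffix keys; with a hypothetical VIP of 192.0.2.10 it would look roughly like:

MysqlNoBracketsInternal:
  host: 192.0.2.10
  port: 3306
  protocol: mysql+pymysql
  uri: mysql+pymysql://192.0.2.10:3306
  uri_no_suffix: mysql+pymysql://192.0.2.10:3306

The only difference from MysqlInternal is that its VIP comes from net_ip_map rather than net_ip_uri_map (see the overcloud.yaml hunk below), so an IPv6 address is not wrapped in brackets and can be reused as a bare host for the db::mysql::host hiera keys.
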
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index 1ef3660f..591b0300 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.yaml
@@ -50,7 +50,8 @@ resource_registry:
# phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
# configuration with knowledge of all nodes in the cluster is required vs single
# node configuration in the pre_deploy step.
- OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/default.yaml
+ # See extraconfig/all_nodes/* for examples
+ OS::TripleO::AllNodesExtraConfig: OS::Heat::None
# TripleO overcloud networks
OS::TripleO::Network: network/networks.yaml
@@ -140,6 +141,7 @@ resource_registry:
OS::TripleO::Services::HeatEngine: puppet/services/heat-engine.yaml
OS::TripleO::Services::IronicApi: puppet/services/ironic-api.yaml
OS::TripleO::Services::IronicConductor: puppet/services/ironic-conductor.yaml
+ OS::TripleO::Services::Kernel: puppet/services/kernel.yaml
OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml
OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml
@@ -149,6 +151,7 @@ resource_registry:
OS::TripleO::Services::NeutronCorePluginPlumgrid: puppet/services/neutron-plugin-plumgrid.yaml
OS::TripleO::Services::NeutronCorePluginNuage: puppet/services/neutron-plugin-nuage.yaml
OS::TripleO::Services::NeutronCorePluginOpencontrail: puppet/services/neutron-plugin-opencontrail.yaml
+ OS::TripleO::Services::NeutronCorePluginMidonet: puppet/services/neutron-midonet.yaml
OS::TripleO::Services::NeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml
OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml
OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml
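
OS::TripleO::AllNodesExtraConfig now defaults to OS::Heat::None instead of the removed default.yaml noop template; deployments that still want one of the example hooks can map it back from an environment file, e.g. to the swap template that remains under extraconfig/all_nodes:

resource_registry:
  OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/swap.yaml
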
diff --git a/overcloud.yaml b/overcloud.yaml
index 32d513be..cc345572 100644
--- a/overcloud.yaml
+++ b/overcloud.yaml
@@ -495,6 +495,7 @@ parameters:
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::GlanceRegistry
@@ -538,6 +539,7 @@ parameters:
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Kernel
description: A list of service resources (configured in the Heat
resource_registry) which represent nested stacks
for each service that should get installed on the Compute Nodes.
@@ -569,6 +571,7 @@ parameters:
BlockStorageServices:
default:
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Snmp
@@ -602,6 +605,7 @@ parameters:
default: {}
ObjectStorageServices:
default:
+ - OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::Snmp
@@ -639,6 +643,7 @@ parameters:
CephStorageServices:
default:
- OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Timezone
description: A list of service resources (configured in the Heat
@@ -756,6 +761,7 @@ resources:
KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
MysqlVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
+ MysqlNoBracketsVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
NovaApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
SaharaApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index caceb0bc..8a43b673 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -91,10 +91,17 @@ parameters:
ServiceConfigSettings:
type: json
default: {}
+ ConfigCommand:
+ type: string
+ description: Command which will be run whenever configuration data changes
+ default: os-refresh-config --timeout 14400
resources:
CephStorage:
type: OS::Nova::Server
+ metadata:
+ os-collect-config:
+ command: {get_param: ConfigCommand}
properties:
image: {get_param: Image}
image_update_policy: {get_param: ImageUpdatePolicy}
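
The ConfigCommand parameter added to each role template feeds the server's os-collect-config metadata, so the refresh command can be tuned per deployment through parameter_defaults; a sketch with a hypothetical shorter timeout:

parameter_defaults:
  ConfigCommand: os-refresh-config --timeout 7200
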
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index ed6afc53..d0f562ed 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -91,10 +91,17 @@ parameters:
ServiceConfigSettings:
type: json
default: {}
+ ConfigCommand:
+ type: string
+ description: Command which will be run whenever configuration data changes
+ default: os-refresh-config --timeout 14400
resources:
BlockStorage:
type: OS::Nova::Server
+ metadata:
+ os-collect-config:
+ command: {get_param: ConfigCommand}
properties:
image:
{get_param: Image}
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 820a2acd..01807f35 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -307,11 +307,18 @@ parameters:
ServiceConfigSettings:
type: json
default: {}
+ ConfigCommand:
+ type: string
+ description: Command which will be run whenever configuration data changes
+ default: os-refresh-config --timeout 14400
resources:
NovaCompute:
type: OS::Nova::Server
+ metadata:
+ os-collect-config:
+ command: {get_param: ConfigCommand}
properties:
image:
{get_param: Image}
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index 86e7a4b8..33729734 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -381,6 +381,10 @@ parameters:
ServiceConfigSettings:
type: json
default: {}
+ ConfigCommand:
+ type: string
+ description: Command which will be run whenever configuration data changes
+ default: os-refresh-config --timeout 14400
parameter_groups:
- label: deprecated
@@ -392,6 +396,9 @@ resources:
Controller:
type: OS::Nova::Server
+ metadata:
+ os-collect-config:
+ command: {get_param: ConfigCommand}
properties:
image: {get_param: Image}
image_update_policy: {get_param: ImageUpdatePolicy}
diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
index 3e455347..aa5c3c43 100644
--- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
@@ -109,11 +109,3 @@ resources:
properties:
config: {get_resource: NetworkMidoNetConfig}
servers: {get_param: compute_servers}
-
-outputs:
- config_identifier:
- value:
- list_join:
- - ' '
- - - {get_attr: [NetworkMidonetDeploymentControllers, deploy_stdouts]}
- - {get_attr: [NetworkMidonetDeploymentComputes, deploy_stdouts]}
diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
index 71445800..e924fc87 100644
--- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
@@ -343,11 +343,3 @@ resources:
input_values:
ucsm_config: {get_param: NetworkUCSMHostList}
actions: ['CREATE'] # Only do this on CREATE
-
-outputs:
- # The Deployment applying the hieradata outputs the derived config-id, which
- # changes if the input_values change, so if the stdouts from
- # NetworkCiscoDeployment change, we need to reapply puppet (which will
- # happen if we return a different config_identifier)
- config_identifier:
- value: {get_attr: [NetworkCiscoDeployment, deploy_stdouts]}
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index f84f7049..66613f0f 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -4,7 +4,7 @@ nova::api::enabled: true
nova::vncproxy::enabled: true
# gnocchi
-gnocchi::db::sync::extra_opts: '--skip-storage'
+gnocchi::db::sync::extra_opts: '--skip-storage --create-legacy-resource-types'
gnocchi::storage::swift::swift_user: 'service:gnocchi'
gnocchi::storage::swift::swift_auth_version: 2
gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26'
@@ -84,6 +84,7 @@ swift::proxy::pipeline:
- 'healthcheck'
- 'cache'
- 'ratelimit'
+ - 'bulk'
- 'tempurl'
- 'formpost'
- 'authtoken'
diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml
index 9cbff586..9b2ea4f4 100644
--- a/puppet/hieradata/database.yaml
+++ b/puppet/hieradata/database.yaml
@@ -13,54 +13,6 @@ nova::db::mysql_api::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
-# Glance
-glance::db::mysql::user: glance
-glance::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-glance::db::mysql::dbname: glance
-glance::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-
-# Keystone
-keystone::db::mysql::user: keystone
-keystone::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-keystone::db::mysql::dbname: keystone
-keystone::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-
-# Neutron
-neutron::db::mysql::user: neutron
-neutron::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-neutron::db::mysql::dbname: ovs_neutron
-neutron::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-
-# Cinder
-cinder::db::mysql::user: cinder
-cinder::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-cinder::db::mysql::dbname: cinder
-cinder::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-
-# Heat
-heat::db::mysql::user: heat
-heat::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-heat::db::mysql::dbname: heat
-heat::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-
-# Ironic
-ironic::db::mysql::user: ironic
-ironic::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-ironic::db::mysql::dbname: ironic
-ironic::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-
# Ceilometer
ceilometer::db::mysql::user: ceilometer
ceilometer::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
@@ -84,11 +36,3 @@ aodh::db::mysql::dbname: aodh
aodh::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
-
-
-sahara::db::mysql::user: sahara
-sahara::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-sahara::db::mysql::dbname: sahara
-sahara::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index af6b0960..152694d9 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -16,14 +16,6 @@
include ::tripleo::packages
include ::tripleo::firewall
-if hiera('step') >= 1 {
-
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
-}
-
if hiera('step') >= 4 {
hiera_include('ceph_classes')
}
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index b8e267fc..b25d62f8 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -16,10 +16,6 @@
include ::tripleo::packages
include ::tripleo::firewall
-create_resources(kmod::load, hiera('kernel_modules'), { })
-create_resources(sysctl::value, hiera('sysctl_settings'), { })
-Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
if hiera('step') >= 4 {
# When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 72c31a31..de84bcf8 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -18,14 +18,6 @@ include ::tripleo::firewall
$enable_load_balancer = hiera('enable_load_balancer', true)
-if hiera('step') >= 1 {
-
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
-}
-
if hiera('step') >= 2 {
# MongoDB
@@ -95,66 +87,6 @@ if hiera('step') >= 4 {
}
include ::nova::config
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
- # TODO(devvesa) provide non-controller ips for these services
- $zookeeper_node_ips = hiera('neutron_api_node_ips')
- $cassandra_node_ips = hiera('neutron_api_node_ips')
-
- # Run zookeeper in the controller if configured
- if hiera('enable_zookeeper_on_controller') {
- class {'::tripleo::cluster::zookeeper':
- zookeeper_server_ips => $zookeeper_node_ips,
- # TODO: create a 'bind' hiera key for zookeeper
- zookeeper_client_ip => hiera('neutron::bind_host'),
- zookeeper_hostnames => hiera('controller_node_names')
- }
- }
-
- # Run cassandra in the controller if configured
- if hiera('enable_cassandra_on_controller') {
- class {'::tripleo::cluster::cassandra':
- cassandra_servers => $cassandra_node_ips,
- # TODO: create a 'bind' hiera key for cassandra
- cassandra_ip => hiera('neutron::bind_host'),
- }
- }
-
- class {'::tripleo::network::midonet::agent':
- zookeeper_servers => $zookeeper_node_ips,
- cassandra_seeds => $cassandra_node_ips
- }
-
- class {'::tripleo::network::midonet::api':
- zookeeper_servers => $zookeeper_node_ips,
- vip => hiera('public_virtual_ip'),
- keystone_ip => hiera('public_virtual_ip'),
- keystone_admin_token => hiera('keystone::admin_token'),
- # TODO: create a 'bind' hiera key for api
- bind_address => hiera('neutron::bind_host'),
- admin_password => hiera('admin_password')
- }
-
- # TODO: find a way to get an empty list from hiera
- # TODO: when doing the composable midonet plugin, don't forget to
- # set service_plugins to an empty array in Hiera.
- class {'::neutron':
- service_plugins => []
- }
-
- }
-
- # If the value of core plugin is set to 'midonet',
- # skip all the ML2 configuration
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
- class {'::neutron::plugins::midonet':
- midonet_api_ip => hiera('public_virtual_ip'),
- keystone_tenant => hiera('neutron::server::auth_tenant'),
- keystone_password => hiera('neutron::server::password')
- }
- }
-
# Ceilometer
$ceilometer_backend = downcase(hiera('ceilometer_backend'))
case $ceilometer_backend {
@@ -200,7 +132,7 @@ if hiera('step') >= 4 {
} else {
$_profile_support = 'None'
}
- $neutron_options = {'profile_support' => $_profile_support }
+ $neutron_options = merge({'profile_support' => $_profile_support },hiera('horizon::neutron_options',undef))
$memcached_ipv6 = hiera('memcached_ipv6', false)
if $memcached_ipv6 {
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 7205002a..d7b1ce54 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -22,9 +22,7 @@ Pcmk_resource <| |> {
Service <|
tag == 'aodh-service' or
tag == 'ceilometer-service' or
- tag == 'gnocchi-service' or
- tag == 'neutron-service' or
- tag == 'nova-service'
+ tag == 'gnocchi-service'
|> {
hasrestart => true,
restart => '/bin/true',
@@ -53,10 +51,6 @@ $non_pcmk_start = hiera('step') >= 5
if hiera('step') >= 1 {
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
$pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
$corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
if $corosync_ipv6 {
@@ -192,48 +186,25 @@ if hiera('step') >= 2 {
require => Class['::mysql::server'],
before => Exec['galera-ready'],
}
- }
- $mysql_root_password = hiera('mysql::server::root_password')
- $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
- # This step is to create a sysconfig clustercheck file with the root user and empty password
- # on the first install only (because later on the clustercheck db user will be used)
- # We are using exec and not file in order to not have duplicate definition errors in puppet
- # when we later set the the file to contain the clustercheck data
- exec { 'create-root-sysconfig-clustercheck':
- command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
- unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
- }
- exec { 'galera-ready' :
- command => '/usr/bin/clustercheck >/dev/null',
- timeout => 30,
- tries => 180,
- try_sleep => 10,
- environment => ['AVAILABLE_WHEN_READONLY=0'],
- require => Exec['create-root-sysconfig-clustercheck'],
- }
+ exec { 'galera-ready' :
+ command => '/usr/bin/clustercheck >/dev/null',
+ timeout => 30,
+ tries => 180,
+ try_sleep => 10,
+ environment => ['AVAILABLE_WHEN_READONLY=0'],
+ require => Exec['create-root-sysconfig-clustercheck'],
+ }
- xinetd::service { 'galera-monitor' :
- port => '9200',
- server => '/usr/bin/clustercheck',
- per_source => 'UNLIMITED',
- log_on_success => '',
- log_on_failure => 'HOST',
- flags => 'REUSE',
- service_type => 'UNLISTED',
- user => 'root',
- group => 'root',
- require => Exec['create-root-sysconfig-clustercheck'],
- }
- # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
- # to it in a later step. We do this only on one node as it will replicate on
- # the other members. We also make sure that the permissions are the minimum necessary
- if $pacemaker_master {
+ # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
+ # to it in a later step. We do this only on one node as it will replicate on
+ # the other members. We also make sure that the permissions are the minimum necessary
mysql_user { 'clustercheck@localhost':
ensure => 'present',
- password_hash => mysql_password($mysql_clustercheck_password),
+ password_hash => mysql_password(hiera('mysql_clustercheck_password')),
require => Exec['galera-ready'],
}
+
mysql_grant { 'clustercheck@localhost/*.*':
ensure => 'present',
options => ['GRANT'],
@@ -241,10 +212,8 @@ if hiera('step') >= 2 {
table => '*.*',
user => 'clustercheck@localhost',
}
- }
- # Create all the database schemas
- if $sync_db {
+ # Create all the database schemas
if downcase(hiera('ceilometer_backend')) == 'mysql' {
class { '::ceilometer::db::mysql':
require => Exec['galera-ready'],
@@ -259,7 +228,28 @@ if hiera('step') >= 2 {
class { '::aodh::db::mysql':
require => Exec['galera-ready'],
- }
+ }
+ }
+ # This step is to create a sysconfig clustercheck file with the root user and empty password
+ # on the first install only (because later on the clustercheck db user will be used)
+ # We are using exec and not file in order to not have duplicate definition errors in puppet
+ # when we later set the the file to contain the clustercheck data
+ exec { 'create-root-sysconfig-clustercheck':
+ command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
+ unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
+ }
+
+ xinetd::service { 'galera-monitor' :
+ port => '9200',
+ server => '/usr/bin/clustercheck',
+ per_source => 'UNLIMITED',
+ log_on_success => '',
+ log_on_failure => 'HOST',
+ flags => 'REUSE',
+ service_type => 'UNLISTED',
+ user => 'root',
+ group => 'root',
+ require => Exec['create-root-sysconfig-clustercheck'],
}
} #END STEP 2
@@ -267,6 +257,7 @@ if hiera('step') >= 2 {
if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
# At this stage we are guaranteed that the clustercheck db user exists
# so we switch the resource agent to use it.
+ $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
file { '/etc/sysconfig/clustercheck' :
ensure => file,
mode => '0600',
@@ -290,63 +281,6 @@ MYSQL_HOST=localhost\n",
include ::nova::config
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
- # TODO(devvesa) provide non-controller ips for these services
- $zookeeper_node_ips = hiera('neutron_api_node_ips')
- $cassandra_node_ips = hiera('neutron_api_node_ips')
-
- # Run zookeeper in the controller if configured
- if hiera('enable_zookeeper_on_controller') {
- class {'::tripleo::cluster::zookeeper':
- zookeeper_server_ips => $zookeeper_node_ips,
- # TODO: create a 'bind' hiera key for zookeeper
- zookeeper_client_ip => hiera('neutron::bind_host'),
- zookeeper_hostnames => split(hiera('controller_node_names'), ',')
- }
- }
-
- # Run cassandra in the controller if configured
- if hiera('enable_cassandra_on_controller') {
- class {'::tripleo::cluster::cassandra':
- cassandra_servers => $cassandra_node_ips,
- # TODO: create a 'bind' hiera key for cassandra
- cassandra_ip => hiera('neutron::bind_host'),
- }
- }
-
- class {'::tripleo::network::midonet::agent':
- zookeeper_servers => $zookeeper_node_ips,
- cassandra_seeds => $cassandra_node_ips
- }
-
- class {'::tripleo::network::midonet::api':
- zookeeper_servers => $zookeeper_node_ips,
- vip => hiera('public_virtual_ip'),
- keystone_ip => hiera('public_virtual_ip'),
- keystone_admin_token => hiera('keystone::admin_token'),
- # TODO: create a 'bind' hiera key for api
- bind_address => hiera('neutron::bind_host'),
- admin_password => hiera('admin_password')
- }
-
- # Configure Neutron
- # TODO: when doing the composable midonet plugin, don't forget to
- # set service_plugins to an empty array in Hiera.
- class {'::neutron':
- service_plugins => []
- }
-
- }
-
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- class {'::neutron::plugins::midonet':
- midonet_api_ip => hiera('public_virtual_ip'),
- keystone_tenant => hiera('neutron::server::auth_tenant'),
- keystone_password => hiera('neutron::server::password')
- }
- }
-
# Ceilometer
case downcase(hiera('ceilometer_backend')) {
/mysql/: {
@@ -398,7 +332,7 @@ MYSQL_HOST=localhost\n",
} else {
$_profile_support = 'None'
}
- $neutron_options = {'profile_support' => $_profile_support }
+ $neutron_options = merge({'profile_support' => $_profile_support },hiera('horizon::neutron_options',undef))
$memcached_ipv6 = hiera('memcached_ipv6', false)
if $memcached_ipv6 {
@@ -482,6 +416,7 @@ if hiera('step') >= 5 {
# password. On second runs or updates /root/.my.cnf will already be populated
# with proper credentials. This step happens on every node because this sql
# statement does not automatically replicate across nodes.
+ $mysql_root_password = hiera('mysql::server::root_password')
exec { 'galera-set-root-password':
command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
}
@@ -527,49 +462,6 @@ password=\"${mysql_root_password}\"",
Pacemaker::Resource::Ocf['openstack-core']],
}
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- pacemaker::resource::service {'tomcat':
- clone_params => 'interleave=true',
- }
- }
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
- pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::server_service}-clone",
- second_resource => "${::neutron::params::dhcp_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
- }
- pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::dhcp_agent_service}-clone",
- second_resource => "${::neutron::params::metadata_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
- }
- pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::metadata_agent_service}-clone",
- second_resource => 'tomcat-clone',
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
- Pacemaker::Resource::Service['tomcat']],
- }
- pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
- source => "${::neutron::params::metadata_agent_service}-clone",
- target => "${::neutron::params::dhcp_agent_service}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
- }
- }
-
# Nova
pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
constraint_type => 'order',
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
index 418c56e6..1f04c581 100644
--- a/puppet/manifests/overcloud_object.pp
+++ b/puppet/manifests/overcloud_object.pp
@@ -16,12 +16,6 @@
include ::tripleo::packages
include ::tripleo::firewall
-if hiera('step') >= 1 {
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-}
-
if hiera('step') >= 4 {
hiera_include('object_classes')
}
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index c6234bd3..7c7da586 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -16,14 +16,6 @@
include ::tripleo::packages
include ::tripleo::firewall
-if hiera('step') >= 1 {
-
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
-}
-
if hiera('step') >= 4 {
hiera_include('volume_classes')
}
diff --git a/puppet/services/cinder-base.yaml b/puppet/services/cinder-base.yaml
index 85682448..f6d2b645 100644
--- a/puppet/services/cinder-base.yaml
+++ b/puppet/services/cinder-base.yaml
@@ -56,3 +56,9 @@ outputs:
cinder::rabbit_userid: {get_param: RabbitUserName}
cinder::rabbit_password: {get_param: RabbitPassword}
cinder::rabbit_port: {get_param: RabbitClientPort}
+ cinder::db::mysql::user: cinder
+ cinder::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]}
+ cinder::db::mysql::dbname: cinder
+ cinder::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/glance-registry.yaml b/puppet/services/glance-registry.yaml
index 6f2f0372..d71157f9 100644
--- a/puppet/services/glance-registry.yaml
+++ b/puppet/services/glance-registry.yaml
@@ -41,5 +41,12 @@ outputs:
glance::registry::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
glance::registry::debug: {get_param: Debug}
glance::registry::workers: {get_param: GlanceWorkers}
+ glance::db::mysql::user: glance
+ glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]}
+ glance::db::mysql::dbname: glance
+ glance::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+
step_config: |
include ::tripleo::profile::base::glance::registry
diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml
index 4a5ec2c0..77af55ef 100644
--- a/puppet/services/heat-engine.yaml
+++ b/puppet/services/heat-engine.yaml
@@ -54,5 +54,11 @@ outputs:
heat::keystone_password: {get_param: HeatPassword}
heat::db::mysql::password: {get_param: HeatPassword}
heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
+ heat::db::mysql::user: heat
+ heat::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]}
+ heat::db::mysql::dbname: heat
+ heat::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
step_config: |
include ::tripleo::profile::base::heat::engine
diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml
index e1626d5b..5ab03fcb 100644
--- a/puppet/services/ironic-api.yaml
+++ b/puppet/services/ironic-api.yaml
@@ -38,6 +38,5 @@ outputs:
ironic::keystone::auth::internal_url: {get_param: [EndpointMap, IronicInternal, uri]}
ironic::keystone::auth::admin_url: {get_param: [EndpointMap, IronicAdmin, uri]}
ironic::keystone::auth::password: {get_param: IronicPassword }
-
step_config: |
include ::tripleo::profile::base::ironic::api
diff --git a/puppet/services/ironic-base.yaml b/puppet/services/ironic-base.yaml
index 0eaa53cb..df82bb6c 100644
--- a/puppet/services/ironic-base.yaml
+++ b/puppet/services/ironic-base.yaml
@@ -41,7 +41,7 @@ outputs:
description: Role data for the Ironic role.
value:
config_settings:
- ironic_dsn: &ironic_dsn
+ ironic::database_connection:
list_join:
- ''
- - {get_param: [EndpointMap, MysqlInternal, protocol]}
@@ -51,14 +51,19 @@ outputs:
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ironic'
ironic::admin_tenant_name: 'service'
- ironic::database_connection: *ironic_dsn
ironic::debug: {get_param: Debug}
ironic::rabbit_userid: {get_param: RabbitUserName}
ironic::rabbit_password: {get_param: RabbitPassword}
ironic::rabbit_port: {get_param: RabbitClientPort}
ironic::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
ironic::db::mysql::password: {get_param: IronicPassword}
- ironic::keystone::auth::tenant: 'service'
+ ironic::db::mysql::user: ironic
+ ironic::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]}
+ ironic::db::mysql::dbname: ironic
+ ironic::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+ ironic::keystone::auth::tenant: 'service'
step_config: |
include ::tripleo::profile::base::ironic
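
With the intermediate ironic_dsn anchor gone, the list_join feeds ironic::database_connection directly. Assuming a MysqlInternal host of 192.0.2.10 (hypothetical), the resulting hiera value has this shape:

ironic::database_connection: mysql+pymysql://ironic:<IronicPassword>@192.0.2.10/ironic
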
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index 3fb3d9fd..26d4e0ed 100644
--- a/puppet/services/ironic-conductor.yaml
+++ b/puppet/services/ironic-conductor.yaml
@@ -31,6 +31,5 @@ outputs:
# Prevent tftp_server from defaulting to my_ip setting, which is
# controller VIP, not a real IP.
ironic::drivers::pxe::tftp_server: {get_input: ironic_api_network}
-
step_config: |
include ::tripleo::profile::base::ironic::conductor
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
new file mode 100644
index 00000000..b429c5ea
--- /dev/null
+++ b/puppet/services/kernel.yaml
@@ -0,0 +1,18 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Load kernel modules with kmod and configure kernel options with sysctl.
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the Kernel modules
+ value:
+ step_config: |
+ include ::tripleo::profile::base::kernel
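
The kmod/sysctl handling removed from the role manifests earlier in this diff moves behind ::tripleo::profile::base::kernel, which presumably keeps reading the same kernel_modules and sysctl_settings hiera keys the dropped create_resources calls used; a hypothetical hieradata sketch of the structure those keys take:

kernel_modules:
  nf_conntrack: {}
sysctl_settings:
  net.ipv4.ip_forward:
    value: 1
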
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index 25d92d4a..0ad6025c 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -128,5 +128,12 @@ outputs:
keystone::public_workers: {get_param: KeystoneWorkers}
keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
keystone::public_endpoint: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]}
+ keystone::db::mysql::user: keystone
+ keystone::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]}
+ keystone::db::mysql::dbname: keystone
+ keystone::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+
step_config: |
include ::tripleo::profile::base::keystone
diff --git a/puppet/services/neutron-midonet.yaml b/puppet/services/neutron-midonet.yaml
new file mode 100644
index 00000000..736c01c3
--- /dev/null
+++ b/puppet/services/neutron-midonet.yaml
@@ -0,0 +1,48 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Midonet plugin and services
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NeutronPassword:
+ description: The password for the neutron service and db account, used by neutron agents.
+ type: string
+ hidden: true
+ AdminPassword:
+ description: The password for the keystone admin account, used for monitoring, querying neutron etc.
+ type: string
+ hidden: true
+ AdminToken:
+ description: The keystone auth secret and db password.
+ type: string
+ hidden: true
+ EnableZookeeperOnController:
+ label: Enable Zookeeper On Controller
+ description: 'Whether to enable the Zookeeper cluster on the Controller'
+ type: boolean
+ default: false
+ EnableCassandraOnController:
+ label: Enable Cassandra On Controller
+ description: 'Whether to enable the Cassandra cluster on the Controller'
+ type: boolean
+ default: false
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Midonet plugin and services
+ value:
+ config_settings:
+ tripleo::profile::base::neutron::midonet::admin_password: {get_param: AdminPassword}
+ tripleo::profile::base::neutron::midonet::keystone_admin_token: {get_param: AdminToken}
+ tripleo::profile::base::neutron::midonet::neutron_auth_password: {get_param: NeutronPassword}
+ tripleo::profile::base::neutron::midonet::zk_on_controller: {get_param: EnableZookeeperOnController}
+ tripleo::profile::base::neutron::midonet::neutron_auth_tenant: 'service'
+ enable_cassandra_on_controller: {get_param: EnableCassandraOnController}
+ neutron::service_plugins: []
+ step_config: |
+ include tripleo::profile::base::neutron::plugins::midonet
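
The composable MidoNet service exposes the Zookeeper and Cassandra toggles as Heat parameters, so together with the registry override in environments/neutron-midonet.yaml, enabling both clusters on the controllers is a parameter_defaults override like:

parameter_defaults:
  EnableZookeeperOnController: true
  EnableCassandraOnController: true
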
diff --git a/puppet/services/neutron-server.yaml b/puppet/services/neutron-server.yaml
index 6299c39e..d759d420 100644
--- a/puppet/services/neutron-server.yaml
+++ b/puppet/services/neutron-server.yaml
@@ -42,7 +42,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- neutron_dsn: &neutron_dsn
+ neutron::server::database_connection:
list_join:
- ''
- - {get_param: [EndpointMap, MysqlInternal, protocol]}
@@ -54,7 +54,6 @@ outputs:
neutron::server::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
neutron::server::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
neutron::server::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
- neutron::server::database_connection: *neutron_dsn
neutron::server::api_workers: {get_param: NeutronWorkers}
neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
neutron::server::l3_ha: {get_param: NeutronL3HA}
@@ -66,5 +65,11 @@ outputs:
neutron::server::notifications::project_name: 'service'
neutron::server::notifications::password: {get_param: NovaPassword}
neutron::db::mysql::password: {get_param: NeutronPassword}
+ neutron::db::mysql::user: neutron
+ neutron::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]}
+ neutron::db::mysql::dbname: ovs_neutron
+ neutron::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
step_config: |
include tripleo::profile::base::neutron::server
diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml
index 5833c42d..780c295e 100644
--- a/puppet/services/pacemaker/heat-api-cfn.yaml
+++ b/puppet/services/pacemaker/heat-api-cfn.yaml
@@ -25,7 +25,5 @@ outputs:
- get_attr: [HeatApiCfnBase, role_data, config_settings]
- heat::api_cfn::manage_service: false
heat::api_cfn::enabled: false
- step_config:
- # No puppet manifests since heat-api-cfn is included in
- # ::tripleo::profile::pacemaker::heat which is maintained alongside of
- # pacemaker/heat-api.yaml.
+ step_config: |
+ include ::tripleo::profile::pacemaker::heat::api_cfn
diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
index 8b67702c..2fa82fe7 100644
--- a/puppet/services/pacemaker/heat-api-cloudwatch.yaml
+++ b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
@@ -25,7 +25,5 @@ outputs:
- get_attr: [HeatApiCloudwatchBase, role_data, config_settings]
- heat::api_cloudwatch::manage_service: false
heat::api_cloudwatch::enabled: false
- step_config:
- # No puppet manifests since heat-api-cloudwatch is included in
- # ::tripleo::profile::pacemaker::heat which is maintained alongside of
- # pacemaker/heat-api.yaml.
+ step_config: |
+ include ::tripleo::profile::pacemaker::heat::api_cloudwatch
diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml
index 6628e8dd..be897a55 100644
--- a/puppet/services/pacemaker/heat-api.yaml
+++ b/puppet/services/pacemaker/heat-api.yaml
@@ -26,4 +26,4 @@ outputs:
- heat::api::manage_service: false
heat::api::enabled: false
step_config: |
- include ::tripleo::profile::pacemaker::heat
+ include ::tripleo::profile::pacemaker::heat::api
diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml
index e1195780..a8ed5c0c 100644
--- a/puppet/services/pacemaker/heat-engine.yaml
+++ b/puppet/services/pacemaker/heat-engine.yaml
@@ -26,7 +26,5 @@ outputs:
- get_attr: [HeatEngineBase, role_data, config_settings]
- heat::engine::manage_service: false
heat::engine::enabled: false
- step_config:
- # No puppet manifests since heat-engine is included in
- # ::tripleo::profile::pacemaker::heat which is maintained alongside of
- # pacemaker/heat-api.yaml.
+ step_config: |
+ include ::tripleo::profile::pacemaker::heat::engine
diff --git a/puppet/services/pacemaker/neutron-midonet.yaml b/puppet/services/pacemaker/neutron-midonet.yaml
new file mode 100644
index 00000000..f9fd992c
--- /dev/null
+++ b/puppet/services/pacemaker/neutron-midonet.yaml
@@ -0,0 +1,28 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Midonet with Pacemaker configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+ NeutronMidonetBase:
+ type: ../neutron-midonet.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Midonet plugin.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronMidonetBase, role_data, config_settings]
+ step_config: |
+ include ::tripleo::profile::pacemaker::neutron::plugins::midonet
diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml
index 17ef49fa..f0411a35 100644
--- a/puppet/services/sahara-engine.yaml
+++ b/puppet/services/sahara-engine.yaml
@@ -37,5 +37,11 @@ outputs:
- '/sahara'
sahara::database_connection: *sahara_dsn
sahara::db::mysql::password: {get_param: SaharaPassword}
+ sahara::db::mysql::user: sahara
+ sahara::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]}
+ sahara::db::mysql::dbname: sahara
+ sahara::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
step_config: |
include ::tripleo::profile::base::sahara::engine
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index a86aeaf5..930b9e3d 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -17,6 +17,10 @@ parameters:
description: The password for the swift service account, used by the swift proxy services.
type: string
hidden: true
+ SwiftProxyNodeTimeout:
+ default: 60
+ description: Timeout for requests going from swift-proxy to swift a/c/o services.
+ type: number
SwiftWorkers:
default: 0
description: Number of workers for Swift service.
@@ -36,6 +40,7 @@ outputs:
swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
swift::proxy::authtoken::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
swift::proxy::authtoken::admin_password: {get_param: SwiftPassword}
+ swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
swift::proxy::workers: {get_param: SwiftWorkers}
swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]}
swift::keystone::auth::internal_url: {get_param: [EndpointMap, SwiftInternal, uri]}
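
SwiftProxyNodeTimeout maps straight onto swift::proxy::node_timeout, so raising it for slow account/container/object backends is a one-line override (120 is an arbitrary example value):

parameter_defaults:
  SwiftProxyNodeTimeout: 120
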
diff --git a/puppet/services/time/ntp.yaml b/puppet/services/time/ntp.yaml
index dbef6f91..930dca41 100644
--- a/puppet/services/time/ntp.yaml
+++ b/puppet/services/time/ntp.yaml
@@ -12,7 +12,7 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- NtpServers:
+ NtpServer:
default: []
description: NTP servers
type: comma_delimited_list
@@ -22,6 +22,6 @@ outputs:
description: Role ntp using composable services.
value:
config_settings:
- ntp::ntpservers: {get_param: NtpServers}
+ ntp::ntpservers: {get_param: NtpServer}
step_config: |
- include ::ntp
\ No newline at end of file
+ include ::ntp
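
Deployments that set the old NtpServers parameter need to switch to the renamed NtpServer; as a comma_delimited_list it can be supplied as a YAML list (the server names here are placeholders):

parameter_defaults:
  NtpServer: ['0.pool.ntp.org', '1.pool.ntp.org']
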
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index e663fb6d..dc274dcd 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -112,11 +112,18 @@ parameters:
ServiceConfigSettings:
type: json
default: {}
+ ConfigCommand:
+ type: string
+ description: Command which will be run whenever configuration data changes
+ default: os-refresh-config --timeout 14400
resources:
SwiftStorage:
type: OS::Nova::Server
+ metadata:
+ os-collect-config:
+ command: {get_param: ConfigCommand}
properties:
image: {get_param: Image}
flavor: {get_param: Flavor}