-rw-r--r--  docker/compute-post.yaml | 17
-rw-r--r--  environments/neutron-midonet.yaml | 2
-rw-r--r--  environments/puppet-pacemaker.yaml | 1
-rw-r--r--  extraconfig/all_nodes/default.yaml | 27
-rw-r--r--  extraconfig/all_nodes/mac_hostname.yaml | 7
-rw-r--r--  extraconfig/all_nodes/random_string.yaml | 6
-rw-r--r--  extraconfig/all_nodes/swap-partition.yaml | 4
-rw-r--r--  extraconfig/all_nodes/swap.yaml | 4
-rw-r--r--  overcloud-resource-registry-puppet.yaml | 5
-rw-r--r--  overcloud.yaml | 27
-rw-r--r--  puppet/ceph-storage-post.yaml | 12
-rw-r--r--  puppet/ceph-storage.yaml | 9
-rw-r--r--  puppet/cinder-storage-post.yaml | 12
-rw-r--r--  puppet/cinder-storage.yaml | 8
-rw-r--r--  puppet/compute-post.yaml | 14
-rw-r--r--  puppet/compute.yaml | 9
-rw-r--r--  puppet/controller-post.yaml | 20
-rw-r--r--  puppet/controller.yaml | 10
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml | 8
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml | 8
-rw-r--r--  puppet/hieradata/controller.yaml | 2
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp | 8
-rw-r--r--  puppet/manifests/overcloud_compute.pp | 4
-rw-r--r--  puppet/manifests/overcloud_controller.pp | 70
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp | 188
-rw-r--r--  puppet/manifests/overcloud_object.pp | 6
-rw-r--r--  puppet/manifests/overcloud_volume.pp | 40
-rw-r--r--  puppet/services/kernel.yaml | 18
-rw-r--r--  puppet/services/neutron-midonet.yaml | 48
-rw-r--r--  puppet/services/pacemaker/heat-api-cfn.yaml | 6
-rw-r--r--  puppet/services/pacemaker/heat-api-cloudwatch.yaml | 6
-rw-r--r--  puppet/services/pacemaker/heat-api.yaml | 2
-rw-r--r--  puppet/services/pacemaker/heat-engine.yaml | 6
-rw-r--r--  puppet/services/pacemaker/neutron-midonet.yaml | 28
-rw-r--r--  puppet/services/swift-proxy.yaml | 5
-rw-r--r--  puppet/services/time/ntp.yaml | 6
-rw-r--r--  puppet/swift-storage-post.yaml | 14
-rw-r--r--  puppet/swift-storage.yaml | 8
38 files changed, 213 insertions, 462 deletions
diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml
index 3c4a9413..3fc07561 100644
--- a/docker/compute-post.yaml
+++ b/docker/compute-post.yaml
@@ -5,8 +5,8 @@ description: >
parameters:
servers:
type: json
- NodeConfigIdentifiers:
- type: json
+ DeployIdentifier:
+ type: string
description: Value which changes if the node configuration may need to be re-applied
DockerNamespace:
type: string
@@ -38,6 +38,11 @@ parameters:
NeutronOpenvswitchAgentOvsVolume:
type: string
default: " "
+ StepConfig:
+ type: string
+ description: Config manifests that will be used to step through the deployment.
+ default: ''
+
resources:
@@ -56,7 +61,11 @@ resources:
outputs:
- name: result
config:
- get_file: ../puppet/manifests/overcloud_compute.pp
+ list_join:
+ - ''
+ - - get_file: ../puppet/manifests/overcloud_compute.pp
+ - {get_param: StepConfig}
+
ComputePuppetDeployment:
type: OS::Heat::SoftwareDeployments
@@ -65,7 +74,7 @@ resources:
servers: {get_param: servers}
config: {get_resource: ComputePuppetConfig}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
tripleo::packages::enable_install: True
CopyEtcConfig:
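
Note on the new StepConfig parameter: the base compute manifest is now concatenated (via the list_join above) with whatever step_config snippets the enabled composable services emit. Roughly, the joined value ends up being a newline-separated series of Puppet includes; a purely illustrative sketch, using service profiles that appear elsewhere in this change:

    StepConfig: |
      include ::tripleo::profile::base::kernel
      include ::ntp
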
diff --git a/environments/neutron-midonet.yaml b/environments/neutron-midonet.yaml
index 7f50f15b..90b98ae8 100644
--- a/environments/neutron-midonet.yaml
+++ b/environments/neutron-midonet.yaml
@@ -4,6 +4,8 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../net-config-linux-bridge.yaml # We have to avoid any ovs bridge. MidoNet is incompatible with its datapath
OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ # Override the NeutronCorePlugin to use MidoNet
+ OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginMidonet
parameter_defaults:
EnableZookeeperOnController: true
diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml
index d1df6c41..8043ccbf 100644
--- a/environments/puppet-pacemaker.yaml
+++ b/environments/puppet-pacemaker.yaml
@@ -28,6 +28,7 @@ resource_registry:
OS::TripleO::Services::NeutronCorePluginPlumgrid: ../puppet/services/pacemaker/neutron-plugin-plumgrid.yaml
OS::TripleO::Services::NeutronCorePluginNuage: ../puppet/services/pacemaker/neutron-plugin-nuage.yaml
OS::TripleO::Services::NeutronCorePluginOpencontrail: ../puppet/services/pacemaker/neutron-plugin-opencontrail.yaml
+ OS::TripleO::Services::NeutronCorePluginMidonet: ../puppet/services/pacemaker/neutron-midonet.yaml
OS::TripleO::Services::NeutronOvsAgent: ../puppet/services/pacemaker/neutron-ovs-agent.yaml
OS::TripleO::Services::RabbitMQ: ../puppet/services/pacemaker/rabbitmq.yaml
OS::TripleO::Services::HAproxy: ../puppet/services/pacemaker/haproxy.yaml
diff --git a/extraconfig/all_nodes/default.yaml b/extraconfig/all_nodes/default.yaml
deleted file mode 100644
index 68f9eadd..00000000
--- a/extraconfig/all_nodes/default.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
- Noop extra config for allnodes extra cluster config
-
-# Parameters passed from the parent template - note if you maintain
-# out-of-tree templates they may require additional parameters if the
-# in-tree templates add a new role.
-parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
- type: json
-# Note extra parameters can be defined, then passed data via the
-# environment parameter_defaults, without modifying the parent template
-
-outputs:
- # This value should change if the configuration data has changed
- # It is used to e.g re-apply puppet after hieradata values change.
- config_identifier:
- value: none
diff --git a/extraconfig/all_nodes/mac_hostname.yaml b/extraconfig/all_nodes/mac_hostname.yaml
index 5883e06a..7d8704e3 100644
--- a/extraconfig/all_nodes/mac_hostname.yaml
+++ b/extraconfig/all_nodes/mac_hostname.yaml
@@ -113,10 +113,3 @@ resources:
objectstorage_mappings: {get_attr: [CollectMacDeploymentsObjectStorage, deploy_stdouts]}
cephstorage_mappings: {get_attr: [CollectMacDeploymentsCephStorage, deploy_stdouts]}
actions: ['CREATE'] # Only do this on CREATE
-
-outputs:
- # This value should change if the configuration data has changed
- # It is used to e.g re-apply puppet after hieradata values change.
- config_identifier:
- value: {get_attr: [DistributeMacDeploymentsController, deploy_stdouts]}
-
diff --git a/extraconfig/all_nodes/random_string.yaml b/extraconfig/all_nodes/random_string.yaml
index 49d2d8b6..d38701e2 100644
--- a/extraconfig/all_nodes/random_string.yaml
+++ b/extraconfig/all_nodes/random_string.yaml
@@ -57,9 +57,3 @@ resources:
actions: ['CREATE'] # Only do this on CREATE
input_values:
random_value: {get_attr: [Random, value]}
-
-outputs:
- # This value should change if the configuration data has changed
- # It is used to e.g re-apply puppet after hieradata values change.
- config_identifier:
- value: {get_attr: [Random, value]}
diff --git a/extraconfig/all_nodes/swap-partition.yaml b/extraconfig/all_nodes/swap-partition.yaml
index 89a2adb0..e6fa9eca 100644
--- a/extraconfig/all_nodes/swap-partition.yaml
+++ b/extraconfig/all_nodes/swap-partition.yaml
@@ -84,7 +84,3 @@ resources:
input_values:
swap_partition_label: {get_param: swap_partition_label}
actions: ["CREATE"]
-
-outputs:
- config_identifier:
- value: none
diff --git a/extraconfig/all_nodes/swap.yaml b/extraconfig/all_nodes/swap.yaml
index 374b1e5d..5383ffc9 100644
--- a/extraconfig/all_nodes/swap.yaml
+++ b/extraconfig/all_nodes/swap.yaml
@@ -102,7 +102,3 @@ resources:
swap_size_megabytes: {get_param: swap_size_megabytes}
swap_path: {get_param: swap_path}
actions: ["CREATE"]
-
-outputs:
- config_identifier:
- value: none
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index 1ef3660f..591b0300 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.yaml
@@ -50,7 +50,8 @@ resource_registry:
# phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
# configuration with knowledge of all nodes in the cluster is required vs single
# node configuration in the pre_deploy step.
- OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/default.yaml
+ # See extraconfig/all_nodes/* for examples
+ OS::TripleO::AllNodesExtraConfig: OS::Heat::None
# TripleO overcloud networks
OS::TripleO::Network: network/networks.yaml
@@ -140,6 +141,7 @@ resource_registry:
OS::TripleO::Services::HeatEngine: puppet/services/heat-engine.yaml
OS::TripleO::Services::IronicApi: puppet/services/ironic-api.yaml
OS::TripleO::Services::IronicConductor: puppet/services/ironic-conductor.yaml
+ OS::TripleO::Services::Kernel: puppet/services/kernel.yaml
OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml
OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml
@@ -149,6 +151,7 @@ resource_registry:
OS::TripleO::Services::NeutronCorePluginPlumgrid: puppet/services/neutron-plugin-plumgrid.yaml
OS::TripleO::Services::NeutronCorePluginNuage: puppet/services/neutron-plugin-nuage.yaml
OS::TripleO::Services::NeutronCorePluginOpencontrail: puppet/services/neutron-plugin-opencontrail.yaml
+ OS::TripleO::Services::NeutronCorePluginMidonet: puppet/services/neutron-midonet.yaml
OS::TripleO::Services::NeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml
OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml
OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml
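
With OS::TripleO::AllNodesExtraConfig now defaulting to OS::Heat::None, the all-nodes hook is a no-op unless an environment re-points it at one of the example templates kept in extraconfig/all_nodes/. A minimal sketch, assuming the swap example shipped in this tree:

    resource_registry:
      OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/swap.yaml
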
diff --git a/overcloud.yaml b/overcloud.yaml
index e6f23d8c..11ea1bdb 100644
--- a/overcloud.yaml
+++ b/overcloud.yaml
@@ -504,6 +504,7 @@ parameters:
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::GlanceRegistry
@@ -547,6 +548,7 @@ parameters:
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Kernel
description: A list of service resources (configured in the Heat
resource_registry) which represent nested stacks
for each service that should get installed on the Compute Nodes.
@@ -578,8 +580,10 @@ parameters:
BlockStorageServices:
default:
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Snmp
description: A list of service resources (configured in the Heat
resource_registry) which represent nested stacks
for each service that should get installed on the BlockStorage nodes.
@@ -610,6 +614,7 @@ parameters:
default: {}
ObjectStorageServices:
default:
+ - OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::Snmp
@@ -647,6 +652,7 @@ parameters:
CephStorageServices:
default:
- OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Timezone
description: A list of service resources (configured in the Heat
@@ -772,7 +778,6 @@ resources:
ControllerServiceChain:
type: OS::TripleO::Services
- depends_on: Networks
properties:
Services: {get_param: ControllerServices}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
@@ -1392,10 +1397,6 @@ resources:
depends_on: [ControllerBootstrapNodeDeployment, ControllerAllNodesDeployment, ControllerSwiftDeployment, ControllerCephDeployment]
properties:
servers: {get_attr: [Controller, attributes, nova_server_resource]}
- NodeConfigIdentifiers:
- allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
- controller_config: {get_attr: [Controller, attributes, config_identifier]}
- deployment_identifier: {get_param: DeployIdentifier}
RoleData: {get_attr: [ControllerServiceChain, role_data]}
ComputeNodesPostDeployment:
@@ -1403,10 +1404,6 @@ resources:
depends_on: [ComputeAllNodesDeployment, ComputeCephDeployment]
properties:
servers: {get_attr: [Compute, attributes, nova_server_resource]}
- NodeConfigIdentifiers:
- allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
- compute_config: {get_attr: [Compute, attributes, config_identifier]}
- deployment_identifier: {get_param: DeployIdentifier}
RoleData: {get_attr: [ComputeServiceChain, role_data]}
ObjectStorageNodesPostDeployment:
@@ -1414,10 +1411,6 @@ resources:
depends_on: [ObjectStorageSwiftDeployment, ObjectStorageAllNodesDeployment]
properties:
servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
- NodeConfigIdentifiers:
- allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
- objectstorage_config: {get_attr: [ObjectStorage, attributes, config_identifier]}
- deployment_identifier: {get_param: DeployIdentifier}
RoleData: {get_attr: [ObjectStorageServiceChain, role_data]}
BlockStorageNodesPostDeployment:
@@ -1425,10 +1418,6 @@ resources:
depends_on: [ControllerNodesPostDeployment, BlockStorageAllNodesDeployment]
properties:
servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
- NodeConfigIdentifiers:
- allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
- blockstorage_config: {get_attr: [BlockStorage, attributes, config_identifier]}
- deployment_identifier: {get_param: DeployIdentifier}
RoleData: {get_attr: [BlockStorageServiceChain, role_data]}
CephStorageNodesPostDeployment:
@@ -1436,10 +1425,6 @@ resources:
depends_on: [ControllerNodesPostDeployment, CephStorageCephDeployment, CephStorageAllNodesDeployment]
properties:
servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
- NodeConfigIdentifiers:
- allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
- cephstorage_config: {get_attr: [CephStorage, attributes, config_identifier]}
- deployment_identifier: {get_param: DeployIdentifier}
RoleData: {get_attr: [CephStorageServiceChain, role_data]}
outputs:
diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml
index edeb2d93..70baeb6e 100644
--- a/puppet/ceph-storage-post.yaml
+++ b/puppet/ceph-storage-post.yaml
@@ -10,12 +10,12 @@ parameters:
type: boolean
servers:
type: json
- NodeConfigIdentifiers:
- type: json
- description: Value which changes if the node configuration may need to be re-applied
RoleData:
type: json
default: {}
+ DeployIdentifier:
+ type: string
+ description: Value which changes if the node configuration may need to be re-applied
resources:
@@ -28,7 +28,7 @@ resources:
servers: {get_param: servers}
config: {get_resource: CephStorageArtifactsConfig}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
CephStoragePuppetConfig:
type: OS::Heat::SoftwareConfig
@@ -57,7 +57,7 @@ resources:
config: {get_resource: CephStoragePuppetConfig}
input_values:
step: 2
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
CephStorageDeployment_Step3:
type: OS::Heat::StructuredDeployments
@@ -68,7 +68,7 @@ resources:
config: {get_resource: CephStoragePuppetConfig}
input_values:
step: 3
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index d4234921..f461a265 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -385,12 +385,3 @@ outputs:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value:
- list_join:
- - ','
- - - {get_attr: [CephStorageDeployment, deploy_stdout]}
- - {get_attr: [NodeTLSCAData, deploy_stdout]}
- - {get_attr: [CephStorageExtraConfigPre, deploy_stdout]}
- - {get_param: UpdateIdentifier}
diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml
index 4de141f2..c3dd403e 100644
--- a/puppet/cinder-storage-post.yaml
+++ b/puppet/cinder-storage-post.yaml
@@ -8,8 +8,8 @@ parameters:
type: boolean
servers:
type: json
- NodeConfigIdentifiers:
- type: json
+ DeployIdentifier:
+ type: string
description: Value which changes if the node configuration may need to be re-applied
RoleData:
type: json
@@ -26,7 +26,7 @@ resources:
servers: {get_param: servers}
config: {get_resource: VolumeArtifactsConfig}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
VolumePuppetConfig:
type: OS::Heat::SoftwareConfig
@@ -56,7 +56,7 @@ resources:
config: {get_resource: VolumePuppetConfig}
input_values:
step: 2
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
VolumeDeployment_Step3:
type: OS::Heat::StructuredDeployments
@@ -67,7 +67,7 @@ resources:
config: {get_resource: VolumePuppetConfig}
input_values:
step: 3
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
VolumeDeployment_Step4:
type: OS::Heat::StructuredDeployments
@@ -78,7 +78,7 @@ resources:
config: {get_resource: VolumePuppetConfig}
input_values:
step: 4
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index 0bac1a40..0cbc4a27 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -386,11 +386,3 @@ outputs:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value:
- list_join:
- - ''
- - - {get_attr: [BlockStorageDeployment, deploy_stdout]}
- - {get_attr: [NodeTLSCAData, deploy_stdout]}
- - {get_param: UpdateIdentifier}
diff --git a/puppet/compute-post.yaml b/puppet/compute-post.yaml
index 2033c4b4..c1b37772 100644
--- a/puppet/compute-post.yaml
+++ b/puppet/compute-post.yaml
@@ -10,12 +10,12 @@ parameters:
type: boolean
servers:
type: json
- NodeConfigIdentifiers:
- type: json
- description: Value which changes if the node configuration may need to be re-applied
RoleData:
type: json
default: {}
+ DeployIdentifier:
+ type: string
+ description: Value which changes if the node configuration may need to be re-applied
resources:
@@ -28,7 +28,7 @@ resources:
servers: {get_param: servers}
config: {get_resource: ComputeArtifactsConfig}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ComputePuppetConfig:
type: OS::Heat::SoftwareConfig
@@ -57,7 +57,7 @@ resources:
config: {get_resource: ComputePuppetConfig}
input_values:
step: 2
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ComputeOvercloudServicesDeployment_Step3:
type: OS::Heat::StructuredDeployments
@@ -68,7 +68,7 @@ resources:
config: {get_resource: ComputePuppetConfig}
input_values:
step: 3
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ComputeOvercloudServicesDeployment_Step4:
type: OS::Heat::StructuredDeployments
@@ -79,7 +79,7 @@ resources:
config: {get_resource: ComputePuppetConfig}
input_values:
step: 4
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index db398f17..c2013b56 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -806,12 +806,3 @@ outputs:
description: Heat resource handle for the Nova compute server
value:
{get_resource: NovaCompute}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value:
- list_join:
- - ','
- - - {get_attr: [NovaComputeDeployment, deploy_stdout]}
- - {get_attr: [NodeTLSCAData, deploy_stdout]}
- - {get_attr: [ComputeExtraConfigPre, deploy_stdout]}
- - {get_param: UpdateIdentifier}
diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml
index 27fbdec0..4af6cb46 100644
--- a/puppet/controller-post.yaml
+++ b/puppet/controller-post.yaml
@@ -10,12 +10,12 @@ parameters:
type: boolean
servers:
type: json
- NodeConfigIdentifiers:
- type: json
- description: Value which changes if the node configuration may need to be re-applied
RoleData:
type: json
default: {}
+ DeployIdentifier:
+ type: string
+ description: Value which changes if the node configuration may need to be re-applied
resources:
@@ -33,7 +33,7 @@ resources:
properties:
servers: {get_param: servers}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ControllerPuppetConfig:
type: OS::TripleO::ControllerConfig
@@ -53,7 +53,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 1
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ControllerServicesBaseDeployment_Step2:
type: OS::Heat::StructuredDeployments
@@ -64,7 +64,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 2
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ControllerOvercloudServicesDeployment_Step3:
type: OS::Heat::StructuredDeployments
@@ -75,7 +75,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 3
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ControllerOvercloudServicesDeployment_Step4:
type: OS::Heat::StructuredDeployments
@@ -86,7 +86,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 4
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ControllerOvercloudServicesDeployment_Step5:
type: OS::Heat::StructuredDeployments
@@ -97,7 +97,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 5
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ControllerPostPuppet:
type: OS::TripleO::Tasks::ControllerPostPuppet
@@ -105,7 +105,7 @@ resources:
properties:
servers: {get_param: servers}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index f6d63d18..f45cdf31 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -1135,16 +1135,6 @@ outputs:
template: "IP:11211"
params:
IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
- config_identifier:
- description: identifier which changes if the controller configuration may need re-applying
- value:
- list_join:
- - ','
- - - {get_attr: [ControllerDeployment, deploy_stdout]}
- - {get_attr: [NodeTLSCAData, deploy_stdout]}
- - {get_attr: [NodeTLSData, deploy_stdout]}
- - {get_attr: [ControllerExtraConfigPre, deploy_stdout]}
- - {get_param: UpdateIdentifier}
tls_key_modulus_md5:
description: MD5 checksum of the TLS Key Modulus
value: {get_attr: [NodeTLSData, key_modulus_md5]}
diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
index 3e455347..aa5c3c43 100644
--- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
@@ -109,11 +109,3 @@ resources:
properties:
config: {get_resource: NetworkMidoNetConfig}
servers: {get_param: compute_servers}
-
-outputs:
- config_identifier:
- value:
- list_join:
- - ' '
- - - {get_attr: [NetworkMidonetDeploymentControllers, deploy_stdouts]}
- - {get_attr: [NetworkMidonetDeploymentComputes, deploy_stdouts]}
diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
index 71445800..e924fc87 100644
--- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
@@ -343,11 +343,3 @@ resources:
input_values:
ucsm_config: {get_param: NetworkUCSMHostList}
actions: ['CREATE'] # Only do this on CREATE
-
-outputs:
- # The Deployment applying the hieradata outputs the derived config-id, which
- # changes if the input_values change, so if the stdouts from
- # NetworkCiscoDeployment change, we need to reapply puppet (which will
- # happen if we return a different config_identifier)
- config_identifier:
- value: {get_attr: [NetworkCiscoDeployment, deploy_stdouts]}
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index f84f7049..bc5a4098 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -4,7 +4,7 @@ nova::api::enabled: true
nova::vncproxy::enabled: true
# gnocchi
-gnocchi::db::sync::extra_opts: '--skip-storage'
+gnocchi::db::sync::extra_opts: '--skip-storage --create-legacy-resource-types'
gnocchi::storage::swift::swift_user: 'service:gnocchi'
gnocchi::storage::swift::swift_auth_version: 2
gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26'
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index af6b0960..152694d9 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -16,14 +16,6 @@
include ::tripleo::packages
include ::tripleo::firewall
-if hiera('step') >= 1 {
-
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
-}
-
if hiera('step') >= 4 {
hiera_include('ceph_classes')
}
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index b8e267fc..b25d62f8 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -16,10 +16,6 @@
include ::tripleo::packages
include ::tripleo::firewall
-create_resources(kmod::load, hiera('kernel_modules'), { })
-create_resources(sysctl::value, hiera('sysctl_settings'), { })
-Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
if hiera('step') >= 4 {
# When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 72c31a31..de84bcf8 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -18,14 +18,6 @@ include ::tripleo::firewall
$enable_load_balancer = hiera('enable_load_balancer', true)
-if hiera('step') >= 1 {
-
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
-}
-
if hiera('step') >= 2 {
# MongoDB
@@ -95,66 +87,6 @@ if hiera('step') >= 4 {
}
include ::nova::config
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
- # TODO(devvesa) provide non-controller ips for these services
- $zookeeper_node_ips = hiera('neutron_api_node_ips')
- $cassandra_node_ips = hiera('neutron_api_node_ips')
-
- # Run zookeeper in the controller if configured
- if hiera('enable_zookeeper_on_controller') {
- class {'::tripleo::cluster::zookeeper':
- zookeeper_server_ips => $zookeeper_node_ips,
- # TODO: create a 'bind' hiera key for zookeeper
- zookeeper_client_ip => hiera('neutron::bind_host'),
- zookeeper_hostnames => hiera('controller_node_names')
- }
- }
-
- # Run cassandra in the controller if configured
- if hiera('enable_cassandra_on_controller') {
- class {'::tripleo::cluster::cassandra':
- cassandra_servers => $cassandra_node_ips,
- # TODO: create a 'bind' hiera key for cassandra
- cassandra_ip => hiera('neutron::bind_host'),
- }
- }
-
- class {'::tripleo::network::midonet::agent':
- zookeeper_servers => $zookeeper_node_ips,
- cassandra_seeds => $cassandra_node_ips
- }
-
- class {'::tripleo::network::midonet::api':
- zookeeper_servers => $zookeeper_node_ips,
- vip => hiera('public_virtual_ip'),
- keystone_ip => hiera('public_virtual_ip'),
- keystone_admin_token => hiera('keystone::admin_token'),
- # TODO: create a 'bind' hiera key for api
- bind_address => hiera('neutron::bind_host'),
- admin_password => hiera('admin_password')
- }
-
- # TODO: find a way to get an empty list from hiera
- # TODO: when doing the composable midonet plugin, don't forget to
- # set service_plugins to an empty array in Hiera.
- class {'::neutron':
- service_plugins => []
- }
-
- }
-
- # If the value of core plugin is set to 'midonet',
- # skip all the ML2 configuration
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
- class {'::neutron::plugins::midonet':
- midonet_api_ip => hiera('public_virtual_ip'),
- keystone_tenant => hiera('neutron::server::auth_tenant'),
- keystone_password => hiera('neutron::server::password')
- }
- }
-
# Ceilometer
$ceilometer_backend = downcase(hiera('ceilometer_backend'))
case $ceilometer_backend {
@@ -200,7 +132,7 @@ if hiera('step') >= 4 {
} else {
$_profile_support = 'None'
}
- $neutron_options = {'profile_support' => $_profile_support }
+ $neutron_options = merge({'profile_support' => $_profile_support },hiera('horizon::neutron_options',undef))
$memcached_ipv6 = hiera('memcached_ipv6', false)
if $memcached_ipv6 {
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 7205002a..d7b1ce54 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -22,9 +22,7 @@ Pcmk_resource <| |> {
Service <|
tag == 'aodh-service' or
tag == 'ceilometer-service' or
- tag == 'gnocchi-service' or
- tag == 'neutron-service' or
- tag == 'nova-service'
+ tag == 'gnocchi-service'
|> {
hasrestart => true,
restart => '/bin/true',
@@ -53,10 +51,6 @@ $non_pcmk_start = hiera('step') >= 5
if hiera('step') >= 1 {
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
$pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
$corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
if $corosync_ipv6 {
@@ -192,48 +186,25 @@ if hiera('step') >= 2 {
require => Class['::mysql::server'],
before => Exec['galera-ready'],
}
- }
- $mysql_root_password = hiera('mysql::server::root_password')
- $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
- # This step is to create a sysconfig clustercheck file with the root user and empty password
- # on the first install only (because later on the clustercheck db user will be used)
- # We are using exec and not file in order to not have duplicate definition errors in puppet
- # when we later set the the file to contain the clustercheck data
- exec { 'create-root-sysconfig-clustercheck':
- command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
- unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
- }
- exec { 'galera-ready' :
- command => '/usr/bin/clustercheck >/dev/null',
- timeout => 30,
- tries => 180,
- try_sleep => 10,
- environment => ['AVAILABLE_WHEN_READONLY=0'],
- require => Exec['create-root-sysconfig-clustercheck'],
- }
+ exec { 'galera-ready' :
+ command => '/usr/bin/clustercheck >/dev/null',
+ timeout => 30,
+ tries => 180,
+ try_sleep => 10,
+ environment => ['AVAILABLE_WHEN_READONLY=0'],
+ require => Exec['create-root-sysconfig-clustercheck'],
+ }
- xinetd::service { 'galera-monitor' :
- port => '9200',
- server => '/usr/bin/clustercheck',
- per_source => 'UNLIMITED',
- log_on_success => '',
- log_on_failure => 'HOST',
- flags => 'REUSE',
- service_type => 'UNLISTED',
- user => 'root',
- group => 'root',
- require => Exec['create-root-sysconfig-clustercheck'],
- }
- # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
- # to it in a later step. We do this only on one node as it will replicate on
- # the other members. We also make sure that the permissions are the minimum necessary
- if $pacemaker_master {
+ # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
+ # to it in a later step. We do this only on one node as it will replicate on
+ # the other members. We also make sure that the permissions are the minimum necessary
mysql_user { 'clustercheck@localhost':
ensure => 'present',
- password_hash => mysql_password($mysql_clustercheck_password),
+ password_hash => mysql_password(hiera('mysql_clustercheck_password')),
require => Exec['galera-ready'],
}
+
mysql_grant { 'clustercheck@localhost/*.*':
ensure => 'present',
options => ['GRANT'],
@@ -241,10 +212,8 @@ if hiera('step') >= 2 {
table => '*.*',
user => 'clustercheck@localhost',
}
- }
- # Create all the database schemas
- if $sync_db {
+ # Create all the database schemas
if downcase(hiera('ceilometer_backend')) == 'mysql' {
class { '::ceilometer::db::mysql':
require => Exec['galera-ready'],
@@ -259,7 +228,28 @@ if hiera('step') >= 2 {
class { '::aodh::db::mysql':
require => Exec['galera-ready'],
- }
+ }
+ }
+ # This step is to create a sysconfig clustercheck file with the root user and empty password
+ # on the first install only (because later on the clustercheck db user will be used)
+ # We are using exec and not file in order to not have duplicate definition errors in puppet
+ # when we later set the the file to contain the clustercheck data
+ exec { 'create-root-sysconfig-clustercheck':
+ command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
+ unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
+ }
+
+ xinetd::service { 'galera-monitor' :
+ port => '9200',
+ server => '/usr/bin/clustercheck',
+ per_source => 'UNLIMITED',
+ log_on_success => '',
+ log_on_failure => 'HOST',
+ flags => 'REUSE',
+ service_type => 'UNLISTED',
+ user => 'root',
+ group => 'root',
+ require => Exec['create-root-sysconfig-clustercheck'],
}
} #END STEP 2
@@ -267,6 +257,7 @@ if hiera('step') >= 2 {
if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
# At this stage we are guaranteed that the clustercheck db user exists
# so we switch the resource agent to use it.
+ $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
file { '/etc/sysconfig/clustercheck' :
ensure => file,
mode => '0600',
@@ -290,63 +281,6 @@ MYSQL_HOST=localhost\n",
include ::nova::config
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
- # TODO(devvesa) provide non-controller ips for these services
- $zookeeper_node_ips = hiera('neutron_api_node_ips')
- $cassandra_node_ips = hiera('neutron_api_node_ips')
-
- # Run zookeeper in the controller if configured
- if hiera('enable_zookeeper_on_controller') {
- class {'::tripleo::cluster::zookeeper':
- zookeeper_server_ips => $zookeeper_node_ips,
- # TODO: create a 'bind' hiera key for zookeeper
- zookeeper_client_ip => hiera('neutron::bind_host'),
- zookeeper_hostnames => split(hiera('controller_node_names'), ',')
- }
- }
-
- # Run cassandra in the controller if configured
- if hiera('enable_cassandra_on_controller') {
- class {'::tripleo::cluster::cassandra':
- cassandra_servers => $cassandra_node_ips,
- # TODO: create a 'bind' hiera key for cassandra
- cassandra_ip => hiera('neutron::bind_host'),
- }
- }
-
- class {'::tripleo::network::midonet::agent':
- zookeeper_servers => $zookeeper_node_ips,
- cassandra_seeds => $cassandra_node_ips
- }
-
- class {'::tripleo::network::midonet::api':
- zookeeper_servers => $zookeeper_node_ips,
- vip => hiera('public_virtual_ip'),
- keystone_ip => hiera('public_virtual_ip'),
- keystone_admin_token => hiera('keystone::admin_token'),
- # TODO: create a 'bind' hiera key for api
- bind_address => hiera('neutron::bind_host'),
- admin_password => hiera('admin_password')
- }
-
- # Configure Neutron
- # TODO: when doing the composable midonet plugin, don't forget to
- # set service_plugins to an empty array in Hiera.
- class {'::neutron':
- service_plugins => []
- }
-
- }
-
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- class {'::neutron::plugins::midonet':
- midonet_api_ip => hiera('public_virtual_ip'),
- keystone_tenant => hiera('neutron::server::auth_tenant'),
- keystone_password => hiera('neutron::server::password')
- }
- }
-
# Ceilometer
case downcase(hiera('ceilometer_backend')) {
/mysql/: {
@@ -398,7 +332,7 @@ MYSQL_HOST=localhost\n",
} else {
$_profile_support = 'None'
}
- $neutron_options = {'profile_support' => $_profile_support }
+ $neutron_options = merge({'profile_support' => $_profile_support },hiera('horizon::neutron_options',undef))
$memcached_ipv6 = hiera('memcached_ipv6', false)
if $memcached_ipv6 {
@@ -482,6 +416,7 @@ if hiera('step') >= 5 {
# password. On second runs or updates /root/.my.cnf will already be populated
# with proper credentials. This step happens on every node because this sql
# statement does not automatically replicate across nodes.
+ $mysql_root_password = hiera('mysql::server::root_password')
exec { 'galera-set-root-password':
command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
}
@@ -527,49 +462,6 @@ password=\"${mysql_root_password}\"",
Pacemaker::Resource::Ocf['openstack-core']],
}
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- pacemaker::resource::service {'tomcat':
- clone_params => 'interleave=true',
- }
- }
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
- pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::server_service}-clone",
- second_resource => "${::neutron::params::dhcp_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
- }
- pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::dhcp_agent_service}-clone",
- second_resource => "${::neutron::params::metadata_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
- }
- pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::metadata_agent_service}-clone",
- second_resource => 'tomcat-clone',
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
- Pacemaker::Resource::Service['tomcat']],
- }
- pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
- source => "${::neutron::params::metadata_agent_service}-clone",
- target => "${::neutron::params::dhcp_agent_service}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
- }
- }
-
# Nova
pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
constraint_type => 'order',
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
index 418c56e6..1f04c581 100644
--- a/puppet/manifests/overcloud_object.pp
+++ b/puppet/manifests/overcloud_object.pp
@@ -16,12 +16,6 @@
include ::tripleo::packages
include ::tripleo::firewall
-if hiera('step') >= 1 {
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-}
-
if hiera('step') >= 4 {
hiera_include('object_classes')
}
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index 7fc27d60..7c7da586 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -16,47 +16,7 @@
include ::tripleo::packages
include ::tripleo::firewall
-if hiera('step') >= 1 {
-
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
-}
-
if hiera('step') >= 4 {
-
- include ::cinder
- include ::cinder::config
- include ::cinder::glance
- include ::cinder::volume
- include ::cinder::setup_test_volume
-
- $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
- if $cinder_enable_iscsi {
- $cinder_iscsi_backend = 'tripleo_iscsi'
-
- cinder::backend::iscsi { $cinder_iscsi_backend :
- iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
- iscsi_helper => hiera('cinder_iscsi_helper'),
- }
- }
-
- $cinder_enabled_backends = any2array($cinder_iscsi_backend)
- class { '::cinder::backends' :
- enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
- }
-
- $snmpd_user = hiera('snmpd_readonly_user_name')
- snmp::snmpv3_user { $snmpd_user:
- authtype => 'MD5',
- authpass => hiera('snmpd_readonly_user_password'),
- }
- class { '::snmp':
- agentaddress => ['udp:161','udp6:[::1]:161'],
- snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
- }
-
hiera_include('volume_classes')
}
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
new file mode 100644
index 00000000..b429c5ea
--- /dev/null
+++ b/puppet/services/kernel.yaml
@@ -0,0 +1,18 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Load kernel modules with kmod and configure kernel options with sysctl.
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the Kernel modules
+ value:
+ step_config: |
+ include ::tripleo::profile::base::kernel
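
This new Kernel service takes over the kmod/sysctl handling deleted from the per-role manifests earlier in this diff; the included ::tripleo::profile::base::kernel profile is presumably driven by the same hiera keys those manifests read (kernel_modules and sysctl_settings). A hypothetical hieradata snippet, with purely illustrative values:

    kernel_modules:
      nf_conntrack: {}
    sysctl_settings:
      net.ipv4.ip_forward:
        value: 1
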
diff --git a/puppet/services/neutron-midonet.yaml b/puppet/services/neutron-midonet.yaml
new file mode 100644
index 00000000..736c01c3
--- /dev/null
+++ b/puppet/services/neutron-midonet.yaml
@@ -0,0 +1,48 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Midonet plugin and services
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NeutronPassword:
+ description: The password for the neutron service and db account, used by neutron agents.
+ type: string
+ hidden: true
+ AdminPassword:
+ description: The password for the keystone admin account, used for monitoring, querying neutron etc.
+ type: string
+ hidden: true
+ AdminToken:
+ description: The keystone auth secret and db password.
+ type: string
+ hidden: true
+ EnableZookeeperOnController:
+ label: Enable Zookeeper On Controller
+ description: 'Whether to enable the Zookeeper cluster on the Controller'
+ type: boolean
+ default: false
+ EnableCassandraOnController:
+ label: Enable Cassandra On Controller
+ description: 'Whether to enable the Cassandra cluster on the Controller'
+ type: boolean
+ default: false
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Midonet plugin and services
+ value:
+ config_settings:
+ tripleo::profile::base::neutron::midonet::admin_password: {get_param: AdminPassword}
+ tripleo::profile::base::neutron::midonet::keystone_admin_token: {get_param: AdminToken}
+ tripleo::profile::base::neutron::midonet::neutron_auth_password: {get_param: NeutronPassword}
+ tripleo::profile::base::neutron::midonet::zk_on_controller: {get_param: EnableZookeeperOnController}
+ tripleo::profile::base::neutron::midonet::neutron_auth_tenant: 'service'
+ enable_cassandra_on_controller: {get_param: EnableCassandraOnController}
+ neutron::service_plugins: []
+ step_config: |
+ include tripleo::profile::base::neutron::plugins::midonet
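
To consume this service, an environment maps the generic core-plugin interface onto it and sets the controller-side options, which is what environments/neutron-midonet.yaml in this change does. A condensed sketch (EnableCassandraOnController shown only to illustrate the new parameter):

    resource_registry:
      OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginMidonet
    parameter_defaults:
      EnableZookeeperOnController: true
      EnableCassandraOnController: true
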
diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml
index 5833c42d..780c295e 100644
--- a/puppet/services/pacemaker/heat-api-cfn.yaml
+++ b/puppet/services/pacemaker/heat-api-cfn.yaml
@@ -25,7 +25,5 @@ outputs:
- get_attr: [HeatApiCfnBase, role_data, config_settings]
- heat::api_cfn::manage_service: false
heat::api_cfn::enabled: false
- step_config:
- # No puppet manifests since heat-api-cfn is included in
- # ::tripleo::profile::pacemaker::heat which is maintained alongside of
- # pacemaker/heat-api.yaml.
+ step_config: |
+ include ::tripleo::profile::pacemaker::heat::api_cfn
diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
index 8b67702c..2fa82fe7 100644
--- a/puppet/services/pacemaker/heat-api-cloudwatch.yaml
+++ b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
@@ -25,7 +25,5 @@ outputs:
- get_attr: [HeatApiCloudwatchBase, role_data, config_settings]
- heat::api_cloudwatch::manage_service: false
heat::api_cloudwatch::enabled: false
- step_config:
- # No puppet manifests since heat-api-cloudwatch is included in
- # ::tripleo::profile::pacemaker::heat which is maintained alongside of
- # pacemaker/heat-api.yaml.
+ step_config: |
+ include ::tripleo::profile::pacemaker::heat::api_cloudwatch
diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml
index 6628e8dd..be897a55 100644
--- a/puppet/services/pacemaker/heat-api.yaml
+++ b/puppet/services/pacemaker/heat-api.yaml
@@ -26,4 +26,4 @@ outputs:
- heat::api::manage_service: false
heat::api::enabled: false
step_config: |
- include ::tripleo::profile::pacemaker::heat
+ include ::tripleo::profile::pacemaker::heat::api
diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml
index e1195780..a8ed5c0c 100644
--- a/puppet/services/pacemaker/heat-engine.yaml
+++ b/puppet/services/pacemaker/heat-engine.yaml
@@ -26,7 +26,5 @@ outputs:
- get_attr: [HeatEngineBase, role_data, config_settings]
- heat::engine::manage_service: false
heat::engine::enabled: false
- step_config:
- # No puppet manifests since heat-engine is included in
- # ::tripleo::profile::pacemaker::heat which is maintained alongside of
- # pacemaker/heat-api.yaml.
+ step_config: |
+ include ::tripleo::profile::pacemaker::heat::engine
diff --git a/puppet/services/pacemaker/neutron-midonet.yaml b/puppet/services/pacemaker/neutron-midonet.yaml
new file mode 100644
index 00000000..f9fd992c
--- /dev/null
+++ b/puppet/services/pacemaker/neutron-midonet.yaml
@@ -0,0 +1,28 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Midonet with Pacemaker configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+ NeutronMidonetBase:
+ type: ../neutron-midonet.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Midonet plugin.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronMidonetBase, role_data, config_settings]
+ step_config: |
+ include ::tripleo::profile::pacemaker::neutron::plugins::midonet
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index a86aeaf5..930b9e3d 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -17,6 +17,10 @@ parameters:
description: The password for the swift service account, used by the swift proxy services.
type: string
hidden: true
+ SwiftProxyNodeTimeout:
+ default: 60
+ description: Timeout for requests going from swift-proxy to swift a/c/o services.
+ type: number
SwiftWorkers:
default: 0
description: Number of workers for Swift service.
@@ -36,6 +40,7 @@ outputs:
swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
swift::proxy::authtoken::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
swift::proxy::authtoken::admin_password: {get_param: SwiftPassword}
+ swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
swift::proxy::workers: {get_param: SwiftWorkers}
swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]}
swift::keystone::auth::internal_url: {get_param: [EndpointMap, SwiftInternal, uri]}
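
The new SwiftProxyNodeTimeout knob can be raised above its 60-second default through an ordinary environment file; the value below is illustrative only:

    parameter_defaults:
      SwiftProxyNodeTimeout: 120
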
diff --git a/puppet/services/time/ntp.yaml b/puppet/services/time/ntp.yaml
index dbef6f91..930dca41 100644
--- a/puppet/services/time/ntp.yaml
+++ b/puppet/services/time/ntp.yaml
@@ -12,7 +12,7 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- NtpServers:
+ NtpServer:
default: []
description: NTP servers
type: comma_delimited_list
@@ -22,6 +22,6 @@ outputs:
description: Role ntp using composable services.
value:
config_settings:
- ntp::ntpservers: {get_param: NtpServers}
+ ntp::ntpservers: {get_param: NtpServer}
step_config: |
- include ::ntp
\ No newline at end of file
+ include ::ntp
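
Deployments that previously passed NtpServers need to switch to the renamed parameter; a minimal environment sketch with an illustrative server:

    parameter_defaults:
      NtpServer: ['0.pool.ntp.org']
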
diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml
index b873d923..1c36a047 100644
--- a/puppet/swift-storage-post.yaml
+++ b/puppet/swift-storage-post.yaml
@@ -8,12 +8,12 @@ parameters:
type: boolean
servers:
type: json
- NodeConfigIdentifiers:
- type: json
- description: Value which changes if the node configuration may need to be re-applied
RoleData:
type: json
default: {}
+ DeployIdentifier:
+ type: string
+ description: Value which changes if the node configuration may need to be re-applied
resources:
@@ -26,7 +26,7 @@ resources:
servers: {get_param: servers}
config: {get_resource: StorageArtifactsConfig}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
StoragePuppetConfig:
type: OS::Heat::SoftwareConfig
@@ -56,7 +56,7 @@ resources:
config: {get_resource: StoragePuppetConfig}
input_values:
step: 2
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
StorageRingbuilderDeployment_Step3:
type: OS::Heat::StructuredDeployments
@@ -67,7 +67,7 @@ resources:
config: {get_resource: StoragePuppetConfig}
input_values:
step: 3
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
StorageDeployment_Step4:
type: OS::Heat::StructuredDeployments
@@ -78,7 +78,7 @@ resources:
config: {get_resource: StoragePuppetConfig}
input_values:
step: 4
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index accbbfce..74eda89f 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -420,11 +420,3 @@ outputs:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value:
- list_join:
- - ','
- - - {get_attr: [SwiftStorageHieraDeploy, deploy_stdout]}
- - {get_attr: [NodeTLSCAData, deploy_stdout]}
- - {get_param: UpdateIdentifier}