Diffstat (limited to 'puppet')
-rw-r--r--  puppet/all-nodes-config.yaml | 36
-rw-r--r--  puppet/bootstrap-config.yaml | 28
-rw-r--r--  puppet/ceph-cluster-config.yaml | 100
-rw-r--r--  puppet/ceph-storage-post.yaml | 19
-rw-r--r--  puppet/ceph-storage.yaml | 31
-rw-r--r--  puppet/cinder-storage-post.yaml | 50
-rw-r--r--  puppet/cinder-storage.yaml | 123
-rw-r--r--  puppet/compute-post.yaml | 21
-rw-r--r--  puppet/compute.yaml | 227
-rw-r--r--  puppet/controller-post.yaml | 27
-rw-r--r--  puppet/controller.yaml | 182
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml | 8
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml | 8
-rw-r--r--  puppet/extraconfig/ceph/ceph-external-config.yaml | 120
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml | 91
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml | 62
-rw-r--r--  puppet/hieradata/ceph.yaml | 2
-rw-r--r--  puppet/hieradata/compute.yaml | 2
-rw-r--r--  puppet/hieradata/controller.yaml | 5
-rw-r--r--  puppet/hieradata/database.yaml | 48
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp | 42
-rw-r--r--  puppet/manifests/overcloud_compute.pp | 181
-rw-r--r--  puppet/manifests/overcloud_controller.pp | 236
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp | 449
-rw-r--r--  puppet/manifests/overcloud_object.pp | 38
-rw-r--r--  puppet/manifests/overcloud_volume.pp | 45
-rw-r--r--  puppet/services/ceilometer-agent-central.yaml | 43
-rw-r--r--  puppet/services/ceilometer-agent-notification.yaml | 27
-rw-r--r--  puppet/services/ceilometer-api.yaml | 27
-rw-r--r--  puppet/services/ceilometer-base.yaml | 105
-rw-r--r--  puppet/services/ceilometer-collector.yaml | 26
-rw-r--r--  puppet/services/ceilometer-expirer.yaml | 27
-rw-r--r--  puppet/services/ceph-base.yaml | 94
-rw-r--r--  puppet/services/ceph-client.yaml | 24
-rw-r--r--  puppet/services/ceph-external.yaml | 65
-rw-r--r--  puppet/services/ceph-mon.yaml | 56
-rw-r--r--  puppet/services/ceph-osd.yaml | 24
-rw-r--r--  puppet/services/cinder-base.yaml | 6
-rw-r--r--  puppet/services/cinder-volume.yaml | 8
-rw-r--r--  puppet/services/glance-api.yaml | 8
-rw-r--r--  puppet/services/glance-registry.yaml | 7
-rw-r--r--  puppet/services/heat-engine.yaml | 6
-rw-r--r--  puppet/services/horizon.yaml | 34
-rw-r--r--  puppet/services/ironic-api.yaml | 42
-rw-r--r--  puppet/services/ironic-base.yaml | 69
-rw-r--r--  puppet/services/ironic-conductor.yaml | 35
-rw-r--r--  puppet/services/kernel.yaml | 18
-rw-r--r--  puppet/services/keystone.yaml | 7
-rw-r--r--  puppet/services/neutron-compute-plugin-midonet.yaml | 19
-rw-r--r--  puppet/services/neutron-compute-plugin-nuage.yaml | 26
-rw-r--r--  puppet/services/neutron-compute-plugin-opencontrail.yaml | 19
-rw-r--r--  puppet/services/neutron-compute-plugin-plumgrid.yaml | 19
-rw-r--r--  puppet/services/neutron-dhcp.yaml | 24
-rw-r--r--  puppet/services/neutron-midonet.yaml | 48
-rw-r--r--  puppet/services/neutron-plugin-ml2.yaml | 2
-rw-r--r--  puppet/services/neutron-plugin-nuage.yaml | 49
-rw-r--r--  puppet/services/neutron-plugin-opencontrail.yaml | 60
-rw-r--r--  puppet/services/neutron-server.yaml | 9
-rw-r--r--  puppet/services/nova-compute.yaml | 12
-rw-r--r--  puppet/services/nova-libvirt.yaml | 31
-rw-r--r--  puppet/services/pacemaker/ceilometer-agent-central.yaml | 29
-rw-r--r--  puppet/services/pacemaker/ceilometer-agent-notification.yaml | 29
-rw-r--r--  puppet/services/pacemaker/ceilometer-api.yaml | 29
-rw-r--r--  puppet/services/pacemaker/ceilometer-collector.yaml | 29
-rw-r--r--  puppet/services/pacemaker/heat-api-cfn.yaml | 6
-rw-r--r--  puppet/services/pacemaker/heat-api-cloudwatch.yaml | 6
-rw-r--r--  puppet/services/pacemaker/heat-api.yaml | 2
-rw-r--r--  puppet/services/pacemaker/heat-engine.yaml | 6
-rw-r--r--  puppet/services/pacemaker/neutron-midonet.yaml | 28
-rw-r--r--  puppet/services/pacemaker/neutron-plugin-ml2.yaml | 2
-rw-r--r--  puppet/services/pacemaker/neutron-plugin-opencontrail.yaml | 28
-rw-r--r--  puppet/services/rabbitmq.yaml | 2
-rw-r--r--  puppet/services/sahara-engine.yaml | 6
-rw-r--r--  puppet/services/services.yaml | 11
-rw-r--r--  puppet/services/snmp.yaml | 31
-rw-r--r--  puppet/services/swift-proxy.yaml | 5
-rw-r--r--  puppet/services/swift-storage.yaml | 44
-rw-r--r--  puppet/services/time/ntp.yaml | 6
-rw-r--r--  puppet/services/time/timezone.yaml | 24
-rw-r--r--  puppet/swift-storage-post.yaml | 21
-rw-r--r--  puppet/swift-storage.yaml | 33
-rw-r--r--  puppet/vip-config.yaml | 1
82 files changed, 1589 insertions, 2046 deletions
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index b065ddd2..55120912 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -56,7 +56,12 @@ parameters:
type: comma_delimited_list
sahara_api_node_ips:
type: comma_delimited_list
-
+ ironic_api_node_ips:
+ type: comma_delimited_list
+ ceph_mon_node_ips:
+ type: comma_delimited_list
+ ceph_mon_node_names:
+ type: comma_delimited_list
DeployIdentifier:
type: string
description: >
@@ -103,6 +108,10 @@ resources:
datafiles:
RedHat:
raw_data: {get_file: hieradata/RedHat.yaml}
+ bootstrap_node:
+ mapped_data:
+ bootstrap_nodeid: {get_input: bootstrap_nodeid}
+ bootstrap_nodeid_ip: {get_input: bootstrap_nodeid_ip}
all_nodes:
mapped_data:
controller_node_ips:
@@ -294,7 +303,31 @@ resources:
list_join:
- "','"
- {get_param: sahara_api_node_ips}
+ ironic_api_node_ips:
+ str_replace:
+ template: "['SERVERS_LIST']"
+ params:
+ SERVERS_LIST:
+ list_join:
+ - "','"
+ - {get_param: ironic_api_node_ips}
+ tripleo::profile::base::ceph::ceph_mon_initial_members:
+ list_join:
+ - ','
+ - {get_param: ceph_mon_node_names}
+ tripleo::profile::base::ceph::ceph_mon_host:
+ list_join:
+ - ','
+ - {get_param: ceph_mon_node_ips}
+ tripleo::profile::base::ceph::ceph_mon_host_v6:
+ str_replace:
+ template: "'[IPS_LIST]'"
+ params:
+ IPS_LIST:
+ list_join:
+ - '],['
+ - {get_param: ceph_mon_node_ips}
# NOTE(gfidente): interpolation with %{} in the
# hieradata file can't be used as it returns string
ceilometer::rabbit_hosts: *rabbit_nodes_array
@@ -306,6 +339,7 @@ resources:
nova::rabbit_hosts: *rabbit_nodes_array
keystone::rabbit_hosts: *rabbit_nodes_array
sahara::rabbit_hosts: *rabbit_nodes_array
+ ironic::rabbit_hosts: *rabbit_nodes_array
deploy_identifier: {get_param: DeployIdentifier}
update_identifier: {get_param: UpdateIdentifier}
diff --git a/puppet/bootstrap-config.yaml b/puppet/bootstrap-config.yaml
deleted file mode 100644
index d88eebdf..00000000
--- a/puppet/bootstrap-config.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Bootstrap Config Puppet'
-
-parameters:
- bootstrap_nodeid:
- type: string
- bootstrap_nodeid_ip:
- type: string
-
-resources:
-
- BootstrapNodeConfigImpl:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- hiera:
- datafiles:
- bootstrap_node:
- mapped_data:
- bootstrap_nodeid: {get_param: bootstrap_nodeid}
- bootstrap_nodeid_ip: {get_param: bootstrap_nodeid_ip}
-
-outputs:
- config_id:
- description: The ID of the BootstrapNodeConfigImpl resource.
- value:
- {get_resource: BootstrapNodeConfigImpl}
diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml
index 6beb751f..2e329989 100644
--- a/puppet/ceph-cluster-config.yaml
+++ b/puppet/ceph-cluster-config.yaml
@@ -2,52 +2,15 @@ heat_template_version: 2015-04-30
description: 'Ceph Cluster config data for Puppet'
parameters:
- ceph_storage_count:
- default: 0
- type: number
- description: Number of Ceph storage nodes. Used to enable/disable managed Ceph installation.
- ceph_external_mon_ips:
- default: ''
- type: string
- description: List of external Ceph Mon host IPs.
- ceph_client_key:
- default: ''
- type: string
- description: Ceph key used to create the client user keyring.
- ceph_fsid:
- default: ''
- type: string
- ceph_admin_key:
- default: ''
- type: string
- ceph_mon_key:
- default: ''
- type: string
- ceph_mon_names:
- type: comma_delimited_list
- ceph_mon_ips:
- type: comma_delimited_list
NovaRbdPoolName:
default: vms
type: string
- CinderRbdPoolName:
- default: volumes
- type: string
- CinderBackupRbdPoolName:
- default: backups
- type: string
- GlanceRbdPoolName:
- default: images
- type: string
GnocchiRbdPoolName:
default: metrics
type: string
CephClientUserName:
default: openstack
type: string
- CephIPv6:
- default: False
- type: boolean
resources:
CephClusterConfigImpl:
@@ -59,66 +22,10 @@ resources:
datafiles:
ceph_cluster:
mapped_data:
- ceph_ipv6: {get_param: CephIPv6}
- ceph_storage_count: {get_param: ceph_storage_count}
- ceph_mon_initial_members:
- list_join:
- - ','
- - {get_param: ceph_mon_names}
- ceph_mon_host:
- list_join:
- - ','
- - {get_param: ceph_mon_ips}
- ceph_mon_host_v6:
- str_replace:
- template: "'[IPS_LIST]'"
- params:
- IPS_LIST:
- list_join:
- - '],['
- - {get_param: ceph_mon_ips}
- ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
- ceph::profile::params::fsid: {get_param: ceph_fsid}
- ceph::profile::params::mon_key: {get_param: ceph_mon_key}
- # We should use a separated key for the non-admin clients
- ceph::profile::params::client_keys:
- str_replace:
- template: "{
- client.admin: {
- secret: 'ADMIN_KEY',
- mode: '0600',
- cap_mon: 'allow *',
- cap_osd: 'allow *',
- cap_mds: 'allow *'
- },
- client.bootstrap-osd: {
- secret: 'ADMIN_KEY',
- keyring_path: '/var/lib/ceph/bootstrap-osd/ceph.keyring',
- cap_mon: 'allow profile bootstrap-osd'
- },
- client.CLIENT_USER: {
- secret: 'CLIENT_KEY',
- mode: '0644',
- cap_mon: 'allow r',
- cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
- }
- }"
- params:
- CLIENT_USER: {get_param: CephClientUserName}
- CLIENT_KEY: {get_param: ceph_client_key}
- ADMIN_KEY: {get_param: ceph_admin_key}
- NOVA_POOL: {get_param: NovaRbdPoolName}
- CINDER_POOL: {get_param: CinderRbdPoolName}
- CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
- GLANCE_POOL: {get_param: GlanceRbdPoolName}
- GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
- tripleo::profile::base::cinder::volume::rbd::cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
- glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName}
gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName}
nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
- glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
nova::compute::rbd::rbd_keyring:
list_join:
- '.'
@@ -131,13 +38,6 @@ resources:
- 'client'
- {get_param: CephClientUserName}
- 'keyring'
- tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name: {get_param: CephClientUserName}
- ceph_pools:
- - {get_param: CinderRbdPoolName}
- - {get_param: CinderBackupRbdPoolName}
- - {get_param: NovaRbdPoolName}
- - {get_param: GlanceRbdPoolName}
- - {get_param: GnocchiRbdPoolName}
outputs:
config_id:
diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml
index 2b9ae751..70baeb6e 100644
--- a/puppet/ceph-storage-post.yaml
+++ b/puppet/ceph-storage-post.yaml
@@ -10,13 +10,12 @@ parameters:
type: boolean
servers:
type: json
- NodeConfigIdentifiers:
- type: json
+ RoleData:
+ type: json
+ default: {}
+ DeployIdentifier:
+ type: string
description: Value which changes if the node configuration may need to be re-applied
- StepConfig:
- type: string
- description: Config manifests that will be used to step through the deployment.
- default: ''
resources:
@@ -29,7 +28,7 @@ resources:
servers: {get_param: servers}
config: {get_resource: CephStorageArtifactsConfig}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
CephStoragePuppetConfig:
type: OS::Heat::SoftwareConfig
@@ -47,7 +46,7 @@ resources:
list_join:
- ''
- - get_file: manifests/overcloud_cephstorage.pp
- - {get_param: StepConfig}
+ - {get_param: [RoleData, step_config]}
CephStorageDeployment_Step2:
type: OS::Heat::StructuredDeployments
@@ -58,7 +57,7 @@ resources:
config: {get_resource: CephStoragePuppetConfig}
input_values:
step: 2
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
CephStorageDeployment_Step3:
type: OS::Heat::StructuredDeployments
@@ -69,7 +68,7 @@ resources:
config: {get_resource: CephStoragePuppetConfig}
input_values:
step: 3
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index 7a71ea80..b2948144 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -30,10 +30,6 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
- TimeZone:
- default: 'UTC'
- description: The timezone to be set on Ceph nodes.
- type: string
UpdateIdentifier:
default: ''
type: string
@@ -95,10 +91,17 @@ parameters:
ServiceConfigSettings:
type: json
default: {}
+ ConfigCommand:
+ type: string
+ description: Command which will be run whenever configuration data changes
+ default: os-refresh-config --timeout 14400
resources:
CephStorage:
- type: OS::Nova::Server
+ type: OS::TripleO::Server
+ metadata:
+ os-collect-config:
+ command: {get_param: ConfigCommand}
properties:
image: {get_param: Image}
image_update_policy: {get_param: ImageUpdatePolicy}
@@ -228,7 +231,6 @@ resources:
config: {get_resource: CephStorageConfig}
server: {get_resource: CephStorage}
input_values:
- timezone: {get_param: TimeZone}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
ceph_cluster_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
@@ -248,6 +250,7 @@ resources:
- service_configs
- ceph_cluster # provided by CephClusterConfig
- ceph
+ - all_nodes # provided by allNodesConfig
- '"%{::osfamily}"'
- common
- network
@@ -269,7 +272,6 @@ resources:
ceph:
raw_data: {get_file: hieradata/ceph.yaml}
mapped_data:
- timezone::timezone: {get_input: timezone}
tripleo::packages::enable_install: {get_input: enable_package_install}
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
ceph::profile::params::cluster_network: {get_input: ceph_cluster_network}
@@ -310,6 +312,12 @@ resources:
get_param: UpdateIdentifier
outputs:
+ ip_address:
+ description: IP address of the server in the ctlplane network
+ value: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ hostname:
+ description: Hostname of the server
+ value: {get_attr: [CephStorage, name]}
hosts_entry:
value:
str_replace:
@@ -383,12 +391,3 @@ outputs:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value:
- list_join:
- - ','
- - - {get_attr: [CephStorageDeployment, deploy_stdout]}
- - {get_attr: [NodeTLSCAData, deploy_stdout]}
- - {get_attr: [CephStorageExtraConfigPre, deploy_stdout]}
- - {get_param: UpdateIdentifier}
diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml
index f470203f..c3dd403e 100644
--- a/puppet/cinder-storage-post.yaml
+++ b/puppet/cinder-storage-post.yaml
@@ -8,9 +8,12 @@ parameters:
type: boolean
servers:
type: json
- NodeConfigIdentifiers:
- type: json
+ DeployIdentifier:
+ type: string
description: Value which changes if the node configuration may need to be re-applied
+ RoleData:
+ type: json
+ default: {}
resources:
@@ -23,7 +26,7 @@ resources:
servers: {get_param: servers}
config: {get_resource: VolumeArtifactsConfig}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
VolumePuppetConfig:
type: OS::Heat::SoftwareConfig
@@ -32,22 +35,55 @@ resources:
group: puppet
options:
enable_debug: {get_param: ConfigDebug}
+ enable_hiera: True
+ enable_facter: False
+ inputs:
+ - name: step
outputs:
- name: result
config:
- get_file: manifests/overcloud_volume.pp
+ list_join:
+ - ''
+ - - get_file: manifests/overcloud_volume.pp
+ - {get_param: [RoleData, step_config]}
+
+ VolumeDeployment_Step2:
+ type: OS::Heat::StructuredDeployments
+ depends_on: VolumeArtifactsDeploy
+ properties:
+ name: VolumeDeployment_Step2
+ servers: {get_param: servers}
+ config: {get_resource: VolumePuppetConfig}
+ input_values:
+ step: 2
+ update_identifier: {get_param: DeployIdentifier}
- VolumeDeployment_Step1:
+ VolumeDeployment_Step3:
type: OS::Heat::StructuredDeployments
+ depends_on: VolumeDeployment_Step2
properties:
- name: VolumeDeployment_Step1
+ name: VolumeDeployment_Step3
servers: {get_param: servers}
config: {get_resource: VolumePuppetConfig}
+ input_values:
+ step: 3
+ update_identifier: {get_param: DeployIdentifier}
+
+ VolumeDeployment_Step4:
+ type: OS::Heat::StructuredDeployments
+ depends_on: VolumeDeployment_Step3
+ properties:
+ name: VolumeDeployment_Step4
+ servers: {get_param: servers}
+ config: {get_resource: VolumePuppetConfig}
+ input_values:
+ step: 4
+ update_identifier: {get_param: DeployIdentifier}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
ExtraConfig:
- depends_on: VolumeDeployment_Step1
+ depends_on: VolumeDeployment_Step4
type: OS::TripleO::NodeExtraConfigPost
properties:
servers: {get_param: servers}
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index d760de5e..05918026 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -4,29 +4,6 @@ parameters:
Image:
default: overcloud-cinder-volume
type: string
- CinderEnableIscsiBackend:
- default: true
- description: Whether to enable or not the Iscsi backend for Cinder
- type: boolean
- CinderISCSIHelper:
- default: lioadm
- description: The iSCSI helper to use with cinder.
- type: string
- CinderLVMLoopDeviceSize:
- default: 10280
- description: The size of the loopback file used by the cinder LVM driver.
- type: number
- CinderPassword:
- description: The password for the cinder service and db account, used by cinder-api.
- type: string
- hidden: true
- Debug:
- default: ''
- description: Set to True to enable debugging on all services.
- type: string
- VirtualIP: # deprecated. Use per service VIPs instead.
- default: ''
- type: string
ExtraConfig:
default: {}
description: |
@@ -50,22 +27,6 @@ parameters:
default: default
description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
- RabbitPassword:
- type: string
- hidden: true
- RabbitUserName:
- default: 'guest'
- type: string
- RabbitClientUseSSL:
- default: false
- description: >
- Rabbit client subscriber parameter to specify
- an SSL connection to the RabbitMQ host.
- type: string
- RabbitClientPort:
- default: 5672
- description: Set rabbit subscriber port, change this if using SSL
- type: number
SnmpdReadonlyUserName:
default: ro_snmp_user
description: The user name for SNMPd with readonly rights running on all Overcloud nodes
@@ -74,10 +35,6 @@ parameters:
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
- NtpServer:
- default: ''
- description: Comma-separated list of ntp servers
- type: comma_delimited_list
EnablePackageInstall:
default: 'false'
description: Set to true to enable package installation via Puppet
@@ -100,18 +57,6 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- TimeZone:
- default: 'UTC'
- description: The timezone to be set on Cinder nodes.
- type: string
- GlanceApiVirtualIP:
- type: string
- default: ''
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -143,11 +88,20 @@ parameters:
NodeIndex:
type: number
default: 0
-
+ ServiceConfigSettings:
+ type: json
+ default: {}
+ ConfigCommand:
+ type: string
+ description: Command which will be run whenever configuration data changes
+ default: os-refresh-config --timeout 14400
resources:
BlockStorage:
- type: OS::Nova::Server
+ type: OS::TripleO::Server
+ metadata:
+ os-collect-config:
+ command: {get_param: ConfigCommand}
properties:
image:
{get_param: Image}
@@ -277,37 +231,13 @@ resources:
server: {get_resource: BlockStorage}
config: {get_resource: BlockStorageConfig}
input_values:
- debug: {get_param: Debug}
- cinder_dsn:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://cinder:'
- - {get_param: CinderPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/cinder'
snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
- cinder_lvm_loop_device_size:
- str_replace:
- template: sizeM
- params:
- size: {get_param: CinderLVMLoopDeviceSize}
- cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
- cinder_iscsi_helper: {get_param: CinderISCSIHelper}
cinder_iscsi_ip_address:
str_replace:
template: "'IP'"
params:
IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]}
- glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
- rabbit_username: {get_param: RabbitUserName}
- rabbit_password: {get_param: RabbitPassword}
- rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
- rabbit_client_port: {get_param: RabbitClientPort}
- ntp_servers: {get_param: NtpServer}
- timezone: {get_param: TimeZone}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
@@ -323,6 +253,7 @@ resources:
- heat_config_%{::deploy_config_name}
- volume_extraconfig
- extraconfig
+ - service_configs
- volume
- all_nodes # provided by allNodesConfig
- '"%{::osfamily}"'
@@ -330,6 +261,8 @@ resources:
- network
merge_behavior: deeper
datafiles:
+ service_configs:
+ mapped_data: {get_param: ServiceConfigSettings}
common:
raw_data: {get_file: hieradata/common.yaml}
network:
@@ -345,19 +278,7 @@ resources:
raw_data: {get_file: hieradata/volume.yaml}
mapped_data:
# Cinder
- cinder::debug: {get_input: debug}
- cinder::setup_test_volume::size: {get_input: cinder_lvm_loop_device_size}
- cinder_iscsi_helper: {get_input: cinder_iscsi_helper}
- cinder::database_connection: {get_input: cinder_dsn}
- cinder::rabbit_userid: {get_input: rabbit_username}
- cinder::rabbit_password: {get_input: rabbit_password}
- cinder::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
- cinder::rabbit_port: {get_input: rabbit_client_port}
- cinder_enable_iscsi_backend: {get_input: cinder_enable_iscsi_backend}
- cinder_iscsi_ip_address: {get_input: cinder_iscsi_ip_address}
- cinder::glance::glance_api_servers: {get_input: glance_api_servers}
- ntp::servers: {get_input: ntp_servers}
- timezone::timezone: {get_input: timezone}
+ tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_ip_address: {get_input: cinder_iscsi_ip_address}
tripleo::packages::enable_install: {get_input: enable_package_install}
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
@@ -392,6 +313,12 @@ resources:
get_param: UpdateIdentifier
outputs:
+ ip_address:
+ description: IP address of the server in the ctlplane network
+ value: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ hostname:
+ description: Hostname of the server
+ value: {get_attr: [BlockStorage, name]}
hosts_entry:
value:
str_replace:
@@ -465,11 +392,3 @@ outputs:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value:
- list_join:
- - ''
- - - {get_attr: [BlockStorageDeployment, deploy_stdout]}
- - {get_attr: [NodeTLSCAData, deploy_stdout]}
- - {get_param: UpdateIdentifier}
diff --git a/puppet/compute-post.yaml b/puppet/compute-post.yaml
index 698cadba..c1b37772 100644
--- a/puppet/compute-post.yaml
+++ b/puppet/compute-post.yaml
@@ -10,13 +10,12 @@ parameters:
type: boolean
servers:
type: json
- NodeConfigIdentifiers:
- type: json
- description: Value which changes if the node configuration may need to be re-applied
- StepConfig:
+ RoleData:
+ type: json
+ default: {}
+ DeployIdentifier:
type: string
- description: Config manifests that will be used to step through the deployment.
- default: ''
+ description: Value which changes if the node configuration may need to be re-applied
resources:
@@ -29,7 +28,7 @@ resources:
servers: {get_param: servers}
config: {get_resource: ComputeArtifactsConfig}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ComputePuppetConfig:
type: OS::Heat::SoftwareConfig
@@ -47,7 +46,7 @@ resources:
list_join:
- ''
- - get_file: manifests/overcloud_compute.pp
- - {get_param: StepConfig}
+ - {get_param: [RoleData, step_config]}
ComputeServicesBaseDeployment_Step2:
type: OS::Heat::StructuredDeployments
@@ -58,7 +57,7 @@ resources:
config: {get_resource: ComputePuppetConfig}
input_values:
step: 2
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ComputeOvercloudServicesDeployment_Step3:
type: OS::Heat::StructuredDeployments
@@ -69,7 +68,7 @@ resources:
config: {get_resource: ComputePuppetConfig}
input_values:
step: 3
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ComputeOvercloudServicesDeployment_Step4:
type: OS::Heat::StructuredDeployments
@@ -80,7 +79,7 @@ resources:
config: {get_resource: ComputePuppetConfig}
input_values:
step: 4
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 7a863252..d7cf7787 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -69,43 +69,6 @@ parameters:
KeystonePublicApiVirtualIP:
type: string
default: ''
- NeutronBridgeMappings:
- description: >
- The OVS logical->physical bridge mappings to use. See the Neutron
- documentation for details. Defaults to mapping br-ex - the external
- bridge on hosts - to a physical name 'datacentre' which can be used
- to create provider networks (and we use this for the default floating
- network) - if changing this either use different post-install network
- scripts or be sure to keep 'datacentre' as a mapping network name.
- type: comma_delimited_list
- default: "datacentre:br-ex"
- NeutronEnableTunnelling:
- type: string
- default: "True"
- NeutronEnableL2Pop:
- type: string
- description: >
- Enable/disable the L2 population feature in the Neutron agents.
- default: "False"
- NeutronFlatNetworks:
- type: comma_delimited_list
- default: 'datacentre'
- description: >
- If set, flat networks to configure in neutron plugins.
- NeutronHost:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- NeutronNetworkType:
- type: comma_delimited_list
- description: The tenant network type for Neutron.
- default: 'vxlan'
- NeutronNetworkVLANRanges:
- default: 'datacentre:1:1000'
- description: >
- The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
- Neutron documentation for permitted values. Defaults to permitting any
- VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
- type: comma_delimited_list
NeutronPassword:
description: The password for the neutron service account, used by neutron agents.
type: string
@@ -118,73 +81,6 @@ parameters:
default: nic1
description: A port to add to the NeutronPhysicalBridge.
type: string
- NeutronTenantMtu:
- description: >
- The default MTU for tenant networks. For VXLAN/GRE tunneling, this should
- be at least 50 bytes smaller than the MTU on the physical network. This
- value will be used to set the MTU on the virtual Ethernet device.
- This number is related to the value of NeutronDnsmasqOptions, since that
- will determine the MTU that is assigned to the VM host through DHCP.
- default: 1400
- type: number
- NeutronTunnelTypes:
- type: comma_delimited_list
- description: |
- The tunnel types for the Neutron tenant network.
- default: 'vxlan'
- NeutronTunnelIdRanges:
- description: |
- Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
- of GRE tunnel IDs that are available for tenant network allocation
- default: ["1:4094", ]
- type: comma_delimited_list
- NeutronVniRanges:
- description: |
- Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges
- of VXLAN VNI IDs that are available for tenant network allocation
- default: ["1:4094", ]
- type: comma_delimited_list
- NeutronMetadataProxySharedSecret:
- description: Shared secret to prevent spoofing
- type: string
- hidden: true
- NeutronCorePlugin:
- default: 'ml2'
- description: |
- The core plugin for Neutron. The value should be the entrypoint to be loaded
- from neutron.core_plugins namespace.
- type: string
- NeutronServicePlugins:
- default: "router,qos"
- description: |
- Comma-separated list of service plugin entrypoints to be loaded from the
- neutron.service_plugins namespace.
- type: comma_delimited_list
- NeutronTypeDrivers:
- default: "vxlan,vlan,flat,gre"
- description: |
- Comma-separated list of network type driver entrypoints to be loaded.
- type: comma_delimited_list
- NeutronMechanismDrivers:
- default: 'openvswitch'
- description: |
- The mechanism drivers for the Neutron tenant network.
- type: comma_delimited_list
- NeutronAgentExtensions:
- default: "qos"
- description: |
- Comma-separated list of extensions enabled for the Neutron agents.
- type: comma_delimited_list
- # Not relevant for Computes, should be removed
- NeutronAllowL3AgentFailover:
- default: 'True'
- description: Allow automatic l3-agent failover
- type: string
- # Not relevant for Computes, should be removed
- NeutronL3HA:
- default: 'False'
- description: Whether to enable l3-agent HA
- type: string
NodeIndex:
type: number
default: 0
@@ -254,14 +150,6 @@ parameters:
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
- SnmpdReadonlyUserName:
- default: ro_snmp_user
- description: The user name for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- SnmpdReadonlyUserPassword:
- description: The user password for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- hidden: true
UpgradeLevelNovaCompute:
type: string
description: Nova Compute upgrade level
@@ -280,10 +168,6 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- TimeZone:
- default: 'UTC'
- description: The timezone to be set on compute nodes.
- type: string
UpdateIdentifier:
default: ''
type: string
@@ -328,11 +212,18 @@ parameters:
ServiceConfigSettings:
type: json
default: {}
+ ConfigCommand:
+ type: string
+ description: Command which will be run whenever configuration data changes
+ default: os-refresh-config --timeout 14400
resources:
NovaCompute:
- type: OS::Nova::Server
+ type: OS::TripleO::Server
+ metadata:
+ os-collect-config:
+ command: {get_param: ConfigCommand}
properties:
image:
{get_param: Image}
@@ -512,6 +403,10 @@ resources:
nova::rabbit_port: {get_input: rabbit_client_port}
nova::upgrade_level_compute: {get_input: upgrade_level_nova_compute}
nova_compute_driver: {get_input: nova_compute_driver}
+ # TODO(emilien): move libvirt & migration parameters in libvirt profile
+ # used to deploy libvirt/kvm dependencies:
+ nova::compute::libvirt::services::libvirt_virt_type: {get_input: nova_compute_libvirt_type}
+ # used to configured nova.conf:
nova::compute::libvirt::libvirt_virt_type: {get_input: nova_compute_libvirt_type}
nova::compute::neutron::libvirt_vif_driver: {get_input: nova_compute_libvirt_vif_driver}
nova_api_host: {get_input: nova_api_host}
@@ -525,7 +420,6 @@ resources:
nova::migration::live_migration_tunnelled: {get_input: nova_enable_rbd_backend}
rbd_persistent_storage: {get_input: cinder_enable_rbd_backend}
nova_password: {get_input: nova_password}
- nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu}
nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address}
nova::vncproxy::common::vncproxy_protocol: {get_input: nova_vncproxy_protocol}
nova::vncproxy::common::vncproxy_host: {get_input: nova_vncproxy_host}
@@ -541,41 +435,19 @@ resources:
ceilometer::agent::auth::auth_password: {get_input: ceilometer_password}
ceilometer::agent::auth::auth_url: {get_input: ceilometer_agent_auth_url}
ceilometer_compute_agent: {get_input: ceilometer_compute_agent}
- snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
- snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
nova::glance_api_servers: {get_input: glance_api_servers}
neutron::debug: {get_input: debug}
neutron::rabbit_password: {get_input: rabbit_password}
neutron::rabbit_user: {get_input: rabbit_username}
neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
neutron::rabbit_port: {get_input: rabbit_client_port}
- neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks}
- neutron_host: {get_input: neutron_host}
neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
- neutron::network_device_mtu: {get_input: neutron_tenant_mtu}
- neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types}
- neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
- neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions}
- neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
- neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
- neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
- neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings}
- neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling}
- neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
- neutron_physical_bridge: {get_input: neutron_physical_bridge}
- neutron_public_interface: {get_input: neutron_public_interface}
nova::network::neutron::neutron_password: {get_input: neutron_password}
nova::network::neutron::neutron_url: {get_input: neutron_internal_url}
nova::network::neutron::neutron_auth_url: {get_input: neutron_auth_url}
- neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
- neutron::core_plugin: {get_input: neutron_core_plugin}
- neutron::service_plugins: {get_input: neutron_service_plugins}
- neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers}
- neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers}
keystone_public_api_virtual_ip: {get_input: keystone_vip}
admin_password: {get_input: admin_password}
- timezone::timezone: {get_input: timezone}
tripleo::packages::enable_install: {get_input: enable_package_install}
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
@@ -616,74 +488,9 @@ resources:
ceilometer_password: {get_param: CeilometerPassword}
ceilometer_compute_agent: {get_param: CeilometerComputeAgent}
ceilometer_agent_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
- snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
- snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
- neutron_flat_networks:
- str_replace:
- template: NETWORKS
- params:
- NETWORKS: {get_param: NeutronFlatNetworks}
- neutron_host: {get_param: NeutronHost}
neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]}
- neutron_tunnel_id_ranges:
- str_replace:
- template: RANGES
- params:
- RANGES: {get_param: NeutronTunnelIdRanges}
- neutron_vni_ranges:
- str_replace:
- template: RANGES
- params:
- RANGES: {get_param: NeutronVniRanges}
- neutron_tenant_network_types:
- str_replace:
- template: TYPES
- params:
- TYPES: {get_param: NeutronNetworkType}
- neutron_tunnel_types:
- str_replace:
- template: TYPES
- params:
- TYPES: {get_param: NeutronTunnelTypes}
- neutron_network_vlan_ranges:
- str_replace:
- template: RANGES
- params:
- RANGES: {get_param: NeutronNetworkVLANRanges}
- neutron_bridge_mappings:
- str_replace:
- template: MAPPINGS
- params:
- MAPPINGS: {get_param: NeutronBridgeMappings}
- neutron_tenant_mtu: {get_param: NeutronTenantMtu}
- neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
- neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
- neutron_physical_bridge: {get_param: NeutronPhysicalBridge}
- neutron_public_interface: {get_param: NeutronPublicInterface}
neutron_password: {get_param: NeutronPassword}
- neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
- neutron_core_plugin: {get_param: NeutronCorePlugin}
- neutron_service_plugins:
- str_replace:
- template: PLUGINS
- params:
- PLUGINS: {get_param: NeutronServicePlugins}
- neutron_type_drivers:
- str_replace:
- template: DRIVERS
- params:
- DRIVERS: {get_param: NeutronTypeDrivers}
- neutron_mechanism_drivers:
- str_replace:
- template: MECHANISMS
- params:
- MECHANISMS: {get_param: NeutronMechanismDrivers}
- neutron_agent_extensions:
- str_replace:
- template: AGENT_EXTENSIONS
- params:
- AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions}
neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]}
neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]}
keystone_vip: {get_param: KeystonePublicApiVirtualIP}
@@ -692,7 +499,6 @@ resources:
rabbit_password: {get_param: RabbitPassword}
rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
rabbit_client_port: {get_param: RabbitClientPort}
- timezone: {get_param: TimeZone}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
@@ -813,12 +619,3 @@ outputs:
description: Heat resource handle for the Nova compute server
value:
{get_resource: NovaCompute}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value:
- list_join:
- - ','
- - - {get_attr: [NovaComputeDeployment, deploy_stdout]}
- - {get_attr: [NodeTLSCAData, deploy_stdout]}
- - {get_attr: [ComputeExtraConfigPre, deploy_stdout]}
- - {get_param: UpdateIdentifier}
diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml
index 36f9b4f8..4af6cb46 100644
--- a/puppet/controller-post.yaml
+++ b/puppet/controller-post.yaml
@@ -10,13 +10,12 @@ parameters:
type: boolean
servers:
type: json
- NodeConfigIdentifiers:
- type: json
- description: Value which changes if the node configuration may need to be re-applied
- StepConfig:
+ RoleData:
+ type: json
+ default: {}
+ DeployIdentifier:
type: string
- description: Config manifests that will be used to step through the deployment.
- default: ''
+ description: Value which changes if the node configuration may need to be re-applied
resources:
@@ -34,12 +33,12 @@ resources:
properties:
servers: {get_param: servers}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ControllerPuppetConfig:
type: OS::TripleO::ControllerConfig
properties:
- StepConfig: {get_param: StepConfig}
+ StepConfig: {get_param: [RoleData, step_config]}
# Step through a series of Puppet runs using the same manifest.
# NOTE: To enable stepping through the deployments via heat hooks,
@@ -54,7 +53,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 1
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ControllerServicesBaseDeployment_Step2:
type: OS::Heat::StructuredDeployments
@@ -65,7 +64,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 2
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ControllerOvercloudServicesDeployment_Step3:
type: OS::Heat::StructuredDeployments
@@ -76,7 +75,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 3
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ControllerOvercloudServicesDeployment_Step4:
type: OS::Heat::StructuredDeployments
@@ -87,7 +86,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 4
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ControllerOvercloudServicesDeployment_Step5:
type: OS::Heat::StructuredDeployments
@@ -98,7 +97,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 5
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
ControllerPostPuppet:
type: OS::TripleO::Tasks::ControllerPostPuppet
@@ -106,7 +105,7 @@ resources:
properties:
servers: {get_param: servers}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index 22b773c1..dfa63a90 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -8,47 +8,10 @@ parameters:
description: The password for the keystone admin account, used for monitoring, querying neutron etc.
type: string
hidden: true
- AodhApiVirtualIP:
- type: string
- default: ''
AodhPassword:
description: The password for the aodh services.
type: string
hidden: true
- #TODO(composable Redis): Remove the Redis password param
- #As is used by ceilometer
- CeilometerApiVirtualIP:
- type: string
- default: ''
- CeilometerBackend:
- default: 'mongodb'
- description: The ceilometer backend type.
- type: string
- CeilometerMeteringSecret:
- description: Secret shared by the ceilometer services.
- type: string
- hidden: true
- CeilometerPassword:
- description: The password for the ceilometer service and db account.
- type: string
- hidden: true
- CeilometerStoreEvents:
- default: false
- description: Whether to store events in ceilometer.
- type: boolean
- CeilometerMeterDispatcher:
- default: 'database'
- description: Dispatcher to process meter data
- type: string
- constraints:
- - allowed_values: ['gnocchi', 'database']
- CinderApiVirtualIP:
- type: string
- default: ''
- CeilometerWorkers:
- default: 0
- description: Number of workers for Ceilometer service.
- type: number
controllerExtraConfig:
default: {}
description: |
@@ -92,14 +55,6 @@ parameters:
default: true
description: Whether to deploy a LoadBalancer on the Controller
type: boolean
- EnableCephStorage:
- default: false
- description: Whether to deploy Ceph Storage (OSD) on the Controller
- type: boolean
- EnableSwiftStorage:
- default: true
- description: Whether to enable Swift Storage on the Controller
- type: boolean
ExtraConfig:
default: {}
description: |
@@ -154,9 +109,6 @@ parameters:
default: 'mysql'
description: The short name of the Gnocchi indexer backend to use.
type: string
- GnocchiApiVirtualIP:
- type: string
- default: ''
GnocchiPassword:
description: The password for the gnocchi service and db account.
type: string
@@ -176,10 +128,6 @@ parameters:
description: Auth encryption key for heat-engine
type: string
hidden: true
- HorizonAllowedHosts:
- default: '*'
- description: A list of IP/Hostname allowed to connect to horizon
- type: comma_delimited_list
HorizonSecret:
description: Secret key for Django
type: string
@@ -219,9 +167,6 @@ parameters:
default: false
description: Whether IPtables rules should be purged before setting up the new ones.
type: boolean
- SaharaApiVirtualIP:
- type: string
- default: ''
MysqlClusterUniquePart:
description: A unique identifier of the MySQL cluster the controller is in.
type: string
@@ -259,18 +204,6 @@ parameters:
default: nic1
description: What interface to bridge onto br-ex for network nodes.
type: string
- NeutronTenantMtu:
- description: >
- The default MTU for tenant networks. For VXLAN/GRE tunneling, this should
- be at least 50 bytes smaller than the MTU on the physical network. This
- value will be used to set the MTU on the virtual Ethernet device.
- This number is related to the value of NeutronDnsmasqOptions, since that
- will determine the MTU that is assigned to the VM host through DHCP.
- default: 1400
- type: number
- NovaApiVirtualIP:
- type: string
- default: ''
NovaEnableDBPurge:
default: true
description: |
@@ -294,9 +227,6 @@ parameters:
Specifies the interface where the public-facing virtual ip will be assigned.
This should be int_public when a VLAN is being used.
type: string
- PublicVirtualIP:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
RabbitCookie:
type: string
default: '' # Has to be here because of the ignored empty value bug
@@ -330,23 +260,11 @@ parameters:
type: string
default: '' # Has to be here because of the ignored empty value bug
description: An IP address which is wrapped in brackets in case of IPv6
- SnmpdReadonlyUserName:
- default: ro_snmp_user
- description: The user name for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- SnmpdReadonlyUserPassword:
- description: The user password for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- hidden: true
SwiftHashSuffix:
description: A random string to be used as a salt when hashing to determine mappings
in the ring.
hidden: true
type: string
- SwiftMountCheck:
- default: 'false'
- description: Value of mount_check in Swift account/container/object -server.conf
- type: boolean
SwiftMinPartHours:
type: number
default: 1
@@ -359,36 +277,17 @@ parameters:
default: true
description: Whether to manage Swift rings or not
type: boolean
- SwiftProxyVirtualIP:
- type: string
- default: ''
SwiftReplicas:
type: number
default: 3
description: How many replicas to use in the swift rings.
- TimeZone:
- default: 'UTC'
- description: The timezone to be set on controller nodes.
- type: string
UpgradeLevelNovaCompute:
type: string
description: Nova Compute upgrade level
default: ''
- VirtualIP: # DEPRECATED: use per service settings instead
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- HeatApiVirtualIP:
- type: string
- default: ''
- HeatApiVirtualIPUri:
- type: string
- default: ''
MysqlVirtualIP:
type: string
default: ''
- NeutronApiVirtualIP:
- type: string
- default: ''
EnablePackageInstall:
default: 'false'
description: Set to true to enable package installation via Puppet
@@ -450,6 +349,10 @@ parameters:
ServiceConfigSettings:
type: json
default: {}
+ ConfigCommand:
+ type: string
+ description: Command which will be run whenever configuration data changes
+ default: os-refresh-config --timeout 14400
parameter_groups:
- label: deprecated
@@ -460,7 +363,10 @@ parameter_groups:
resources:
Controller:
- type: OS::Nova::Server
+ type: OS::TripleO::Server
+ metadata:
+ os-collect-config:
+ command: {get_param: ConfigCommand}
properties:
image: {get_param: Image}
image_update_policy: {get_param: ImageUpdatePolicy}
@@ -610,12 +516,10 @@ resources:
server: {get_resource: Controller}
input_values:
bootstack_nodeid: {get_attr: [Controller, name]}
- ceilometer_workers: {get_param: CeilometerWorkers}
haproxy_log_address: {get_param: HAProxySyslogAddress}
haproxy_stats_password: {get_param: HAProxyStatsPassword}
haproxy_stats_user: {get_param: HAProxyStatsUser}
heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey}
- horizon_allowed_hosts: {get_param: HorizonAllowedHosts}
horizon_secret: {get_param: HorizonSecret}
admin_password: {get_param: AdminPassword}
debug: {get_param: Debug}
@@ -631,8 +535,6 @@ resources:
enable_fencing: {get_param: EnableFencing}
enable_galera: {get_param: EnableGalera}
enable_load_balancer: {get_param: EnableLoadBalancer}
- enable_ceph_storage: {get_param: EnableCephStorage}
- enable_swift_storage: {get_param: EnableSwiftStorage}
manage_firewall: {get_param: ManageFirewall}
purge_firewall_rules: {get_param: PurgeFirewallRules}
mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize}
@@ -646,21 +548,15 @@ resources:
CLUSTER: {get_param: MysqlClusterUniquePart}
neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
neutron_password: {get_param: NeutronPassword}
- neutron_tenant_mtu: {get_param: NeutronTenantMtu}
neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] }
neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] }
neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] }
neutron_auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] }
nova_internal_url: { get_param: [ EndpointMap, NovaInternal, uri ] }
- ceilometer_backend: {get_param: CeilometerBackend}
- ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
- ceilometer_password: {get_param: CeilometerPassword}
- ceilometer_store_events: {get_param: CeilometerStoreEvents}
aodh_password: {get_param: AodhPassword}
aodh_internal_url: { get_param: [ EndpointMap, AodhInternal, uri ] }
aodh_public_url: { get_param: [ EndpointMap, AodhPublic, uri ] }
aodh_admin_url: { get_param: [ EndpointMap, AodhAdmin, uri ] }
- ceilometer_meter_dispatcher: {get_param: CeilometerMeterDispatcher}
gnocchi_password: {get_param: GnocchiPassword}
gnocchi_backend: {get_param: GnocchiBackend}
gnocchi_indexer_backend: {get_param: GnocchiIndexerBackend}
@@ -672,15 +568,6 @@ resources:
- '@'
- {get_param: RedisVirtualIPUri}
- ':6379/'
- ceilometer_dsn:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://ceilometer:'
- - {get_param: CeilometerPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/ceilometer'
gnocchi_dsn:
list_join:
- ''
@@ -702,11 +589,6 @@ resources:
gnocchi_internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]}
gnocchi_public_url: { get_param: [ EndpointMap, GnocchiPublic, uri ] }
gnocchi_admin_url: { get_param: [ EndpointMap, GnocchiAdmin, uri ] }
- ceilometer_public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
- ceilometer_internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
- ceilometer_admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
- snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
- snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
nova_enable_db_purge: {get_param: NovaEnableDBPurge}
nova_ipv6: {get_param: NovaIPv6}
corosync_ipv6: {get_param: CorosyncIPv6}
@@ -742,7 +624,6 @@ resources:
rabbit_cookie: {get_param: RabbitCookie}
rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
rabbit_client_port: {get_param: RabbitClientPort}
- timezone: {get_param: TimeZone}
control_virtual_interface: {get_param: ControlVirtualInterface}
public_virtual_interface: {get_param: PublicVirtualInterface}
swift_hash_suffix: {get_param: SwiftHashSuffix}
@@ -750,7 +631,6 @@ resources:
swift_ring_build: {get_param: SwiftRingBuild}
swift_replicas: {get_param: SwiftReplicas}
swift_min_part_hours: {get_param: SwiftMinPartHours}
- swift_mount_check: {get_param: SwiftMountCheck}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
@@ -792,6 +672,7 @@ resources:
ceph_cluster_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
ceph_public_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
ceph_public_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
+ ironic_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, IronicApiNetwork]}]}
# Map heat metadata into hiera datafiles
ControllerConfig:
@@ -824,9 +705,7 @@ resources:
- neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
- neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
- cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
- - neutron_nuage_data # Optionally provided by ControllerExtraConfigPre
- midonet_data #Optionally provided by AllNodesExtraConfig
- - neutron_opencontrail_data # Optionally provided by ControllerExtraConfigPre
merge_behavior: deeper
datafiles:
service_configs:
@@ -876,7 +755,6 @@ resources:
tripleo::ringbuilder::part_power: {get_input: swift_part_power}
tripleo::ringbuilder::replicas: {get_input: swift_replicas}
tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours}
- swift_mount_check: {get_input: swift_mount_check}
# Cinder
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_address: {get_input: cinder_iscsi_network}
@@ -913,8 +791,6 @@ resources:
# MySQL
admin_password: {get_input: admin_password}
enable_galera: {get_input: enable_galera}
- enable_ceph_storage: {get_input: enable_ceph_storage}
- enable_swift_storage: {get_input: enable_swift_storage}
mysql_innodb_buffer_pool_size: {get_input: mysql_innodb_buffer_pool_size}
mysql_max_connections: {get_input: mysql_max_connections}
mysql::server::root_password: {get_input: mysql_root_password}
@@ -925,7 +801,6 @@ resources:
# Neutron
neutron::bind_host: {get_input: neutron_api_network}
- neutron::network_device_mtu: {get_input: neutron_tenant_mtu}
neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network}
neutron::keystone::auth::public_url: {get_input: neutron_public_url }
@@ -935,33 +810,7 @@ resources:
neutron::keystone::auth::region: {get_input: keystone_region}
# Ceilometer
- ceilometer_backend: {get_input: ceilometer_backend}
- ceilometer_mysql_conn_string: {get_input: ceilometer_dsn}
- ceilometer::telemetry_secret: {get_input: ceilometer_metering_secret}
- ceilometer::rabbit_userid: {get_input: rabbit_username}
- ceilometer::rabbit_password: {get_input: rabbit_password}
- ceilometer::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
- ceilometer::rabbit_port: {get_input: rabbit_client_port}
- ceilometer::debug: {get_input: debug}
ceilometer::api::host: {get_input: ceilometer_api_network}
- ceilometer::api::keystone_password: {get_input: ceilometer_password}
- ceilometer::api::auth_uri: {get_input: keystone_auth_uri}
- ceilometer::api::identity_uri: {get_input: keystone_identity_uri}
- ceilometer::agent::auth::auth_password: {get_input: ceilometer_password}
- ceilometer::agent::auth::auth_url: {get_input: keystone_auth_uri}
- ceilometer::agent::central::coordination_url: {get_input: ceilometer_coordination_url}
- ceilometer::agent::notification::store_events: {get_input: ceilometer_store_events}
- ceilometer::db::mysql::password: {get_input: ceilometer_password}
- ceilometer::collector::meter_dispatcher: {get_input: ceilometer_meter_dispatcher}
- ceilometer::dispatcher::gnocchi::url: {get_input: gnocchi_internal_url }
- ceilometer::dispatcher::gnocchi::filter_project: 'service'
- ceilometer::dispatcher::gnocchi::archive_policy: 'low'
- ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml'
- ceilometer::keystone::auth::public_url: {get_input: ceilometer_public_url }
- ceilometer::keystone::auth::internal_url: {get_input: ceilometer_internal_url }
- ceilometer::keystone::auth::admin_url: {get_input: ceilometer_admin_url }
- ceilometer::keystone::auth::password: {get_input: ceilometer_password }
- ceilometer::keystone::auth::region: {get_input: keystone_region}
snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
@@ -1019,7 +868,6 @@ resources:
nova::api::api_bind_address: {get_input: nova_api_network}
nova::api::metadata_listen: {get_input: nova_metadata_network}
nova::api::admin_password: {get_input: nova_password}
- nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu}
nova::database_connection: {get_input: nova_dsn}
nova::api_database_connection: {get_input: nova_api_dsn}
nova::glance_api_servers: {get_input: glance_api_servers}
@@ -1041,7 +889,6 @@ resources:
# Horizon
apache::mod::remoteip::proxy_ips: {get_input: horizon_subnet}
apache::ip: {get_input: horizon_network}
- horizon::allowed_hosts: {get_input: horizon_allowed_hosts}
horizon::django_debug: {get_input: debug}
horizon::secret_key: {get_input: horizon_secret}
horizon::bind_address: {get_input: horizon_network}
@@ -1059,7 +906,6 @@ resources:
# Misc
memcached_ipv6: {get_input: memcached_ipv6}
memcached::listen_ip: {get_input: memcached_network}
- timezone::timezone: {get_input: timezone}
control_virtual_interface: {get_input: control_virtual_interface}
public_virtual_interface: {get_input: public_virtual_interface}
tripleo::keepalived::control_virtual_interface: {get_input: control_virtual_interface}
@@ -1198,16 +1044,6 @@ outputs:
template: "IP:11211"
params:
IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
- config_identifier:
- description: identifier which changes if the controller configuration may need re-applying
- value:
- list_join:
- - ','
- - - {get_attr: [ControllerDeployment, deploy_stdout]}
- - {get_attr: [NodeTLSCAData, deploy_stdout]}
- - {get_attr: [NodeTLSData, deploy_stdout]}
- - {get_attr: [ControllerExtraConfigPre, deploy_stdout]}
- - {get_param: UpdateIdentifier}
tls_key_modulus_md5:
description: MD5 checksum of the TLS Key Modulus
value: {get_attr: [NodeTLSData, key_modulus_md5]}
diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
index 3e455347..aa5c3c43 100644
--- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
@@ -109,11 +109,3 @@ resources:
properties:
config: {get_resource: NetworkMidoNetConfig}
servers: {get_param: compute_servers}
-
-outputs:
- config_identifier:
- value:
- list_join:
- - ' '
- - - {get_attr: [NetworkMidonetDeploymentControllers, deploy_stdouts]}
- - {get_attr: [NetworkMidonetDeploymentComputes, deploy_stdouts]}
diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
index 71445800..e924fc87 100644
--- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
@@ -343,11 +343,3 @@ resources:
input_values:
ucsm_config: {get_param: NetworkUCSMHostList}
actions: ['CREATE'] # Only do this on CREATE
-
-outputs:
- # The Deployment applying the hieradata outputs the derived config-id, which
- # changes if the input_values change, so if the stdouts from
- # NetworkCiscoDeployment change, we need to reapply puppet (which will
- # happen if we return a different config_identifier)
- config_identifier:
- value: {get_attr: [NetworkCiscoDeployment, deploy_stdouts]}
diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml
deleted file mode 100644
index 7c05a5b9..00000000
--- a/puppet/extraconfig/ceph/ceph-external-config.yaml
+++ /dev/null
@@ -1,120 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Configure parameters for an external Ceph cluster via Puppet.'
-
-parameters:
- ceph_storage_count:
- default: 0
- type: number
- description: Number of Ceph storage nodes. Used to enable/disable managed Ceph installation.
- ceph_external_mon_ips:
- default: ''
- type: string
- description: List of external Ceph Mon host IPs.
- ceph_client_key:
- default: ''
- type: string
- description: Ceph key used to create the 'openstack' user keyring.
- ceph_fsid:
- default: ''
- type: string
- # The following parameters are unused for external Ceph clusters and
- # are here and exist for compatibility
- ceph_admin_key:
- default: ''
- type: string
- ceph_mon_key:
- default: ''
- type: string
- ceph_mon_names:
- type: comma_delimited_list
- ceph_mon_ips:
- type: comma_delimited_list
- NovaRbdPoolName:
- default: vms
- type: string
- CinderRbdPoolName:
- default: volumes
- type: string
- CinderBackupRbdPoolName:
- default: backups
- type: string
- GlanceRbdPoolName:
- default: images
- type: string
- GnocchiRbdPoolName:
- default: metrics
- type: string
- CephClientUserName:
- default: openstack
- type: string
- CephIPv6:
- default: False
- type: boolean
-
-resources:
- CephClusterConfigImpl:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- hiera:
- datafiles:
- ceph_cluster:
- mapped_data:
- ceph_storage_count: {get_param: ceph_storage_count}
- enable_external_ceph: true
- ceph_ipv6: {get_param: CephIPv6}
- ceph_mon_host: {get_param: ceph_external_mon_ips}
- ceph_mon_host_v6: {get_param: ceph_external_mon_ips}
- ceph::profile::params::fsid: {get_param: ceph_fsid}
- ceph::profile::params::client_keys:
- str_replace:
- template: "{
- client.CLIENT_USER: {
- secret: 'CLIENT_KEY',
- mode: '0644',
- cap_mon: 'allow r',
- cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
- }
- }"
- params:
- CLIENT_USER: {get_param: CephClientUserName}
- CLIENT_KEY: {get_param: ceph_client_key}
- NOVA_POOL: {get_param: NovaRbdPoolName}
- CINDER_POOL: {get_param: CinderRbdPoolName}
- CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
- GLANCE_POOL: {get_param: GlanceRbdPoolName}
- GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
- ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
- nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
- tripleo::profile::base::cinder::volume::rbd::cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
- glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
- gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName}
- gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName}
- nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
- glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
- nova::compute::rbd::rbd_keyring:
- list_join:
- - '.'
- - - 'client'
- - {get_param: CephClientUserName}
- gnocchi::storage::ceph::ceph_keyring:
- list_join:
- - '.'
- - - '/etc/ceph/ceph'
- - 'client'
- - {get_param: CephClientUserName}
- - 'keyring'
- tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name: {get_param: CephClientUserName}
- ceph_pools:
- - {get_param: CinderRbdPoolName}
- - {get_param: CinderBackupRbdPoolName}
- - {get_param: NovaRbdPoolName}
- - {get_param: GlanceRbdPoolName}
- - {get_param: GnocchiRbdPoolName}
-
-outputs:
- config_id:
- description: The ID of the CephClusterConfigImpl resource.
- value:
- {get_resource: CephClusterConfigImpl}
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml
deleted file mode 100644
index a4cfea07..00000000
--- a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: Configure hieradata for Nuage configuration on the Controller
-
-parameters:
- server:
- description: ID of the controller node to apply this config to
- type: string
-
- # Config specific parameters, to be provided via parameter_defaults
- NeutronNuageOSControllerIp:
- description: IP address of the OpenStack Controller
- type: string
-
- NeutronNuageNetPartitionName:
- description: Specifies the title that you will see on the VSD
- type: string
- default: 'default_name'
-
- NeutronNuageVSDIp:
- description: IP address and port of the Virtual Services Directory
- type: string
-
- NeutronNuageVSDUsername:
- description: Username to be used to log into VSD
- type: string
-
- NeutronNuageVSDPassword:
- description: Password to be used to log into VSD
- type: string
-
- NeutronNuageVSDOrganization:
- description: Organization parameter required to log into VSD
- type: string
- default: 'organization'
-
- NeutronNuageBaseURIVersion:
- description: URI version to be used based on the VSD release
- type: string
- default: 'default_uri_version'
-
- NeutronNuageCMSId:
- description: Cloud Management System ID (CMS ID) to distinguish between OS instances on the same VSD
- type: string
-
- UseForwardedFor:
- description: Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy.
- type: boolean
- default: false
-
-resources:
- NeutronNuageConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- hiera:
- datafiles:
- neutron_nuage_data:
- mapped_data:
- neutron::plugins::nuage::nuage_oscontroller_ip: {get_input: NuageOSControllerIp}
- neutron::plugins::nuage::nuage_net_partition_name: {get_input: NuageNetPartitionName}
- neutron::plugins::nuage::nuage_vsd_ip: {get_input: NuageVSDIp}
- neutron::plugins::nuage::nuage_vsd_username: {get_input: NuageVSDUsername}
- neutron::plugins::nuage::nuage_vsd_password: {get_input: NuageVSDPassword}
- neutron::plugins::nuage::nuage_vsd_organization: {get_input: NuageVSDOrganization}
- neutron::plugins::nuage::nuage_base_uri_version: {get_input: NuageBaseURIVersion}
- neutron::plugins::nuage::nuage_cms_id: {get_input: NuageCMSId}
- nova::api::use_forwarded_for: {get_input: NovaUseForwardedFor}
-
- NeutronNuageDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- name: NeutronNuageDeployment
- config: {get_resource: NeutronNuageConfig}
- server: {get_param: server}
- input_values:
- NuageOSControllerIp: {get_param: NeutronNuageOSControllerIp}
- NuageNetPartitionName: {get_param: NeutronNuageNetPartitionName}
- NuageVSDIp: {get_param: NeutronNuageVSDIp}
- NuageVSDUsername: {get_param: NeutronNuageVSDUsername}
- NuageVSDPassword: {get_param: NeutronNuageVSDPassword}
- NuageVSDOrganization: {get_param: NeutronNuageVSDOrganization}
- NuageBaseURIVersion: {get_param: NeutronNuageBaseURIVersion}
- NuageCMSId: {get_param: NeutronNuageCMSId}
- NovaUseForwardedFor: {get_param: UseForwardedFor}
-
-outputs:
- deploy_stdout:
- description: Deployment reference, used to trigger puppet apply on changes
- value: {get_attr: [NeutronNuageDeployment, deploy_stdout]}
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml
deleted file mode 100644
index 5c686fe7..00000000
--- a/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: Controller hieradata for Neutron OpenContrail configuration
-
-parameters:
- server:
- description: ID of the controller node to apply this config to
- type: string
- ContrailApiServerIp:
- description: IP address of the OpenContrail API server
- type: string
- ContrailApiServerPort:
- description: Port of the OpenContrail API
- type: string
- default: 8082
- ContrailMultiTenancy:
- description: Whether to enable multi tenancy
- type: boolean
- default: false
- ContrailExtensions:
- description: List of OpenContrail extensions to be enabled
- type: comma_delimited_list
- default: ''
-
-resources:
- ControllerContrailConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- hiera:
- datafiles:
- neutron_opencontrail_data:
- mapped_data:
- neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions
-
- neutron::plugins::opencontrail::api_server_ip: {get_input: contrail_api_server_ip}
- neutron::plugins::opencontrail::api_server_port: {get_input: contrail_api_server_port}
- neutron::plugins::opencontrail::multi_tenancy: {get_input: contrail_multi_tenancy}
- neutron::plugins::opencontrail::contrail_extensions: {get_input: contrail_extensions}
- neutron::plugins::opencontrail::keystone_auth_url: '"%{hiera(''keystone_auth_uri'')}"'
- neutron::plugins::opencontrail::keystone_admin_user: admin
- neutron::plugins::opencontrail::keystone_admin_tenant_name: admin
- neutron::plugins::opencontrail::keystone_admin_password: '"%{hiera(''admin_password'')}"'
- neutron::plugins::opencontrail::keystone_admin_token: '"%{hiera(''keystone::admin_token'')}"'
-
- ControllerContrailDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: ControllerContrailConfig}
- server: {get_param: server}
- input_values:
- contrail_api_server_ip: {get_param: ContrailApiServerIp}
- contrail_api_server_port: {get_param: ContrailApiServerPort}
- contrail_multi_tenancy: {get_param: ContrailMultiTenancy}
- contrail_extensions: {get_param: ContrailExtensions}
-
-
-outputs:
- deploy_stdout:
- description: Output of the extra hiera data deployment
- value: {get_attr: [ControllerContrailDeployment, deploy_stdout]}
diff --git a/puppet/hieradata/ceph.yaml b/puppet/hieradata/ceph.yaml
index b29b91cf..ccb41cc4 100644
--- a/puppet/hieradata/ceph.yaml
+++ b/puppet/hieradata/ceph.yaml
@@ -7,5 +7,3 @@ ceph::profile::params::manage_repo: false
ceph::profile::params::authentication_type: cephx
ceph_classes: []
-
-ceph_osd_selinux_permissive: true
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index 2d928cbf..62728332 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -6,8 +6,6 @@ nova::notification_driver: messagingv2
nova::compute::instance_usage_audit: true
nova::compute::instance_usage_audit_period: 'hour'
-nova::compute::libvirt::migration_support: true
-
nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
nova::network::neutron::neutron_auth_type: 'v3password'
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index 7db2b5de..860c8fb5 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -4,7 +4,7 @@ nova::api::enabled: true
nova::vncproxy::enabled: true
# gnocchi
-gnocchi::db::sync::extra_opts: '--skip-storage'
+gnocchi::db::sync::extra_opts: '--skip-storage --create-legacy-resource-types'
gnocchi::storage::swift::swift_user: 'service:gnocchi'
gnocchi::storage::swift::swift_auth_version: 2
gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26'
@@ -82,8 +82,10 @@ keystone::wsgi::apache::ssl: false
swift::proxy::pipeline:
- 'catch_errors'
- 'healthcheck'
+ - 'proxy-logging'
- 'cache'
- 'ratelimit'
+ - 'bulk'
- 'tempurl'
- 'formpost'
- 'authtoken'
@@ -212,6 +214,7 @@ tripleo::firewall::firewall_rules:
- 26379
'109 rabbitmq':
dport:
+ - 4369
- 5672
- 35672
'110 ceph':
diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml
index f2c95de6..9b2ea4f4 100644
--- a/puppet/hieradata/database.yaml
+++ b/puppet/hieradata/database.yaml
@@ -13,46 +13,6 @@ nova::db::mysql_api::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
-# Glance
-glance::db::mysql::user: glance
-glance::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-glance::db::mysql::dbname: glance
-glance::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-
-# Keystone
-keystone::db::mysql::user: keystone
-keystone::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-keystone::db::mysql::dbname: keystone
-keystone::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-
-# Neutron
-neutron::db::mysql::user: neutron
-neutron::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-neutron::db::mysql::dbname: ovs_neutron
-neutron::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-
-# Cinder
-cinder::db::mysql::user: cinder
-cinder::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-cinder::db::mysql::dbname: cinder
-cinder::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-
-# Heat
-heat::db::mysql::user: heat
-heat::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-heat::db::mysql::dbname: heat
-heat::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-
# Ceilometer
ceilometer::db::mysql::user: ceilometer
ceilometer::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
@@ -76,11 +36,3 @@ aodh::db::mysql::dbname: aodh
aodh::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
-
-
-sahara::db::mysql::user: sahara
-sahara::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
-sahara::db::mysql::dbname: sahara
-sahara::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index e69353b0..152694d9 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -16,43 +16,9 @@
include ::tripleo::packages
include ::tripleo::firewall
-if hiera('step') >= 1 {
-
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
- include ::timezone
-
-}
-
-if hiera('step') >= 3 {
- if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
- exec { 'set selinux to permissive on boot':
- command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
- onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
- path => ['/usr/bin', '/usr/sbin'],
- }
-
- exec { 'set selinux to permissive':
- command => 'setenforce 0',
- onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
- path => ['/usr/bin', '/usr/sbin'],
- } -> Class['ceph::profile::osd']
- }
-
- if str2bool(hiera('ceph_ipv6', false)) {
- $mon_host = hiera('ceph_mon_host_v6')
- } else {
- $mon_host = hiera('ceph_mon_host')
- }
- class { '::ceph::profile::params':
- mon_host => $mon_host,
- }
- include ::ceph::conf
- include ::ceph::profile::client
- include ::ceph::profile::osd
-
+if hiera('step') >= 4 {
hiera_include('ceph_classes')
- package_manifest{'/var/lib/tripleo/installed-packages/overcloud_ceph': ensure => present}
}
+
+$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_ceph', hiera('step')])
+package_manifest{$package_manifest_name: ensure => present}
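A minimal sketch (not part of the patch) of how the per-step package manifest above resolves, assuming hiera('step') returns 4 on the final pass:

$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_ceph', '4'])
# $package_manifest_name is now '/var/lib/tripleo/installed-packages/overcloud_ceph4',
# so each deployment step records its own marker file instead of sharing a single one.
package_manifest{ $package_manifest_name: ensure => present }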
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index da84927e..6e446fd8 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -16,195 +16,20 @@
include ::tripleo::packages
include ::tripleo::firewall
-create_resources(kmod::load, hiera('kernel_modules'), { })
-create_resources(sysctl::value, hiera('sysctl_settings'), { })
-Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
-include ::timezone
-
if hiera('step') >= 4 {
- file { ['/etc/libvirt/qemu/networks/autostart/default.xml',
- '/etc/libvirt/qemu/networks/default.xml']:
- ensure => absent,
- before => Service['libvirt'],
- }
- # in case libvirt has been already running before the Puppet run, make
- # sure the default network is destroyed
- exec { 'libvirt-default-net-destroy':
- command => '/usr/bin/virsh net-destroy default',
- onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"',
- before => Service['libvirt'],
- }
-
- # When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique
- exec { 'reset-iscsi-initiator-name':
- command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi',
- onlyif => '/usr/bin/test ! -f /etc/iscsi/.initiator_reset',
- }->
-
- file { '/etc/iscsi/.initiator_reset':
- ensure => present,
- }
-
- $rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false)
- $rbd_persistent_storage = hiera('rbd_persistent_storage', false)
- if $rbd_ephemeral_storage or $rbd_persistent_storage {
- if str2bool(hiera('ceph_ipv6', false)) {
- $mon_host = hiera('ceph_mon_host_v6')
- } else {
- $mon_host = hiera('ceph_mon_host')
- }
- class { '::ceph::profile::params':
- mon_host => $mon_host,
- }
- include ::ceph::conf
- include ::ceph::profile::client
-
- $client_keys = hiera('ceph::profile::params::client_keys')
- $client_user = join(['client.', hiera('tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name')])
- class { '::nova::compute::rbd':
- libvirt_rbd_secret_key => $client_keys[$client_user]['secret'],
- }
- }
-
- if hiera('cinder_enable_nfs_backend', false) {
- if str2bool($::selinux) {
- selboolean { 'virt_use_nfs':
- value => on,
- persistent => true,
- } -> Package['nfs-utils']
- }
-
- package { 'nfs-utils': } -> Service['nova-compute']
- }
-
- if str2bool(hiera('nova::use_ipv6', false)) {
- $vncserver_listen = '::0'
- } else {
- $vncserver_listen = '0.0.0.0'
- }
-
- if $rbd_ephemeral_storage {
- class { '::nova::compute::libvirt':
- libvirt_disk_cachemodes => ['network=writeback'],
- libvirt_hw_disk_discard => 'unmap',
- vncserver_listen => $vncserver_listen,
- }
- } else {
- class { '::nova::compute::libvirt' :
- vncserver_listen => $vncserver_listen,
- }
- }
-
nova_config {
'DEFAULT/my_ip': value => $ipaddress;
'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
}
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- file { '/etc/libvirt/qemu.conf':
- ensure => present,
- content => hiera('midonet_libvirt_qemu_data')
- }
- }
- include ::nova::network::neutron
- include ::neutron
- include ::neutron::config
-
- # If the value of core plugin is set to 'nuage',
- # include nuage agent,
- # If the value of core plugin is set to 'midonet',
- # include midonet agent,
- # else use the default value of 'ml2'
- if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
- include ::nuage::vrs
- include ::nova::compute::neutron
-
- class { '::nuage::metadataagent':
- nova_os_tenant_name => hiera('nova::api::admin_tenant_name'),
- nova_os_password => hiera('nova_password'),
- nova_metadata_ip => hiera('nova_metadata_node_ips'),
- nova_auth_ip => hiera('keystone_public_api_virtual_ip'),
- }
- }
- elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
- # TODO(devvesa) provide non-controller ips for these services
- $zookeeper_node_ips = hiera('neutron_api_node_ips')
- $cassandra_node_ips = hiera('neutron_api_node_ips')
-
- class { '::tripleo::network::midonet::agent':
- zookeeper_servers => $zookeeper_node_ips,
- cassandra_seeds => $cassandra_node_ips
- }
- }
- elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
-
- include ::contrail::vrouter
- # NOTE: it's not possible to use this class without a functional
- # contrail controller up and running
- #class {'::contrail::vrouter::provision_vrouter':
- # require => Class['contrail::vrouter'],
- #}
- }
- elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
- # forward all ipv4 traffic
- # this is required for the vms to pass through the gateways public interface
- sysctl::value { 'net.ipv4.ip_forward': value => '1' }
-
- # ifc_ctl_pp needs to be invoked by root as part of the vif.py when a VM is powered on
- file { '/etc/sudoers.d/ifc_ctl_sudoers':
- ensure => file,
- owner => root,
- group => root,
- mode => '0440',
- content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n",
- }
- }
- else {
-
- # NOTE: this code won't live in puppet-neutron until Neutron OVS agent
- # can be gracefully restarted. See https://review.openstack.org/#/c/297211
- # In the meantime, it's safe to restart the agent on each change in neutron.conf,
- # because Puppet changes are supposed to be done during bootstrap and upgrades.
- # Some resource managed by Neutron_config (like messaging and logging options) require
- # a restart of OVS agent. This code does it.
- # In Newton, OVS agent will be able to be restarted gracefully so we'll drop the code
- # from here and fix it in puppet-neutron.
- Neutron_config<||> ~> Service['neutron-ovs-agent-service']
-
- include ::neutron::plugins::ml2
- include ::neutron::agents::ml2::ovs
-
- if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
- class { '::neutron::agents::n1kv_vem':
- n1kv_source => hiera('n1kv_vem_source', undef),
- n1kv_version => hiera('n1kv_vem_version', undef),
- }
- }
-
- if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
- include ::neutron::agents::bigswitch
- }
- }
-
include ::ceilometer
include ::ceilometer::config
include ::ceilometer::agent::compute
include ::ceilometer::agent::auth
- $snmpd_user = hiera('snmpd_readonly_user_name')
- snmp::snmpv3_user { $snmpd_user:
- authtype => 'MD5',
- authpass => hiera('snmpd_readonly_user_password'),
- }
- class { '::snmp':
- agentaddress => ['udp:161','udp6:[::1]:161'],
- snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
- }
-
hiera_include('compute_classes')
- package_manifest{ '/var/lib/tripleo/installed-packages/overcloud_compute': ensure => present }
-
}
+
+$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_compute', hiera('step')])
+package_manifest{$package_manifest_name: ensure => present}
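For illustration only, hiera_include('compute_classes') pulls a list of class names from hieradata and includes each of them; the class name below is hypothetical and assumes such a key has been defined:

# Hypothetical hieradata entry (YAML), not shipped by this patch:
#   compute_classes:
#     - '::my_profile::compute_extras'
# hiera_include('compute_classes') then behaves roughly like:
$compute_classes = hiera('compute_classes', [])
include $compute_classes

The same pattern serves ceph_classes, object_classes and volume_classes in the other role manifests.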
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index c07383c8..9cdbda0f 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -18,37 +18,7 @@ include ::tripleo::firewall
$enable_load_balancer = hiera('enable_load_balancer', true)
-if hiera('step') >= 1 {
-
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
-}
-
if hiera('step') >= 2 {
-
- include ::timezone
-
- # MongoDB
- if downcase(hiera('ceilometer_backend')) == 'mongodb' {
- # NOTE(gfidente): We need to pass the list of IPv6 addresses *with* port and
- # without the brackets as 'members' argument for the 'mongodb_replset'
- # resource.
- if str2bool(hiera('mongodb::server::ipv6', false)) {
- $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
- $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
- $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
- } else {
- $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
- $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
- }
- $mongo_node_string = join($mongo_node_ips_with_port, ',')
-
- $mongodb_replset = hiera('mongodb::server::replset')
- $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
- }
-
if str2bool(hiera('enable_galera', true)) {
$mysql_config_file = '/etc/my.cnf.d/galera.cnf'
} else {
@@ -76,60 +46,8 @@ if hiera('step') >= 2 {
if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
include ::gnocchi::db::mysql
}
- if downcase(hiera('ceilometer_backend')) == 'mysql' {
- include ::ceilometer::db::mysql
- }
include ::aodh::db::mysql
- $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
-
- if $enable_ceph {
- $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
- if str2bool(hiera('ceph_ipv6', false)) {
- $mon_host = hiera('ceph_mon_host_v6')
- } else {
- $mon_host = hiera('ceph_mon_host')
- }
- class { '::ceph::profile::params':
- mon_initial_members => $mon_initial_members,
- mon_host => $mon_host,
- }
- include ::ceph::conf
- include ::ceph::profile::mon
- }
-
- if str2bool(hiera('enable_ceph_storage', false)) {
- if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
- exec { 'set selinux to permissive on boot':
- command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
- onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
- path => ['/usr/bin', '/usr/sbin'],
- }
-
- exec { 'set selinux to permissive':
- command => 'setenforce 0',
- onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
- path => ['/usr/bin', '/usr/sbin'],
- } -> Class['ceph::profile::osd']
- }
-
- include ::ceph::conf
- include ::ceph::profile::osd
- }
-
- if str2bool(hiera('enable_external_ceph', false)) {
- if str2bool(hiera('ceph_ipv6', false)) {
- $mon_host = hiera('ceph_mon_host_v6')
- } else {
- $mon_host = hiera('ceph_mon_host')
- }
- class { '::ceph::profile::params':
- mon_host => $mon_host,
- }
- include ::ceph::conf
- include ::ceph::profile::client
- }
-
} #END STEP 2
if hiera('step') >= 4 {
@@ -146,129 +64,6 @@ if hiera('step') >= 4 {
}
include ::nova::config
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
- # TODO(devvesa) provide non-controller ips for these services
- $zookeeper_node_ips = hiera('neutron_api_node_ips')
- $cassandra_node_ips = hiera('neutron_api_node_ips')
-
- # Run zookeeper in the controller if configured
- if hiera('enable_zookeeper_on_controller') {
- class {'::tripleo::cluster::zookeeper':
- zookeeper_server_ips => $zookeeper_node_ips,
- # TODO: create a 'bind' hiera key for zookeeper
- zookeeper_client_ip => hiera('neutron::bind_host'),
- zookeeper_hostnames => hiera('controller_node_names')
- }
- }
-
- # Run cassandra in the controller if configured
- if hiera('enable_cassandra_on_controller') {
- class {'::tripleo::cluster::cassandra':
- cassandra_servers => $cassandra_node_ips,
- # TODO: create a 'bind' hiera key for cassandra
- cassandra_ip => hiera('neutron::bind_host'),
- }
- }
-
- class {'::tripleo::network::midonet::agent':
- zookeeper_servers => $zookeeper_node_ips,
- cassandra_seeds => $cassandra_node_ips
- }
-
- class {'::tripleo::network::midonet::api':
- zookeeper_servers => $zookeeper_node_ips,
- vip => hiera('public_virtual_ip'),
- keystone_ip => hiera('public_virtual_ip'),
- keystone_admin_token => hiera('keystone::admin_token'),
- # TODO: create a 'bind' hiera key for api
- bind_address => hiera('neutron::bind_host'),
- admin_password => hiera('admin_password')
- }
-
- # TODO: find a way to get an empty list from hiera
- # TODO: when doing the composable midonet plugin, don't forget to
- # set service_plugins to an empty array in Hiera.
- class {'::neutron':
- service_plugins => []
- }
-
- }
-
- # If the value of core plugin is set to 'opencontrail'
- # include opencontrail core plugins
- # else use the default value of 'ml2'
- if hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
- include ::neutron::plugins::opencontrail
- } else {
-
- # If the value of core plugin is set to 'midonet',
- # skip all the ML2 configuration
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
- class {'::neutron::plugins::midonet':
- midonet_api_ip => hiera('public_virtual_ip'),
- keystone_tenant => hiera('neutron::server::auth_tenant'),
- keystone_password => hiera('neutron::server::password')
- }
- }
-
- Service['neutron-server'] -> Service['neutron-metadata']
-
- }
-
- if $enable_ceph {
- $ceph_pools = hiera('ceph_pools')
- ceph::pool { $ceph_pools :
- pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'),
- pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
- size => hiera('ceph::profile::params::osd_pool_default_size'),
- }
- }
-
- # swift storage
- if str2bool(hiera('enable_swift_storage', true)) {
- class { '::swift::storage::all':
- mount_check => str2bool(hiera('swift_mount_check')),
- }
- if(!defined(File['/srv/node'])) {
- file { '/srv/node':
- ensure => directory,
- owner => 'swift',
- group => 'swift',
- require => Package['openstack-swift'],
- }
- }
- $swift_components = ['account', 'container', 'object']
- swift::storage::filter::recon { $swift_components : }
- swift::storage::filter::healthcheck { $swift_components : }
- }
-
- # Ceilometer
- $ceilometer_backend = downcase(hiera('ceilometer_backend'))
- case $ceilometer_backend {
- /mysql/ : {
- $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
- }
- default : {
- $ceilometer_database_connection = $ceilometer_mongodb_conn_string
- }
- }
- include ::ceilometer
- include ::ceilometer::config
- include ::ceilometer::api
- include ::ceilometer::agent::notification
- include ::ceilometer::agent::central
- include ::ceilometer::expirer
- include ::ceilometer::collector
- include ::ceilometer::agent::auth
- include ::ceilometer::dispatcher::gnocchi
- class { '::ceilometer::db' :
- database_connection => $ceilometer_database_connection,
- }
-
- Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
-
# Aodh
class { '::aodh' :
database_connection => hiera('aodh_mysql_conn_string'),
@@ -282,27 +77,6 @@ if hiera('step') >= 4 {
include ::aodh::listener
include ::aodh::client
- # Horizon
- include ::apache::mod::remoteip
- if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
- $_profile_support = 'cisco'
- } else {
- $_profile_support = 'None'
- }
- $neutron_options = {'profile_support' => $_profile_support }
-
- $memcached_ipv6 = hiera('memcached_ipv6', false)
- if $memcached_ipv6 {
- $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
- } else {
- $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
- }
-
- class { '::horizon':
- cache_server_ip => $horizon_memcached_servers,
- neutron_options => $neutron_options,
- }
-
# Gnocchi
$gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
class { '::gnocchi':
@@ -323,16 +97,6 @@ if hiera('step') >= 4 {
default: { fail('Unrecognized gnocchi_backend parameter.') }
}
- $snmpd_user = hiera('snmpd_readonly_user_name')
- snmp::snmpv3_user { $snmpd_user:
- authtype => 'MD5',
- authpass => hiera('snmpd_readonly_user_password'),
- }
- class { '::snmp':
- agentaddress => ['udp:161','udp6:[::1]:161'],
- snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
- }
-
hiera_include('controller_classes')
} #END STEP 4
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 0bee6b00..cfa693be 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -21,10 +21,7 @@ Pcmk_resource <| |> {
# TODO(jistr): use pcs resource provider instead of just no-ops
Service <|
tag == 'aodh-service' or
- tag == 'ceilometer-service' or
- tag == 'gnocchi-service' or
- tag == 'neutron-service' or
- tag == 'nova-service'
+ tag == 'gnocchi-service'
|> {
hasrestart => true,
restart => '/bin/true',
@@ -53,12 +50,6 @@ $non_pcmk_start = hiera('step') >= 5
if hiera('step') >= 1 {
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
- include ::timezone
-
$pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
$corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
if $corosync_ipv6 {
@@ -95,10 +86,6 @@ if hiera('step') >= 1 {
op_params => 'start timeout=200s stop timeout=200s',
}
- if downcase(hiera('ceilometer_backend')) == 'mongodb' {
- include ::mongodb::params
- }
-
# Galera
if str2bool(hiera('enable_galera', true)) {
$mysql_config_file = '/etc/my.cnf.d/galera.cnf'
@@ -194,48 +181,25 @@ if hiera('step') >= 2 {
require => Class['::mysql::server'],
before => Exec['galera-ready'],
}
- }
- $mysql_root_password = hiera('mysql::server::root_password')
- $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
- # This step is to create a sysconfig clustercheck file with the root user and empty password
- # on the first install only (because later on the clustercheck db user will be used)
- # We are using exec and not file in order to not have duplicate definition errors in puppet
- # when we later set the the file to contain the clustercheck data
- exec { 'create-root-sysconfig-clustercheck':
- command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
- unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
- }
- exec { 'galera-ready' :
- command => '/usr/bin/clustercheck >/dev/null',
- timeout => 30,
- tries => 180,
- try_sleep => 10,
- environment => ['AVAILABLE_WHEN_READONLY=0'],
- require => Exec['create-root-sysconfig-clustercheck'],
- }
+ exec { 'galera-ready' :
+ command => '/usr/bin/clustercheck >/dev/null',
+ timeout => 30,
+ tries => 180,
+ try_sleep => 10,
+ environment => ['AVAILABLE_WHEN_READONLY=0'],
+ require => Exec['create-root-sysconfig-clustercheck'],
+ }
- xinetd::service { 'galera-monitor' :
- port => '9200',
- server => '/usr/bin/clustercheck',
- per_source => 'UNLIMITED',
- log_on_success => '',
- log_on_failure => 'HOST',
- flags => 'REUSE',
- service_type => 'UNLISTED',
- user => 'root',
- group => 'root',
- require => Exec['create-root-sysconfig-clustercheck'],
- }
- # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
- # to it in a later step. We do this only on one node as it will replicate on
- # the other members. We also make sure that the permissions are the minimum necessary
- if $pacemaker_master {
+ # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
+ # to it in a later step. We do this only on one node as it will replicate on
+ # the other members. We also make sure that the permissions are the minimum necessary
mysql_user { 'clustercheck@localhost':
ensure => 'present',
- password_hash => mysql_password($mysql_clustercheck_password),
+ password_hash => mysql_password(hiera('mysql_clustercheck_password')),
require => Exec['galera-ready'],
}
+
mysql_grant { 'clustercheck@localhost/*.*':
ensure => 'present',
options => ['GRANT'],
@@ -243,15 +207,6 @@ if hiera('step') >= 2 {
table => '*.*',
user => 'clustercheck@localhost',
}
- }
-
- # Create all the database schemas
- if $sync_db {
- if downcase(hiera('ceilometer_backend')) == 'mysql' {
- class { '::ceilometer::db::mysql':
- require => Exec['galera-ready'],
- }
- }
if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
class { '::gnocchi::db::mysql':
@@ -261,65 +216,36 @@ if hiera('step') >= 2 {
class { '::aodh::db::mysql':
require => Exec['galera-ready'],
- }
- }
-
- # Ceph
- $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
-
- if $enable_ceph {
- $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
- if str2bool(hiera('ceph_ipv6', false)) {
- $mon_host = hiera('ceph_mon_host_v6')
- } else {
- $mon_host = hiera('ceph_mon_host')
- }
- class { '::ceph::profile::params':
- mon_initial_members => $mon_initial_members,
- mon_host => $mon_host,
}
- include ::ceph::conf
- include ::ceph::profile::mon
}
-
- if str2bool(hiera('enable_ceph_storage', false)) {
- if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
- exec { 'set selinux to permissive on boot':
- command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
- onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
- path => ['/usr/bin', '/usr/sbin'],
- }
-
- exec { 'set selinux to permissive':
- command => 'setenforce 0',
- onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
- path => ['/usr/bin', '/usr/sbin'],
- } -> Class['ceph::profile::osd']
- }
-
- include ::ceph::conf
- include ::ceph::profile::osd
+ # This step is to create a sysconfig clustercheck file with the root user and empty password
+ # on the first install only (because later on the clustercheck db user will be used)
+ # We are using exec and not file in order to not have duplicate definition errors in puppet
+ # when we later set the file to contain the clustercheck data
+ exec { 'create-root-sysconfig-clustercheck':
+ command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
+ unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
}
- if str2bool(hiera('enable_external_ceph', false)) {
- if str2bool(hiera('ceph_ipv6', false)) {
- $mon_host = hiera('ceph_mon_host_v6')
- } else {
- $mon_host = hiera('ceph_mon_host')
- }
- class { '::ceph::profile::params':
- mon_host => $mon_host,
- }
- include ::ceph::conf
- include ::ceph::profile::client
+ xinetd::service { 'galera-monitor' :
+ port => '9200',
+ server => '/usr/bin/clustercheck',
+ per_source => 'UNLIMITED',
+ log_on_success => '',
+ log_on_failure => 'HOST',
+ flags => 'REUSE',
+ service_type => 'UNLISTED',
+ user => 'root',
+ group => 'root',
+ require => Exec['create-root-sysconfig-clustercheck'],
}
-
} #END STEP 2
if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
# At this stage we are guaranteed that the clustercheck db user exists
# so we switch the resource agent to use it.
+ $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
file { '/etc/sysconfig/clustercheck' :
ensure => file,
mode => '0600',
@@ -343,170 +269,6 @@ MYSQL_HOST=localhost\n",
include ::nova::config
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
-
- # TODO(devvesa) provide non-controller ips for these services
- $zookeeper_node_ips = hiera('neutron_api_node_ips')
- $cassandra_node_ips = hiera('neutron_api_node_ips')
-
- # Run zookeeper in the controller if configured
- if hiera('enable_zookeeper_on_controller') {
- class {'::tripleo::cluster::zookeeper':
- zookeeper_server_ips => $zookeeper_node_ips,
- # TODO: create a 'bind' hiera key for zookeeper
- zookeeper_client_ip => hiera('neutron::bind_host'),
- zookeeper_hostnames => split(hiera('controller_node_names'), ',')
- }
- }
-
- # Run cassandra in the controller if configured
- if hiera('enable_cassandra_on_controller') {
- class {'::tripleo::cluster::cassandra':
- cassandra_servers => $cassandra_node_ips,
- # TODO: create a 'bind' hiera key for cassandra
- cassandra_ip => hiera('neutron::bind_host'),
- }
- }
-
- class {'::tripleo::network::midonet::agent':
- zookeeper_servers => $zookeeper_node_ips,
- cassandra_seeds => $cassandra_node_ips
- }
-
- class {'::tripleo::network::midonet::api':
- zookeeper_servers => $zookeeper_node_ips,
- vip => hiera('public_virtual_ip'),
- keystone_ip => hiera('public_virtual_ip'),
- keystone_admin_token => hiera('keystone::admin_token'),
- # TODO: create a 'bind' hiera key for api
- bind_address => hiera('neutron::bind_host'),
- admin_password => hiera('admin_password')
- }
-
- # Configure Neutron
- # TODO: when doing the composable midonet plugin, don't forget to
- # set service_plugins to an empty array in Hiera.
- class {'::neutron':
- service_plugins => []
- }
-
- }
-
- if hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
- include ::neutron::plugins::opencontrail
- }
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- class {'::neutron::plugins::midonet':
- midonet_api_ip => hiera('public_virtual_ip'),
- keystone_tenant => hiera('neutron::server::auth_tenant'),
- keystone_password => hiera('neutron::server::password')
- }
- }
-
- if $enable_ceph {
- $ceph_pools = hiera('ceph_pools')
- ceph::pool { $ceph_pools :
- pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'),
- pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
- size => hiera('ceph::profile::params::osd_pool_default_size'),
- }
- }
-
- # swift storage
- if str2bool(hiera('enable_swift_storage', true)) {
- class {'::swift::storage::all':
- mount_check => str2bool(hiera('swift_mount_check')),
- }
- class {'::swift::storage::account':
- manage_service => $non_pcmk_start,
- enabled => $non_pcmk_start,
- }
- class {'::swift::storage::container':
- manage_service => $non_pcmk_start,
- enabled => $non_pcmk_start,
- }
- class {'::swift::storage::object':
- manage_service => $non_pcmk_start,
- enabled => $non_pcmk_start,
- }
- if(!defined(File['/srv/node'])) {
- file { '/srv/node':
- ensure => directory,
- owner => 'swift',
- group => 'swift',
- require => Package['openstack-swift'],
- }
- }
- $swift_components = ['account', 'container', 'object']
- swift::storage::filter::recon { $swift_components : }
- swift::storage::filter::healthcheck { $swift_components : }
- }
-
- # Ceilometer
- case downcase(hiera('ceilometer_backend')) {
- /mysql/: {
- $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
- }
- default: {
- $mongo_node_string = join($mongo_node_ips_with_port, ',')
- $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
- }
- }
- include ::ceilometer
- include ::ceilometer::config
- class { '::ceilometer::api' :
- manage_service => false,
- enabled => false,
- }
- class { '::ceilometer::agent::notification' :
- manage_service => false,
- enabled => false,
- }
- class { '::ceilometer::agent::central' :
- manage_service => false,
- enabled => false,
- }
- class { '::ceilometer::collector' :
- manage_service => false,
- enabled => false,
- }
- include ::ceilometer::expirer
- class { '::ceilometer::db' :
- database_connection => $ceilometer_database_connection,
- sync_db => $sync_db,
- }
- include ::ceilometer::agent::auth
- include ::ceilometer::dispatcher::gnocchi
-
- Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
-
- # httpd/apache and horizon
- # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
- class { '::apache' :
- service_enable => false,
- # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
- }
- include ::apache::mod::remoteip
- include ::apache::mod::status
- if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
- $_profile_support = 'cisco'
- } else {
- $_profile_support = 'None'
- }
- $neutron_options = {'profile_support' => $_profile_support }
-
- $memcached_ipv6 = hiera('memcached_ipv6', false)
- if $memcached_ipv6 {
- $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
- } else {
- $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
- }
-
- class { '::horizon':
- cache_server_ip => $horizon_memcached_servers,
- neutron_options => $neutron_options,
- }
-
# Aodh
class { '::aodh' :
database_connection => hiera('aodh_mysql_conn_string'),
@@ -567,16 +329,6 @@ MYSQL_HOST=localhost\n",
enabled => false,
}
- $snmpd_user = hiera('snmpd_readonly_user_name')
- snmp::snmpv3_user { $snmpd_user:
- authtype => 'MD5',
- authpass => hiera('snmpd_readonly_user_password'),
- }
- class { '::snmp':
- agentaddress => ['udp:161','udp6:[::1]:161'],
- snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
- }
-
hiera_include('controller_classes')
} #END STEP 4
@@ -587,6 +339,7 @@ if hiera('step') >= 5 {
# password. On second runs or updates /root/.my.cnf will already be populated
# with proper credentials. This step happens on every node because this sql
# statement does not automatically replicate across nodes.
+ $mysql_root_password = hiera('mysql::server::root_password')
exec { 'galera-set-root-password':
command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
}
@@ -632,49 +385,6 @@ password=\"${mysql_root_password}\"",
Pacemaker::Resource::Ocf['openstack-core']],
}
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- pacemaker::resource::service {'tomcat':
- clone_params => 'interleave=true',
- }
- }
- if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
- pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::server_service}-clone",
- second_resource => "${::neutron::params::dhcp_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
- }
- pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::dhcp_agent_service}-clone",
- second_resource => "${::neutron::params::metadata_agent_service}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
- }
- pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
- constraint_type => 'order',
- first_resource => "${::neutron::params::metadata_agent_service}-clone",
- second_resource => 'tomcat-clone',
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
- Pacemaker::Resource::Service['tomcat']],
- }
- pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
- source => "${::neutron::params::metadata_agent_service}-clone",
- target => "${::neutron::params::dhcp_agent_service}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
- }
- }
-
# Nova
pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
constraint_type => 'order',
@@ -757,49 +467,12 @@ password=\"${mysql_root_password}\"",
Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
}
- # Ceilometer and Aodh
- case downcase(hiera('ceilometer_backend')) {
- /mysql/: {
- pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
- clone_params => 'interleave=true',
- require => Pacemaker::Resource::Ocf['openstack-core'],
- }
- }
- default: {
- pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
- clone_params => 'interleave=true',
- require => [Pacemaker::Resource::Ocf['openstack-core'],
- Pacemaker::Resource::Service[$::mongodb::params::service_name]],
- }
- }
- }
- pacemaker::resource::service { $::ceilometer::params::collector_service_name :
- clone_params => 'interleave=true',
- }
- pacemaker::resource::service { $::ceilometer::params::api_service_name :
- clone_params => 'interleave=true',
- }
- pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
- clone_params => 'interleave=true',
- }
# Fedora doesn't know `require-all` parameter for constraints yet
if $::operatingsystem == 'Fedora' {
- $redis_ceilometer_constraint_params = undef
$redis_aodh_constraint_params = undef
} else {
- $redis_ceilometer_constraint_params = 'require-all=false'
$redis_aodh_constraint_params = 'require-all=false'
}
- pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
- constraint_type => 'order',
- first_resource => 'redis-master',
- second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
- first_action => 'promote',
- second_action => 'start',
- constraint_params => $redis_ceilometer_constraint_params,
- require => [Pacemaker::Resource::Ocf['redis'],
- Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
- }
pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
constraint_type => 'order',
first_resource => 'redis-master',
@@ -810,49 +483,6 @@ password=\"${mysql_root_password}\"",
require => [Pacemaker::Resource::Ocf['redis'],
Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
}
- pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
- constraint_type => 'order',
- first_resource => 'openstack-core-clone',
- second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
- Pacemaker::Resource::Ocf['openstack-core']],
- }
- pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint':
- constraint_type => 'order',
- first_resource => 'openstack-core-clone',
- second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
- Pacemaker::Resource::Ocf['openstack-core']],
- }
- pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
- constraint_type => 'order',
- first_resource => "${::ceilometer::params::agent_central_service_name}-clone",
- second_resource => "${::ceilometer::params::collector_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
- Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
- }
- pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
- constraint_type => 'order',
- first_resource => "${::ceilometer::params::collector_service_name}-clone",
- second_resource => "${::ceilometer::params::api_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
- Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
- }
- pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
- source => "${::ceilometer::params::api_service_name}-clone",
- target => "${::ceilometer::params::collector_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
- Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
- }
# Aodh
pacemaker::resource::service { $::aodh::params::evaluator_service_name :
clone_params => 'interleave=true',
@@ -895,17 +525,6 @@ password=\"${mysql_root_password}\"",
require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
}
- if downcase(hiera('ceilometer_backend')) == 'mongodb' {
- pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
- constraint_type => 'order',
- first_resource => "${::mongodb::params::service_name}-clone",
- second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
- Pacemaker::Resource::Service[$::mongodb::params::service_name]],
- }
- }
# gnocchi
pacemaker::resource::service { $::gnocchi::params::metricd_service_name :
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
index 4fca8f5f..1f04c581 100644
--- a/puppet/manifests/overcloud_object.pp
+++ b/puppet/manifests/overcloud_object.pp
@@ -16,43 +16,9 @@
include ::tripleo::packages
include ::tripleo::firewall
-if hiera('step') >= 1 {
- create_resources(kmod::load, hiera('kernel_modules'), {})
- create_resources(sysctl::value, hiera('sysctl_settings'), {})
- Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
- include ::timezone
-
-}
-
if hiera('step') >= 4 {
- class { '::swift::storage::all':
- mount_check => str2bool(hiera('swift_mount_check')),
- }
- if(!defined(File['/srv/node'])) {
- file { '/srv/node':
- ensure => directory,
- owner => 'swift',
- group => 'swift',
- require => Package['openstack-swift'],
- }
- }
-
- $swift_components = ['account', 'container', 'object']
- swift::storage::filter::recon { $swift_components : }
- swift::storage::filter::healthcheck { $swift_components : }
-
- $snmpd_user = hiera('snmpd_readonly_user_name')
- snmp::snmpv3_user { $snmpd_user:
- authtype => 'MD5',
- authpass => hiera('snmpd_readonly_user_password'),
- }
- class { '::snmp':
- agentaddress => ['udp:161','udp6:[::1]:161'],
- snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
- }
-
hiera_include('object_classes')
}
-package_manifest{'/var/lib/tripleo/installed-packages/overcloud_object': ensure => present}
+$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_object', hiera('step')])
+package_manifest{$package_manifest_name: ensure => present}
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index 134dc43b..7c7da586 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -16,46 +16,9 @@
include ::tripleo::packages
include ::tripleo::firewall
-create_resources(kmod::load, hiera('kernel_modules'), {})
-create_resources(sysctl::value, hiera('sysctl_settings'), {})
-Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
-
-if count(hiera('ntp::servers')) > 0 {
- include ::ntp
-}
-
-include ::timezone
-
-include ::cinder
-include ::cinder::config
-include ::cinder::glance
-include ::cinder::volume
-include ::cinder::setup_test_volume
-
-$cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
-if $cinder_enable_iscsi {
- $cinder_iscsi_backend = 'tripleo_iscsi'
-
- cinder::backend::iscsi { $cinder_iscsi_backend :
- iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
- iscsi_helper => hiera('cinder_iscsi_helper'),
- }
-}
-
-$cinder_enabled_backends = any2array($cinder_iscsi_backend)
-class { '::cinder::backends' :
- enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
-}
-
-$snmpd_user = hiera('snmpd_readonly_user_name')
-snmp::snmpv3_user { $snmpd_user:
- authtype => 'MD5',
- authpass => hiera('snmpd_readonly_user_password'),
-}
-class { '::snmp':
- agentaddress => ['udp:161','udp6:[::1]:161'],
- snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
+if hiera('step') >= 4 {
+ hiera_include('volume_classes')
}
-hiera_include('volume_classes')
-package_manifest{'/var/lib/tripleo/installed-packages/overcloud_volume': ensure => present}
+$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_volume', hiera('step')])
+package_manifest{$package_manifest_name: ensure => present}
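Both storage-role manifests now defer their class lists to hiera and only apply them at step 4, and the package manifest name gains the step suffix so it is refreshed on every step. As a rough sketch of the data the hiera_include calls expect, assuming hieradata of this shape (the file name and listed classes are illustrative, not the actual values wired in by the service templates):

    # hieradata excerpt (hypothetical); keys match the manifests above
    object_classes:
      - ::swift::storage::all    # example class, normally injected by the service templates
    volume_classes:
      - ::cinder::volume         # example class, normally injected by the service templates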
diff --git a/puppet/services/ceilometer-agent-central.yaml b/puppet/services/ceilometer-agent-central.yaml
new file mode 100644
index 00000000..294e7dd2
--- /dev/null
+++ b/puppet/services/ceilometer-agent-central.yaml
@@ -0,0 +1,43 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ceilometer Central Agent service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ RedisPassword:
+ description: The password for the redis service account.
+ type: string
+ hidden: true
+ RedisVirtualIPUri:
+ type: string
+ default: ''
+
+
+resources:
+ CeilometerServiceBase:
+ type: ./ceilometer-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Central Agent role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [CeilometerServiceBase, role_data, config_settings]
+ - ceilometer::agent::central::coordination_url:
+ list_join:
+ - ''
+ - - 'redis://:'
+ - {get_param: RedisPassword}
+ - '@'
+ - {get_param: RedisVirtualIPUri}
+ - ':6379/'
+ step_config: |
+ include ::tripleo::profile::base::ceilometer::agent::central
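The list_join above assembles a single Redis coordination URL for the central agent. A sketch of the resulting hiera value, assuming an illustrative RedisPassword of 'redispass' and a RedisVirtualIPUri of '192.0.2.10' (both made up):

    ceilometer::agent::central::coordination_url: 'redis://:redispass@192.0.2.10:6379/'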
diff --git a/puppet/services/ceilometer-agent-notification.yaml b/puppet/services/ceilometer-agent-notification.yaml
new file mode 100644
index 00000000..523dabb9
--- /dev/null
+++ b/puppet/services/ceilometer-agent-notification.yaml
@@ -0,0 +1,27 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ceilometer Notification Agent service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+
+resources:
+ CeilometerServiceBase:
+ type: ./ceilometer-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Notification Agent role.
+ value:
+ config_settings:
+ get_attr: [CeilometerServiceBase, role_data, config_settings]
+ step_config: |
+ include ::tripleo::profile::base::ceilometer::agent::notification
diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml
new file mode 100644
index 00000000..06c2ed12
--- /dev/null
+++ b/puppet/services/ceilometer-api.yaml
@@ -0,0 +1,27 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ceilometer API service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+
+resources:
+ CeilometerServiceBase:
+ type: ./ceilometer-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer API role.
+ value:
+ config_settings:
+ get_attr: [CeilometerServiceBase, role_data, config_settings]
+ step_config: |
+ include ::tripleo::profile::base::ceilometer::api
diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml
new file mode 100644
index 00000000..1dea785f
--- /dev/null
+++ b/puppet/services/ceilometer-base.yaml
@@ -0,0 +1,105 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ceilometer service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ CeilometerBackend:
+ default: 'mongodb'
+ description: The ceilometer backend type.
+ type: string
+ CeilometerMeteringSecret:
+ description: Secret shared by the ceilometer services.
+ type: string
+ hidden: true
+ CeilometerPassword:
+ description: The password for the ceilometer service account.
+ type: string
+ hidden: true
+ CeilometerMeterDispatcher:
+ default: 'gnocchi'
+ description: Dispatcher to process meter data
+ type: string
+ constraints:
+ - allowed_values: ['gnocchi', 'database']
+ CeilometerWorkers:
+ default: 0
+ description: Number of workers for Ceilometer service.
+ type: number
+ CeilometerStoreEvents:
+ default: false
+ description: Whether to store events in ceilometer.
+ type: boolean
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer role.
+ value:
+ config_settings:
+ ceilometer::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+            - '://ceilometer:'
+ - {get_param: CeilometerPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/ceilometer'
+ ceilometer_backend: {get_param: CeilometerBackend}
+ ceilometer::metering_secret: {get_param: CeilometerMeteringSecret}
+ # we include db_sync class in puppet-tripleo
+ ceilometer::db::sync_db: false
+ ceilometer::api::keystone_password: {get_param: CeilometerPassword}
+ ceilometer::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ceilometer::api::keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
+ ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
+ ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents}
+ ceilometer::db::mysql::password: {get_param: CeilometerPassword}
+ ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
+ ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]}
+ ceilometer::dispatcher::gnocchi::filter_project: 'service'
+ ceilometer::dispatcher::gnocchi::archive_policy: 'low'
+ ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml'
+ ceilometer::keystone::auth::public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
+ ceilometer::keystone::auth::internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
+ ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
+ ceilometer::keystone::auth::password: {get_param: CeilometerPassword}
+ ceilometer::keystone::auth::region: {get_param: KeystoneRegion}
+ ceilometer::rabbit_userid: {get_param: RabbitUserName}
+ ceilometer::rabbit_password: {get_param: RabbitPassword}
+ ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ ceilometer::rabbit_port: {get_param: RabbitClientPort}
+ ceilometer::db::mysql::user: ceilometer
+ ceilometer::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]}
+ ceilometer::db::mysql::dbname: ceilometer
+ ceilometer::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/ceilometer-collector.yaml b/puppet/services/ceilometer-collector.yaml
new file mode 100644
index 00000000..29627210
--- /dev/null
+++ b/puppet/services/ceilometer-collector.yaml
@@ -0,0 +1,26 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ceilometer Collector service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+ CeilometerServiceBase:
+ type: ./ceilometer-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Collector role.
+ value:
+ config_settings:
+ get_attr: [CeilometerServiceBase, role_data, config_settings]
+ step_config: |
+ include ::tripleo::profile::base::ceilometer::collector
diff --git a/puppet/services/ceilometer-expirer.yaml b/puppet/services/ceilometer-expirer.yaml
new file mode 100644
index 00000000..796abe1f
--- /dev/null
+++ b/puppet/services/ceilometer-expirer.yaml
@@ -0,0 +1,27 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ceilometer Expirer service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+
+resources:
+ CeilometerServiceBase:
+ type: ./ceilometer-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Expirer role.
+ value:
+ config_settings:
+ get_attr: [CeilometerServiceBase, role_data, config_settings]
+ step_config: |
+ include ::tripleo::profile::base::ceilometer::expirer
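Each of the new ceilometer service templates is expected to be wired up through the resource registry so a role's service chain picks it up. A sketch, assuming registry keys of the usual OS::TripleO::Services::* form (the actual key names live in the registry file, which is outside this diff):

    resource_registry:
      OS::TripleO::Services::CeilometerApi: puppet/services/ceilometer-api.yaml
      OS::TripleO::Services::CeilometerCollector: puppet/services/ceilometer-collector.yaml
      OS::TripleO::Services::CeilometerExpirer: puppet/services/ceilometer-expirer.yaml
      OS::TripleO::Services::CeilometerAgentCentral: puppet/services/ceilometer-agent-central.yaml
      OS::TripleO::Services::CeilometerAgentNotification: puppet/services/ceilometer-agent-notification.yaml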
diff --git a/puppet/services/ceph-base.yaml b/puppet/services/ceph-base.yaml
new file mode 100644
index 00000000..065901b8
--- /dev/null
+++ b/puppet/services/ceph-base.yaml
@@ -0,0 +1,94 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Ceph base service. Shared by all Ceph services.
+
+parameters:
+ CephAdminKey:
+ default: ''
+ description: The Ceph admin client key. Can be created with ceph-authtool --gen-print-key.
+ type: string
+ hidden: true
+ CephClientKey:
+ default: ''
+ description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring.
+ type: string
+ hidden: true
+ CephClientUserName:
+ default: openstack
+ type: string
+ CephClusterFSID:
+ default: ''
+ type: string
+ description: The Ceph cluster FSID. Must be a UUID.
+ CephIPv6:
+ default: False
+ type: boolean
+ CinderRbdPoolName:
+ default: volumes
+ type: string
+ CinderBackupRbdPoolName:
+ default: backups
+ type: string
+ GlanceRbdPoolName:
+ default: images
+ type: string
+ GnocchiRbdPoolName:
+ default: metrics
+ type: string
+ NovaRbdPoolName:
+ default: vms
+ type: string
+ # DEPRECATED options for compatibility with overcloud.yaml
+ # This should be removed and manipulation of the ControllerServices list
+ # used instead, but we need client support for that first
+ ControllerEnableCephStorage:
+ default: false
+ description: Whether to deploy Ceph Storage (OSD) on the Controller
+ type: boolean
+
+parameter_groups:
+- label: deprecated
+ description: Do not use deprecated params, they will be removed.
+ parameters:
+ - ControllerEnableCephStorage
+
+outputs:
+ role_data:
+ description: Role data for the Ceph base service.
+ value:
+ config_settings:
+ tripleo::profile::base::ceph::ceph_ipv6: {get_param: CephIPv6}
+ tripleo::profile::base::ceph::enable_ceph_storage: {get_param: ControllerEnableCephStorage}
+ ceph::profile::params::fsid: {get_param: CephClusterFSID}
+ ceph::profile::params::client_keys:
+ str_replace:
+ template: "{
+ client.admin: {
+ secret: 'ADMIN_KEY',
+ mode: '0600',
+ cap_mon: 'allow *',
+ cap_osd: 'allow *',
+ cap_mds: 'allow *'
+ },
+ client.bootstrap-osd: {
+ secret: 'ADMIN_KEY',
+ keyring_path: '/var/lib/ceph/bootstrap-osd/ceph.keyring',
+ cap_mon: 'allow profile bootstrap-osd'
+ },
+ client.CLIENT_USER: {
+ secret: 'CLIENT_KEY',
+ mode: '0644',
+ cap_mon: 'allow r',
+ cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
+ }
+ }"
+ params:
+ CLIENT_USER: {get_param: CephClientUserName}
+ CLIENT_KEY: {get_param: CephClientKey}
+ ADMIN_KEY: {get_param: CephAdminKey}
+ NOVA_POOL: {get_param: NovaRbdPoolName}
+ CINDER_POOL: {get_param: CinderRbdPoolName}
+ CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
+ GLANCE_POOL: {get_param: GlanceRbdPoolName}
+ GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
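The str_replace above renders a single string that puppet-ceph parses into a keyring hash. With the default pool names and the default CephClientUserName of 'openstack', the client entry ends up roughly like the sketch below (the secret is a placeholder, not a real key):

    # rendered fragment of ceph::profile::params::client_keys (illustrative)
    client.openstack:
      secret: 'AQC...EXAMPLEKEY=='
      mode: '0644'
      cap_mon: 'allow r'
      cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=backups, allow rwx pool=vms, allow rwx pool=images, allow rwx pool=metrics'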
diff --git a/puppet/services/ceph-client.yaml b/puppet/services/ceph-client.yaml
new file mode 100644
index 00000000..ca920a5f
--- /dev/null
+++ b/puppet/services/ceph-client.yaml
@@ -0,0 +1,24 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Ceph Client service.
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+ CephBase:
+ type: ./ceph-base.yaml
+
+outputs:
+ role_data:
+    description: Role data for the Ceph Client service.
+ value:
+ config_settings:
+ get_attr: [CephBase, role_data, config_settings]
+ step_config: |
+ include ::tripleo::profile::base::ceph::client
diff --git a/puppet/services/ceph-external.yaml b/puppet/services/ceph-external.yaml
new file mode 100644
index 00000000..4522f416
--- /dev/null
+++ b/puppet/services/ceph-external.yaml
@@ -0,0 +1,65 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Ceph External service.
+
+parameters:
+ CephClientKey:
+ default: ''
+ description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring.
+ type: string
+ hidden: true
+ CephClientUserName:
+ default: openstack
+ type: string
+ CephClusterFSID:
+ default: ''
+ type: string
+ description: The Ceph cluster FSID. Must be a UUID.
+ CephExternalMonHost:
+ default: ''
+ type: string
+ description: List of externally managed Ceph Mon Host IPs. Only used for external Ceph deployments.
+ CinderRbdPoolName:
+ default: volumes
+ type: string
+ CinderBackupRbdPoolName:
+ default: backups
+ type: string
+ GlanceRbdPoolName:
+ default: images
+ type: string
+ GnocchiRbdPoolName:
+ default: metrics
+ type: string
+ NovaRbdPoolName:
+ default: vms
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the Ceph External service.
+ value:
+ config_settings:
+ tripleo::profile::base::ceph::ceph_mon_host: {get_param: CephExternalMonHost}
+ ceph::profile::params::fsid: {get_param: CephClusterFSID}
+ ceph::profile::params::client_keys:
+ str_replace:
+ template: "{
+ client.CLIENT_USER: {
+ secret: 'CLIENT_KEY',
+ mode: '0644',
+ cap_mon: 'allow r',
+ cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
+ }
+ }"
+ params:
+ CLIENT_USER: {get_param: CephClientUserName}
+ CLIENT_KEY: {get_param: CephClientKey}
+ NOVA_POOL: {get_param: NovaRbdPoolName}
+ CINDER_POOL: {get_param: CinderRbdPoolName}
+ CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
+ GLANCE_POOL: {get_param: GlanceRbdPoolName}
+ GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
+ step_config: |
+ include ::tripleo::profile::base::ceph::client
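For an externally managed cluster this template is driven entirely by parameters. A minimal environment sketch with placeholder values (the FSID, mon hosts and key shown here are examples only):

    parameter_defaults:
      CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
      CephExternalMonHost: '172.16.1.7,172.16.1.8'
      CephClientKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='   # generate with ceph-authtool --gen-print-key
      CephClientUserName: openstack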
diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml
new file mode 100644
index 00000000..d6e3aa70
--- /dev/null
+++ b/puppet/services/ceph-mon.yaml
@@ -0,0 +1,56 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Ceph Monitor service.
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ CephIPv6:
+ default: False
+ type: boolean
+ CephMonKey:
+ default: ''
+ description: The Ceph monitors key. Can be created with ceph-authtool --gen-print-key.
+ type: string
+ hidden: true
+ CinderRbdPoolName:
+ default: volumes
+ type: string
+ CinderBackupRbdPoolName:
+ default: backups
+ type: string
+ GlanceRbdPoolName:
+ default: images
+ type: string
+ GnocchiRbdPoolName:
+ default: metrics
+ type: string
+ NovaRbdPoolName:
+ default: vms
+ type: string
+
+resources:
+ CephBase:
+ type: ./ceph-base.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Ceph Monitor service.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [CephBase, role_data, config_settings]
+ - ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
+ ceph::profile::params::mon_key: {get_param: CephMonKey}
+ tripleo::profile::base::ceph::mon::ceph_pools:
+ - {get_param: CinderRbdPoolName}
+ - {get_param: CinderBackupRbdPoolName}
+ - {get_param: NovaRbdPoolName}
+ - {get_param: GlanceRbdPoolName}
+ - {get_param: GnocchiRbdPoolName}
+ step_config: |
+ include ::tripleo::profile::base::ceph::mon
diff --git a/puppet/services/ceph-osd.yaml b/puppet/services/ceph-osd.yaml
new file mode 100644
index 00000000..24f60283
--- /dev/null
+++ b/puppet/services/ceph-osd.yaml
@@ -0,0 +1,24 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Ceph OSD service.
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+ CephBase:
+ type: ./ceph-base.yaml
+
+outputs:
+ role_data:
+    description: Role data for the Ceph OSD service.
+ value:
+ config_settings:
+ get_attr: [CephBase, role_data, config_settings]
+ step_config: |
+ include ::tripleo::profile::base::ceph::osd
diff --git a/puppet/services/cinder-base.yaml b/puppet/services/cinder-base.yaml
index 85682448..f6d2b645 100644
--- a/puppet/services/cinder-base.yaml
+++ b/puppet/services/cinder-base.yaml
@@ -56,3 +56,9 @@ outputs:
cinder::rabbit_userid: {get_param: RabbitUserName}
cinder::rabbit_password: {get_param: RabbitPassword}
cinder::rabbit_port: {get_param: RabbitClientPort}
+ cinder::db::mysql::user: cinder
+ cinder::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]}
+ cinder::db::mysql::dbname: cinder
+ cinder::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml
index d28f40e6..41f3827d 100644
--- a/puppet/services/cinder-volume.yaml
+++ b/puppet/services/cinder-volume.yaml
@@ -36,6 +36,12 @@ parameters:
NFS servers used by Cinder NFS backend. Effective when
CinderEnableNfsBackend is true.
type: comma_delimited_list
+ CinderRbdPoolName:
+ default: volumes
+ type: string
+ CephClientUserName:
+ default: openstack
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
@@ -67,5 +73,7 @@ outputs:
SERVERS: {get_param: CinderNfsServers}
tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
+ tripleo::profile::base::cinder::volume::rbd::cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
+ tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name: {get_param: CephClientUserName}
step_config: |
include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index 89e6ee0f..f1f98a8e 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -9,6 +9,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ CephClientUserName:
+ default: openstack
+ type: string
Debug:
default: ''
description: Set to True to enable debugging on all services.
@@ -36,6 +39,9 @@ parameters:
default: 0
description: Number of workers for Glance service.
type: number
+ GlanceRbdPoolName:
+ default: images
+ type: string
RabbitPassword:
description: The password for RabbitMQ
type: string
@@ -85,6 +91,8 @@ outputs:
glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
glance::backend::swift::swift_store_user: service:glance
glance::backend::swift::swift_store_key: {get_param: GlancePassword}
+ glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+ glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
glance_backend: {get_param: GlanceBackend}
glance::db::mysql::password: {get_param: GlancePassword}
glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
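The cinder-volume and glance-api hunks above only pass the RBD pool and client user names through to hiera; selecting the RBD backends themselves still happens via the pre-existing backend parameters. A sketch of the related overrides, with everything else left at its default:

    parameter_defaults:
      GlanceBackend: rbd              # existing parameter, not introduced by this diff
      GlanceRbdPoolName: images
      CinderRbdPoolName: volumes
      CephClientUserName: openstack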
diff --git a/puppet/services/glance-registry.yaml b/puppet/services/glance-registry.yaml
index 6f2f0372..d71157f9 100644
--- a/puppet/services/glance-registry.yaml
+++ b/puppet/services/glance-registry.yaml
@@ -41,5 +41,12 @@ outputs:
glance::registry::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
glance::registry::debug: {get_param: Debug}
glance::registry::workers: {get_param: GlanceWorkers}
+ glance::db::mysql::user: glance
+ glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]}
+ glance::db::mysql::dbname: glance
+ glance::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+
step_config: |
include ::tripleo::profile::base::glance::registry
diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml
index 4a5ec2c0..77af55ef 100644
--- a/puppet/services/heat-engine.yaml
+++ b/puppet/services/heat-engine.yaml
@@ -54,5 +54,11 @@ outputs:
heat::keystone_password: {get_param: HeatPassword}
heat::db::mysql::password: {get_param: HeatPassword}
heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
+ heat::db::mysql::user: heat
+ heat::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]}
+ heat::db::mysql::dbname: heat
+ heat::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
step_config: |
include ::tripleo::profile::base::heat::engine
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
new file mode 100644
index 00000000..01cf5791
--- /dev/null
+++ b/puppet/services/horizon.yaml
@@ -0,0 +1,34 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Horizon service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ HorizonAllowedHosts:
+ default: '*'
+    description: A list of IPs/Hostnames allowed to connect to Horizon
+ type: comma_delimited_list
+ NeutronMechanismDrivers:
+ default: 'openvswitch'
+ description: |
+ The mechanism drivers for the Neutron tenant network.
+ type: comma_delimited_list
+
+outputs:
+ role_data:
+ description: Role data for the Horizon role.
+ value:
+ config_settings:
+ horizon::allowed_hosts: {get_param: HorizonAllowedHosts}
+ neutron::plugins::ml2::mechanism_drivers:
+ str_replace:
+ template: MECHANISMS
+ params:
+ MECHANISMS: {get_param: NeutronMechanismDrivers}
+ step_config: |
+ include ::tripleo::profile::base::horizon
diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml
new file mode 100644
index 00000000..5ab03fcb
--- /dev/null
+++ b/puppet/services/ironic-api.yaml
@@ -0,0 +1,42 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ironic API configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ IronicPassword:
+ description: The password for the Ironic service and db account, used by the Ironic services
+ type: string
+ hidden: true
+
+resources:
+ IronicBase:
+ type: ./ironic-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ironic API role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [IronicBase, role_data, config_settings]
+ # NOTE(dtantsur): the my_ip parameter is heavily overloaded in
+ # ironic. It's used as a default value for e.g. TFTP server IP,
+ # glance and neutron endpoints, virtual console IP. We override
+ # the TFTP server IP in ironic-conductor.yaml as it should not be
+ # the VIP, but rather a real IP of the controller.
+ - ironic::my_ip: {get_param: [EndpointMap, MysqlInternal, host]}
+ ironic::api::admin_password: {get_param: IronicPassword}
+ ironic::keystone::auth::public_url: {get_param: [EndpointMap, IronicPublic, uri]}
+ ironic::keystone::auth::internal_url: {get_param: [EndpointMap, IronicInternal, uri]}
+ ironic::keystone::auth::admin_url: {get_param: [EndpointMap, IronicAdmin, uri]}
+ ironic::keystone::auth::password: {get_param: IronicPassword }
+ step_config: |
+ include ::tripleo::profile::base::ironic::api
diff --git a/puppet/services/ironic-base.yaml b/puppet/services/ironic-base.yaml
new file mode 100644
index 00000000..df82bb6c
--- /dev/null
+++ b/puppet/services/ironic-base.yaml
@@ -0,0 +1,69 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ironic services configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ IronicPassword:
+ description: The password for the Ironic service and db account, used by the Ironic services
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the Ironic role.
+ value:
+ config_settings:
+ ironic::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://ironic:'
+ - {get_param: IronicPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/ironic'
+ ironic::admin_tenant_name: 'service'
+ ironic::debug: {get_param: Debug}
+ ironic::rabbit_userid: {get_param: RabbitUserName}
+ ironic::rabbit_password: {get_param: RabbitPassword}
+ ironic::rabbit_port: {get_param: RabbitClientPort}
+ ironic::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ ironic::db::mysql::password: {get_param: IronicPassword}
+ ironic::db::mysql::user: ironic
+ ironic::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]}
+ ironic::db::mysql::dbname: ironic
+ ironic::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+
+ ironic::keystone::auth::tenant: 'service'
+ step_config: |
+ include ::tripleo::profile::base::ironic
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
new file mode 100644
index 00000000..26d4e0ed
--- /dev/null
+++ b/puppet/services/ironic-conductor.yaml
@@ -0,0 +1,35 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ironic conductor configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ IronicEnabledDrivers:
+ default: ['pxe_ipmitool', 'agent_ipmitool']
+ description: Enabled Ironic drivers
+ type: comma_delimited_list
+
+resources:
+ IronicBase:
+ type: ./ironic-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ironic conductor role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [IronicBase, role_data, config_settings]
+ - ironic::enabled_drivers: {get_param: IronicEnabledDrivers}
+ # Prevent tftp_server from defaulting to my_ip setting, which is
+ # controller VIP, not a real IP.
+ ironic::drivers::pxe::tftp_server: {get_input: ironic_api_network}
+ step_config: |
+ include ::tripleo::profile::base::ironic::conductor
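Taken together, the three Ironic templates need little more than a service password and, optionally, a driver list. An illustrative parameter_defaults sketch:

    parameter_defaults:
      IronicPassword: 'example-ironic-password'
      IronicEnabledDrivers:
        - pxe_ipmitool
        - agent_ipmitool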
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
new file mode 100644
index 00000000..b429c5ea
--- /dev/null
+++ b/puppet/services/kernel.yaml
@@ -0,0 +1,18 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Load kernel modules with kmod and configure kernel options with sysctl.
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the Kernel modules
+ value:
+ step_config: |
+ include ::tripleo::profile::base::kernel
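This small service replaces the create_resources(kmod::load, ...) and create_resources(sysctl::value, ...) blocks deleted from the role manifests earlier in this diff; the data still comes from the same hiera keys. A sketch of the expected hieradata shape (the module and sysctl values are examples only):

    kernel_modules:
      nf_conntrack: {}
    sysctl_settings:
      net.ipv4.tcp_keepalive_time:
        value: 5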
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index 25d92d4a..0ad6025c 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -128,5 +128,12 @@ outputs:
keystone::public_workers: {get_param: KeystoneWorkers}
keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
keystone::public_endpoint: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]}
+ keystone::db::mysql::user: keystone
+ keystone::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]}
+ keystone::db::mysql::dbname: keystone
+ keystone::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+
step_config: |
include ::tripleo::profile::base::keystone
diff --git a/puppet/services/neutron-compute-plugin-midonet.yaml b/puppet/services/neutron-compute-plugin-midonet.yaml
new file mode 100644
index 00000000..c3b65c49
--- /dev/null
+++ b/puppet/services/neutron-compute-plugin-midonet.yaml
@@ -0,0 +1,19 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Compute Midonet plugin
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+    description: Role data for the Neutron Compute Midonet plugin
+ value:
+ config_settings:
+ step_config: |
+ include ::tripleo::profile::base::neutron::agents::midonet
diff --git a/puppet/services/neutron-compute-plugin-nuage.yaml b/puppet/services/neutron-compute-plugin-nuage.yaml
new file mode 100644
index 00000000..c5fbeeca
--- /dev/null
+++ b/puppet/services/neutron-compute-plugin-nuage.yaml
@@ -0,0 +1,26 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Compute Nuage plugin
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NovaPassword:
+ description: The password for the nova service account, used by nova-api.
+ type: string
+ hidden: true
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Compute Nuage plugin
+ value:
+ config_settings:
+ tripleo::profile::base::neutron::agents::nuage::nova_os_tenant_name: 'service'
+ tripleo::profile::base::neutron::agents::nuage::nova_os_password: {get_param: NovaPassword}
+ tripleo::profile::base::neutron::agents::nuage::nova_auth_ip: {get_param: [EndpointMap, KeystoneInternal, host]}
+ step_config: |
+ include ::tripleo::profile::base::neutron::agents::nuage
diff --git a/puppet/services/neutron-compute-plugin-opencontrail.yaml b/puppet/services/neutron-compute-plugin-opencontrail.yaml
new file mode 100644
index 00000000..2c79c56b
--- /dev/null
+++ b/puppet/services/neutron-compute-plugin-opencontrail.yaml
@@ -0,0 +1,19 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Compute OpenContrail plugin
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Compute OpenContrail plugin
+ value:
+ config_settings:
+ step_config: |
+ include ::tripleo::profile::base::neutron::opencontrail::vrouter
diff --git a/puppet/services/neutron-compute-plugin-plumgrid.yaml b/puppet/services/neutron-compute-plugin-plumgrid.yaml
new file mode 100644
index 00000000..b8ec389e
--- /dev/null
+++ b/puppet/services/neutron-compute-plugin-plumgrid.yaml
@@ -0,0 +1,19 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Compute Plumgrid plugin
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Compute Plumgrid plugin
+ value:
+ config_settings:
+ step_config: |
+ include tripleo::profile::base::neutron::plumgrid
diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml
index 80ccf1c2..5d02bc90 100644
--- a/puppet/services/neutron-dhcp.yaml
+++ b/puppet/services/neutron-dhcp.yaml
@@ -13,22 +13,6 @@ parameters:
default: 'False'
description: If True, DHCP provide metadata route to VM.
type: string
- NeutronDnsmasqOptions:
- default: 'dhcp-option-force=26,%MTU%'
- description: >
- Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU
- to be set to the value of NeutronTenantMtu, which should be set to account
- for tunnel overhead.
- type: string
- NeutronTenantMtu:
- description: >
- The default MTU for tenant networks. For VXLAN/GRE tunneling, this should
- be at least 50 bytes smaller than the MTU on the physical network. This
- value will be used to set the MTU on the virtual Ethernet device.
- This value will be used to construct the NeutronDnsmasqOptions, since that
- will determine the MTU that is assigned to the VM host through DHCP.
- default: "1400"
- type: string
resources:
@@ -42,12 +26,6 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- - neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf
- tripleo::profile::base::neutron::dhcp:
- str_replace:
- template: {get_param: NeutronDnsmasqOptions}
- params:
- '%MTU%': {get_param: NeutronTenantMtu}
- neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
+ - neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
step_config: |
include tripleo::profile::base::neutron::dhcp
diff --git a/puppet/services/neutron-midonet.yaml b/puppet/services/neutron-midonet.yaml
new file mode 100644
index 00000000..736c01c3
--- /dev/null
+++ b/puppet/services/neutron-midonet.yaml
@@ -0,0 +1,48 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Midonet plugin and services
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NeutronPassword:
+ description: The password for the neutron service and db account, used by neutron agents.
+ type: string
+ hidden: true
+ AdminPassword:
+ description: The password for the keystone admin account, used for monitoring, querying neutron etc.
+ type: string
+ hidden: true
+ AdminToken:
+ description: The keystone auth secret and db password.
+ type: string
+ hidden: true
+ EnableZookeeperOnController:
+ label: Enable Zookeeper On Controller
+    description: 'Whether to enable the Zookeeper cluster on the Controller'
+ type: boolean
+ default: false
+ EnableCassandraOnController:
+ label: Enable Cassandra On Controller
+    description: 'Whether to enable the Cassandra cluster on the Controller'
+ type: boolean
+ default: false
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Midonet plugin and services
+ value:
+ config_settings:
+ tripleo::profile::base::neutron::midonet::admin_password: {get_param: AdminPassword}
+ tripleo::profile::base::neutron::midonet::keystone_admin_token: {get_param: AdminToken}
+ tripleo::profile::base::neutron::midonet::neutron_auth_password: {get_param: NeutronPassword}
+ tripleo::profile::base::neutron::midonet::zk_on_controller: {get_param: EnableZookeeperOnController}
+ tripleo::profile::base::neutron::midonet::neutron_auth_tenant: 'service'
+ enable_cassandra_on_controller: {get_param: EnableCassandraOnController}
+ neutron::service_plugins: []
+ step_config: |
+ include tripleo::profile::base::neutron::plugins::midonet
diff --git a/puppet/services/neutron-plugin-ml2.yaml b/puppet/services/neutron-plugin-ml2.yaml
index 435a6de0..ff13d5d8 100644
--- a/puppet/services/neutron-plugin-ml2.yaml
+++ b/puppet/services/neutron-plugin-ml2.yaml
@@ -106,4 +106,4 @@ outputs:
TYPES: {get_param: NeutronNetworkType}
step_config: |
- include ::tripleo::profile::base::neutron::ml2
+ include ::tripleo::profile::base::neutron::plugins::ml2
diff --git a/puppet/services/neutron-plugin-nuage.yaml b/puppet/services/neutron-plugin-nuage.yaml
index db87f504..3c3d8b63 100644
--- a/puppet/services/neutron-plugin-nuage.yaml
+++ b/puppet/services/neutron-plugin-nuage.yaml
@@ -9,6 +9,46 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ # Config specific parameters, to be provided via parameter_defaults
+ NeutronNuageOSControllerIp:
+ description: IP address of the OpenStack Controller
+ type: string
+
+ NeutronNuageNetPartitionName:
+ description: Specifies the title that you will see on the VSD
+ type: string
+ default: 'default_name'
+
+ NeutronNuageVSDIp:
+ description: IP address and port of the Virtual Services Directory
+ type: string
+
+ NeutronNuageVSDUsername:
+ description: Username to be used to log into VSD
+ type: string
+
+ NeutronNuageVSDPassword:
+ description: Password to be used to log into VSD
+ type: string
+
+ NeutronNuageVSDOrganization:
+ description: Organization parameter required to log into VSD
+ type: string
+ default: 'organization'
+
+ NeutronNuageBaseURIVersion:
+ description: URI version to be used based on the VSD release
+ type: string
+ default: 'default_uri_version'
+
+ NeutronNuageCMSId:
+ description: Cloud Management System ID (CMS ID) to distinguish between OS instances on the same VSD
+ type: string
+
+ UseForwardedFor:
+ description: Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy.
+ type: boolean
+ default: false
resources:
@@ -22,5 +62,14 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
+ - neutron::plugins::nuage::nuage_oscontroller_ip: {get_param: NeutronNuageOSControllerIp}
+ neutron::plugins::nuage::nuage_net_partition_name: {get_param: NeutronNuageNetPartitionName}
+ neutron::plugins::nuage::nuage_vsd_ip: {get_param: NeutronNuageVSDIp}
+ neutron::plugins::nuage::nuage_vsd_username: {get_param: NeutronNuageVSDUsername}
+ neutron::plugins::nuage::nuage_vsd_password: {get_param: NeutronNuageVSDPassword}
+ neutron::plugins::nuage::nuage_vsd_organization: {get_param: NeutronNuageVSDOrganization}
+ neutron::plugins::nuage::nuage_base_uri_version: {get_param: NeutronNuageBaseURIVersion}
+ neutron::plugins::nuage::nuage_cms_id: {get_param: NeutronNuageCMSId}
+ nova::api::use_forwarded_for: {get_param: UseForwardedFor}
step_config: |
include tripleo::profile::base::neutron::plugins::nuage
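None of the VSD connection parameters above have usable defaults, so a Nuage deployment has to provide them through parameter_defaults. A minimal sketch with placeholder values:

    parameter_defaults:
      NeutronNuageOSControllerIp: 192.0.2.10
      NeutronNuageVSDIp: '203.0.113.10:8443'
      NeutronNuageVSDUsername: csproot
      NeutronNuageVSDPassword: 'example-password'
      NeutronNuageCMSId: 'example-cms-id'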
diff --git a/puppet/services/neutron-plugin-opencontrail.yaml b/puppet/services/neutron-plugin-opencontrail.yaml
new file mode 100644
index 00000000..9c58c03c
--- /dev/null
+++ b/puppet/services/neutron-plugin-opencontrail.yaml
@@ -0,0 +1,60 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Opencontrail plugin
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ AdminPassword:
+ description: The password for the keystone admin account, used for monitoring, querying neutron etc.
+ type: string
+ hidden: true
+ AdminToken:
+ description: The keystone auth secret and db password.
+ type: string
+ hidden: true
+ ContrailApiServerIp:
+ description: IP address of the OpenContrail API server
+ type: string
+ ContrailApiServerPort:
+ description: Port of the OpenContrail API
+ type: string
+ default: 8082
+ ContrailMultiTenancy:
+ description: Whether to enable multi tenancy
+ type: boolean
+ default: false
+ ContrailExtensions:
+ description: List of OpenContrail extensions to be enabled
+ type: comma_delimited_list
+ default: ''
+
+resources:
+
+ NeutronBase:
+ type: ./neutron-base.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Opencontrail plugin
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ - neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions
+
+ neutron::plugins::opencontrail::api_server_ip: {get_param: ContrailApiServerIp}
+ neutron::plugins::opencontrail::api_server_port: {get_param: ContrailApiServerPort}
+ neutron::plugins::opencontrail::multi_tenancy: {get_param: ContrailMultiTenancy}
+ neutron::plugins::opencontrail::contrail_extensions: {get_param: ContrailExtensions}
+ neutron::plugins::opencontrail::keystone_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ neutron::plugins::opencontrail::keystone_admin_user: admin
+ neutron::plugins::opencontrail::keystone_admin_tenant_name: admin
+ neutron::plugins::opencontrail::keystone_admin_password: {get_param: AdminPassword}
+ neutron::plugins::opencontrail::keystone_admin_token: {get_param: AdminToken}
+ step_config: |
+ include tripleo::profile::base::neutron::plugins::opencontrail
diff --git a/puppet/services/neutron-server.yaml b/puppet/services/neutron-server.yaml
index 6299c39e..d759d420 100644
--- a/puppet/services/neutron-server.yaml
+++ b/puppet/services/neutron-server.yaml
@@ -42,7 +42,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- neutron_dsn: &neutron_dsn
+ neutron::server::database_connection:
list_join:
- ''
- - {get_param: [EndpointMap, MysqlInternal, protocol]}
@@ -54,7 +54,6 @@ outputs:
neutron::server::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
neutron::server::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
neutron::server::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
- neutron::server::database_connection: *neutron_dsn
neutron::server::api_workers: {get_param: NeutronWorkers}
neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
neutron::server::l3_ha: {get_param: NeutronL3HA}
@@ -66,5 +65,11 @@ outputs:
neutron::server::notifications::project_name: 'service'
neutron::server::notifications::password: {get_param: NovaPassword}
neutron::db::mysql::password: {get_param: NeutronPassword}
+ neutron::db::mysql::user: neutron
+ neutron::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]}
+ neutron::db::mysql::dbname: ovs_neutron
+ neutron::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
step_config: |
include tripleo::profile::base::neutron::server
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index 0844aa85..679586f7 100644
--- a/puppet/services/nova-compute.yaml
+++ b/puppet/services/nova-compute.yaml
@@ -16,10 +16,18 @@ resources:
outputs:
role_data:
- description: Role data for the Nova Conductor service.
+ description: Role data for the Nova Compute service.
value:
config_settings:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
+ - nova::compute::libvirt::manage_libvirt_services: false
+ # we manage migration in nova common puppet profile
+ nova::compute::libvirt::migration_support: false
+ tripleo::profile::base::nova::manage_migration: true
+ tripleo::profile::base::nova::nova_compute_enabled: true
step_config: |
- include tripleo::profile::base::nova::compute
+        # TODO(emilien): figure out how to deal with the libvirt profile.
+        # We'll probably treat it like we do with Neutron plugins.
+ # Until then, just include it in the default nova-compute role.
+ include tripleo::profile::base::nova::compute::libvirt
diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml
new file mode 100644
index 00000000..e3309c32
--- /dev/null
+++ b/puppet/services/nova-libvirt.yaml
@@ -0,0 +1,31 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Libvirt service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+ NovaBase:
+ type: ./nova-base.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Libvirt service.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [NovaBase, role_data, config_settings]
+ # we include ::nova::compute::libvirt::services in nova/libvirt profile
+ - nova::compute::libvirt::manage_libvirt_services: false
+ # we manage migration in nova common puppet profile
+ nova::compute::libvirt::migration_support: false
+ tripleo::profile::base::nova::manage_migration: true
+ tripleo::profile::base::nova::libvirt_enabled: true
+ step_config: |
+ include tripleo::profile::base::nova::libvirt
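With libvirt split out of the compute manifest into its own service template, a compute role is expected to include both entries. A registry sketch, assuming the usual OS::TripleO::Services::* key names (defined outside this diff):

    resource_registry:
      OS::TripleO::Services::NovaCompute: puppet/services/nova-compute.yaml
      OS::TripleO::Services::NovaLibvirt: puppet/services/nova-libvirt.yaml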
diff --git a/puppet/services/pacemaker/ceilometer-agent-central.yaml b/puppet/services/pacemaker/ceilometer-agent-central.yaml
new file mode 100644
index 00000000..8fb7bd23
--- /dev/null
+++ b/puppet/services/pacemaker/ceilometer-agent-central.yaml
@@ -0,0 +1,29 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ceilometer Central Agent service with Pacemaker configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+ CeilometerServiceBase:
+ type: ../ceilometer-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Central Agent pacemaker role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [CeilometerServiceBase, role_data, config_settings]
+ - ceilometer::agent::central::manage_service: false
+ ceilometer::agent::central::enabled: false
+ step_config: |
+ include ::tripleo::profile::pacemaker::ceilometer::agent::central
diff --git a/puppet/services/pacemaker/ceilometer-agent-notification.yaml b/puppet/services/pacemaker/ceilometer-agent-notification.yaml
new file mode 100644
index 00000000..54709783
--- /dev/null
+++ b/puppet/services/pacemaker/ceilometer-agent-notification.yaml
@@ -0,0 +1,29 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ceilometer Notification Agent service with Pacemaker configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+ CeilometerServiceBase:
+ type: ../ceilometer-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Notification Agent pacemaker role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [CeilometerServiceBase, role_data, config_settings]
+ - ceilometer::agent::notification::manage_service: false
+ ceilometer::agent::notification::enabled: false
+ step_config: |
+ include ::tripleo::profile::pacemaker::ceilometer::agent::notification
diff --git a/puppet/services/pacemaker/ceilometer-api.yaml b/puppet/services/pacemaker/ceilometer-api.yaml
new file mode 100644
index 00000000..d45b1578
--- /dev/null
+++ b/puppet/services/pacemaker/ceilometer-api.yaml
@@ -0,0 +1,29 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ceilometer API service with Pacemaker configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+ CeilometerServiceBase:
+ type: ../ceilometer-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer API pacemaker role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [CeilometerServiceBase, role_data, config_settings]
+ - ceilometer::api::manage_service: false
+ ceilometer::api::enabled: false
+ step_config: |
+ include ::tripleo::profile::pacemaker::ceilometer::api
diff --git a/puppet/services/pacemaker/ceilometer-collector.yaml b/puppet/services/pacemaker/ceilometer-collector.yaml
new file mode 100644
index 00000000..487a557c
--- /dev/null
+++ b/puppet/services/pacemaker/ceilometer-collector.yaml
@@ -0,0 +1,29 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Ceilometer Collector service with Pacemaker configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+ CeilometerServiceBase:
+ type: ../ceilometer-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Collector pacemaker role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [CeilometerServiceBase, role_data, config_settings]
+ - ceilometer::collector::manage_service: false
+ ceilometer::collector::enabled: false
+ step_config: |
+ include ::tripleo::profile::pacemaker::ceilometer::collector
diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml
index 5833c42d..780c295e 100644
--- a/puppet/services/pacemaker/heat-api-cfn.yaml
+++ b/puppet/services/pacemaker/heat-api-cfn.yaml
@@ -25,7 +25,5 @@ outputs:
- get_attr: [HeatApiCfnBase, role_data, config_settings]
- heat::api_cfn::manage_service: false
heat::api_cfn::enabled: false
- step_config:
- # No puppet manifests since heat-api-cfn is included in
- # ::tripleo::profile::pacemaker::heat which is maintained alongside of
- # pacemaker/heat-api.yaml.
+ step_config: |
+ include ::tripleo::profile::pacemaker::heat::api_cfn
diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
index 8b67702c..2fa82fe7 100644
--- a/puppet/services/pacemaker/heat-api-cloudwatch.yaml
+++ b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
@@ -25,7 +25,5 @@ outputs:
- get_attr: [HeatApiCloudwatchBase, role_data, config_settings]
- heat::api_cloudwatch::manage_service: false
heat::api_cloudwatch::enabled: false
- step_config:
- # No puppet manifests since heat-api-cloudwatch is included in
- # ::tripleo::profile::pacemaker::heat which is maintained alongside of
- # pacemaker/heat-api.yaml.
+ step_config: |
+ include ::tripleo::profile::pacemaker::heat::api_cloudwatch
diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml
index 6628e8dd..be897a55 100644
--- a/puppet/services/pacemaker/heat-api.yaml
+++ b/puppet/services/pacemaker/heat-api.yaml
@@ -26,4 +26,4 @@ outputs:
- heat::api::manage_service: false
heat::api::enabled: false
step_config: |
- include ::tripleo::profile::pacemaker::heat
+ include ::tripleo::profile::pacemaker::heat::api
diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml
index e1195780..a8ed5c0c 100644
--- a/puppet/services/pacemaker/heat-engine.yaml
+++ b/puppet/services/pacemaker/heat-engine.yaml
@@ -26,7 +26,5 @@ outputs:
- get_attr: [HeatEngineBase, role_data, config_settings]
- heat::engine::manage_service: false
heat::engine::enabled: false
- step_config:
- # No puppet manifests since heat-engine is included in
- # ::tripleo::profile::pacemaker::heat which is maintained alongside of
- # pacemaker/heat-api.yaml.
+ step_config: |
+ include ::tripleo::profile::pacemaker::heat::engine
diff --git a/puppet/services/pacemaker/neutron-midonet.yaml b/puppet/services/pacemaker/neutron-midonet.yaml
new file mode 100644
index 00000000..f9fd992c
--- /dev/null
+++ b/puppet/services/pacemaker/neutron-midonet.yaml
@@ -0,0 +1,28 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Midonet with Pacemaker configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+ NeutronMidonetBase:
+ type: ../neutron-midonet.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Midonet plugin.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronMidonetBase, role_data, config_settings]
+ step_config: |
+ include ::tripleo::profile::pacemaker::neutron::plugins::midonet
diff --git a/puppet/services/pacemaker/neutron-plugin-ml2.yaml b/puppet/services/pacemaker/neutron-plugin-ml2.yaml
index ac9d2402..9091b5b9 100644
--- a/puppet/services/pacemaker/neutron-plugin-ml2.yaml
+++ b/puppet/services/pacemaker/neutron-plugin-ml2.yaml
@@ -25,4 +25,4 @@ outputs:
- neutron::agents::ml2::ovs::enabled: false
neutron::agents::ml2::ovs::manage_service: false
step_config: |
- include ::tripleo::profile::pacemaker::neutron::ml2
+ include ::tripleo::profile::pacemaker::neutron::plugins::ml2
diff --git a/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml b/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml
new file mode 100644
index 00000000..d8c75509
--- /dev/null
+++ b/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml
@@ -0,0 +1,28 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron OpenContrail Plugin with Pacemaker configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+ NeutronPluginOpenContrail:
+ type: ../neutron-plugin-nuage.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron OpenContrail plugin.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronPluginOpenContrail, role_data, config_settings]
+ step_config: |
+ include ::tripleo::profile::pacemaker::neutron::plugins::opencontrail
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index 581b4ba4..3688c4a8 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -18,7 +18,7 @@ parameters:
type: string
hidden: true
RabbitFDLimit:
- default: 16384
+ default: 65536
description: Configures RabbitMQ FD limit
type: string
RabbitIPv6:
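RabbitFDLimit stays operator-tunable after the default bump; note the parameter is typed as a string, so any override should be quoted. A minimal sketch:

# Illustrative parameter_defaults override for the RabbitMQ FD limit
parameter_defaults:
  RabbitFDLimit: '131072'   # string-typed; template default is now '65536'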
diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml
index 17ef49fa..f0411a35 100644
--- a/puppet/services/sahara-engine.yaml
+++ b/puppet/services/sahara-engine.yaml
@@ -37,5 +37,11 @@ outputs:
- '/sahara'
sahara::database_connection: *sahara_dsn
sahara::db::mysql::password: {get_param: SaharaPassword}
+ sahara::db::mysql::user: sahara
+ sahara::db::mysql::host: {get_param: [EndpointMap, MysqlVirtual, host]}
+ sahara::db::mysql::dbname: sahara
+ sahara::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
step_config: |
include ::tripleo::profile::base::sahara::engine
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
index 7ed880fc..91f0e049 100644
--- a/puppet/services/services.yaml
+++ b/puppet/services/services.yaml
@@ -27,9 +27,8 @@ resources:
EndpointMap: {get_param: EndpointMap}
outputs:
- config_settings:
- description: Configuration settings.
- value: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
- step_config:
- description: Step configuration.
- value: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]}
+ role_data:
+ description: Combined Role data for this set of services.
+ value:
+ config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
+ step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]}
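Folding config_settings and step_config into a single role_data output lets the consuming role pick up the whole map with one get_attr, matching the RoleData parameter that swift-storage-post.yaml gains further down. A sketch of the wiring; the resource and parameter names here are assumptions for illustration, not taken from this change:

# Sketch: wiring the combined role_data into a role's -post stack
resources:
  ObjectStorageServiceChain:                               # assumed name
    type: puppet/services/services.yaml
    properties:
      Services: {get_param: ObjectStorageServices}         # assumed parameter
      EndpointMap: {get_attr: [EndpointMap, endpoint_map]} # assumed resource
  ObjectStorageNodesPostDeployment:                        # assumed name
    type: OS::TripleO::ObjectStoragePostDeployment
    properties:
      servers: {get_attr: [ObjectStorageServers, value]}   # assumed
      RoleData: {get_attr: [ObjectStorageServiceChain, role_data]}
      DeployIdentifier: {get_param: DeployIdentifier}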
diff --git a/puppet/services/snmp.yaml b/puppet/services/snmp.yaml
new file mode 100644
index 00000000..24ee2933
--- /dev/null
+++ b/puppet/services/snmp.yaml
@@ -0,0 +1,31 @@
+heat_template_version: 2016-04-08
+
+description: >
+ SNMP client configured with Puppet, to facilitate Ceilometer Hardware
+ monitoring in the undercloud. This service is required to enable hardware
+ monitoring.
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ SnmpdReadonlyUserName:
+ default: ro_snmp_user
+ description: The user name for SNMPd with readonly rights running on all Overcloud nodes
+ type: string
+ SnmpdReadonlyUserPassword:
+ description: The user password for SNMPd with readonly rights running on all Overcloud nodes
+ type: string
+ hidden: true
+
+outputs:
+ role_data:
+ description: Role data for the SNMP services
+ value:
+ config_settings:
+ snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
+ snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
+ step_config: |
+ include ::tripleo::profile::base::snmp
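The new service only emits the two snmpd hiera keys plus its step_config, so enabling it amounts to registering the template and supplying the read-only credentials. A hedged sketch; the OS::TripleO::Services::Snmp registry key is an assumption:

# Illustrative environment file for the new composable SNMP service
resource_registry:
  OS::TripleO::Services::Snmp: ../puppet/services/snmp.yaml   # assumed key name
parameter_defaults:
  SnmpdReadonlyUserName: ro_snmp_user
  SnmpdReadonlyUserPassword: 'ChangeMe'   # hidden parameter; set a real secret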
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index a86aeaf5..930b9e3d 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -17,6 +17,10 @@ parameters:
description: The password for the swift service account, used by the swift proxy services.
type: string
hidden: true
+ SwiftProxyNodeTimeout:
+ default: 60
+ description: Timeout for requests going from swift-proxy to swift a/c/o services.
+ type: number
SwiftWorkers:
default: 0
description: Number of workers for Swift service.
@@ -36,6 +40,7 @@ outputs:
swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
swift::proxy::authtoken::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
swift::proxy::authtoken::admin_password: {get_param: SwiftPassword}
+ swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
swift::proxy::workers: {get_param: SwiftWorkers}
swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]}
swift::keystone::auth::internal_url: {get_param: [EndpointMap, SwiftInternal, uri]}
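The node timeout can now be raised when slow account/container/object servers trip the 60 second default; a minimal override sketch:

# Illustrative override for the new swift-proxy node timeout
parameter_defaults:
  SwiftProxyNodeTimeout: 120   # number type; seconds for proxy -> a/c/o requests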
diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml
new file mode 100644
index 00000000..980c95f5
--- /dev/null
+++ b/puppet/services/swift-storage.yaml
@@ -0,0 +1,44 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Swift Storage service configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ SwiftMountCheck:
+ default: false
+ description: Value of mount_check in the Swift account/container/object server configuration files
+ type: boolean
+
+ # DEPRECATED options for compatibility with overcloud.yaml
+ # This should be removed and manipulation of the ControllerServices list
+ # used instead, but we need client support for that first
+ ControllerEnableSwiftStorage:
+ default: true
+ description: Whether to enable Swift Storage on the Controller
+ type: boolean
+
+parameter_groups:
+- label: deprecated
+ description: Do not use deprecated params; they will be removed.
+ parameters:
+ - ControllerEnableSwiftStorage
+
+outputs:
+ role_data:
+ description: Role data for the Swift Storage role.
+ value:
+ config_settings:
+ # Swift
+ swift::storage::all::mount_check: {get_param: SwiftMountCheck}
+ tripleo::profile::base::swift::storage::enable_swift_storage: {get_param: ControllerEnableSwiftStorage}
+ step_config: |
+ include ::tripleo::profile::base::swift::storage
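Both exposed settings can be overridden per deployment; ControllerEnableSwiftStorage is kept only for compatibility, as the parameter_groups entry above notes. A minimal sketch:

# Illustrative overrides for the composable Swift storage service
parameter_defaults:
  SwiftMountCheck: true                 # check devices are mounted before use
  ControllerEnableSwiftStorage: false   # deprecated; prefer editing the service list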
diff --git a/puppet/services/time/ntp.yaml b/puppet/services/time/ntp.yaml
index dbef6f91..930dca41 100644
--- a/puppet/services/time/ntp.yaml
+++ b/puppet/services/time/ntp.yaml
@@ -12,7 +12,7 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- NtpServers:
+ NtpServer:
default: []
description: NTP servers
type: comma_delimited_list
@@ -22,6 +22,6 @@ outputs:
description: Role ntp using composable services.
value:
config_settings:
- ntp::ntpservers: {get_param: NtpServers}
+ ntp::ntpservers: {get_param: NtpServer}
step_config: |
- include ::ntp
\ No newline at end of file
+ include ::ntp
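Existing environments that set NtpServers need to move to the singular NtpServer name after this rename; the value remains a comma_delimited_list, so either form below works. A minimal sketch:

# Illustrative NTP override after the NtpServers -> NtpServer rename
parameter_defaults:
  NtpServer: ['0.pool.ntp.org', '1.pool.ntp.org']
  # or equivalently: NtpServer: 0.pool.ntp.org,1.pool.ntp.org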
diff --git a/puppet/services/time/timezone.yaml b/puppet/services/time/timezone.yaml
new file mode 100644
index 00000000..13fda986
--- /dev/null
+++ b/puppet/services/time/timezone.yaml
@@ -0,0 +1,24 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Composable Timezone service
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ TimeZone:
+ default: 'UTC'
+ description: The timezone to be set on the overcloud.
+ type: string
+
+outputs:
+ role_data:
+ description: Timezone role using composable services.
+ value:
+ config_settings:
+ timezone::timezone: {get_param: TimeZone}
+ step_config: |
+ include ::timezone
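With timezone handling now composable, the zone is set through a single parameter instead of per-role TimeZone inputs; a minimal sketch:

# Illustrative timezone override for the new composable service
parameter_defaults:
  TimeZone: 'Europe/Madrid'   # any zoneinfo name; defaults to UTC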
diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml
index 1aba2bb4..1c36a047 100644
--- a/puppet/swift-storage-post.yaml
+++ b/puppet/swift-storage-post.yaml
@@ -8,13 +8,12 @@ parameters:
type: boolean
servers:
type: json
- NodeConfigIdentifiers:
- type: json
- description: Value which changes if the node configuration may need to be re-applied
- StepConfig:
+ RoleData:
+ type: json
+ default: {}
+ DeployIdentifier:
type: string
- description: Config manifests that will be used to step through the deployment.
- default: ''
+ description: Value which changes if the node configuration may need to be re-applied
resources:
@@ -27,7 +26,7 @@ resources:
servers: {get_param: servers}
config: {get_resource: StorageArtifactsConfig}
input_values:
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
StoragePuppetConfig:
type: OS::Heat::SoftwareConfig
@@ -46,7 +45,7 @@ resources:
- ''
- - get_file: manifests/overcloud_object.pp
- get_file: manifests/ringbuilder.pp
- - {get_param: StepConfig}
+ - {get_param: [RoleData, step_config]}
StorageRingbuilderDeployment_Step2:
type: OS::Heat::StructuredDeployments
@@ -57,7 +56,7 @@ resources:
config: {get_resource: StoragePuppetConfig}
input_values:
step: 2
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
StorageRingbuilderDeployment_Step3:
type: OS::Heat::StructuredDeployments
@@ -68,7 +67,7 @@ resources:
config: {get_resource: StoragePuppetConfig}
input_values:
step: 3
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
StorageDeployment_Step4:
type: OS::Heat::StructuredDeployments
@@ -79,7 +78,7 @@ resources:
config: {get_resource: StoragePuppetConfig}
input_values:
step: 4
- update_identifier: {get_param: NodeConfigIdentifiers}
+ update_identifier: {get_param: DeployIdentifier}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 30f7657b..9d049bd3 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -18,10 +18,6 @@ parameters:
default: default
description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
- MountCheck:
- default: 'false'
- description: Value of mount_check in Swift account/container/object -server.conf
- type: boolean
MinPartHours:
type: number
default: 1
@@ -61,10 +57,6 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
- TimeZone:
- default: 'UTC'
- description: The timezone to be set on Ceph nodes.
- type: string
Hostname:
type: string
default: '' # Defaults to Heat created hostname
@@ -120,11 +112,18 @@ parameters:
ServiceConfigSettings:
type: json
default: {}
+ ConfigCommand:
+ type: string
+ description: Command which will be run whenever configuration data changes
+ default: os-refresh-config --timeout 14400
resources:
SwiftStorage:
type: OS::Nova::Server
+ metadata:
+ os-collect-config:
+ command: {get_param: ConfigCommand}
properties:
image: {get_param: Image}
flavor: {get_param: Flavor}
@@ -286,9 +285,7 @@ resources:
tripleo::ringbuilder::part_power: { get_input: swift_part_power }
tripleo::ringbuilder::replicas: {get_input: swift_replicas }
swift::storage::all::storage_local_net_ip: {get_input: swift_management_network}
- swift_mount_check: {get_input: swift_mount_check }
tripleo::ringbuilder::min_part_hours: { get_input: swift_min_part_hours }
- timezone::timezone: {get_input: timezone}
snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
tripleo::packages::enable_install: {get_input: enable_package_install}
@@ -307,12 +304,10 @@ resources:
snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
swift_hash_suffix: {get_param: HashSuffix}
- swift_mount_check: {get_param: MountCheck}
swift_min_part_hours: {get_param: MinPartHours}
swift_ring_build: {get_param: RingBuild}
swift_part_power: {get_param: PartPower}
swift_replicas: { get_param: Replicas}
- timezone: {get_param: TimeZone}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
@@ -345,6 +340,12 @@ resources:
get_param: UpdateIdentifier
outputs:
+ ip_address:
+ description: IP address of the server in the ctlplane network
+ value: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ hostname:
+ description: Hostname of the server
+ value: {get_attr: [SwiftStorage, name]}
hosts_entry:
value:
str_replace:
@@ -425,11 +426,3 @@ outputs:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value:
- list_join:
- - ','
- - - {get_attr: [SwiftStorageHieraDeploy, deploy_stdout]}
- - {get_attr: [NodeTLSCAData, deploy_stdout]}
- - {get_param: UpdateIdentifier}
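The new ConfigCommand parameter feeds the os-collect-config metadata added to the SwiftStorage server above, so the refresh command and its timeout can be tuned without editing the template; a minimal sketch:

# Illustrative override of the os-collect-config command on Swift storage nodes
parameter_defaults:
  ConfigCommand: 'os-refresh-config --timeout 3600'   # template default is 14400 seconds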
diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml
index 92234b6c..51129053 100644
--- a/puppet/vip-config.yaml
+++ b/puppet/vip-config.yaml
@@ -37,6 +37,7 @@ resources:
internal_api_virtual_ip: {get_input: internal_api_virtual_ip}
storage_virtual_ip: {get_input: storage_virtual_ip}
storage_mgmt_virtual_ip: {get_input: storage_mgmt_virtual_ip}
+ ironic_api_vip: {get_input: ironic_api_vip}
# public_virtual_ip and controller_virtual_ip are needed in
# both HAproxy & keepalived.
tripleo::haproxy::public_virtual_ip: {get_input: public_virtual_ip}