Diffstat (limited to 'puppet')
136 files changed, 4360 insertions, 3878 deletions
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index b065ddd2..7602d8d0 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -56,7 +56,12 @@ parameters: type: comma_delimited_list sahara_api_node_ips: type: comma_delimited_list - + ironic_api_node_ips: + type: comma_delimited_list + ceph_mon_node_ips: + type: comma_delimited_list + ceph_mon_node_names: + type: comma_delimited_list DeployIdentifier: type: string description: > @@ -294,7 +299,31 @@ resources: list_join: - "','" - {get_param: sahara_api_node_ips} + ironic_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: ironic_api_node_ips} + tripleo::profile::base::ceph::ceph_mon_initial_members: + list_join: + - ',' + - {get_param: ceph_mon_node_names} + tripleo::profile::base::ceph::ceph_mon_host: + list_join: + - ',' + - {get_param: ceph_mon_node_ips} + tripleo::profile::base::ceph::ceph_mon_host_v6: + str_replace: + template: "'[IPS_LIST]'" + params: + IPS_LIST: + list_join: + - '],[' + - {get_param: ceph_mon_node_ips} # NOTE(gfidente): interpolation with %{} in the # hieradata file can't be used as it returns string ceilometer::rabbit_hosts: *rabbit_nodes_array @@ -306,6 +335,7 @@ resources: nova::rabbit_hosts: *rabbit_nodes_array keystone::rabbit_hosts: *rabbit_nodes_array sahara::rabbit_hosts: *rabbit_nodes_array + ironic::rabbit_hosts: *rabbit_nodes_array deploy_identifier: {get_param: DeployIdentifier} update_identifier: {get_param: UpdateIdentifier} diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml index fd161886..2e329989 100644 --- a/puppet/ceph-cluster-config.yaml +++ b/puppet/ceph-cluster-config.yaml @@ -2,49 +2,15 @@ heat_template_version: 2015-04-30 description: 'Ceph Cluster config data for Puppet' parameters: - ceph_storage_count: - default: 0 - type: number - description: Number of Ceph storage nodes. Used to enable/disable managed Ceph installation. - ceph_external_mon_ips: - default: '' - type: string - description: List of external Ceph Mon host IPs. - ceph_client_key: - default: '' - type: string - description: Ceph key used to create the client user keyring. 
- ceph_fsid: - default: '' - type: string - ceph_admin_key: - default: '' - type: string - ceph_mon_key: - default: '' - type: string - ceph_mon_names: - type: comma_delimited_list - ceph_mon_ips: - type: comma_delimited_list NovaRbdPoolName: default: vms type: string - CinderRbdPoolName: - default: volumes - type: string - GlanceRbdPoolName: - default: images - type: string GnocchiRbdPoolName: default: metrics type: string CephClientUserName: default: openstack type: string - CephIPv6: - default: False - type: boolean resources: CephClusterConfigImpl: @@ -56,65 +22,10 @@ resources: datafiles: ceph_cluster: mapped_data: - ceph_ipv6: {get_param: CephIPv6} - ceph_storage_count: {get_param: ceph_storage_count} - ceph_mon_initial_members: - list_join: - - ',' - - {get_param: ceph_mon_names} - ceph_mon_host: - list_join: - - ',' - - {get_param: ceph_mon_ips} - ceph_mon_host_v6: - str_replace: - template: "'[IPS_LIST]'" - params: - IPS_LIST: - list_join: - - '],[' - - {get_param: ceph_mon_ips} - ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6} - ceph::profile::params::fsid: {get_param: ceph_fsid} - ceph::profile::params::mon_key: {get_param: ceph_mon_key} - # We should use a separated key for the non-admin clients - ceph::profile::params::client_keys: - str_replace: - template: "{ - client.admin: { - secret: 'ADMIN_KEY', - mode: '0600', - cap_mon: 'allow *', - cap_osd: 'allow *', - cap_mds: 'allow *' - }, - client.bootstrap-osd: { - secret: 'ADMIN_KEY', - keyring_path: '/var/lib/ceph/bootstrap-osd/ceph.keyring', - cap_mon: 'allow profile bootstrap-osd' - }, - client.CLIENT_USER: { - secret: 'CLIENT_KEY', - mode: '0644', - cap_mon: 'allow r', - cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL' - } - }" - params: - CLIENT_USER: {get_param: CephClientUserName} - CLIENT_KEY: {get_param: ceph_client_key} - ADMIN_KEY: {get_param: ceph_admin_key} - NOVA_POOL: {get_param: NovaRbdPoolName} - CINDER_POOL: {get_param: CinderRbdPoolName} - GLANCE_POOL: {get_param: GlanceRbdPoolName} - GNOCCHI_POOL: {get_param: GnocchiRbdPoolName} nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName} - cinder_rbd_pool_name: {get_param: CinderRbdPoolName} - glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName} gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName} nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName} - glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} nova::compute::rbd::rbd_keyring: list_join: - '.' @@ -123,14 +34,10 @@ resources: gnocchi::storage::ceph::ceph_keyring: list_join: - '.' 
- - - 'client' + - - '/etc/ceph/ceph' + - 'client' - {get_param: CephClientUserName} - ceph_client_user_name: {get_param: CephClientUserName} - ceph_pools: - - {get_param: CinderRbdPoolName} - - {get_param: NovaRbdPoolName} - - {get_param: GlanceRbdPoolName} - - {get_param: GnocchiRbdPoolName} + - 'keyring' outputs: config_id: diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml index e90710c7..70baeb6e 100644 --- a/puppet/ceph-storage-post.yaml +++ b/puppet/ceph-storage-post.yaml @@ -10,8 +10,11 @@ parameters: type: boolean servers: type: json - NodeConfigIdentifiers: - type: json + RoleData: + type: json + default: {} + DeployIdentifier: + type: string description: Value which changes if the node configuration may need to be re-applied resources: @@ -25,7 +28,7 @@ resources: servers: {get_param: servers} config: {get_resource: CephStorageArtifactsConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} CephStoragePuppetConfig: type: OS::Heat::SoftwareConfig @@ -33,26 +36,44 @@ resources: group: puppet options: enable_debug: {get_param: ConfigDebug} + enable_hiera: True + enable_facter: False + inputs: + - name: step outputs: - name: result config: - get_file: manifests/overcloud_cephstorage.pp + list_join: + - '' + - - get_file: manifests/overcloud_cephstorage.pp + - {get_param: [RoleData, step_config]} - CephStorageDeployment_Step1: + CephStorageDeployment_Step2: type: OS::Heat::StructuredDeployments depends_on: CephStorageArtifactsDeploy properties: - name: CephStorageDeployment_Step1 + name: CephStorageDeployment_Step2 + servers: {get_param: servers} + config: {get_resource: CephStoragePuppetConfig} + input_values: + step: 2 + update_identifier: {get_param: DeployIdentifier} + + CephStorageDeployment_Step3: + type: OS::Heat::StructuredDeployments + depends_on: CephStorageDeployment_Step2 + properties: + name: CephStorageDeployment_Step3 servers: {get_param: servers} config: {get_resource: CephStoragePuppetConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + step: 3 + update_identifier: {get_param: DeployIdentifier} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. ExtraConfig: - depends_on: CephStorageDeployment_Step1 + depends_on: CephStorageDeployment_Step3 type: OS::TripleO::NodeExtraConfigPost properties: servers: {get_param: servers} - diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml index f0eb71e4..8a43b673 100644 --- a/puppet/ceph-storage.yaml +++ b/puppet/ceph-storage.yaml @@ -21,10 +21,6 @@ parameters: default: default constraints: - custom_constraint: nova.keypair - NtpServer: - default: '' - description: Comma-separated list of ntp servers - type: comma_delimited_list EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -34,10 +30,6 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. type: json - TimeZone: - default: 'UTC' - description: The timezone to be set on Ceph nodes. 
- type: string UpdateIdentifier: default: '' type: string @@ -96,10 +88,20 @@ parameters: NodeIndex: type: number default: 0 + ServiceConfigSettings: + type: json + default: {} + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 resources: CephStorage: type: OS::Nova::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} image_update_policy: {get_param: ImageUpdatePolicy} @@ -195,28 +197,23 @@ resources: properties: ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} - ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} - - NetIpSubnetMap: - type: OS::TripleO::Network::Ports::NetIpSubnetMap - properties: - ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} - ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} - InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} - StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} - StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} - TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} + ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -234,12 +231,10 @@ resources: config: {get_resource: CephStorageConfig} server: {get_resource: CephStorage} input_values: - ntp_servers: {get_param: NtpServer} - timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} - ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} - ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} + ceph_cluster_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} + ceph_public_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} CephStorageConfig: type: OS::Heat::StructuredConfig @@ -252,14 +247,24 @@ resources: - heat_config_%{::deploy_config_name} - ceph_extraconfig - extraconfig + - service_configs - ceph_cluster # provided by CephClusterConfig - ceph + - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common + - network merge_behavior: deeper datafiles: + service_configs: + mapped_data: {get_param: ServiceConfigSettings} common: raw_data: {get_file: hieradata/common.yaml} + network: + mapped_data: + 
net_ip_map: {get_attr: [NetIpMap, net_ip_map]} + net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]} + net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]} ceph_extraconfig: mapped_data: {get_param: CephStorageExtraConfig} extraconfig: @@ -267,8 +272,6 @@ resources: ceph: raw_data: {get_file: hieradata/ceph.yaml} mapped_data: - ntp::servers: {get_input: ntp_servers} - timezone::timezone: {get_input: timezone} tripleo::packages::enable_install: {get_input: enable_package_install} tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} ceph::profile::params::cluster_network: {get_input: ceph_cluster_network} @@ -382,12 +385,3 @@ outputs: management_ip_address: description: IP address of the server in the management network value: {get_attr: [ManagementPort, ip_address]} - config_identifier: - description: identifier which changes if the node configuration may need re-applying - value: - list_join: - - ',' - - - {get_attr: [CephStorageDeployment, deploy_stdout]} - - {get_attr: [NodeTLSCAData, deploy_stdout]} - - {get_attr: [CephStorageExtraConfigPre, deploy_stdout]} - - {get_param: UpdateIdentifier} diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml index f470203f..c3dd403e 100644 --- a/puppet/cinder-storage-post.yaml +++ b/puppet/cinder-storage-post.yaml @@ -8,9 +8,12 @@ parameters: type: boolean servers: type: json - NodeConfigIdentifiers: - type: json + DeployIdentifier: + type: string description: Value which changes if the node configuration may need to be re-applied + RoleData: + type: json + default: {} resources: @@ -23,7 +26,7 @@ resources: servers: {get_param: servers} config: {get_resource: VolumeArtifactsConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} VolumePuppetConfig: type: OS::Heat::SoftwareConfig @@ -32,22 +35,55 @@ resources: group: puppet options: enable_debug: {get_param: ConfigDebug} + enable_hiera: True + enable_facter: False + inputs: + - name: step outputs: - name: result config: - get_file: manifests/overcloud_volume.pp + list_join: + - '' + - - get_file: manifests/overcloud_volume.pp + - {get_param: [RoleData, step_config]} + + VolumeDeployment_Step2: + type: OS::Heat::StructuredDeployments + depends_on: VolumeArtifactsDeploy + properties: + name: VolumeDeployment_Step2 + servers: {get_param: servers} + config: {get_resource: VolumePuppetConfig} + input_values: + step: 2 + update_identifier: {get_param: DeployIdentifier} - VolumeDeployment_Step1: + VolumeDeployment_Step3: type: OS::Heat::StructuredDeployments + depends_on: VolumeDeployment_Step2 properties: - name: VolumeDeployment_Step1 + name: VolumeDeployment_Step3 servers: {get_param: servers} config: {get_resource: VolumePuppetConfig} + input_values: + step: 3 + update_identifier: {get_param: DeployIdentifier} + + VolumeDeployment_Step4: + type: OS::Heat::StructuredDeployments + depends_on: VolumeDeployment_Step3 + properties: + name: VolumeDeployment_Step4 + servers: {get_param: servers} + config: {get_resource: VolumePuppetConfig} + input_values: + step: 4 + update_identifier: {get_param: DeployIdentifier} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. 
ExtraConfig: - depends_on: VolumeDeployment_Step1 + depends_on: VolumeDeployment_Step4 type: OS::TripleO::NodeExtraConfigPost properties: servers: {get_param: servers} diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml index c1a04e24..d0f562ed 100644 --- a/puppet/cinder-storage.yaml +++ b/puppet/cinder-storage.yaml @@ -4,29 +4,6 @@ parameters: Image: default: overcloud-cinder-volume type: string - CinderEnableIscsiBackend: - default: true - description: Whether to enable or not the Iscsi backend for Cinder - type: boolean - CinderISCSIHelper: - default: lioadm - description: The iSCSI helper to use with cinder. - type: string - CinderLVMLoopDeviceSize: - default: 10280 - description: The size of the loopback file used by the cinder LVM driver. - type: number - CinderPassword: - description: The password for the cinder service and db account, used by cinder-api. - type: string - hidden: true - Debug: - default: '' - description: Set to True to enable debugging on all services. - type: string - VirtualIP: # deprecated. Use per service VIPs instead. - default: '' - type: string ExtraConfig: default: {} description: | @@ -50,22 +27,6 @@ parameters: default: default description: Name of an existing Nova key pair to enable SSH access to the instances type: string - RabbitPassword: - type: string - hidden: true - RabbitUserName: - default: 'guest' - type: string - RabbitClientUseSSL: - default: false - description: > - Rabbit client subscriber parameter to specify - an SSL connection to the RabbitMQ host. - type: string - RabbitClientPort: - default: 5672 - description: Set rabbit subscriber port, change this if using SSL - type: number SnmpdReadonlyUserName: default: ro_snmp_user description: The user name for SNMPd with readonly rights running on all Overcloud nodes @@ -74,10 +35,6 @@ parameters: description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true - NtpServer: - default: '' - description: Comma-separated list of ntp servers - type: comma_delimited_list EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -100,21 +57,6 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - TimeZone: - default: 'UTC' - description: The timezone to be set on Cinder nodes. 
- type: string - GlanceApiVirtualIP: - type: string - default: '' - MysqlVirtualIPUri: - type: string - default: '' NetworkDeploymentActions: type: comma_delimited_list description: > @@ -146,11 +88,20 @@ parameters: NodeIndex: type: number default: 0 - + ServiceConfigSettings: + type: json + default: {} + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 resources: BlockStorage: type: OS::Nova::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} @@ -246,16 +197,22 @@ resources: properties: ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkDeployment: @@ -274,29 +231,13 @@ resources: server: {get_resource: BlockStorage} config: {get_resource: BlockStorageConfig} input_values: - debug: {get_param: Debug} - cinder_dsn: {list_join: ['', ['mysql+pymysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIPUri} , '/cinder']]} snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} - cinder_lvm_loop_device_size: - str_replace: - template: sizeM - params: - size: {get_param: CinderLVMLoopDeviceSize} - cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend} - cinder_iscsi_helper: {get_param: CinderISCSIHelper} cinder_iscsi_ip_address: str_replace: template: "'IP'" params: IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]} - glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} - rabbit_username: {get_param: RabbitUserName} - rabbit_password: {get_param: RabbitPassword} - rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} - rabbit_client_port: {get_param: RabbitClientPort} - ntp_servers: {get_param: NtpServer} - timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} @@ -312,14 +253,23 @@ resources: - heat_config_%{::deploy_config_name} - volume_extraconfig - extraconfig + - service_configs - volume - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common + - network merge_behavior: deeper datafiles: + service_configs: + mapped_data: {get_param: ServiceConfigSettings} common: raw_data: {get_file: hieradata/common.yaml} + network: + mapped_data: + net_ip_map: {get_attr: [NetIpMap, net_ip_map]} + net_ip_subnet_map: 
{get_attr: [NetIpMap, net_ip_subnet_map]} + net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]} volume_extraconfig: mapped_data: {get_param: BlockStorageExtraConfig} extraconfig: @@ -328,19 +278,7 @@ resources: raw_data: {get_file: hieradata/volume.yaml} mapped_data: # Cinder - cinder::debug: {get_input: debug} - cinder::setup_test_volume::size: {get_input: cinder_lvm_loop_device_size} - cinder_iscsi_helper: {get_input: cinder_iscsi_helper} - cinder::database_connection: {get_input: cinder_dsn} - cinder::rabbit_userid: {get_input: rabbit_username} - cinder::rabbit_password: {get_input: rabbit_password} - cinder::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - cinder::rabbit_port: {get_input: rabbit_client_port} - cinder_enable_iscsi_backend: {get_input: cinder_enable_iscsi_backend} - cinder_iscsi_ip_address: {get_input: cinder_iscsi_ip_address} - cinder::glance::glance_api_servers: {get_input: glance_api_servers} - ntp::servers: {get_input: ntp_servers} - timezone::timezone: {get_input: timezone} + tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_ip_address: {get_input: cinder_iscsi_ip_address} tripleo::packages::enable_install: {get_input: enable_package_install} tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} @@ -448,11 +386,3 @@ outputs: management_ip_address: description: IP address of the server in the management network value: {get_attr: [ManagementPort, ip_address]} - config_identifier: - description: identifier which changes if the node configuration may need re-applying - value: - list_join: - - '' - - - {get_attr: [BlockStorageDeployment, deploy_stdout]} - - {get_attr: [NodeTLSCAData, deploy_stdout]} - - {get_param: UpdateIdentifier} diff --git a/puppet/compute-post.yaml b/puppet/compute-post.yaml index a122df0e..c1b37772 100644 --- a/puppet/compute-post.yaml +++ b/puppet/compute-post.yaml @@ -10,11 +10,13 @@ parameters: type: boolean servers: type: json - NodeConfigIdentifiers: - type: json + RoleData: + type: json + default: {} + DeployIdentifier: + type: string description: Value which changes if the node configuration may need to be re-applied - resources: ComputeArtifactsConfig: @@ -26,7 +28,7 @@ resources: servers: {get_param: servers} config: {get_resource: ComputeArtifactsConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ComputePuppetConfig: type: OS::Heat::SoftwareConfig @@ -34,25 +36,55 @@ resources: group: puppet options: enable_debug: {get_param: ConfigDebug} + enable_hiera: True + enable_facter: False + inputs: + - name: step outputs: - name: result config: - get_file: manifests/overcloud_compute.pp + list_join: + - '' + - - get_file: manifests/overcloud_compute.pp + - {get_param: [RoleData, step_config]} + + ComputeServicesBaseDeployment_Step2: + type: OS::Heat::StructuredDeployments + depends_on: [ComputeArtifactsDeploy] + properties: + name: ComputeServicesBaseDeployment_Step2 + servers: {get_param: servers} + config: {get_resource: ComputePuppetConfig} + input_values: + step: 2 + update_identifier: {get_param: DeployIdentifier} + + ComputeOvercloudServicesDeployment_Step3: + type: OS::Heat::StructuredDeployments + depends_on: ComputeServicesBaseDeployment_Step2 + properties: + name: ComputeOvercloudServicesDeployment_Step3 + servers: {get_param: servers} + config: {get_resource: ComputePuppetConfig} + input_values: + step: 3 + update_identifier: {get_param: DeployIdentifier} - 
ComputePuppetDeployment: + ComputeOvercloudServicesDeployment_Step4: type: OS::Heat::StructuredDeployments - depends_on: ComputeArtifactsDeploy + depends_on: ComputeOvercloudServicesDeployment_Step3 properties: - name: ComputePuppetDeployment + name: ComputeOvercloudServicesDeployment_Step4 servers: {get_param: servers} config: {get_resource: ComputePuppetConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + step: 4 + update_identifier: {get_param: DeployIdentifier} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. ExtraConfig: - depends_on: ComputePuppetDeployment + depends_on: ComputeOvercloudServicesDeployment_Step4 type: OS::TripleO::NodeExtraConfigPost properties: servers: {get_param: servers} diff --git a/puppet/compute.yaml b/puppet/compute.yaml index 4c18067a..01807f35 100644 --- a/puppet/compute.yaml +++ b/puppet/compute.yaml @@ -118,15 +118,6 @@ parameters: default: nic1 description: A port to add to the NeutronPhysicalBridge. type: string - NeutronTenantMtu: - description: > - The default MTU for tenant networks. For VXLAN/GRE tunneling, this should - be at least 50 bytes smaller than the MTU on the physical network. This - value will be used to set the MTU on the virtual Ethernet device. - This number is related to the value of NeutronDnsmasqOptions, since that - will determine the MTU that is assigned to the VM host through DHCP. - default: 1400 - type: number NeutronTunnelTypes: type: comma_delimited_list description: | @@ -144,12 +135,6 @@ parameters: of VXLAN VNI IDs that are available for tenant network allocation default: ["1:4094", ] type: comma_delimited_list - NeutronPublicInterfaceRawDevice: - default: '' - type: string - NeutronDVR: - default: 'False' - type: string NeutronMetadataProxySharedSecret: description: Shared secret to prevent spoofing type: string @@ -191,10 +176,6 @@ parameters: default: 'False' description: Whether to enable l3-agent HA type: string - NeutronAgentMode: - default: 'dvr_snat' - description: Agent mode for the neutron-l3-agent on the controller hosts - type: string NodeIndex: type: number default: 0 @@ -243,10 +224,6 @@ parameters: default: 'neutron' description: The full class name of the security API class type: string - NtpServer: - default: '' - description: Comma-separated list of ntp servers - type: comma_delimited_list RabbitHost: type: string default: '' # Has to be here because of the ignored empty value bug @@ -268,14 +245,6 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number - SnmpdReadonlyUserName: - default: ro_snmp_user - description: The user name for SNMPd with readonly rights running on all Overcloud nodes - type: string - SnmpdReadonlyUserPassword: - description: The user password for SNMPd with readonly rights running on all Overcloud nodes - type: string - hidden: true UpgradeLevelNovaCompute: type: string description: Nova Compute upgrade level @@ -294,10 +263,6 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - TimeZone: - default: 'UTC' - description: The timezone to be set on compute nodes. 
- type: string UpdateIdentifier: default: '' type: string @@ -339,11 +304,21 @@ parameters: type: json description: Optional scheduler hints to pass to nova default: {} + ServiceConfigSettings: + type: json + default: {} + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 resources: NovaCompute: type: OS::Nova::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} @@ -430,16 +405,22 @@ resources: properties: ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkConfig: @@ -475,12 +456,14 @@ resources: - heat_config_%{::deploy_config_name} - compute_extraconfig - extraconfig + - service_configs - compute - ceph_cluster # provided by CephClusterConfig - ceph - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common + - network - neutron_bigswitch_data # Optionally provided by ComputeExtraConfigPre - cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre - nova_nuage_data # Optionally provided by ComputeExtraConfigPre @@ -488,12 +471,19 @@ resources: - neutron_opencontrail_data # Optionally provided by ComputeExtraConfigPre merge_behavior: deeper datafiles: + service_configs: + mapped_data: {get_param: ServiceConfigSettings} compute_extraconfig: mapped_data: {get_param: NovaComputeExtraConfig} extraconfig: mapped_data: {get_param: ExtraConfig} common: raw_data: {get_file: hieradata/common.yaml} + network: + mapped_data: + net_ip_map: {get_attr: [NetIpMap, net_ip_map]} + net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]} + net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]} ceph: raw_data: {get_file: hieradata/ceph.yaml} compute: @@ -508,14 +498,23 @@ resources: nova::rabbit_port: {get_input: rabbit_client_port} nova::upgrade_level_compute: {get_input: upgrade_level_nova_compute} nova_compute_driver: {get_input: nova_compute_driver} + # TODO(emilien): move libvirt & migration parameters in libvirt profile + # used to deploy libvirt/kvm dependencies: + nova::compute::libvirt::services::libvirt_virt_type: {get_input: nova_compute_libvirt_type} + # used to configured nova.conf: nova::compute::libvirt::libvirt_virt_type: {get_input: nova_compute_libvirt_type} nova::compute::neutron::libvirt_vif_driver: {get_input: nova_compute_libvirt_vif_driver} nova_api_host: {get_input: nova_api_host} nova::compute::vncproxy_host: {get_input: nova_public_ip} 
nova::compute::rbd::ephemeral_storage: {get_input: nova_enable_rbd_backend} + # TUNNELLED mode provides a security enhancement when using shared storage but is not + # supported when not using shared storage. + # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12 + # In future versions of QEMU (2.6, mostly), Dan's native encryption + # work will obsolete the need to use TUNNELLED transport mode. + nova::migration::live_migration_tunnelled: {get_input: nova_enable_rbd_backend} rbd_persistent_storage: {get_input: cinder_enable_rbd_backend} nova_password: {get_input: nova_password} - nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address} nova::vncproxy::common::vncproxy_protocol: {get_input: nova_vncproxy_protocol} nova::vncproxy::common::vncproxy_host: {get_input: nova_vncproxy_host} @@ -527,12 +526,10 @@ resources: ceilometer::rabbit_password: {get_input: rabbit_password} ceilometer::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} ceilometer::rabbit_port: {get_input: rabbit_client_port} - ceilometer::metering_secret: {get_input: ceilometer_metering_secret} + ceilometer::telemetry_secret: {get_input: ceilometer_metering_secret} ceilometer::agent::auth::auth_password: {get_input: ceilometer_password} ceilometer::agent::auth::auth_url: {get_input: ceilometer_agent_auth_url} ceilometer_compute_agent: {get_input: ceilometer_compute_agent} - snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} - snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} nova::glance_api_servers: {get_input: glance_api_servers} neutron::debug: {get_input: debug} neutron::rabbit_password: {get_input: rabbit_password} @@ -543,7 +540,6 @@ resources: neutron_host: {get_input: neutron_host} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} - neutron::network_device_mtu: {get_input: neutron_tenant_mtu} neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types} neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types} neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions} @@ -558,18 +554,13 @@ resources: nova::network::neutron::neutron_password: {get_input: neutron_password} nova::network::neutron::neutron_url: {get_input: neutron_internal_url} nova::network::neutron::neutron_auth_url: {get_input: neutron_auth_url} - neutron_router_distributed: {get_input: neutron_router_distributed} - neutron_agent_mode: {get_input: neutron_agent_mode} neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} neutron::core_plugin: {get_input: neutron_core_plugin} neutron::service_plugins: {get_input: neutron_service_plugins} neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers} neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers} - neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device} keystone_public_api_virtual_ip: {get_input: keystone_vip} admin_password: {get_input: admin_password} - ntp::servers: {get_input: ntp_servers} - timezone::timezone: {get_input: timezone} tripleo::packages::enable_install: {get_input: enable_package_install} tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} @@ -610,8 +601,6 @@ resources: ceilometer_password: {get_param: CeilometerPassword} ceilometer_compute_agent: {get_param: CeilometerComputeAgent} ceilometer_agent_auth_url: {get_param: [EndpointMap, 
KeystoneInternal, uri]} - snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} - snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} neutron_flat_networks: str_replace: @@ -650,14 +639,11 @@ resources: template: MAPPINGS params: MAPPINGS: {get_param: NeutronBridgeMappings} - neutron_tenant_mtu: {get_param: NeutronTenantMtu} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} neutron_physical_bridge: {get_param: NeutronPhysicalBridge} neutron_public_interface: {get_param: NeutronPublicInterface} neutron_password: {get_param: NeutronPassword} - neutron_agent_mode: {get_param: NeutronAgentMode} - neutron_router_distributed: {get_param: NeutronDVR} neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} neutron_core_plugin: {get_param: NeutronCorePlugin} neutron_service_plugins: @@ -680,7 +666,6 @@ resources: template: AGENT_EXTENSIONS params: AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} - neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]} neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]} keystone_vip: {get_param: KeystonePublicApiVirtualIP} @@ -689,8 +674,6 @@ resources: rabbit_password: {get_param: RabbitPassword} rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} rabbit_client_port: {get_param: RabbitClientPort} - ntp_servers: {get_param: NtpServer} - timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} @@ -811,12 +794,3 @@ outputs: description: Heat resource handle for the Nova compute server value: {get_resource: NovaCompute} - config_identifier: - description: identifier which changes if the node configuration may need re-applying - value: - list_join: - - ',' - - - {get_attr: [NovaComputeDeployment, deploy_stdout]} - - {get_attr: [NodeTLSCAData, deploy_stdout]} - - {get_attr: [ComputeExtraConfigPre, deploy_stdout]} - - {get_param: UpdateIdentifier} diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml index 80b08a06..4af6cb46 100644 --- a/puppet/controller-post.yaml +++ b/puppet/controller-post.yaml @@ -10,13 +10,12 @@ parameters: type: boolean servers: type: json - NodeConfigIdentifiers: - type: json - description: Value which changes if the node configuration may need to be re-applied - StepConfig: + RoleData: + type: json + default: {} + DeployIdentifier: type: string - description: Config manifests that will be used to step through the deployment. - default: '' + description: Value which changes if the node configuration may need to be re-applied resources: @@ -34,12 +33,12 @@ resources: properties: servers: {get_param: servers} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ControllerPuppetConfig: type: OS::TripleO::ControllerConfig properties: - StepConfig: {get_param: StepConfig} + StepConfig: {get_param: [RoleData, step_config]} # Step through a series of Puppet runs using the same manifest. 
# NOTE: To enable stepping through the deployments via heat hooks, @@ -54,8 +53,7 @@ resources: config: {get_resource: ControllerPuppetConfig} input_values: step: 1 - update_identifier: {get_param: NodeConfigIdentifiers} - actions: ['CREATE'] # no need for two passes on an UPDATE + update_identifier: {get_param: DeployIdentifier} ControllerServicesBaseDeployment_Step2: type: OS::Heat::StructuredDeployments @@ -66,8 +64,7 @@ resources: config: {get_resource: ControllerPuppetConfig} input_values: step: 2 - update_identifier: {get_param: NodeConfigIdentifiers} - actions: ['CREATE'] # no need for two passes on an UPDATE + update_identifier: {get_param: DeployIdentifier} ControllerOvercloudServicesDeployment_Step3: type: OS::Heat::StructuredDeployments @@ -78,7 +75,7 @@ resources: config: {get_resource: ControllerPuppetConfig} input_values: step: 3 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ControllerOvercloudServicesDeployment_Step4: type: OS::Heat::StructuredDeployments @@ -89,7 +86,7 @@ resources: config: {get_resource: ControllerPuppetConfig} input_values: step: 4 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ControllerOvercloudServicesDeployment_Step5: type: OS::Heat::StructuredDeployments @@ -100,37 +97,15 @@ resources: config: {get_resource: ControllerPuppetConfig} input_values: step: 5 - update_identifier: {get_param: NodeConfigIdentifiers} - - ControllerOvercloudServicesDeployment_Step6: - type: OS::Heat::StructuredDeployments - depends_on: ControllerOvercloudServicesDeployment_Step5 - properties: - name: ControllerOvercloudServicesDeployment_Step6 - servers: {get_param: servers} - config: {get_resource: ControllerPuppetConfig} - input_values: - step: 6 - update_identifier: {get_param: NodeConfigIdentifiers} - - ControllerOvercloudServicesDeployment_Step7: - type: OS::Heat::StructuredDeployments - depends_on: ControllerOvercloudServicesDeployment_Step6 - properties: - name: ControllerOvercloudServicesDeployment_Step7 - servers: {get_param: servers} - config: {get_resource: ControllerPuppetConfig} - input_values: - step: 7 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ControllerPostPuppet: type: OS::TripleO::Tasks::ControllerPostPuppet - depends_on: ControllerOvercloudServicesDeployment_Step7 + depends_on: ControllerOvercloudServicesDeployment_Step5 properties: servers: {get_param: servers} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. diff --git a/puppet/controller.yaml b/puppet/controller.yaml index bf196d24..65238eb5 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -8,95 +8,10 @@ parameters: description: The password for the keystone admin account, used for monitoring, querying neutron etc. type: string hidden: true - AodhApiVirtualIP: - type: string - default: '' AodhPassword: description: The password for the aodh services. type: string hidden: true - CeilometerApiVirtualIP: - type: string - default: '' - CeilometerBackend: - default: 'mongodb' - description: The ceilometer backend type. - type: string - CeilometerMeteringSecret: - description: Secret shared by the ceilometer services. 
- type: string - hidden: true - CeilometerPassword: - description: The password for the ceilometer service and db account. - type: string - hidden: true - CeilometerStoreEvents: - default: false - description: Whether to store events in ceilometer. - type: boolean - CeilometerMeterDispatcher: - default: 'database' - description: Dispatcher to process meter data - type: string - constraints: - - allowed_values: ['gnocchi', 'database'] - CinderApiVirtualIP: - type: string - default: '' - CeilometerWorkers: - default: 0 - description: Number of workers for Ceilometer service. - type: number - CinderEnableDBPurge: - default: true - description: | - Whether to create cron job for purging soft deleted rows in Cinder database. - type: boolean - CinderEnableNfsBackend: - default: false - description: Whether to enable or not the NFS backend for Cinder - type: boolean - CinderEnableIscsiBackend: - default: true - description: Whether to enable or not the Iscsi backend for Cinder - type: boolean - CinderEnableRbdBackend: - default: false - description: Whether to enable or not the Rbd backend for Cinder - type: boolean - CinderISCSIHelper: - default: lioadm - description: The iSCSI helper to use with cinder. - type: string - CinderLVMLoopDeviceSize: - default: 10280 - description: The size of the loopback file used by the cinder LVM driver. - type: number - CinderNfsMountOptions: - default: '' - description: > - Mount options for NFS mounts used by Cinder NFS backend. Effective - when CinderEnableNfsBackend is true. - type: string - CinderNfsServers: - default: '' - description: > - NFS servers used by Cinder NFS backend. Effective when - CinderEnableNfsBackend is true. - type: comma_delimited_list - CinderPassword: - description: The password for the cinder service and db account, used by cinder-api. - type: string - hidden: true - CinderBackendConfig: - default: {} - description: Contains parameters to configure Cinder backends. Typically - set via parameter_defaults in the resource registry. - type: json - CinderWorkers: - default: 0 - description: Number of workers for Cinder service. - type: number controllerExtraConfig: default: {} description: | @@ -140,14 +55,6 @@ parameters: default: true description: Whether to deploy a LoadBalancer on the Controller type: boolean - EnableCephStorage: - default: false - description: Whether to deploy Ceph Storage (OSD) on the Controller - type: boolean - EnableSwiftStorage: - default: true - description: Whether to enable Swift Storage on the Controller - type: boolean ExtraConfig: default: {} description: | @@ -202,9 +109,6 @@ parameters: default: 'mysql' description: The short name of the Gnocchi indexer backend to use. type: string - GnocchiApiVirtualIP: - type: string - default: '' GnocchiPassword: description: The password for the gnocchi service and db account. type: string @@ -220,14 +124,6 @@ parameters: default: /dev/log description: Syslog address where HAproxy will send its log type: string - HeatPassword: - description: The password for the Heat service and db account, used by the Heat services. - type: string - hidden: true - HeatStackDomainAdminPassword: - description: Password for heat_stack_domain_admin user. - type: string - hidden: true HeatAuthEncryptionKey: description: Auth encryption key for heat-engine type: string @@ -236,15 +132,6 @@ parameters: default: '*' description: A list of IP/Hostname allowed to connect to horizon type: comma_delimited_list - HeatWorkers: - default: 0 - description: Number of workers for Heat service. 
- type: number - HeatEnableDBPurge: - type: boolean - default: true - description: | - Whether to create cron job for purging soft deleted rows in the Heat database. HorizonSecret: description: Secret key for Django type: string @@ -284,14 +171,6 @@ parameters: default: false description: Whether IPtables rules should be purged before setting up the new ones. type: boolean - SaharaApiVirtualIP: - type: string - default: '' - SaharaPassword: - default: unset - description: The password for the sahara service account, used by sahara-api. - type: string - hidden: true MysqlClusterUniquePart: description: A unique identifier of the MySQL cluster the controller is in. type: string @@ -310,121 +189,17 @@ parameters: description: Configures MySQL max_connections config setting type: number default: 4096 + MysqlClustercheckPassword: + type: string + hidden: true MysqlRootPassword: type: string hidden: true default: '' # Has to be here because of the ignored empty value bug - NeutronExternalNetworkBridge: - description: Name of bridge used for external network traffic. - type: string - default: 'br-ex' - NeutronBridgeMappings: - description: > - The OVS logical->physical bridge mappings to use. See the Neutron - documentation for details. Defaults to mapping br-ex - the external - bridge on hosts - to a physical name 'datacentre' which can be used - to create provider networks (and we use this for the default floating - network) - if changing this either use different post-install network - scripts or be sure to keep 'datacentre' as a mapping network name. - type: comma_delimited_list - default: "datacentre:br-ex" - NeutronDnsmasqOptions: - default: 'dhcp-option-force=26,1400' - description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the gre tunnel overhead. - type: string - NeutronEnableDHCPAgent: - description: Knob to enable/disable DHCP Agent - type: boolean - default: true - NeutronEnableL3Agent: - description: Knob to enable/disable L3 agent - type: boolean - default: true - NeutronEnableMetadataAgent: - description: Knob to enable/disable Metadata agent - type: boolean - default: true - NeutronEnableOVSAgent: - description: Knob to enable/disable OVS Agent - type: boolean - default: true - NeutronAgentMode: - default: 'dvr_snat' - description: Agent mode for the neutron-l3-agent on the controller hosts - type: string - NeutronL3HA: - default: 'False' - description: Whether to enable l3-agent HA - type: string - NeutronDhcpAgentsPerNetwork: - type: number - default: 3 - description: The number of neutron dhcp agents to schedule per network - NeutronDVR: - default: 'False' - description: Whether to configure Neutron Distributed Virtual Routers - type: string NeutronMetadataProxySharedSecret: description: Shared secret to prevent spoofing type: string hidden: true - NeutronCorePlugin: - default: 'ml2' - description: | - The core plugin for Neutron. The value should be the entrypoint to be loaded - from neutron.core_plugins namespace. - type: string - NeutronServicePlugins: - default: "router,qos" - description: | - Comma-separated list of service plugin entrypoints to be loaded from the - neutron.service_plugins namespace. - type: comma_delimited_list - NeutronTypeDrivers: - default: "vxlan,vlan,flat,gre" - description: | - Comma-separated list of network type driver entrypoints to be loaded. 
- type: comma_delimited_list - NeutronMechanismDrivers: - default: 'openvswitch' - description: | - The mechanism drivers for the Neutron tenant network. - type: comma_delimited_list - NeutronAllowL3AgentFailover: - default: 'True' - description: Allow automatic l3-agent failover - type: string - NeutronEnableIsolatedMetadata: - default: 'False' - description: If True, DHCP provide metadata route to VM. - type: string - NeutronEnableTunnelling: - type: string - default: "True" - NeutronEnableL2Pop: - type: string - description: > - Enable/disable the L2 population feature in the Neutron agents. - default: "False" - NeutronFlatNetworks: - type: comma_delimited_list - default: 'datacentre' - description: If set, flat networks to configure in neutron plugins. - NeutronL3HA: - default: 'False' - description: Whether to enable l3-agent HA - type: string - NeutronNetworkType: - default: 'vxlan' - description: The tenant network type for Neutron. - type: comma_delimited_list - NeutronNetworkVLANRanges: - default: 'datacentre:1:1000' - description: > - The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the - Neutron documentation for permitted values. Defaults to permitting any - VLAN on the 'datacentre' physical network (See NeutronBridgeMappings). - type: comma_delimited_list NeutronPassword: description: The password for the neutron service and db account, used by neutron agents. type: string @@ -433,71 +208,6 @@ parameters: default: nic1 description: What interface to bridge onto br-ex for network nodes. type: string - NeutronPublicInterfaceTag: - default: '' - description: > - VLAN tag for creating a public VLAN. The tag will be used to - create an access port on the exterior bridge for each control plane node, - and that port will be given the IP address returned by neutron from the - public network. Set CONTROLEXTRA=overcloud-vlan-port.yaml when compiling - overcloud.yaml to include the deployment of VLAN ports to the control - plane. - type: string - NeutronPublicInterfaceDefaultRoute: - default: '' - description: A custom default route for the NeutronPublicInterface. - type: string - NeutronPublicInterfaceIP: - default: '' - description: A custom IP address to put onto the NeutronPublicInterface. - type: string - NeutronPublicInterfaceRawDevice: - default: '' - description: If set, the public interface is a vlan with this device as the raw device. - type: string - NeutronTenantMtu: - description: > - The default MTU for tenant networks. For VXLAN/GRE tunneling, this should - be at least 50 bytes smaller than the MTU on the physical network. This - value will be used to set the MTU on the virtual Ethernet device. - This number is related to the value of NeutronDnsmasqOptions, since that - will determine the MTU that is assigned to the VM host through DHCP. - default: 1400 - type: number - NeutronTunnelTypes: - default: 'vxlan' - description: | - The tunnel types for the Neutron tenant network. 
- type: comma_delimited_list - NeutronTunnelIdRanges: - description: | - Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges - of GRE tunnel IDs that are available for tenant network allocation - default: ["1:4094", ] - type: comma_delimited_list - NeutronVniRanges: - description: | - Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges - of VXLAN VNI IDs that are available for tenant network allocation - default: ["1:4094", ] - type: comma_delimited_list - NeutronPluginExtensions: - default: "qos,port_security" - description: | - Comma-separated list of extensions enabled for the Neutron plugin. - type: comma_delimited_list - NeutronAgentExtensions: - default: "qos" - description: | - Comma-separated list of extensions enabled for the Neutron agents. - type: comma_delimited_list - NovaApiVirtualIP: - type: string - default: '' - NeutronWorkers: - default: 0 - description: Number of workers for Neutron service. - type: number NovaEnableDBPurge: default: true description: | @@ -511,22 +221,6 @@ parameters: description: The password for the nova service and db account, used by nova-api. type: string hidden: true - NovaWorkers: - default: 0 - description: Number of workers for Nova service. - type: number - MongoDbNoJournal: - default: false - description: Should MongoDb journaling be disabled - type: boolean - MongoDbIPv6: - default: false - description: Enable IPv6 if Mongo DB VIP is IPv6 - type: boolean - NtpServer: - default: '' - description: Comma-separated list of ntp servers - type: comma_delimited_list PcsdPassword: type: string description: The password for the 'pcsd' user. @@ -537,9 +231,6 @@ parameters: Specifies the interface where the public-facing virtual ip will be assigned. This should be int_public when a VLAN is being used. type: string - PublicVirtualIP: - type: string - default: '' # Has to be here because of the ignored empty value bug RabbitCookie: type: string default: '' # Has to be here because of the ignored empty value bug @@ -562,17 +253,9 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number - RabbitFDLimit: - default: 16384 - description: Configures RabbitMQ FD limit - type: string - RabbitIPv6: - default: false - description: Enable IPv6 in RabbitMQ - type: boolean RedisPassword: + description: The password for Redis type: string - description: The password to access the Redis service hidden: true RedisVirtualIP: type: string @@ -581,23 +264,11 @@ parameters: type: string default: '' # Has to be here because of the ignored empty value bug description: An IP address which is wrapped in brackets in case of IPv6 - SnmpdReadonlyUserName: - default: ro_snmp_user - description: The user name for SNMPd with readonly rights running on all Overcloud nodes - type: string - SnmpdReadonlyUserPassword: - description: The user password for SNMPd with readonly rights running on all Overcloud nodes - type: string - hidden: true SwiftHashSuffix: description: A random string to be used as a salt when hashing to determine mappings in the ring. hidden: true type: string - SwiftMountCheck: - default: 'false' - description: Value of mount_check in Swift account/container/object -server.conf - type: boolean SwiftMinPartHours: type: number default: 1 @@ -610,48 +281,17 @@ parameters: default: true description: Whether to manage Swift rings or not type: boolean - SwiftPassword: - description: The password for the swift service account, used by the swift proxy - services. 
- hidden: true - type: string - SwiftProxyVirtualIP: - type: string - default: '' SwiftReplicas: type: number default: 3 description: How many replicas to use in the swift rings. - SwiftWorkers: - default: 0 - description: Number of workers for Swift service. - type: number - TimeZone: - default: 'UTC' - description: The timezone to be set on controller nodes. - type: string UpgradeLevelNovaCompute: type: string description: Nova Compute upgrade level default: '' - VirtualIP: # DEPRECATED: use per service settings instead - type: string - default: '' # Has to be here because of the ignored empty value bug - HeatApiVirtualIP: - type: string - default: '' - HeatApiVirtualIPUri: - type: string - default: '' MysqlVirtualIP: type: string default: '' - MysqlVirtualIPUri: - type: string - default: '' - NeutronApiVirtualIP: - type: string - default: '' EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -713,6 +353,10 @@ parameters: ServiceConfigSettings: type: json default: {} + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 parameter_groups: - label: deprecated @@ -724,6 +368,9 @@ resources: Controller: type: OS::Nova::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} image_update_policy: {get_param: ImageUpdatePolicy} @@ -808,28 +455,23 @@ resources: properties: ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} - ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} - - NetIpSubnetMap: - type: OS::TripleO::Network::Ports::NetIpSubnetMap - properties: - ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]} - ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} - InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} - StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} - StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} - TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} + ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkConfig: type: OS::TripleO::Controller::Net::SoftwareConfig @@ -878,191 +520,48 @@ resources: server: {get_resource: Controller} input_values: bootstack_nodeid: {get_attr: [Controller, name]} - ceilometer_workers: {get_param: CeilometerWorkers} - cinder_workers: {get_param: CinderWorkers} - heat_workers: {get_param: HeatWorkers} - nova_workers: {get_param: NovaWorkers} - neutron_workers: {get_param: NeutronWorkers} - 
swift_workers: {get_param: SwiftWorkers} - neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} - neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} - neutron_enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata} haproxy_log_address: {get_param: HAProxySyslogAddress} haproxy_stats_password: {get_param: HAProxyStatsPassword} haproxy_stats_user: {get_param: HAProxyStatsUser} - heat.watch_server_url: - list_join: - - '' - - - 'http://' - - {get_param: HeatApiVirtualIPUri} - - ':8003' - heat.metadata_server_url: - list_join: - - '' - - - 'http://' - - {get_param: HeatApiVirtualIPUri} - - ':8000' - heat.waitcondition_server_url: - list_join: - - '' - - - 'http://' - - {get_param: HeatApiVirtualIPUri} - - ':8000/v1/waitcondition' heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey} - heat_enable_db_purge: {get_param: HeatEnableDBPurge} horizon_allowed_hosts: {get_param: HorizonAllowedHosts} horizon_secret: {get_param: HorizonSecret} admin_password: {get_param: AdminPassword} - neutron_public_interface_ip: {get_param: NeutronPublicInterfaceIP} debug: {get_param: Debug} - cinder_enable_db_purge: {get_param: CinderEnableDBPurge} - cinder_enable_nfs_backend: {get_param: CinderEnableNfsBackend} - cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend} - cinder_nfs_mount_options: {get_param: CinderNfsMountOptions} - cinder_nfs_servers: - str_replace: - template: SERVERS - params: - SERVERS: {get_param: CinderNfsServers} - cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize} - cinder_password: {get_param: CinderPassword} - cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend} - cinder_iscsi_helper: {get_param: CinderISCSIHelper} - cinder_backend_config: {get_param: CinderBackendConfig} - cinder_dsn: - list_join: - - '' - - - 'mysql+pymysql://cinder:' - - {get_param: CinderPassword} - - '@' - - {get_param: MysqlVirtualIPUri} - - '/cinder' - heat_password: {get_param: HeatPassword} - heat_stack_domain_admin_password: {get_param: HeatStackDomainAdminPassword} - heat_dsn: - list_join: - - '' - - - 'mysql+pymysql://heat:' - - {get_param: HeatPassword} - - '@' - - {get_param: MysqlVirtualIPUri} - - '/heat' + cinder_public_url: {get_param: [EndpointMap, CinderPublic, uri]} + cinder_internal_url: {get_param: [EndpointMap, CinderInternal, uri]} + cinder_admin_url: {get_param: [EndpointMap, CinderAdmin, uri]} + cinder_public_url_v2: {get_param: [EndpointMap, CinderV2Public, uri]} + cinder_internal_url_v2: {get_param: [EndpointMap, CinderV2Internal, uri]} + cinder_admin_url_v2: {get_param: [EndpointMap, CinderV2Admin, uri]} keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } keystone_ec2_uri: { get_param: [EndpointMap, KeystoneEC2, uri] } enable_fencing: {get_param: EnableFencing} enable_galera: {get_param: EnableGalera} enable_load_balancer: {get_param: EnableLoadBalancer} - enable_ceph_storage: {get_param: EnableCephStorage} - enable_swift_storage: {get_param: EnableSwiftStorage} manage_firewall: {get_param: ManageFirewall} purge_firewall_rules: {get_param: PurgeFirewallRules} mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize} mysql_max_connections: {get_param: MysqlMaxConnections} mysql_root_password: {get_param: MysqlRootPassword} + mysql_clustercheck_password: {get_param: MysqlClustercheckPassword} mysql_cluster_name: str_replace: template: tripleo-CLUSTER params: CLUSTER: {get_param: MysqlClusterUniquePart} - 
neutron_flat_networks: - str_replace: - template: NETWORKS - params: - NETWORKS: {get_param: NeutronFlatNetworks} neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} - neutron_agent_mode: {get_param: NeutronAgentMode} - neutron_router_distributed: {get_param: NeutronDVR} - neutron_core_plugin: {get_param: NeutronCorePlugin} - neutron_service_plugins: - str_replace: - template: PLUGINS - params: - PLUGINS: {get_param: NeutronServicePlugins} - neutron_type_drivers: - str_replace: - template: DRIVERS - params: - DRIVERS: {get_param: NeutronTypeDrivers} - neutron_enable_dhcp_agent: {get_param: NeutronEnableDHCPAgent} - neutron_enable_l3_agent: {get_param: NeutronEnableL3Agent} - neutron_enable_metadata_agent: {get_param: NeutronEnableMetadataAgent} - neutron_enable_ovs_agent: {get_param: NeutronEnableOVSAgent} - neutron_mechanism_drivers: - str_replace: - template: MECHANISMS - params: - MECHANISMS: {get_param: NeutronMechanismDrivers} - neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} - neutron_l3_ha: {get_param: NeutronL3HA} - neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} - neutron_network_vlan_ranges: - str_replace: - template: RANGES - params: - RANGES: {get_param: NeutronNetworkVLANRanges} - neutron_bridge_mappings: - str_replace: - template: MAPPINGS - params: - MAPPINGS: {get_param: NeutronBridgeMappings} - neutron_external_network_bridge: {get_param: NeutronExternalNetworkBridge} - neutron_public_interface: {get_param: NeutronPublicInterface} - neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} - neutron_public_interface_default_route: {get_param: NeutronPublicInterfaceDefaultRoute} - neutron_public_interface_tag: {get_param: NeutronPublicInterfaceTag} - neutron_tunnel_id_ranges: - str_replace: - template: RANGES - params: - RANGES: {get_param: NeutronTunnelIdRanges} - neutron_vni_ranges: - str_replace: - template: RANGES - params: - RANGES: {get_param: NeutronVniRanges} - neutron_tenant_network_types: - str_replace: - template: TYPES - params: - TYPES: {get_param: NeutronNetworkType} - neutron_tunnel_types: - str_replace: - template: TYPES - params: - TYPES: {get_param: NeutronTunnelTypes} - neutron_plugin_extensions: - str_replace: - template: PLUGIN_EXTENSIONS - params: - PLUGIN_EXTENSIONS: {get_param: NeutronPluginExtensions} - neutron_agent_extensions: - str_replace: - template: AGENT_EXTENSIONS - params: - AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} neutron_password: {get_param: NeutronPassword} - neutron_tenant_mtu: {get_param: NeutronTenantMtu} - neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions} - neutron_dsn: - list_join: - - '' - - - 'mysql+pymysql://neutron:' - - {get_param: NeutronPassword} - - '@' - - {get_param: MysqlVirtualIPUri} - - '/ovs_neutron?charset=utf8' neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] } neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] } neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] } neutron_auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] } nova_internal_url: { get_param: [ EndpointMap, NovaInternal, uri ] } - ceilometer_backend: {get_param: CeilometerBackend} - ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} - ceilometer_password: {get_param: CeilometerPassword} - ceilometer_store_events: {get_param: CeilometerStoreEvents} aodh_password: {get_param: AodhPassword} - ceilometer_meter_dispatcher: {get_param: 
CeilometerMeterDispatcher} + aodh_internal_url: { get_param: [ EndpointMap, AodhInternal, uri ] } + aodh_public_url: { get_param: [ EndpointMap, AodhPublic, uri ] } + aodh_admin_url: { get_param: [ EndpointMap, AodhAdmin, uri ] } gnocchi_password: {get_param: GnocchiPassword} gnocchi_backend: {get_param: GnocchiBackend} gnocchi_indexer_backend: {get_param: GnocchiIndexerBackend} @@ -1074,25 +573,27 @@ resources: - '@' - {get_param: RedisVirtualIPUri} - ':6379/' - ceilometer_dsn: - list_join: - - '' - - - 'mysql+pymysql://ceilometer:' - - {get_param: CeilometerPassword} - - '@' - - {get_param: MysqlVirtualIPUri} - - '/ceilometer' gnocchi_dsn: list_join: - '' - - - 'mysql+pymysql://gnocchi:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://gnocchi:' - {get_param: GnocchiPassword} - '@' - - {get_param: MysqlVirtualIPUri} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/gnocchi' + aodh_dsn: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://aodh:' + - {get_param: AodhPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/aodh' gnocchi_internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]} - snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} - snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} + gnocchi_public_url: { get_param: [ EndpointMap, GnocchiPublic, uri ] } + gnocchi_admin_url: { get_param: [ EndpointMap, GnocchiAdmin, uri ] } nova_enable_db_purge: {get_param: NovaEnableDBPurge} nova_ipv6: {get_param: NovaIPv6} corosync_ipv6: {get_param: CorosyncIPv6} @@ -1101,21 +602,26 @@ resources: nova_dsn: list_join: - '' - - - 'mysql+pymysql://nova:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://nova:' - {get_param: NovaPassword} - '@' - - {get_param: MysqlVirtualIPUri} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/nova' nova_api_dsn: list_join: - '' - - - 'mysql+pymysql://nova_api:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://nova_api:' - {get_param: NovaPassword} - '@' - - {get_param: MysqlVirtualIPUri} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/nova_api' upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute} instance_name_template: {get_param: InstanceNameTemplate} + nova_public_url: {get_param: [EndpointMap, NovaPublic, uri]} + nova_internal_url: {get_param: [EndpointMap, NovaInternal, uri]} + nova_admin_url: {get_param: [EndpointMap, NovaAdmin, uri]} fencing_config: {get_param: FencingConfig} pcsd_password: {get_param: PcsdPassword} rabbit_username: {get_param: RabbitUserName} @@ -1123,32 +629,15 @@ resources: rabbit_cookie: {get_param: RabbitCookie} rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} rabbit_client_port: {get_param: RabbitClientPort} - rabbit_ipv6: {get_param: RabbitIPv6} - rabbit_fd_limit: {get_param: RabbitFDLimit} - mongodb_no_journal: {get_param: MongoDbNoJournal} - mongodb_ipv6: {get_param: MongoDbIPv6} - ntp_servers: {get_param: NtpServer} - timezone: {get_param: TimeZone} control_virtual_interface: {get_param: ControlVirtualInterface} public_virtual_interface: {get_param: PublicVirtualInterface} swift_hash_suffix: {get_param: SwiftHashSuffix} - swift_password: {get_param: SwiftPassword} swift_part_power: {get_param: SwiftPartPower} swift_ring_build: {get_param: SwiftRingBuild} swift_replicas: {get_param: SwiftReplicas} swift_min_part_hours: {get_param: SwiftMinPartHours} - swift_mount_check: {get_param: SwiftMountCheck} enable_package_install: {get_param: EnablePackageInstall} 
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} - sahara_password: {get_param: SaharaPassword} - sahara_dsn: - list_join: - - '' - - - 'mysql://sahara:' - - {get_param: SaharaPassword} - - '@' - - {get_param: MysqlVirtualIPUri} - - '/sahara' swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} cinder_iscsi_network: @@ -1177,18 +666,18 @@ resources: str_replace: template: "['SUBNET']" params: - SUBNET: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, HorizonNetwork]}]} + SUBNET: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, HorizonNetwork]}]} rabbitmq_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]} redis_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RedisNetwork]}]} - redis_password: {get_param: RedisPassword} redis_vip: {get_param: RedisVirtualIP} sahara_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]} memcached_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]} mysql_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} mysql_virtual_ip: {get_param: MysqlVirtualIP} - ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} - ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} + ceph_cluster_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} + ceph_public_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} ceph_public_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} + ironic_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, IronicApiNetwork]}]} # Map heat metadata into hiera datafiles ControllerConfig: @@ -1214,16 +703,14 @@ resources: - vip_data # provided by vip-config - '"%{::osfamily}"' - common + - network - cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre - cinder_eqlx_data # Optionally provided by ControllerExtraConfigPre - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre - - neutron_nuage_data # Optionally provided by ControllerExtraConfigPre - midonet_data #Optionally provided by AllNodesExtraConfig - - neutron_opencontrail_data # Optionally provided by ControllerExtraConfigPre - - neutron_plumgrid_data # Optionally provided by ControllerExtraConfigPre merge_behavior: deeper datafiles: service_configs: @@ -1237,6 +724,11 @@ resources: mapped_data: {get_param: ExtraConfig} common: raw_data: {get_file: hieradata/common.yaml} + network: + mapped_data: + net_ip_map: {get_attr: [NetIpMap, net_ip_map]} + net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]} + net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]} ceph: raw_data: {get_file: hieradata/ceph.yaml} mapped_data: @@ -1260,183 +752,75 @@ resources: tripleo::fencing::config: {get_input: fencing_config} # Swift + # FIXME: need to move proxy_local_net_ip into 
swift-proxy.yaml swift::proxy::proxy_local_net_ip: {get_input: swift_proxy_network} - swift::proxy::authtoken::auth_uri: {get_input: keystone_auth_uri} - swift::proxy::authtoken::identity_uri: {get_input: keystone_identity_uri} swift::storage::all::storage_local_net_ip: {get_input: swift_management_network} - swift::swift_hash_suffix: {get_input: swift_hash_suffix} - swift::proxy::authtoken::admin_password: {get_input: swift_password} - swift::proxy::workers: {get_input: swift_workers} + swift::swift_hash_path_suffix: {get_input: swift_hash_suffix} tripleo::ringbuilder::build_ring: { get_input: swift_ring_build } tripleo::ringbuilder::part_power: {get_input: swift_part_power} tripleo::ringbuilder::replicas: {get_input: swift_replicas} tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours} - swift_mount_check: {get_input: swift_mount_check} # Cinder - cinder_enable_db_purge: {get_input: cinder_enable_db_purge} - cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend} - cinder_enable_rbd_backend: {get_input: cinder_enable_rbd_backend} - cinder_nfs_mount_options: {get_input: cinder_nfs_mount_options} - cinder_nfs_servers: {get_input: cinder_nfs_servers} - cinder_lvm_loop_device_size: {get_input: cinder_lvm_loop_device_size} - cinder_iscsi_helper: {get_input: cinder_iscsi_helper} - cinder_iscsi_ip_address: {get_input: cinder_iscsi_network} - cinder::database_connection: {get_input: cinder_dsn} - cinder::api::keystone_password: {get_input: cinder_password} - cinder::api::auth_uri: {get_input: keystone_auth_uri} - cinder::api::identity_uri: {get_input: keystone_identity_uri} + tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_address: {get_input: cinder_iscsi_network} cinder::api::bind_host: {get_input: cinder_api_network} - cinder::rabbit_userid: {get_input: rabbit_username} - cinder::rabbit_password: {get_input: rabbit_password} - cinder::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - cinder::rabbit_port: {get_input: rabbit_client_port} - cinder::debug: {get_input: debug} - cinder_enable_iscsi_backend: {get_input: cinder_enable_iscsi_backend} - cinder::glance::glance_api_servers: {get_input: glance_api_servers} - cinder_backend_config: {get_input: CinderBackendConfig} - cinder::db::mysql::password: {get_input: cinder_password} + cinder::keystone::auth::public_url: {get_input: cinder_public_url } + cinder::keystone::auth::internal_url: {get_input: cinder_internal_url } + cinder::keystone::auth::admin_url: {get_input: cinder_admin_url } + cinder::keystone::auth::public_url_v2: {get_input: cinder_public_url_v2 } + cinder::keystone::auth::internal_url_v2: {get_input: cinder_internal_url_v2 } + cinder::keystone::auth::admin_url_v2: {get_input: cinder_admin_url_v2 } + cinder::keystone::auth::password: {get_input: cinder_password } + cinder::keystone::auth::region: {get_input: keystone_region} # Glance glance::api::bind_host: {get_input: glance_api_network} glance::registry::bind_host: {get_input: glance_registry_network} + glance::keystone::auth::region: {get_input: keystone_region} + # Heat - heat_stack_domain_admin_password: {get_input: heat_stack_domain_admin_password} - heat::engine::heat_watch_server_url: {get_input: heat.watch_server_url} - heat::engine::heat_metadata_server_url: {get_input: heat.metadata_server_url} - heat::engine::heat_waitcondition_server_url: {get_input: heat.waitcondition_server_url} - heat::engine::auth_encryption_key: {get_input: heat_auth_encryption_key} - heat::rabbit_userid: {get_input: rabbit_username} - 
heat::rabbit_password: {get_input: rabbit_password} - heat::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - heat::rabbit_port: {get_input: rabbit_client_port} - heat::auth_uri: {get_input: keystone_auth_uri} - heat::keystone_ec2_uri: {get_input: keystone_ec2_uri} - heat::identity_uri: {get_input: keystone_identity_uri} - heat::keystone_password: {get_input: heat_password} heat::api::bind_host: {get_input: heat_api_network} - heat::api::workers: {get_input: heat_workers} heat::api_cloudwatch::bind_host: {get_input: heat_api_network} - heat::api_cloudwatch::workers: {get_input: heat_workers} heat::api_cfn::bind_host: {get_input: heat_api_network} - heat::api_cfn::workers: {get_input: heat_workers} - heat::engine::num_engine_workers: {get_input: heat_workers} - heat::database_connection: {get_input: heat_dsn} - heat::debug: {get_input: debug} - heat::db::mysql::password: {get_input: heat_password} - heat_enable_db_purge: {get_input: heat_enable_db_purge} - heat::keystone::domain::domain_password: {get_input: heat_stack_domain_admin_password} + heat::engine::auth_encryption_key: {get_input: heat_auth_encryption_key} + # Keystone keystone::admin_bind_host: {get_input: keystone_admin_api_network} keystone::public_bind_host: {get_input: keystone_public_api_network} keystone::wsgi::apache::bind_host: {get_input: keystone_public_api_network} keystone::wsgi::apache::admin_bind_host: {get_input: keystone_admin_api_network} + # MongoDB mongodb::server::bind_ip: {get_input: mongo_db_network} - mongodb::server::nojournal: {get_input: mongodb_no_journal} - mongodb::server::ipv6: {get_input: mongodb_ipv6} + # MySQL admin_password: {get_input: admin_password} enable_galera: {get_input: enable_galera} - enable_ceph_storage: {get_input: enable_ceph_storage} - enable_swift_storage: {get_input: enable_swift_storage} mysql_innodb_buffer_pool_size: {get_input: mysql_innodb_buffer_pool_size} mysql_max_connections: {get_input: mysql_max_connections} mysql::server::root_password: {get_input: mysql_root_password} + mysql_clustercheck_password: {get_input: mysql_clustercheck_password} mysql_cluster_name: {get_input: mysql_cluster_name} mysql_bind_host: {get_input: mysql_network} mysql_virtual_ip: {get_input: mysql_virtual_ip} # Neutron neutron::bind_host: {get_input: neutron_api_network} - neutron::rabbit_password: {get_input: rabbit_password} - neutron::rabbit_user: {get_input: rabbit_username} - neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - neutron::rabbit_port: {get_input: rabbit_client_port} - neutron::debug: {get_input: debug} - neutron::server::auth_uri: {get_input: keystone_auth_uri} - neutron::server::identity_uri: {get_input: keystone_identity_uri} - neutron::server::database_connection: {get_input: neutron_dsn} - neutron::server::api_workers: {get_input: neutron_workers} - neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge} - neutron::network_device_mtu: {get_input: neutron_tenant_mtu} - neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} - neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} - neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} - neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks} - neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret} neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network} - 
neutron::agents::metadata::metadata_workers: {get_input: neutron_workers} - neutron_agent_mode: {get_input: neutron_agent_mode} - neutron_router_distributed: {get_input: neutron_router_distributed} - neutron::core_plugin: {get_input: neutron_core_plugin} - neutron::service_plugins: {get_input: neutron_service_plugins} - neutron::enable_dhcp_agent: {get_input: neutron_enable_dhcp_agent} - neutron::enable_l3_agent: {get_input: neutron_enable_l3_agent} - neutron::enable_metadata_agent: {get_input: neutron_enable_metadata_agent} - neutron::enable_ovs_agent: {get_input: neutron_enable_ovs_agent} - neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers} - neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers} - neutron::plugins::ml2::extension_drivers: {get_input: neutron_plugin_extensions} - neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover} - neutron::server::l3_ha: {get_input: neutron_l3_ha} - neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network} - neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} - neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} - neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges} - neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings} - neutron_public_interface: {get_input: neutron_public_interface} - neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device} - neutron_public_interface_default_route: {get_input: neutron_public_interface_default_route} - neutron_public_interface_tag: {get_input: neutron_public_interface_tag} - neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types} - neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types} - neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions} - neutron::server::auth_password: {get_input: neutron_password} - neutron::agents::metadata::auth_password: {get_input: neutron_password} - neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options} - neutron_dsn: {get_input: neutron_dsn} - neutron::agents::metadata::auth_url: {get_input: keystone_identity_uri} - neutron::db::mysql::password: {get_input: neutron_password} neutron::keystone::auth::public_url: {get_input: neutron_public_url } neutron::keystone::auth::internal_url: {get_input: neutron_internal_url } neutron::keystone::auth::admin_url: {get_input: neutron_admin_url } neutron::keystone::auth::password: {get_input: neutron_password } neutron::keystone::auth::region: {get_input: keystone_region} - neutron::server::notifications::nova_url: {get_input: nova_internal_url} - neutron::server::notifications::auth_url: {get_input: neutron_auth_url} - neutron::server::notifications::tenant_name: 'service' - neutron::server::notifications::project_name: 'service' - neutron::server::notifications::password: {get_input: nova_password} # Ceilometer - ceilometer_backend: {get_input: ceilometer_backend} - ceilometer_mysql_conn_string: {get_input: ceilometer_dsn} - ceilometer::metering_secret: {get_input: ceilometer_metering_secret} - ceilometer::rabbit_userid: {get_input: rabbit_username} - ceilometer::rabbit_password: {get_input: rabbit_password} - ceilometer::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - ceilometer::rabbit_port: {get_input: rabbit_client_port} - ceilometer::debug: {get_input: debug} ceilometer::api::host: {get_input: ceilometer_api_network} - 
ceilometer::api::keystone_password: {get_input: ceilometer_password} - ceilometer::api::keystone_auth_uri: {get_input: keystone_auth_uri} - ceilometer::api::keystone_identity_uri: {get_input: keystone_identity_uri} - ceilometer::agent::auth::auth_password: {get_input: ceilometer_password} - ceilometer::agent::auth::auth_url: {get_input: keystone_auth_uri} - ceilometer::agent::central::coordination_url: {get_input: ceilometer_coordination_url} - ceilometer::agent::notification::store_events: {get_input: ceilometer_store_events} - ceilometer::db::mysql::password: {get_input: ceilometer_password} - ceilometer::collector::meter_dispatcher: {get_input: ceilometer_meter_dispatcher} - ceilometer::dispatcher::gnocchi::url: {get_input: gnocchi_internal_url } - ceilometer::dispatcher::gnocchi::filter_project: 'service' - ceilometer::dispatcher::gnocchi::archive_policy: 'low' - ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml' snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} # Aodh + aodh_mysql_conn_string: {get_input: aodh_dsn} aodh::rabbit_userid: {get_input: rabbit_username} aodh::rabbit_password: {get_input: rabbit_password} aodh::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} @@ -1449,10 +833,16 @@ resources: aodh::api::keystone_password: {get_input: aodh_password} aodh::api::keystone_auth_uri: {get_input: keystone_auth_uri} aodh::api::keystone_identity_uri: {get_input: keystone_identity_uri} + aodh::auth::auth_url: {get_input: keystone_auth_uri} aodh::auth::auth_password: {get_input: aodh_password} aodh::db::mysql::password: {get_input: aodh_password} # for a migration path from ceilometer-alarm to aodh, we use the same database & coordination aodh::evaluator::coordination_url: {get_input: ceilometer_coordination_url} + aodh::keystone::auth::public_url: {get_input: aodh_public_url } + aodh::keystone::auth::internal_url: {get_input: aodh_internal_url } + aodh::keystone::auth::admin_url: {get_input: aodh_admin_url } + aodh::keystone::auth::password: {get_input: aodh_password } + aodh::keystone::auth::region: {get_input: keystone_region} # Gnocchi gnocchi_backend: {get_input: gnocchi_backend} @@ -1469,24 +859,20 @@ resources: gnocchi::db::mysql::password: {get_input: gnocchi_password} gnocchi::storage::swift::swift_authurl: {get_input: keystone_auth_uri} gnocchi::storage::swift::swift_key: {get_input: gnocchi_password} + gnocchi::keystone::auth::public_url: {get_input: gnocchi_public_url } + gnocchi::keystone::auth::internal_url: {get_input: gnocchi_internal_url } + gnocchi::keystone::auth::admin_url: {get_input: gnocchi_admin_url } + gnocchi::keystone::auth::password: {get_input: gnocchi_password } + gnocchi::keystone::auth::region: {get_input: keystone_region} # Nova - nova::rabbit_userid: {get_input: rabbit_username} - nova::rabbit_password: {get_input: rabbit_password} - nova::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - nova::rabbit_port: {get_input: rabbit_client_port} nova::upgrade_level_compute: {get_input: upgrade_level_nova_compute} - nova::debug: {get_input: debug} nova::use_ipv6: {get_input: nova_ipv6} nova::api::auth_uri: {get_input: keystone_auth_uri} nova::api::identity_uri: {get_input: keystone_identity_uri} nova::api::api_bind_address: {get_input: nova_api_network} nova::api::metadata_listen: {get_input: nova_metadata_network} nova::api::admin_password: {get_input: nova_password} - nova::api::osapi_compute_workers: {get_input: nova_workers} - 
nova::api::ec2_workers: {get_input: nova_workers} - nova::api::metadata_workers: {get_input: nova_workers} - nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} nova::database_connection: {get_input: nova_dsn} nova::api_database_connection: {get_input: nova_api_dsn} nova::glance_api_servers: {get_input: glance_api_servers} @@ -1499,6 +885,11 @@ resources: nova::db::mysql::password: {get_input: nova_password} nova::db::mysql_api::password: {get_input: nova_password} nova_enable_db_purge: {get_input: nova_enable_db_purge} + nova::keystone::auth::public_url: {get_input: nova_public_url} + nova::keystone::auth::internal_url: {get_input: nova_internal_url} + nova::keystone::auth::admin_url: {get_input: nova_admin_url} + nova::keystone::auth::password: {get_input: nova_password } + nova::keystone::auth::region: {get_input: keystone_region} # Horizon apache::mod::remoteip::proxy_ips: {get_input: horizon_subnet} @@ -1509,41 +900,11 @@ resources: horizon::bind_address: {get_input: horizon_network} horizon::keystone_url: {get_input: keystone_auth_uri} - # Sahara - sahara::host: {get_input: sahara_api_network} - sahara::plugins: - - cdh - - hdp - - mapr - - vanilla - - spark - - storm - sahara::admin_password: {get_input: sahara_password} - sahara::auth_uri: {get_input: keystone_auth_uri} - sahara::admin_user: sahara - sahara::identity_uri: {get_input: keystone_identity_uri} - sahara::use_neutron: true - sahara::database_connection: {get_input: sahara_dsn} - sahara::debug: {get_input: debug} - sahara::rpc_backend: rabbit - sahara::rabbit_userid: {get_input: rabbit_username} - sahara::rabbit_password: {get_input: rabbit_password} - sahara::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - sahara::rabbit_port: {get_input: rabbit_client_port} - sahara::db::mysql::password: {get_input: sahara_password} - - # Rabbit + # RabbitMQ rabbitmq::node_ip_address: {get_input: rabbitmq_network} rabbitmq::erlang_cookie: {get_input: rabbit_cookie} - rabbitmq::file_limit: {get_input: rabbit_fd_limit} - rabbitmq::default_user: {get_input: rabbit_username} - rabbitmq::default_pass: {get_input: rabbit_password} - rabbit_ipv6: {get_input: rabbit_ipv6} # Redis redis::bind: {get_input: redis_network} - redis::requirepass: {get_input: redis_password} - redis::masterauth: {get_input: redis_password} - redis::sentinel_auth_pass: {get_input: redis_password} redis_vip: {get_input: redis_vip} # Firewall tripleo::firewall::manage_firewall: {get_input: manage_firewall} @@ -1551,18 +912,17 @@ resources: # Misc memcached_ipv6: {get_input: memcached_ipv6} memcached::listen_ip: {get_input: memcached_network} - neutron_public_interface_ip: {get_input: neutron_public_interface_ip} - ntp::servers: {get_input: ntp_servers} - timezone::timezone: {get_input: timezone} control_virtual_interface: {get_input: control_virtual_interface} public_virtual_interface: {get_input: public_virtual_interface} - tripleo::loadbalancer::control_virtual_interface: {get_input: control_virtual_interface} - tripleo::loadbalancer::public_virtual_interface: {get_input: public_virtual_interface} - tripleo::loadbalancer::haproxy_log_address: {get_input: haproxy_log_address} - tripleo::loadbalancer::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]} - tripleo::loadbalancer::haproxy_stats_user: {get_input: haproxy_stats_user} - tripleo::loadbalancer::haproxy_stats_password: {get_input: haproxy_stats_password} - tripleo::loadbalancer::redis_password: {get_input: redis_password} + 
tripleo::keepalived::control_virtual_interface: {get_input: control_virtual_interface} + tripleo::keepalived::public_virtual_interface: {get_input: public_virtual_interface} + tripleo::haproxy::control_virtual_interface: {get_input: control_virtual_interface} + tripleo::haproxy::public_virtual_interface: {get_input: public_virtual_interface} + tripleo::haproxy::haproxy_log_address: {get_input: haproxy_log_address} + tripleo::haproxy::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]} + tripleo::haproxy::haproxy_stats_user: {get_input: haproxy_stats_user} + tripleo::haproxy::haproxy_stats_password: {get_input: haproxy_stats_password} + tripleo::haproxy::redis_password: {get_input: redis_password} tripleo::packages::enable_install: {get_input: enable_package_install} tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} @@ -1619,13 +979,6 @@ outputs: hostname: description: Hostname of the server value: {get_attr: [Controller, name]} - corosync_node: - description: > - Node object in the format {ip: ..., name: ...} format that the corosync - element expects - value: - ip: {get_attr: [Controller, networks, ctlplane, 0]} - name: {get_attr: [Controller, name]} hosts_entry: description: > Server's IP address and hostname in the /etc/hosts format @@ -1697,16 +1050,6 @@ outputs: template: "IP:11211" params: IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]} - config_identifier: - description: identifier which changes if the controller configuration may need re-applying - value: - list_join: - - ',' - - - {get_attr: [ControllerDeployment, deploy_stdout]} - - {get_attr: [NodeTLSCAData, deploy_stdout]} - - {get_attr: [NodeTLSData, deploy_stdout]} - - {get_attr: [ControllerExtraConfigPre, deploy_stdout]} - - {get_param: UpdateIdentifier} tls_key_modulus_md5: description: MD5 checksum of the TLS Key Modulus value: {get_attr: [NodeTLSData, key_modulus_md5]} diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml index 26ce7138..aa5c3c43 100644 --- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml +++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml @@ -85,7 +85,7 @@ resources: tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort} tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort} tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift} - tripleo::loadbalancer::midonet_api: true + tripleo::haproxy::midonet_api: true # Missed Neutron Puppet data neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver' neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver' @@ -109,11 +109,3 @@ resources: properties: config: {get_resource: NetworkMidoNetConfig} servers: {get_param: compute_servers} - -outputs: - config_identifier: - value: - list_join: - - ' ' - - - {get_attr: [NetworkMidonetDeploymentControllers, deploy_stdouts]} - - {get_attr: [NetworkMidonetDeploymentComputes, deploy_stdouts]} diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml index 71445800..e924fc87 100644 --- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml +++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml @@ -343,11 +343,3 @@ resources: input_values: ucsm_config: {get_param: 
NetworkUCSMHostList} actions: ['CREATE'] # Only do this on CREATE - -outputs: - # The Deployment applying the hieradata outputs the derived config-id, which - # changes if the input_values change, so if the stdouts from - # NetworkCiscoDeployment change, we need to reapply puppet (which will - # happen if we return a different config_identifier) - config_identifier: - value: {get_attr: [NetworkCiscoDeployment, deploy_stdouts]} diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml deleted file mode 100644 index 5942088c..00000000 --- a/puppet/extraconfig/ceph/ceph-external-config.yaml +++ /dev/null @@ -1,113 +0,0 @@ -heat_template_version: 2015-04-30 -description: 'Configure parameters for an external Ceph cluster via Puppet.' - -parameters: - ceph_storage_count: - default: 0 - type: number - description: Number of Ceph storage nodes. Used to enable/disable managed Ceph installation. - ceph_external_mon_ips: - default: '' - type: string - description: List of external Ceph Mon host IPs. - ceph_client_key: - default: '' - type: string - description: Ceph key used to create the 'openstack' user keyring. - ceph_fsid: - default: '' - type: string - # The following parameters are unused for external Ceph clusters and - # are here and exist for compatibility - ceph_admin_key: - default: '' - type: string - ceph_mon_key: - default: '' - type: string - ceph_mon_names: - type: comma_delimited_list - ceph_mon_ips: - type: comma_delimited_list - NovaRbdPoolName: - default: vms - type: string - CinderRbdPoolName: - default: volumes - type: string - GlanceRbdPoolName: - default: images - type: string - GnocchiRbdPoolName: - default: metrics - type: string - CephClientUserName: - default: openstack - type: string - CephIPv6: - default: False - type: boolean - -resources: - CephClusterConfigImpl: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - ceph_cluster: - mapped_data: - ceph_storage_count: {get_param: ceph_storage_count} - enable_external_ceph: true - ceph_ipv6: {get_param: CephIPv6} - ceph_mon_host: {get_param: ceph_external_mon_ips} - ceph_mon_host_v6: {get_param: ceph_external_mon_ips} - ceph::profile::params::fsid: {get_param: ceph_fsid} - ceph::profile::params::client_keys: - str_replace: - template: "{ - client.CLIENT_USER: { - secret: 'CLIENT_KEY', - mode: '0644', - cap_mon: 'allow r', - cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL' - } - }" - params: - CLIENT_USER: {get_param: CephClientUserName} - CLIENT_KEY: {get_param: ceph_client_key} - NOVA_POOL: {get_param: NovaRbdPoolName} - CINDER_POOL: {get_param: CinderRbdPoolName} - GLANCE_POOL: {get_param: GlanceRbdPoolName} - GNOCCHI_POOL: {get_param: GnocchiRbdPoolName} - ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6} - nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName} - cinder_rbd_pool_name: {get_param: CinderRbdPoolName} - glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} - gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName} - gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName} - nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName} - glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} - nova::compute::rbd::rbd_keyring: - list_join: - - '.' 
- - - 'client' - - {get_param: CephClientUserName} - gnocchi::storage::ceph::ceph_keyring: - list_join: - - '.' - - - 'client' - - {get_param: CephClientUserName} - ceph_client_user_name: {get_param: CephClientUserName} - ceph_pools: - - {get_param: CinderRbdPoolName} - - {get_param: NovaRbdPoolName} - - {get_param: GlanceRbdPoolName} - - {get_param: GnocchiRbdPoolName} - -outputs: - config_id: - description: The ID of the CephClusterConfigImpl resource. - value: - {get_resource: CephClusterConfigImpl} diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml index 9b6981bb..9423208e 100644 --- a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml +++ b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml @@ -51,7 +51,7 @@ resources: datafiles: cinder_dellsc_data: mapped_data: - cinder_enable_dellsc_backend: {get_input: EnableDellScBackend} + tripleo::profile::base::cinder::volume::cinder_enable_dellsc_backend: {get_input: EnableDellScBackend} cinder::backend::dellsc_iscsi::volume_backend_name: {get_input: DellScBackendName} cinder::backend::dellsc_iscsi::san_ip: {get_input: DellScSanIp} cinder::backend::dellsc_iscsi::san_login: {get_input: DellScSanLogin} diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml index 36db334e..c7af6f22 100644 --- a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml +++ b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml @@ -50,7 +50,7 @@ resources: datafiles: cinder_eqlx_data: mapped_data: - cinder_enable_eqlx_backend: {get_input: EnableEqlxBackend} + tripleo::profile::base::cinder::volume::cinder_enable_eqlx_backend: {get_input: EnableEqlxBackend} cinder::backend::eqlx::volume_backend_name: {get_input: EqlxBackendName} cinder::backend::eqlx::san_ip: {get_input: EqlxSanIp} cinder::backend::eqlx::san_login: {get_input: EqlxSanLogin} diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml index ab442f2b..6ff90881 100644 --- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml +++ b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml @@ -87,7 +87,7 @@ resources: datafiles: cinder_netapp_data: mapped_data: - cinder_enable_netapp_backend: {get_input: EnableNetappBackend} + tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend} cinder::backend::netapp::title: {get_input: NetappBackendName} cinder::backend::netapp::netapp_login: {get_input: NetappLogin} cinder::backend::netapp::netapp_password: {get_input: NetappPassword} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml deleted file mode 100644 index a4cfea07..00000000 --- a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml +++ /dev/null @@ -1,91 +0,0 @@ -heat_template_version: 2015-04-30 - -description: Configure hieradata for Nuage configuration on the Controller - -parameters: - server: - description: ID of the controller node to apply this config to - type: string - - # Config specific parameters, to be provided via parameter_defaults - NeutronNuageOSControllerIp: - description: IP address of the OpenStack Controller - type: string - - NeutronNuageNetPartitionName: - description: Specifies the title that you will see on the VSD - type: string - default: 'default_name' - - NeutronNuageVSDIp: - 
description: IP address and port of the Virtual Services Directory - type: string - - NeutronNuageVSDUsername: - description: Username to be used to log into VSD - type: string - - NeutronNuageVSDPassword: - description: Password to be used to log into VSD - type: string - - NeutronNuageVSDOrganization: - description: Organization parameter required to log into VSD - type: string - default: 'organization' - - NeutronNuageBaseURIVersion: - description: URI version to be used based on the VSD release - type: string - default: 'default_uri_version' - - NeutronNuageCMSId: - description: Cloud Management System ID (CMS ID) to distinguish between OS instances on the same VSD - type: string - - UseForwardedFor: - description: Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. - type: boolean - default: false - -resources: - NeutronNuageConfig: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - neutron_nuage_data: - mapped_data: - neutron::plugins::nuage::nuage_oscontroller_ip: {get_input: NuageOSControllerIp} - neutron::plugins::nuage::nuage_net_partition_name: {get_input: NuageNetPartitionName} - neutron::plugins::nuage::nuage_vsd_ip: {get_input: NuageVSDIp} - neutron::plugins::nuage::nuage_vsd_username: {get_input: NuageVSDUsername} - neutron::plugins::nuage::nuage_vsd_password: {get_input: NuageVSDPassword} - neutron::plugins::nuage::nuage_vsd_organization: {get_input: NuageVSDOrganization} - neutron::plugins::nuage::nuage_base_uri_version: {get_input: NuageBaseURIVersion} - neutron::plugins::nuage::nuage_cms_id: {get_input: NuageCMSId} - nova::api::use_forwarded_for: {get_input: NovaUseForwardedFor} - - NeutronNuageDeployment: - type: OS::Heat::StructuredDeployment - properties: - name: NeutronNuageDeployment - config: {get_resource: NeutronNuageConfig} - server: {get_param: server} - input_values: - NuageOSControllerIp: {get_param: NeutronNuageOSControllerIp} - NuageNetPartitionName: {get_param: NeutronNuageNetPartitionName} - NuageVSDIp: {get_param: NeutronNuageVSDIp} - NuageVSDUsername: {get_param: NeutronNuageVSDUsername} - NuageVSDPassword: {get_param: NeutronNuageVSDPassword} - NuageVSDOrganization: {get_param: NeutronNuageVSDOrganization} - NuageBaseURIVersion: {get_param: NeutronNuageBaseURIVersion} - NuageCMSId: {get_param: NeutronNuageCMSId} - NovaUseForwardedFor: {get_param: UseForwardedFor} - -outputs: - deploy_stdout: - description: Deployment reference, used to trigger puppet apply on changes - value: {get_attr: [NeutronNuageDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml deleted file mode 100644 index 5c686fe7..00000000 --- a/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml +++ /dev/null @@ -1,62 +0,0 @@ -heat_template_version: 2015-04-30 - -description: Controller hieradata for Neutron OpenContrail configuration - -parameters: - server: - description: ID of the controller node to apply this config to - type: string - ContrailApiServerIp: - description: IP address of the OpenContrail API server - type: string - ContrailApiServerPort: - description: Port of the OpenContrail API - type: string - default: 8082 - ContrailMultiTenancy: - description: Whether to enable multi tenancy - type: boolean - default: false - ContrailExtensions: - description: List of OpenContrail extensions to be enabled - type: comma_delimited_list - 
default: '' - -resources: - ControllerContrailConfig: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - neutron_opencontrail_data: - mapped_data: - neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions - - neutron::plugins::opencontrail::api_server_ip: {get_input: contrail_api_server_ip} - neutron::plugins::opencontrail::api_server_port: {get_input: contrail_api_server_port} - neutron::plugins::opencontrail::multi_tenancy: {get_input: contrail_multi_tenancy} - neutron::plugins::opencontrail::contrail_extensions: {get_input: contrail_extensions} - neutron::plugins::opencontrail::keystone_auth_url: '"%{hiera(''keystone_auth_uri'')}"' - neutron::plugins::opencontrail::keystone_admin_user: admin - neutron::plugins::opencontrail::keystone_admin_tenant_name: admin - neutron::plugins::opencontrail::keystone_admin_password: '"%{hiera(''admin_password'')}"' - neutron::plugins::opencontrail::keystone_admin_token: '"%{hiera(''keystone::admin_token'')}"' - - ControllerContrailDeployment: - type: OS::Heat::StructuredDeployment - properties: - config: {get_resource: ControllerContrailConfig} - server: {get_param: server} - input_values: - contrail_api_server_ip: {get_param: ContrailApiServerIp} - contrail_api_server_port: {get_param: ContrailApiServerPort} - contrail_multi_tenancy: {get_param: ContrailMultiTenancy} - contrail_extensions: {get_param: ContrailExtensions} - - -outputs: - deploy_stdout: - description: Output of the extra hiera data deployment - value: {get_attr: [ControllerContrailDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml deleted file mode 100755 index 7c0a7ad2..00000000 --- a/puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml +++ /dev/null @@ -1,113 +0,0 @@ -heat_template_version: 2015-04-30 - -description: Controller hieradata for Neutron PLUMgrid configuration - -parameters: - server: - description: ID of the controller node to apply this config to - type: string - PLUMgridDirectorServer: - description: IP address of the PLUMgrid Director Server - type: string - default: 127.0.0.1 - PLUMgridDirectorServerPort: - description: Port of the PLUMgrid Director Server - type: string - default: 443 - PLUMgridUsername: - description: Username for PLUMgrid platform - type: string - PLUMgridPassword: - description: Password for PLUMgrid platform - type: string - hidden: true - PLUMgridServerTimeOut: - description: Request timeout duration (seconds) to PLUMgrid platform - type: string - default: 99 - PLUMgridNovaMetadataIP: - description: IP address of Nova Metadata - type: string - default: 169.254.169.254 - PLUMgridNovaMetadataPort: - description: Port of Nova Metadata - type: string - default: 8775 - PLUMgridL2GatewayVendor: - description: Vendor for L2 Gateway Switch - type: string - default: vendor - PLUMgridL2GatewayUsername: - description: Username for L2 Gateway Switch - type: string - default: username - PLUMgridL2GatewayPassword: - description: Password for L2 Gateway Switch - type: string - hidden: true - PLUMgridIdentityVersion: - description: Keystone Identity version - type: string - default: v2.0 - PLUMgridConnectorType: - description: Neutron Network Connector Type - type: string - default: distributed - PLUMgridNeutronPluginVersion: - description: PLUMgrid Neutron Plugin version - type: string - default: present - PLUMgridPlumlibVersion: - 
description: PLUMgrid Plumlib version - type: string - default: present - - -resources: - ControllerPLUMgridConfig: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - neutron_plumgrid_data: - mapped_data: - neutron::plugins::plumgrid::director_server: {get_input: plumgrid_director_server} - neutron::plugins::plumgrid::director_server_port: {get_input: plumgrid_director_server_port} - neutron::plugins::plumgrid::username: {get_input: plumgrid_username} - neutron::plugins::plumgrid::password: {get_input: plumgrid_password} - neutron::plugins::plumgrid::nova_metadata_ip: {get_input: plumgrid_nova_metadata_ip} - neutron::plugins::plumgrid::nova_metadata_port: {get_input: plumgrid_nova_metadata_port} - neutron::plugins::plumgrid::l2gateway_vendor: {get_input: plumgrid_l2gateway_vendor} - neutron::plugins::plumgrid::l2gateway_sw_username: {get_input: plumgrid_l2gateway_sw_username} - neutron::plugins::plumgrid::l2gateway_sw_password: {get_input: plumgrid_l2gateway_sw_password} - neutron::plugins::plumgrid::connector_type: {get_input: plumgrid_connector_type} - neutron::plugins::plumgrid::identity_version: {get_input: plumgrid_identity_version} - neutron::plugins::plumgrid::package_ensure: {get_input: plumgrid_neutron_plugin_version} - neutron::plugins::plumgrid::plumlib_package_ensure: {get_input: plumgrid_plumlib_version} - - ControllerPLUMgridDeployment: - type: OS::Heat::StructuredDeployment - properties: - config: {get_resource: ControllerPLUMgridConfig} - server: {get_param: server} - input_values: - plumgrid_director_server: {get_param: PLUMgridDirectorServer} - plumgrid_director_server_port: {get_param: PLUMgridDirectorServerPort} - plumgrid_username: {get_param: PLUMgridUsername} - plumgrid_password: {get_param: PLUMgridPassword} - plumgrid_nova_metadata_ip: {get_param: PLUMgridNovaMetadataIP} - plumgrid_nova_metadata_port: {get_param: PLUMgridNovaMetadataPort} - plumgrid_l2gateway_vendor: {get_param: PLUMgridL2GatewayVendor} - plumgrid_l2gateway_sw_username: {get_param: PLUMgridL2GatewayUsername} - plumgrid_l2gateway_sw_password: {get_param: PLUMgridL2GatewayPassword} - plumgrid_identity_version: {get_param: PLUMgridIdentityVersion} - plumgrid_connector_type: {get_param: PLUMgridConnectorType} - plumgrid_neutron_plugin_version: {get_param: PLUMgridNeutronPluginVersion} - plumgrid_plumlib_version: {get_param: PLUMgridPlumlibVersion} - -outputs: - deploy_stdout: - description: Deployment reference, used to trigger puppet apply on changes - value: {get_attr: [ControllerPLUMgridDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/tls/no-ca.yaml b/puppet/extraconfig/tls/no-ca.yaml deleted file mode 100644 index 5862a85c..00000000 --- a/puppet/extraconfig/tls/no-ca.yaml +++ /dev/null @@ -1,17 +0,0 @@ -heat_template_version: 2015-04-30 - -description: > - This is a default no-op template which can be passed to the - OS::Nova::Server resources. This template can be replaced with - a different implementation via the resource registry, such that - deployers may customize their configuration. - -parameters: - server: # Here for compatibility with controller.yaml - description: ID of the controller node to apply this config to - type: string - -outputs: - deploy_stdout: - description: Deployment reference, used to trigger puppet apply on changes - value: 'Root CA cert injection not enabled.' 
diff --git a/puppet/extraconfig/tls/no-tls.yaml b/puppet/extraconfig/tls/no-tls.yaml deleted file mode 100644 index a2b5c569..00000000 --- a/puppet/extraconfig/tls/no-tls.yaml +++ /dev/null @@ -1,34 +0,0 @@ -heat_template_version: 2015-04-30 - -description: > - This is a default no-op template. This defines the parameters that - need to be passed in order to have TLS enabled in the controller - nodes. This template can be replaced with a different - implementation via the resource registry, such that deployers - may customize their configuration. - -parameters: - DeployedSSLCertificatePath: - default: '' - description: > - The filepath of the certificate as it will be stored in the controller. - type: string - NodeIndex: # Here for compatibility with puppet/controller.yaml - default: 0 - type: number - server: # Here for compatibility with puppet/controller.yaml - description: ID of the controller node to apply this config to - type: string - -outputs: - deploy_stdout: - description: Deployment reference, used to trigger puppet apply on changes - value: 'TLS not enabled.' - deployed_ssl_certificate_path: - value: '' - key_modulus_md5: - description: Key SSL Modulus - value: '' - cert_modulus_md5: - description: Certificate SSL Modulus - value: '' diff --git a/puppet/hieradata/ceph.yaml b/puppet/hieradata/ceph.yaml index 1e480e60..ccb41cc4 100644 --- a/puppet/hieradata/ceph.yaml +++ b/puppet/hieradata/ceph.yaml @@ -1,4 +1,3 @@ -ceph::profile::params::osd_journal_size: 1024 ceph::profile::params::osd_pool_default_pg_num: 32 ceph::profile::params::osd_pool_default_pgp_num: 32 ceph::profile::params::osd_pool_default_size: 3 @@ -8,5 +7,3 @@ ceph::profile::params::manage_repo: false ceph::profile::params::authentication_type: cephx ceph_classes: [] - -ceph_osd_selinux_permissive: true diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index 34965959..65cf9577 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -17,6 +17,7 @@ nova::network::neutron::neutron_username: 'neutron' nova::network::neutron::dhcp_domain: '' neutron::allow_overlapping_ips: true +neutron::server::project_name: 'service' kernel_modules: nf_conntrack: {} diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml index 865210c9..62728332 100644 --- a/puppet/hieradata/compute.yaml +++ b/puppet/hieradata/compute.yaml @@ -1,16 +1,15 @@ # Hiera data here applies to all compute nodes +nova::host: "%{::fqdn}" nova::notify_on_state_change: 'vm_and_task_state' nova::notification_driver: messagingv2 -nova::compute::enabled: true nova::compute::instance_usage_audit: true nova::compute::instance_usage_audit_period: 'hour' -nova::compute::vnc_enabled: true - -nova::compute::libvirt::migration_support: true nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}" +nova::network::neutron::neutron_auth_type: 'v3password' + # Changing the default from 512MB. The current templates can not deploy # overclouds with swap. On an idle compute node, we see ~1024MB of RAM # used. 
2048 is suggested to account for other possible operations for @@ -20,4 +19,6 @@ nova::compute::reserved_host_memory: 2048 ceilometer::agent::auth::auth_tenant_name: 'service' ceilometer::agent::auth::auth_endpoint_type: 'internalURL' +neutron::host: "%{::fqdn}" + compute_classes: [] diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index 9316cf17..66613f0f 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -1,13 +1,10 @@ # Hiera data here applies to all controller nodes nova::api::enabled: true -nova::conductor::enabled: true -nova::consoleauth::enabled: true nova::vncproxy::enabled: true -nova::scheduler::enabled: true # gnocchi -gnocchi::db::sync::extra_opts: '--skip-storage' +gnocchi::db::sync::extra_opts: '--skip-storage --create-legacy-resource-types' gnocchi::storage::swift::swift_user: 'service:gnocchi' gnocchi::storage::swift::swift_auth_version: 2 gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26' @@ -31,6 +28,7 @@ rabbitmq_kernel_variables: rabbitmq_config_variables: tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]' cluster_partition_handling: 'pause_minority' + loopback_users: '[]' mongodb::server::replset: tripleo mongodb::server::journal: false @@ -50,13 +48,22 @@ glance::registry::keystone_tenant: 'service' neutron::server::auth_tenant: 'service' neutron::agents::metadata::auth_tenant: 'service' neutron::agents::l3::router_delete_namespaces: True -neutron::agents::dhcp::dhcp_delete_namespaces: True cinder::api::keystone_tenant: 'service' swift::proxy::authtoken::admin_tenant_name: 'service' ceilometer::api::keystone_tenant: 'service' gnocchi::api::keystone_tenant: 'service' heat::keystone_tenant: 'service' sahara::admin_tenant_name: 'service' +aodh::keystone::auth::tenant: 'service' +ceilometer::keystone::auth::tenant: 'service' +cinder::keystone::auth::tenant: 'service' +glance::keystone::auth::tenant: 'service' +gnocchi::keystone::auth::tenant: 'service' +heat::keystone::auth::tenant: 'service' +neutron::keystone::auth::tenant: 'service' +nova::keystone::auth::tenant: 'service' +sahara::keystone::auth::tenant: 'service' +swift::keystone::auth::tenant: 'service' # keystone keystone::cron::token_flush::maxdelay: 3600 @@ -77,6 +84,7 @@ swift::proxy::pipeline: - 'healthcheck' - 'cache' - 'ratelimit' + - 'bulk' - 'tempurl' - 'formpost' - 'authtoken' @@ -86,6 +94,10 @@ swift::proxy::pipeline: - 'proxy-server' swift::proxy::account_autocreate: true +swift::keystone::auth::configure_s3_endpoint: false +swift::keystone::auth::operator_roles: + - admin + - swiftoperator # glance glance::api::pipeline: 'keystone' @@ -96,13 +108,12 @@ glance_file_pcmk_directory: '/var/lib/glance/images' # neutron neutron::server::sync_db: true -neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf # nova nova::notify_on_state_change: 'vm_and_task_state' nova::api::default_floating_pool: 'public' nova::api::sync_db_api: true -nova::scheduler::filter::ram_allocation_ratio: '1.0' +nova::api::enable_proxy_headers_parsing: true nova::cron::archive_deleted_rows::hour: '*/12' nova::cron::archive_deleted_rows::destination: '/dev/null' nova::notification_driver: messaging @@ -114,7 +125,10 @@ ceilometer::agent::auth::auth_endpoint_type: 'internalURL' cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler cinder::cron::db_purge::destination: '/dev/null' cinder::host: hostgroup 
-cinder_user_enabled_backends: [] + +# TODO(jaosorior): Move to cinder profile once cinder is moved as a composable +# service. +cinder::api::enable_proxy_headers_parsing: true # heat heat::engine::configure_delegated_roles: false @@ -127,6 +141,7 @@ heat::cron::purge_deleted::destination: '/dev/null' heat::keystone::domain::domain_name: 'heat_stack' heat::keystone::domain::domain_admin: 'heat_stack_domain_admin' heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost' +heat::auth_plugin: 'password' # pacemaker pacemaker::corosync::cluster_name: 'tripleo_cluster' @@ -147,39 +162,38 @@ horizon::vhost_extra_params: mysql::server::manage_config_file: true -tripleo::loadbalancer::keystone_admin: true -tripleo::loadbalancer::keystone_public: true -tripleo::loadbalancer::neutron: true -tripleo::loadbalancer::cinder: true -tripleo::loadbalancer::glance_api: true -tripleo::loadbalancer::glance_registry: true -tripleo::loadbalancer::nova_ec2: true -tripleo::loadbalancer::nova_osapi: true -tripleo::loadbalancer::nova_metadata: true -tripleo::loadbalancer::nova_novncproxy: true -tripleo::loadbalancer::mysql: true -tripleo::loadbalancer::redis: true -tripleo::loadbalancer::sahara: true -tripleo::loadbalancer::swift_proxy_server: true -tripleo::loadbalancer::ceilometer: true -tripleo::loadbalancer::aodh: true -tripleo::loadbalancer::gnocchi: true -tripleo::loadbalancer::heat_api: true -tripleo::loadbalancer::heat_cloudwatch: true -tripleo::loadbalancer::heat_cfn: true -tripleo::loadbalancer::horizon: true +tripleo::haproxy::keystone_admin: true +tripleo::haproxy::keystone_public: true +tripleo::haproxy::neutron: true +tripleo::haproxy::cinder: true +tripleo::haproxy::glance_api: true +tripleo::haproxy::glance_registry: true +tripleo::haproxy::nova_osapi: true +tripleo::haproxy::nova_metadata: true +tripleo::haproxy::nova_novncproxy: true +tripleo::haproxy::mysql: true +tripleo::haproxy::redis: true +tripleo::haproxy::sahara: true +tripleo::haproxy::swift_proxy_server: true +tripleo::haproxy::ceilometer: true +tripleo::haproxy::aodh: true +tripleo::haproxy::gnocchi: true +tripleo::haproxy::heat_api: true +tripleo::haproxy::heat_cloudwatch: true +tripleo::haproxy::heat_cfn: true +tripleo::haproxy::horizon: true controller_classes: [] # firewall tripleo::firewall::firewall_rules: '101 mongodb_config': - port: 27019 + dport: 27019 '102 mongodb_sharding': - port: 27018 + dport: 27018 '103 mongod': - port: 27017 + dport: 27017 '104 mysql galera': - port: + dport: - 873 - 3306 - 4444 @@ -187,37 +201,38 @@ tripleo::firewall::firewall_rules: - 4568 - 9200 '105 ntp': - port: 123 + dport: 123 proto: udp '106 vrrp': proto: vrrp '107 haproxy stats': - port: 1993 + dport: 1993 '108 redis': - port: + dport: - 6379 - 26379 '109 rabbitmq': - port: + dport: + - 4369 - 5672 - 35672 '110 ceph': - port: + dport: - 6789 - '6800-6810' '111 keystone': - port: + dport: - 5000 - 13000 - 35357 - 13357 '112 glance': - port: + dport: - 9292 - 9191 - 13292 '113 nova': - port: + dport: - 6080 - 13080 - 8773 @@ -226,43 +241,43 @@ tripleo::firewall::firewall_rules: - 13774 - 8775 '114 neutron server': - port: + dport: - 9696 - 13696 '115 neutron dhcp input': proto: 'udp' - port: 67 + dport: 67 '116 neutron dhcp output': proto: 'udp' chain: 'OUTPUT' - port: 68 + dport: 68 '118 neutron vxlan networks': proto: 'udp' - port: 4789 + dport: 4789 '119 cinder': - port: + dport: - 8776 - 13776 '120 iscsi initiator': - port: 3260 + dport: 3260 '121 memcached': - port: 11211 + dport: 11211 '122 swift proxy': - port: + 
dport: - 8080 - 13808 '123 swift storage': - port: + dport: - 873 - 6000 - 6001 - 6002 '124 ceilometer': - port: + dport: - 8777 - 13777 '125 heat': - port: + dport: - 8000 - 13800 - 8003 @@ -270,17 +285,30 @@ tripleo::firewall::firewall_rules: - 8004 - 13004 '126 horizon': - port: + dport: - 80 - 443 '127 snmp': - port: 161 + dport: 161 proto: 'udp' '128 aodh': - port: + dport: - 8042 - 13042 '129 gnocchi-api': - port: + dport: - 8041 - 13041 + '130 pacemaker tcp': + proto: 'tcp' + dport: + - 2224 + - 3121 + - 21064 + '131 pacemaker udp': + proto: 'udp' + dport: 5405 + '132 sahara': + dport: + - 8386 + - 13386 diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml index 4eb199c8..9b2ea4f4 100644 --- a/puppet/hieradata/database.yaml +++ b/puppet/hieradata/database.yaml @@ -13,46 +13,6 @@ nova::db::mysql_api::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" -# Glance -glance::db::mysql::user: glance -glance::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -glance::db::mysql::dbname: glance -glance::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Keystone -keystone::db::mysql::user: keystone -keystone::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -keystone::db::mysql::dbname: keystone -keystone::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Neutron -neutron::db::mysql::user: neutron -neutron::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -neutron::db::mysql::dbname: ovs_neutron -neutron::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Cinder -cinder::db::mysql::user: cinder -cinder::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -cinder::db::mysql::dbname: cinder -cinder::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Heat -heat::db::mysql::user: heat -heat::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -heat::db::mysql::dbname: heat -heat::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - # Ceilometer ceilometer::db::mysql::user: ceilometer ceilometer::db::mysql::host: "%{hiera('mysql_virtual_ip')}" @@ -69,9 +29,10 @@ gnocchi::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" -sahara::db::mysql::user: sahara -sahara::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -sahara::db::mysql::dbname: sahara -sahara::db::mysql::allowed_hosts: +# Aodh +aodh::db::mysql::user: aodh +aodh::db::mysql::host: "%{hiera('mysql_virtual_ip')}" +aodh::db::mysql::dbname: aodh +aodh::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index fd7faff1..152694d9 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -16,41 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall -create_resources(kmod::load, hiera('kernel_modules'), {}) -create_resources(sysctl::value, hiera('sysctl_settings'), {}) -Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - -if count(hiera('ntp::servers')) > 0 { - include ::ntp -} - -include ::timezone - -if str2bool(hiera('ceph_osd_selinux_permissive', true)) { - exec { 'set selinux to permissive on boot': - command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", - onlyif => "test -f /etc/selinux/config && ! 
grep '^SELINUX=permissive' /etc/selinux/config", - path => ['/usr/bin', '/usr/sbin'], - } - - exec { 'set selinux to permissive': - command => 'setenforce 0', - onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ['/usr/bin', '/usr/sbin'], - } -> Class['ceph::profile::osd'] -} - -if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') -} else { - $mon_host = hiera('ceph_mon_host') -} -class { '::ceph::profile::params': - mon_host => $mon_host, +if hiera('step') >= 4 { + hiera_include('ceph_classes') } -include ::ceph::conf -include ::ceph::profile::client -include ::ceph::profile::osd -hiera_include('ceph_classes') -package_manifest{'/var/lib/tripleo/installed-packages/overcloud_ceph': ensure => present} +$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_ceph', hiera('step')]) +package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index cc58cb14..b25d62f8 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -16,201 +16,117 @@ include ::tripleo::packages include ::tripleo::firewall -create_resources(kmod::load, hiera('kernel_modules'), {}) -create_resources(sysctl::value, hiera('sysctl_settings'), {}) -Exec <| tag == 'kmod::load' |> -> Sysctl <| |> +if hiera('step') >= 4 { -if count(hiera('ntp::servers')) > 0 { - include ::ntp -} - -include ::timezone - -file { ['/etc/libvirt/qemu/networks/autostart/default.xml', - '/etc/libvirt/qemu/networks/default.xml']: - ensure => absent, - before => Service['libvirt'], -} -# in case libvirt has been already running before the Puppet run, make -# sure the default network is destroyed -exec { 'libvirt-default-net-destroy': - command => '/usr/bin/virsh net-destroy default', - onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"', - before => Service['libvirt'], -} - -# When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique -exec { 'reset-iscsi-initiator-name': - command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi', - onlyif => '/usr/bin/test ! -f /etc/iscsi/.initiator_reset', -}-> - -file { '/etc/iscsi/.initiator_reset': - ensure => present, -} + # When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique + exec { 'reset-iscsi-initiator-name': + command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi', + onlyif => '/usr/bin/test ! 
-f /etc/iscsi/.initiator_reset', + }-> -include ::nova -include ::nova::config -include ::nova::compute - -$rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false) -$rbd_persistent_storage = hiera('rbd_persistent_storage', false) -if $rbd_ephemeral_storage or $rbd_persistent_storage { - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') + file { '/etc/iscsi/.initiator_reset': + ensure => present, } - class { '::ceph::profile::params': - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::client - $client_keys = hiera('ceph::profile::params::client_keys') - $client_user = join(['client.', hiera('ceph_client_user_name')]) - class { '::nova::compute::rbd': - libvirt_rbd_secret_key => $client_keys[$client_user]['secret'], + nova_config { + 'DEFAULT/my_ip': value => $ipaddress; + 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver'; } -} -if hiera('cinder_enable_nfs_backend', false) { - if str2bool($::selinux) { - selboolean { 'virt_use_nfs': - value => on, - persistent => true, - } -> Package['nfs-utils'] + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + file { '/etc/libvirt/qemu.conf': + ensure => present, + content => hiera('midonet_libvirt_qemu_data') + } } - package {'nfs-utils': } -> Service['nova-compute'] -} - -if str2bool(hiera('nova::use_ipv6', false)) { - $vncserver_listen = '::0' -} else { - $vncserver_listen = '0.0.0.0' -} -class { '::nova::compute::libvirt' : - vncserver_listen => $vncserver_listen, -} - -nova_config { - 'DEFAULT/my_ip': value => $ipaddress; - 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver'; - 'DEFAULT/host': value => $fqdn; - # TUNNELLED mode provides a security enhancement when using shared storage but is not - # supported when not using shared storage. - # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12 - # In future versions of QEMU (2.6, mostly), Dan's native encryption - # work will obsolete the need to use TUNNELLED transport mode. 
- 'libvirt/live_migration_tunnelled': value => $rbd_ephemeral_storage; -} - -if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - file {'/etc/libvirt/qemu.conf': - ensure => present, - content => hiera('midonet_libvirt_qemu_data') - } -} -include ::nova::network::neutron -include ::neutron -include ::neutron::config - -# If the value of core plugin is set to 'nuage', -# include nuage agent, -# If the value of core plugin is set to 'midonet', -# include midonet agent, -# else use the default value of 'ml2' -if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { - include ::nuage::vrs - include ::nova::compute::neutron - - class { '::nuage::metadataagent': - nova_os_tenant_name => hiera('nova::api::admin_tenant_name'), - nova_os_password => hiera('nova_password'), - nova_metadata_ip => hiera('nova_metadata_node_ips'), - nova_auth_ip => hiera('keystone_public_api_virtual_ip'), + include ::neutron + include ::neutron::config + + # If the value of core plugin is set to 'nuage', + # include nuage agent, + # If the value of core plugin is set to 'midonet', + # include midonet agent, + # else use the default value of 'ml2' + if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { + include ::nuage::vrs + include ::nova::compute::neutron + + class { '::nuage::metadataagent': + nova_os_tenant_name => hiera('nova::api::admin_tenant_name'), + nova_os_password => hiera('nova_password'), + nova_metadata_ip => hiera('nova_metadata_node_ips'), + nova_auth_ip => hiera('keystone_public_api_virtual_ip'), + } } -} -elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - # TODO(devvesa) provide non-controller ips for these services - $zookeeper_node_ips = hiera('neutron_api_node_ips') - $cassandra_node_ips = hiera('neutron_api_node_ips') + # TODO(devvesa) provide non-controller ips for these services + $zookeeper_node_ips = hiera('neutron_api_node_ips') + $cassandra_node_ips = hiera('neutron_api_node_ips') - class {'::tripleo::network::midonet::agent': - zookeeper_servers => $zookeeper_node_ips, - cassandra_seeds => $cassandra_node_ips + class { '::tripleo::network::midonet::agent': + zookeeper_servers => $zookeeper_node_ips, + cassandra_seeds => $cassandra_node_ips + } } -} -elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { - - include ::contrail::vrouter - # NOTE: it's not possible to use this class without a functional - # contrail controller up and running - #class {'::contrail::vrouter::provision_vrouter': - # require => Class['contrail::vrouter'], - #} -} -elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' { - # forward all ipv4 traffic - # this is required for the vms to pass through the gateways public interface - sysctl::value { 'net.ipv4.ip_forward': value => '1' } - - # ifc_ctl_pp needs to be invoked by root as part of the vif.py when a VM is powered on - file { '/etc/sudoers.d/ifc_ctl_sudoers': - ensure => file, - owner => root, - group => root, - mode => '0440', - content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n", + elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { + + include ::contrail::vrouter + # NOTE: it's not possible to use this class without a functional + # 
contrail controller up and running + #class {'::contrail::vrouter::provision_vrouter': + # require => Class['contrail::vrouter'], + #} } -} -else { - - # NOTE: this code won't live in puppet-neutron until Neutron OVS agent - # can be gracefully restarted. See https://review.openstack.org/#/c/297211 - # In the meantime, it's safe to restart the agent on each change in neutron.conf, - # because Puppet changes are supposed to be done during bootstrap and upgrades. - # Some resource managed by Neutron_config (like messaging and logging options) require - # a restart of OVS agent. This code does it. - # In Newton, OVS agent will be able to be restarted gracefully so we'll drop the code - # from here and fix it in puppet-neutron. - Neutron_config<||> ~> Service['neutron-ovs-agent-service'] - - include ::neutron::plugins::ml2 - include ::neutron::agents::ml2::ovs - - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - class { '::neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), + elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' { + # forward all ipv4 traffic + # this is required for the vms to pass through the gateways public interface + sysctl::value { 'net.ipv4.ip_forward': value => '1' } + + # ifc_ctl_pp needs to be invoked by root as part of the vif.py when a VM is powered on + file { '/etc/sudoers.d/ifc_ctl_sudoers': + ensure => file, + owner => root, + group => root, + mode => '0440', + content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n", } } + else { + + # NOTE: this code won't live in puppet-neutron until Neutron OVS agent + # can be gracefully restarted. See https://review.openstack.org/#/c/297211 + # In the meantime, it's safe to restart the agent on each change in neutron.conf, + # because Puppet changes are supposed to be done during bootstrap and upgrades. + # Some resource managed by Neutron_config (like messaging and logging options) require + # a restart of OVS agent. This code does it. + # In Newton, OVS agent will be able to be restarted gracefully so we'll drop the code + # from here and fix it in puppet-neutron. 
+ Neutron_config<||> ~> Service['neutron-ovs-agent-service'] + + include ::neutron::plugins::ml2 + include ::neutron::agents::ml2::ovs + + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), + } + } - if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::agents::bigswitch + if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::agents::bigswitch + } } -} - -neutron_config { - 'DEFAULT/host': value => $fqdn; -} -include ::ceilometer -include ::ceilometer::config -include ::ceilometer::agent::compute -include ::ceilometer::agent::auth + include ::ceilometer + include ::ceilometer::config + include ::ceilometer::agent::compute + include ::ceilometer::agent::auth -$snmpd_user = hiera('snmpd_readonly_user_name') -snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), -} -class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], + hiera_include('compute_classes') } -hiera_include('compute_classes') -package_manifest{'/var/lib/tripleo/installed-packages/overcloud_compute': ensure => present} +$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_compute', hiera('step')]) +package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index 910617fa..4f15bb72 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -18,77 +18,7 @@ include ::tripleo::firewall $enable_load_balancer = hiera('enable_load_balancer', true) -if hiera('step') >= 1 { - - create_resources(kmod::load, hiera('kernel_modules'), {}) - create_resources(sysctl::value, hiera('sysctl_settings'), {}) - Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - - $controller_node_ips = split(hiera('controller_node_ips'), ',') - - if $enable_load_balancer { - class { '::tripleo::loadbalancer' : - controller_hosts => $controller_node_ips, - manage_vip => true, - } - } - -} - if hiera('step') >= 2 { - - if count(hiera('ntp::servers')) > 0 { - include ::ntp - } - - include ::timezone - - # MongoDB - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - include ::mongodb::globals - include ::mongodb::client - include ::mongodb::server - # NOTE(gfidente): We need to pass the list of IPv6 addresses *with* port and - # without the brackets as 'members' argument for the 'mongodb_replset' - # resource. 
- if str2bool(hiera('mongodb::server::ipv6', false)) { - $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[') - $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017') - $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') - } else { - $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') - $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') - } - $mongo_node_string = join($mongo_node_ips_with_port, ',') - - $mongodb_replset = hiera('mongodb::server::replset') - $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" - if downcase(hiera('bootstrap_nodeid')) == $::hostname { - mongodb_replset { $mongodb_replset : - members => $mongo_node_ips_with_port_nobr, - } - } - } - - # Redis - $redis_node_ips = hiera('redis_node_ips') - $redis_master_hostname = downcase(hiera('bootstrap_nodeid')) - - if $redis_master_hostname == $::hostname { - $slaveof = undef - } else { - $slaveof = "${redis_master_hostname} 6379" - } - class {'::redis' : - slaveof => $slaveof, - } - - if count($redis_node_ips) > 1 { - Class['::tripleo::redis_notification'] -> Service['redis-sentinel'] - include ::redis::sentinel - include ::tripleo::redis_notification - } - if str2bool(hiera('enable_galera', true)) { $mysql_config_file = '/etc/my.cnf.d/galera.cnf' } else { @@ -113,103 +43,10 @@ if hiera('step') >= 2 { # FIXME: this should only occur on the bootstrap host (ditto for db syncs) # Create all the database schemas - include ::keystone::db::mysql - include ::glance::db::mysql - include ::nova::db::mysql - include ::nova::db::mysql_api - include ::neutron::db::mysql - include ::cinder::db::mysql - include ::heat::db::mysql - include ::sahara::db::mysql if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' { include ::gnocchi::db::mysql } - if downcase(hiera('ceilometer_backend')) == 'mysql' { - include ::ceilometer::db::mysql - include ::aodh::db::mysql - } - - $rabbit_nodes = hiera('rabbit_node_ips') - if count($rabbit_nodes) > 1 { - - $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false)) - if $rabbit_ipv6 { - $rabbit_env = merge(hiera('rabbitmq_environment'), { - 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"' - }) - } else { - $rabbit_env = hiera('rabbitmq_environment') - } - - class { '::rabbitmq': - config_cluster => true, - cluster_nodes => $rabbit_nodes, - tcp_keepalive => false, - config_kernel_variables => hiera('rabbitmq_kernel_variables'), - config_variables => hiera('rabbitmq_config_variables'), - environment_variables => $rabbit_env, - } - rabbitmq_policy { 'ha-all@/': - pattern => '^(?!amq\.).*', - definition => { - 'ha-mode' => 'all', - }, - } - } else { - include ::rabbitmq - } - - # pre-install swift here so we can build rings - include ::swift - - $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) - - if $enable_ceph { - $mon_initial_members = downcase(hiera('ceph_mon_initial_members')) - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_initial_members => $mon_initial_members, - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::mon - } - - if str2bool(hiera('enable_ceph_storage', false)) { - if str2bool(hiera('ceph_osd_selinux_permissive', true)) { - exec { 'set selinux to permissive on boot': - command => "sed -ie 
's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", - onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", - path => ['/usr/bin', '/usr/sbin'], - } - - exec { 'set selinux to permissive': - command => 'setenforce 0', - onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ['/usr/bin', '/usr/sbin'], - } -> Class['ceph::profile::osd'] - } - - include ::ceph::conf - include ::ceph::profile::osd - } - - if str2bool(hiera('enable_external_ceph', false)) { - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::client - } + include ::aodh::db::mysql } #END STEP 2 @@ -226,355 +63,12 @@ if hiera('step') >= 4 { memcached_servers => $memcached_servers } include ::nova::config - include ::nova::api - include ::nova::cert - include ::nova::conductor - include ::nova::consoleauth - include ::nova::network::neutron - include ::nova::vncproxy - include ::nova::scheduler - include ::nova::scheduler::filter - - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - # TODO(devvesa) provide non-controller ips for these services - $zookeeper_node_ips = hiera('neutron_api_node_ips') - $cassandra_node_ips = hiera('neutron_api_node_ips') - - # Run zookeeper in the controller if configured - if hiera('enable_zookeeper_on_controller') { - class {'::tripleo::cluster::zookeeper': - zookeeper_server_ips => $zookeeper_node_ips, - # TODO: create a 'bind' hiera key for zookeeper - zookeeper_client_ip => hiera('neutron::bind_host'), - zookeeper_hostnames => hiera('controller_node_names') - } - } - - # Run cassandra in the controller if configured - if hiera('enable_cassandra_on_controller') { - class {'::tripleo::cluster::cassandra': - cassandra_servers => $cassandra_node_ips, - # TODO: create a 'bind' hiera key for cassandra - cassandra_ip => hiera('neutron::bind_host'), - } - } - - class {'::tripleo::network::midonet::agent': - zookeeper_servers => $zookeeper_node_ips, - cassandra_seeds => $cassandra_node_ips - } - - class {'::tripleo::network::midonet::api': - zookeeper_servers => $zookeeper_node_ips, - vip => hiera('tripleo::loadbalancer::public_virtual_ip'), - keystone_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), - keystone_admin_token => hiera('keystone::admin_token'), - # TODO: create a 'bind' hiera key for api - bind_address => hiera('neutron::bind_host'), - admin_password => hiera('admin_password') - } - - # TODO: find a way to get an empty list from hiera - class {'::neutron': - service_plugins => [] - } - - } - else { - - # ML2 plugin - include ::neutron - } - - include ::neutron::config - include ::neutron::server - include ::neutron::server::notifications - - # If the value of core plugin is set to 'nuage' or'opencontrail' or 'plumgrid', - # include nuage or opencontrail or plumgrid core plugins - # else use the default value of 'ml2' - if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { - include ::neutron::plugins::nuage - } elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { - include ::neutron::plugins::opencontrail - } - elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' { - class { '::neutron::plugins::plumgrid' : - connection => 
hiera('neutron::server::database_connection'), - controller_priv_host => hiera('keystone_admin_api_vip'), - admin_password => hiera('admin_password'), - metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'), - } - } else { - include ::neutron::agents::l3 - include ::neutron::agents::dhcp - include ::neutron::agents::metadata - - file { '/etc/neutron/dnsmasq-neutron.conf': - content => hiera('neutron_dnsmasq_options'), - owner => 'neutron', - group => 'neutron', - notify => Service['neutron-dhcp-service'], - require => Package['neutron'], - } - - # If the value of core plugin is set to 'midonet', - # skip all the ML2 configuration - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - class {'::neutron::plugins::midonet': - midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), - keystone_tenant => hiera('neutron::server::auth_tenant'), - keystone_password => hiera('neutron::server::auth_password') - } - } else { - - include ::neutron::plugins::ml2 - include ::neutron::agents::ml2::ovs - - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::nexus1000v - - class { '::neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), - } - - class { '::n1k_vsm': - n1kv_source => hiera('n1kv_vsm_source', undef), - n1kv_version => hiera('n1kv_vsm_version', undef), - pacemaker_control => false, - } - } - - if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::ucsm - } - if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::nexus - include ::neutron::plugins::ml2::cisco::type_nexus_vxlan - } - - if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::plugins::ml2::bigswitch::restproxy - include ::neutron::agents::bigswitch - } - neutron_l3_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); - } - neutron_dhcp_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); - } - Service['neutron-server'] -> Service['neutron-ovs-agent-service'] - } - - Service['neutron-server'] -> Service['neutron-dhcp-service'] - Service['neutron-server'] -> Service['neutron-l3'] - Service['neutron-server'] -> Service['neutron-metadata'] - } - - include ::cinder - include ::cinder::config - include ::tripleo::ssl::cinder_config - include ::cinder::api - include ::cinder::glance - include ::cinder::scheduler - include ::cinder::volume - include ::cinder::ceilometer - class { '::cinder::setup_test_volume': - size => join([hiera('cinder_lvm_loop_device_size'), 'M']), - } - - $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true) - if $cinder_enable_iscsi { - $cinder_iscsi_backend = 'tripleo_iscsi' - - cinder::backend::iscsi { $cinder_iscsi_backend : - iscsi_ip_address => hiera('cinder_iscsi_ip_address'), - iscsi_helper => hiera('cinder_iscsi_helper'), - } - } - - if $enable_ceph { - - $ceph_pools = hiera('ceph_pools') - ceph::pool { $ceph_pools : - pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'), - pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'), - size => hiera('ceph::profile::params::osd_pool_default_size'), - } - - $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]] - - } else { - $cinder_pool_requires = [] - } - - if 
hiera('cinder_enable_rbd_backend', false) { - $cinder_rbd_backend = 'tripleo_ceph' - - cinder::backend::rbd { $cinder_rbd_backend : - backend_host => hiera('cinder::host'), - rbd_pool => hiera('cinder_rbd_pool_name'), - rbd_user => hiera('ceph_client_user_name'), - rbd_secret_uuid => hiera('ceph::profile::params::fsid'), - require => $cinder_pool_requires, - } - } - - if hiera('cinder_enable_eqlx_backend', false) { - $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name') - - cinder::backend::eqlx { $cinder_eqlx_backend : - volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef), - san_ip => hiera('cinder::backend::eqlx::san_ip', undef), - san_login => hiera('cinder::backend::eqlx::san_login', undef), - san_password => hiera('cinder::backend::eqlx::san_password', undef), - san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef), - eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef), - eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef), - eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef), - eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef), - eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_san_password', undef), - } - } - - if hiera('cinder_enable_dellsc_backend', false) { - $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name') - - cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend : - volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef), - san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef), - san_login => hiera('cinder::backend::dellsc_iscsi::san_login', undef), - san_password => hiera('cinder::backend::dellsc_iscsi::san_password', undef), - dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef), - iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef), - iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef), - dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef), - dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef), - dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef), - } - } - - if hiera('cinder_enable_netapp_backend', false) { - $cinder_netapp_backend = hiera('cinder::backend::netapp::title') - - if hiera('cinder::backend::netapp::nfs_shares', undef) { - $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',') - } - - cinder::backend::netapp { $cinder_netapp_backend : - netapp_login => hiera('cinder::backend::netapp::netapp_login', undef), - netapp_password => hiera('cinder::backend::netapp::netapp_password', undef), - netapp_server_hostname => hiera('cinder::backend::netapp::netapp_server_hostname', undef), - netapp_server_port => hiera('cinder::backend::netapp::netapp_server_port', undef), - netapp_size_multiplier => hiera('cinder::backend::netapp::netapp_size_multiplier', undef), - netapp_storage_family => hiera('cinder::backend::netapp::netapp_storage_family', undef), - netapp_storage_protocol => hiera('cinder::backend::netapp::netapp_storage_protocol', undef), - netapp_transport_type => hiera('cinder::backend::netapp::netapp_transport_type', undef), - netapp_vfiler => hiera('cinder::backend::netapp::netapp_vfiler', undef), - netapp_volume_list => hiera('cinder::backend::netapp::netapp_volume_list', undef), - netapp_vserver => 
hiera('cinder::backend::netapp::netapp_vserver', undef), - netapp_partner_backend_name => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef), - nfs_shares => $cinder_netapp_nfs_shares, - nfs_shares_config => hiera('cinder::backend::netapp::nfs_shares_config', undef), - netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef), - netapp_controller_ips => hiera('cinder::backend::netapp::netapp_controller_ips', undef), - netapp_sa_password => hiera('cinder::backend::netapp::netapp_sa_password', undef), - netapp_storage_pools => hiera('cinder::backend::netapp::netapp_storage_pools', undef), - netapp_eseries_host_type => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef), - netapp_webservice_path => hiera('cinder::backend::netapp::netapp_webservice_path', undef), - } - } - - if hiera('cinder_enable_nfs_backend', false) { - $cinder_nfs_backend = 'tripleo_nfs' - - if str2bool($::selinux) { - selboolean { 'virt_use_nfs': - value => on, - persistent => true, - } -> Package['nfs-utils'] - } - - package {'nfs-utils': } -> - cinder::backend::nfs { $cinder_nfs_backend : - nfs_servers => hiera('cinder_nfs_servers'), - nfs_mount_options => hiera('cinder_nfs_mount_options',''), - nfs_shares_config => '/etc/cinder/shares-nfs.conf', - } - } - - $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend]) - class { '::cinder::backends' : - enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')), - } - - # swift proxy - include ::memcached - include ::swift::proxy - include ::swift::proxy::proxy_logging - include ::swift::proxy::healthcheck - include ::swift::proxy::cache - include ::swift::proxy::keystone - include ::swift::proxy::authtoken - include ::swift::proxy::staticweb - include ::swift::proxy::ratelimit - include ::swift::proxy::catch_errors - include ::swift::proxy::tempurl - include ::swift::proxy::formpost - - # swift storage - if str2bool(hiera('enable_swift_storage', true)) { - class { '::swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')), - } - if(!defined(File['/srv/node'])) { - file { '/srv/node': - ensure => directory, - owner => 'swift', - group => 'swift', - require => Package['openstack-swift'], - } - } - $swift_components = ['account', 'container', 'object'] - swift::storage::filter::recon { $swift_components : } - swift::storage::filter::healthcheck { $swift_components : } - } - - # Ceilometer - $ceilometer_backend = downcase(hiera('ceilometer_backend')) - case $ceilometer_backend { - /mysql/ : { - $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') - } - default : { - $ceilometer_database_connection = $ceilometer_mongodb_conn_string - } - } - include ::ceilometer - include ::ceilometer::config - include ::ceilometer::api - include ::ceilometer::agent::notification - include ::ceilometer::agent::central - include ::ceilometer::expirer - include ::ceilometer::collector - include ::ceilometer::agent::auth - include ::ceilometer::dispatcher::gnocchi - class { '::ceilometer::db' : - database_connection => $ceilometer_database_connection, - } - - Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } # Aodh class { '::aodh' : - database_connection => $ceilometer_database_connection, + database_connection => 
hiera('aodh_mysql_conn_string'), } include ::aodh::db::sync - # To manage the upgrade: - Exec['ceilometer-dbsync'] -> Exec['aodh-db-sync'] include ::aodh::auth include ::aodh::api include ::aodh::wsgi::apache @@ -583,21 +77,6 @@ if hiera('step') >= 4 { include ::aodh::listener include ::aodh::client - # Heat - class { '::heat' : - notification_driver => 'messaging', - } - include ::heat::config - include ::heat::api - include ::heat::api_cfn - include ::heat::api_cloudwatch - include ::heat::engine - - # Sahara - include ::sahara - include ::sahara::service::api - include ::sahara::service::engine - # Horizon include ::apache::mod::remoteip if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { @@ -605,7 +84,7 @@ if hiera('step') >= 4 { } else { $_profile_support = 'None' } - $neutron_options = {'profile_support' => $_profile_support } + $neutron_options = merge({'profile_support' => $_profile_support },hiera('horizon::neutron_options',undef)) $memcached_ipv6 = hiera('memcached_ipv6', false) if $memcached_ipv6 { @@ -639,50 +118,16 @@ if hiera('step') >= 4 { default: { fail('Unrecognized gnocchi_backend parameter.') } } - $snmpd_user = hiera('snmpd_readonly_user_name') - snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), - } - class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], - } - hiera_include('controller_classes') } #END STEP 4 if hiera('step') >= 5 { $nova_enable_db_purge = hiera('nova_enable_db_purge', true) - $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true) - $heat_enable_db_purge = hiera('heat_enable_db_purge', true) if $nova_enable_db_purge { include ::nova::cron::archive_deleted_rows } - if $cinder_enable_db_purge { - include ::cinder::cron::db_purge - } - if $heat_enable_db_purge { - include ::heat::cron::purge_deleted - } - - if downcase(hiera('bootstrap_nodeid')) == $::hostname { - # Class ::heat::keystone::domain has to run on bootstrap node - # because it creates DB entities via API calls. 
- include ::heat::keystone::domain - - Class['::keystone::roles::admin'] -> Class['::heat::keystone::domain'] - } else { - # On non-bootstrap node we don't need to create Keystone resources again - class { '::heat::keystone::domain': - manage_domain => false, - manage_user => false, - manage_role => false, - } - } - } #END STEP 5 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')]) diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 0372a56b..9a9d0081 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -21,15 +21,7 @@ Pcmk_resource <| |> { # TODO(jistr): use pcs resource provider instead of just no-ops Service <| tag == 'aodh-service' or - tag == 'cinder-service' or - tag == 'ceilometer-service' or - tag == 'glance-service' or - tag == 'gnocchi-service' or - tag == 'heat-service' or - tag == 'keystone-service' or - tag == 'neutron-service' or - tag == 'nova-service' or - tag == 'sahara-service' + tag == 'gnocchi-service' |> { hasrestart => true, restart => '/bin/true', @@ -48,7 +40,7 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) { $sync_db = false } -$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 6 +$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5 $enable_load_balancer = hiera('enable_load_balancer', true) # When to start and enable services which haven't been Pacemakerized @@ -58,28 +50,6 @@ $non_pcmk_start = hiera('step') >= 5 if hiera('step') >= 1 { - create_resources(kmod::load, hiera('kernel_modules'), {}) - create_resources(sysctl::value, hiera('sysctl_settings'), {}) - Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - - include ::timezone - - if count(hiera('ntp::servers')) > 0 { - include ::ntp - } - - $controller_node_ips = split(hiera('controller_node_ips'), ',') - $controller_node_names = split(downcase(hiera('controller_node_names')), ',') - if $enable_load_balancer { - class { '::tripleo::loadbalancer' : - controller_hosts => $controller_node_ips, - controller_hosts_names => $controller_node_names, - manage_vip => false, - mysql_clustercheck => true, - haproxy_service_manage => false, - } - } - $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G')) $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false)) if $corosync_ipv6 { @@ -101,6 +71,10 @@ if hiera('step') >= 1 { if $enable_fencing { include ::tripleo::fencing + # enable stonith after all Pacemaker resources have been created + Pcmk_resource<||> -> Class['tripleo::fencing'] + Pcmk_constraint<||> -> Class['tripleo::fencing'] + Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing'] # enable stonith after all fencing devices have been created Class['tripleo::fencing'] -> Class['pacemaker::stonith'] } @@ -112,54 +86,6 @@ if hiera('step') >= 1 { op_params => 'start timeout=200s stop timeout=200s', } - # Only configure RabbitMQ in this step, don't start it yet to - # avoid races where non-master nodes attempt to start without - # config (eg. 
binding on 0.0.0.0) - # The module ignores erlang_cookie if cluster_config is false - $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false)) - if $rabbit_ipv6 { - $rabbit_env = merge(hiera('rabbitmq_environment'), { - 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"' - }) - } else { - $rabbit_env = hiera('rabbitmq_environment') - } - - class { '::rabbitmq': - service_manage => false, - tcp_keepalive => false, - config_kernel_variables => hiera('rabbitmq_kernel_variables'), - config_variables => hiera('rabbitmq_config_variables'), - environment_variables => $rabbit_env, - } -> - file { '/var/lib/rabbitmq/.erlang.cookie': - ensure => file, - owner => 'rabbitmq', - group => 'rabbitmq', - mode => '0400', - content => hiera('rabbitmq::erlang_cookie'), - replace => true, - } - - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - include ::mongodb::globals - include ::mongodb::client - class { '::mongodb::server' : - service_manage => false, - } - } - - # Memcached - class {'::memcached' : - service_manage => false, - } - - # Redis - class { '::redis' : - service_manage => false, - notify_service => false, - } - # Galera if str2bool(hiera('enable_galera', true)) { $mysql_config_file = '/etc/my.cnf.d/galera.cnf' @@ -219,6 +145,7 @@ if hiera('step') >= 1 { if hiera('step') >= 2 { + # NOTE(gfidente): the following vars are needed on all nodes so they # need to stay out of pacemaker_master conditional. # The addresses mangling will hopefully go away when we'll be able to @@ -237,95 +164,12 @@ if hiera('step') >= 2 { if $pacemaker_master { - if $enable_load_balancer { - - include ::pacemaker::resource_defaults - - # Create an openstack-core dummy resource. See RHBZ 1290121 - pacemaker::resource::ocf { 'openstack-core': - ocf_agent_name => 'heartbeat:Dummy', - clone_params => true, - } - # FIXME: we should not have to access tripleo::loadbalancer class - # parameters here to configure pacemaker VIPs. The configuration - # of pacemaker VIPs could move into puppet-tripleo or we should - # make use of less specific hiera parameters here for the settings. 
- pacemaker::resource::service { 'haproxy': - clone_params => true, - } - - $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip') - tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_control_vip': - vip_name => 'control', - ip_address => $control_vip, - } - - $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip') - tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_public_vip': - ensure => $public_vip and $public_vip != $control_vip, - vip_name => 'public', - ip_address => $public_vip, - } - - $redis_vip = hiera('redis_vip') - tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_redis_vip': - ensure => $redis_vip and $redis_vip != $control_vip, - vip_name => 'redis', - ip_address => $redis_vip, - } - - - $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip') - tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_internal_api_vip': - ensure => $internal_api_vip and $internal_api_vip != $control_vip, - vip_name => 'internal_api', - ip_address => $internal_api_vip, - } - - $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip') - tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_vip': - ensure => $storage_vip and $storage_vip != $control_vip, - vip_name => 'storage', - ip_address => $storage_vip, - } - - $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip') - tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_mgmt_vip': - ensure => $storage_mgmt_vip and $storage_mgmt_vip != $control_vip, - vip_name => 'storage_mgmt', - ip_address => $storage_mgmt_vip, - } - } - - pacemaker::resource::service { $::memcached::params::service_name : - clone_params => 'interleave=true', - require => Class['::memcached'], - } - - pacemaker::resource::ocf { 'rabbitmq': - ocf_agent_name => 'heartbeat:rabbitmq-cluster', - resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'', - clone_params => 'ordered=true interleave=true', - meta_params => 'notify=true', - require => Class['::rabbitmq'], - } + include ::pacemaker::resource_defaults - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - pacemaker::resource::service { $::mongodb::params::service_name : - op_params => 'start timeout=370s stop timeout=200s', - clone_params => true, - require => Class['::mongodb::server'], - } - # NOTE (spredzy) : The replset can only be run - # once all the nodes have joined the cluster. - mongodb_conn_validator { $mongo_node_ips_with_port : - timeout => '600', - require => Pacemaker::Resource::Service[$::mongodb::params::service_name], - before => Mongodb_replset[$mongodb_replset], - } - mongodb_replset { $mongodb_replset : - members => $mongo_node_ips_with_port_nobr, - } + # Create an openstack-core dummy resource. 
See RHBZ 1290121 + pacemaker::resource::ocf { 'openstack-core': + ocf_agent_name => 'heartbeat:Dummy', + clone_params => true, } pacemaker::resource::ocf { 'galera' : @@ -338,73 +182,30 @@ if hiera('step') >= 2 { before => Exec['galera-ready'], } - pacemaker::resource::ocf { 'redis': - ocf_agent_name => 'heartbeat:redis', - master_params => '', - meta_params => 'notify=true ordered=true interleave=true', - resource_params => 'wait_last_known_master=true', - require => Class['::redis'], + exec { 'galera-ready' : + command => '/usr/bin/clustercheck >/dev/null', + timeout => 30, + tries => 180, + try_sleep => 10, + environment => ['AVAILABLE_WHEN_READONLY=0'], + require => Exec['create-root-sysconfig-clustercheck'], } - } - - exec { 'galera-ready' : - command => '/usr/bin/clustercheck >/dev/null', - timeout => 30, - tries => 180, - try_sleep => 10, - environment => ['AVAILABLE_WHEN_READONLY=0'], - require => File['/etc/sysconfig/clustercheck'], - } - - file { '/etc/sysconfig/clustercheck' : - ensure => file, - content => "MYSQL_USERNAME=root\n -MYSQL_PASSWORD=''\n -MYSQL_HOST=localhost\n", - } - - xinetd::service { 'galera-monitor' : - port => '9200', - server => '/usr/bin/clustercheck', - per_source => 'UNLIMITED', - log_on_success => '', - log_on_failure => 'HOST', - flags => 'REUSE', - service_type => 'UNLISTED', - user => 'root', - group => 'root', - require => File['/etc/sysconfig/clustercheck'], - } - - # Create all the database schemas - if $sync_db { - class { '::keystone::db::mysql': - require => Exec['galera-ready'], - } - class { '::glance::db::mysql': - require => Exec['galera-ready'], - } - class { '::nova::db::mysql': - require => Exec['galera-ready'], - } - class { '::nova::db::mysql_api': - require => Exec['galera-ready'], - } - class { '::neutron::db::mysql': - require => Exec['galera-ready'], - } - class { '::cinder::db::mysql': - require => Exec['galera-ready'], - } - class { '::heat::db::mysql': - require => Exec['galera-ready'], + # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck + # to it in a later step. We do this only on one node as it will replicate on + # the other members. 
We also make sure that the permissions are the minimum necessary + mysql_user { 'clustercheck@localhost': + ensure => 'present', + password_hash => mysql_password(hiera('mysql_clustercheck_password')), + require => Exec['galera-ready'], } - if downcase(hiera('ceilometer_backend')) == 'mysql' { - class { '::ceilometer::db::mysql': - require => Exec['galera-ready'], - } + mysql_grant { 'clustercheck@localhost/*.*': + ensure => 'present', + options => ['GRANT'], + privileges => ['PROCESS'], + table => '*.*', + user => 'clustercheck@localhost', } if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' { @@ -412,68 +213,48 @@ MYSQL_HOST=localhost\n", require => Exec['galera-ready'], } } - class { '::sahara::db::mysql': - require => Exec['galera-ready'], - } - } - - # pre-install swift here so we can build rings - include ::swift - # Ceph - $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) - - if $enable_ceph { - $mon_initial_members = downcase(hiera('ceph_mon_initial_members')) - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_initial_members => $mon_initial_members, - mon_host => $mon_host, + class { '::aodh::db::mysql': + require => Exec['galera-ready'], } - include ::ceph::conf - include ::ceph::profile::mon } - - if str2bool(hiera('enable_ceph_storage', false)) { - if str2bool(hiera('ceph_osd_selinux_permissive', true)) { - exec { 'set selinux to permissive on boot': - command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", - onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", - path => ['/usr/bin', '/usr/sbin'], - } - - exec { 'set selinux to permissive': - command => 'setenforce 0', - onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ['/usr/bin', '/usr/sbin'], - } -> Class['ceph::profile::osd'] - } - - include ::ceph::conf - include ::ceph::profile::osd + # This step is to create a sysconfig clustercheck file with the root user and empty password + # on the first install only (because later on the clustercheck db user will be used) + # We are using exec and not file in order to not have duplicate definition errors in puppet + # when we later set the the file to contain the clustercheck data + exec { 'create-root-sysconfig-clustercheck': + command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck", + unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck', } - if str2bool(hiera('enable_external_ceph', false)) { - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::client + xinetd::service { 'galera-monitor' : + port => '9200', + server => '/usr/bin/clustercheck', + per_source => 'UNLIMITED', + log_on_success => '', + log_on_failure => 'HOST', + flags => 'REUSE', + service_type => 'UNLISTED', + user => 'root', + group => 'root', + require => Exec['create-root-sysconfig-clustercheck'], } - } #END STEP 2 if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { + # At this stage we are guaranteed that the clustercheck db user exists + # so we switch the resource agent to use it. 
+ $mysql_clustercheck_password = hiera('mysql_clustercheck_password') + file { '/etc/sysconfig/clustercheck' : + ensure => file, + mode => '0600', + owner => 'root', + group => 'root', + content => "MYSQL_USERNAME=clustercheck\n +MYSQL_PASSWORD='${mysql_clustercheck_password}'\n +MYSQL_HOST=localhost\n", + } $nova_ipv6 = hiera('nova::use_ipv6', false) if $nova_ipv6 { @@ -488,446 +269,6 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { include ::nova::config - class { '::nova::api' : - sync_db => $sync_db, - sync_db_api => $sync_db, - manage_service => false, - enabled => false, - } - class { '::nova::cert' : - manage_service => false, - enabled => false, - } - class { '::nova::conductor' : - manage_service => false, - enabled => false, - } - class { '::nova::consoleauth' : - manage_service => false, - enabled => false, - } - class { '::nova::vncproxy' : - manage_service => false, - enabled => false, - } - include ::nova::scheduler::filter - class { '::nova::scheduler' : - manage_service => false, - enabled => false, - } - include ::nova::network::neutron - - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - # TODO(devvesa) provide non-controller ips for these services - $zookeeper_node_ips = hiera('neutron_api_node_ips') - $cassandra_node_ips = hiera('neutron_api_node_ips') - - # Run zookeeper in the controller if configured - if hiera('enable_zookeeper_on_controller') { - class {'::tripleo::cluster::zookeeper': - zookeeper_server_ips => $zookeeper_node_ips, - # TODO: create a 'bind' hiera key for zookeeper - zookeeper_client_ip => hiera('neutron::bind_host'), - zookeeper_hostnames => split(hiera('controller_node_names'), ',') - } - } - - # Run cassandra in the controller if configured - if hiera('enable_cassandra_on_controller') { - class {'::tripleo::cluster::cassandra': - cassandra_servers => $cassandra_node_ips, - # TODO: create a 'bind' hiera key for cassandra - cassandra_ip => hiera('neutron::bind_host'), - } - } - - class {'::tripleo::network::midonet::agent': - zookeeper_servers => $zookeeper_node_ips, - cassandra_seeds => $cassandra_node_ips - } - - class {'::tripleo::network::midonet::api': - zookeeper_servers => $zookeeper_node_ips, - vip => hiera('tripleo::loadbalancer::public_virtual_ip'), - keystone_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), - keystone_admin_token => hiera('keystone::admin_token'), - # TODO: create a 'bind' hiera key for api - bind_address => hiera('neutron::bind_host'), - admin_password => hiera('admin_password') - } - - # Configure Neutron - class {'::neutron': - service_plugins => [] - } - - } - else { - # Neutron class definitions - include ::neutron - } - - include ::neutron::config - class { '::neutron::server' : - sync_db => $sync_db, - manage_service => false, - enabled => false, - } - include ::neutron::server::notifications - if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { - include ::neutron::plugins::nuage - } - if hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { - include ::neutron::plugins::opencontrail - } - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - class {'::neutron::plugins::midonet': - midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), - keystone_tenant => hiera('neutron::server::auth_tenant'), - keystone_password => hiera('neutron::server::auth_password') - } - } - if hiera('neutron::core_plugin') == 
'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' { - class { '::neutron::plugins::plumgrid' : - connection => hiera('neutron::server::database_connection'), - controller_priv_host => hiera('keystone_admin_api_vip'), - admin_password => hiera('admin_password'), - metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'), - } - } - if hiera('neutron::enable_dhcp_agent',true) { - class { '::neutron::agents::dhcp' : - manage_service => false, - enabled => false, - } - file { '/etc/neutron/dnsmasq-neutron.conf': - content => hiera('neutron_dnsmasq_options'), - owner => 'neutron', - group => 'neutron', - notify => Service['neutron-dhcp-service'], - require => Package['neutron'], - } - } - if hiera('neutron::enable_l3_agent',true) { - class { '::neutron::agents::l3' : - manage_service => false, - enabled => false, - } - } - if hiera('neutron::enable_metadata_agent',true) { - class { '::neutron::agents::metadata': - manage_service => false, - enabled => false, - } - } - include ::neutron::plugins::ml2 - class { '::neutron::agents::ml2::ovs': - manage_service => false, - enabled => false, - } - - if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::ucsm - } - if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::nexus - include ::neutron::plugins::ml2::cisco::type_nexus_vxlan - } - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::nexus1000v - - class { '::neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), - } - - class { '::n1k_vsm': - n1kv_source => hiera('n1kv_vsm_source', undef), - n1kv_version => hiera('n1kv_vsm_version', undef), - } - } - - if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::plugins::ml2::bigswitch::restproxy - include ::neutron::agents::bigswitch - } - neutron_l3_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); - } - neutron_dhcp_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); - } - neutron_config { - 'DEFAULT/notification_driver': value => 'messaging'; - } - - include ::cinder - include ::cinder::config - include ::tripleo::ssl::cinder_config - class { '::cinder::api': - sync_db => $sync_db, - manage_service => false, - enabled => false, - } - class { '::cinder::scheduler' : - manage_service => false, - enabled => false, - } - class { '::cinder::volume' : - manage_service => false, - enabled => false, - } - include ::cinder::glance - include ::cinder::ceilometer - class { '::cinder::setup_test_volume': - size => join([hiera('cinder_lvm_loop_device_size'), 'M']), - } - - $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true) - if $cinder_enable_iscsi { - $cinder_iscsi_backend = 'tripleo_iscsi' - - cinder::backend::iscsi { $cinder_iscsi_backend : - iscsi_ip_address => hiera('cinder_iscsi_ip_address'), - iscsi_helper => hiera('cinder_iscsi_helper'), - } - } - - if $enable_ceph { - - $ceph_pools = hiera('ceph_pools') - ceph::pool { $ceph_pools : - pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'), - pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'), - size => hiera('ceph::profile::params::osd_pool_default_size'), - } - - $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]] - - } else { - 
$cinder_pool_requires = [] - } - - if hiera('cinder_enable_rbd_backend', false) { - $cinder_rbd_backend = 'tripleo_ceph' - - cinder::backend::rbd { $cinder_rbd_backend : - backend_host => hiera('cinder::host'), - rbd_pool => hiera('cinder_rbd_pool_name'), - rbd_user => hiera('ceph_client_user_name'), - rbd_secret_uuid => hiera('ceph::profile::params::fsid'), - require => $cinder_pool_requires, - } - } - - if hiera('cinder_enable_eqlx_backend', false) { - $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name') - - cinder::backend::eqlx { $cinder_eqlx_backend : - volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef), - san_ip => hiera('cinder::backend::eqlx::san_ip', undef), - san_login => hiera('cinder::backend::eqlx::san_login', undef), - san_password => hiera('cinder::backend::eqlx::san_password', undef), - san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef), - eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef), - eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef), - eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef), - eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef), - eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_san_password', undef), - } - } - - if hiera('cinder_enable_dellsc_backend', false) { - $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name') - - cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend : - volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef), - san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef), - san_login => hiera('cinder::backend::dellsc_iscsi::san_login', undef), - san_password => hiera('cinder::backend::dellsc_iscsi::san_password', undef), - dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef), - iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef), - iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef), - dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef), - dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef), - dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef), - } - } - - if hiera('cinder_enable_netapp_backend', false) { - $cinder_netapp_backend = hiera('cinder::backend::netapp::title') - - if hiera('cinder::backend::netapp::nfs_shares', undef) { - $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',') - } - - cinder::backend::netapp { $cinder_netapp_backend : - netapp_login => hiera('cinder::backend::netapp::netapp_login', undef), - netapp_password => hiera('cinder::backend::netapp::netapp_password', undef), - netapp_server_hostname => hiera('cinder::backend::netapp::netapp_server_hostname', undef), - netapp_server_port => hiera('cinder::backend::netapp::netapp_server_port', undef), - netapp_size_multiplier => hiera('cinder::backend::netapp::netapp_size_multiplier', undef), - netapp_storage_family => hiera('cinder::backend::netapp::netapp_storage_family', undef), - netapp_storage_protocol => hiera('cinder::backend::netapp::netapp_storage_protocol', undef), - netapp_transport_type => hiera('cinder::backend::netapp::netapp_transport_type', undef), - netapp_vfiler => hiera('cinder::backend::netapp::netapp_vfiler', undef), - netapp_volume_list => 
hiera('cinder::backend::netapp::netapp_volume_list', undef), - netapp_vserver => hiera('cinder::backend::netapp::netapp_vserver', undef), - netapp_partner_backend_name => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef), - nfs_shares => $cinder_netapp_nfs_shares, - nfs_shares_config => hiera('cinder::backend::netapp::nfs_shares_config', undef), - netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef), - netapp_controller_ips => hiera('cinder::backend::netapp::netapp_controller_ips', undef), - netapp_sa_password => hiera('cinder::backend::netapp::netapp_sa_password', undef), - netapp_storage_pools => hiera('cinder::backend::netapp::netapp_storage_pools', undef), - netapp_eseries_host_type => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef), - netapp_webservice_path => hiera('cinder::backend::netapp::netapp_webservice_path', undef), - } - } - - if hiera('cinder_enable_nfs_backend', false) { - $cinder_nfs_backend = 'tripleo_nfs' - - if str2bool($::selinux) { - selboolean { 'virt_use_nfs': - value => on, - persistent => true, - } -> Package['nfs-utils'] - } - - package { 'nfs-utils': } -> - cinder::backend::nfs { $cinder_nfs_backend: - nfs_servers => hiera('cinder_nfs_servers'), - nfs_mount_options => hiera('cinder_nfs_mount_options',''), - nfs_shares_config => '/etc/cinder/shares-nfs.conf', - } - } - - $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend]) - class { '::cinder::backends' : - enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')), - } - - class { '::sahara': - sync_db => $sync_db, - } - class { '::sahara::service::api': - manage_service => false, - enabled => false, - } - class { '::sahara::service::engine': - manage_service => false, - enabled => false, - } - - # swift proxy - class { '::swift::proxy' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, - } - include ::swift::proxy::proxy_logging - include ::swift::proxy::healthcheck - include ::swift::proxy::cache - include ::swift::proxy::keystone - include ::swift::proxy::authtoken - include ::swift::proxy::staticweb - include ::swift::proxy::ratelimit - include ::swift::proxy::catch_errors - include ::swift::proxy::tempurl - include ::swift::proxy::formpost - - # swift storage - if str2bool(hiera('enable_swift_storage', true)) { - class {'::swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')), - } - class {'::swift::storage::account': - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, - } - class {'::swift::storage::container': - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, - } - class {'::swift::storage::object': - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, - } - if(!defined(File['/srv/node'])) { - file { '/srv/node': - ensure => directory, - owner => 'swift', - group => 'swift', - require => Package['openstack-swift'], - } - } - $swift_components = ['account', 'container', 'object'] - swift::storage::filter::recon { $swift_components : } - swift::storage::filter::healthcheck { $swift_components : } - } - - # Ceilometer - case downcase(hiera('ceilometer_backend')) { - /mysql/: { - $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') - } - default: { - $mongo_node_string = join($mongo_node_ips_with_port, ',') - $ceilometer_database_connection = 
"mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" - } - } - include ::ceilometer - include ::ceilometer::config - class { '::ceilometer::api' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::agent::notification' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::agent::central' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::collector' : - manage_service => false, - enabled => false, - } - include ::ceilometer::expirer - class { '::ceilometer::db' : - database_connection => $ceilometer_database_connection, - sync_db => $sync_db, - } - include ::ceilometer::agent::auth - include ::ceilometer::dispatcher::gnocchi - - Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } - - # Heat - include ::heat::config - class { '::heat' : - sync_db => $sync_db, - notification_driver => 'messaging', - } - class { '::heat::api' : - manage_service => false, - enabled => false, - } - class { '::heat::api_cfn' : - manage_service => false, - enabled => false, - } - class { '::heat::api_cloudwatch' : - manage_service => false, - enabled => false, - } - class { '::heat::engine' : - manage_service => false, - enabled => false, - } - # httpd/apache and horizon # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent class { '::apache' : @@ -941,7 +282,7 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { } else { $_profile_support = 'None' } - $neutron_options = {'profile_support' => $_profile_support } + $neutron_options = merge({'profile_support' => $_profile_support },hiera('horizon::neutron_options',undef)) $memcached_ipv6 = hiera('memcached_ipv6', false) if $memcached_ipv6 { @@ -957,7 +298,7 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { # Aodh class { '::aodh' : - database_connection => $ceilometer_database_connection, + database_connection => hiera('aodh_mysql_conn_string'), } include ::aodh::config include ::aodh::auth @@ -1015,34 +356,40 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { enabled => false, } - $snmpd_user = hiera('snmpd_readonly_user_name') - snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), - } - class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], - } - hiera_include('controller_classes') } #END STEP 4 if hiera('step') >= 5 { + # We now make sure that the root db password is set to a random one + # At first installation /root/.my.cnf will be empty and we connect without a root + # password. On second runs or updates /root/.my.cnf will already be populated + # with proper credentials. This step happens on every node because this sql + # statement does not automatically replicate across nodes. 
+ $mysql_root_password = hiera('mysql::server::root_password') + exec { 'galera-set-root-password': + command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root", + } + file { '/root/.my.cnf' : + ensure => file, + mode => '0600', + owner => 'root', + group => 'root', + content => "[client] +user=root +password=\"${mysql_root_password}\" + +[mysql] +user=root +password=\"${mysql_root_password}\"", + require => Exec['galera-set-root-password'], + } + $nova_enable_db_purge = hiera('nova_enable_db_purge', true) - $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true) - $heat_enable_db_purge = hiera('heat_enable_db_purge', true) if $nova_enable_db_purge { include ::nova::cron::archive_deleted_rows } - if $cinder_enable_db_purge { - include ::cinder::cron::db_purge - } - if $heat_enable_db_purge { - include ::heat::cron::purge_deleted - } if $pacemaker_master { @@ -1055,15 +402,6 @@ if hiera('step') >= 5 { require => [Pacemaker::Resource::Service[$::apache::params::service_name], Pacemaker::Resource::Ocf['openstack-core']], } - pacemaker::constraint::base { 'memcached-then-openstack-core-constraint': - constraint_type => 'order', - first_resource => 'memcached-clone', - second_resource => 'openstack-core-clone', - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service['memcached'], - Pacemaker::Resource::Ocf['openstack-core']], - } pacemaker::constraint::base { 'galera-then-openstack-core-constraint': constraint_type => 'order', first_resource => 'galera-master', @@ -1074,312 +412,7 @@ if hiera('step') >= 5 { Pacemaker::Resource::Ocf['openstack-core']], } - # Cinder - pacemaker::resource::service { $::cinder::params::api_service : - clone_params => 'interleave=true', - require => Pacemaker::Resource::Ocf['openstack-core'], - } - pacemaker::resource::service { $::cinder::params::scheduler_service : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::cinder::params::volume_service : } - - pacemaker::constraint::base { 'keystone-then-cinder-api-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::cinder::params::api_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Ocf['openstack-core'], - Pacemaker::Resource::Service[$::cinder::params::api_service]], - } - pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint': - constraint_type => 'order', - first_resource => "${::cinder::params::api_service}-clone", - second_resource => "${::cinder::params::scheduler_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::cinder::params::api_service], - Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], - } - pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation': - source => "${::cinder::params::scheduler_service}-clone", - target => "${::cinder::params::api_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::cinder::params::api_service], - Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], - } - pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint': - constraint_type => 'order', - first_resource => "${::cinder::params::scheduler_service}-clone", - second_resource 
=> $::cinder::params::volume_service, - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], - Pacemaker::Resource::Service[$::cinder::params::volume_service]], - } - pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation': - source => $::cinder::params::volume_service, - target => "${::cinder::params::scheduler_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], - Pacemaker::Resource::Service[$::cinder::params::volume_service]], - } - - # Sahara - pacemaker::resource::service { $::sahara::params::api_service_name : - clone_params => 'interleave=true', - require => Pacemaker::Resource::Ocf['openstack-core'], - } - pacemaker::resource::service { $::sahara::params::engine_service_name : - clone_params => 'interleave=true', - } - pacemaker::constraint::base { 'keystone-then-sahara-api-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::sahara::params::api_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::base { 'sahara-api-then-sahara-engine-constraint': - constraint_type => 'order', - first_resource => "${::sahara::params::api_service_name}-clone", - second_resource => "${::sahara::params::engine_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name], - Pacemaker::Resource::Service[$::sahara::params::engine_service_name]], - } - - if hiera('step') == 5 { - # Neutron - # NOTE(gfidente): Neutron will try to populate the database with some data - # as soon as neutron-server is started; to avoid races we want to make this - # happen only on one node, before normal Pacemaker initialization - # https://bugzilla.redhat.com/show_bug.cgi?id=1233061 - # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec - # will try to start the service while it's already started by Pacemaker - # It would result to a deployment failure since systemd would return 1 to Puppet - # and the overcloud would fail to deploy (6 would be returned). - # This conditional prevents from a race condition during the deployment. 
- # https://bugzilla.redhat.com/show_bug.cgi?id=1290582 - exec { 'neutron-server-systemd-start-sleep' : - command => 'systemctl start neutron-server && /usr/bin/sleep 5', - path => '/usr/bin', - unless => '/sbin/pcs resource show neutron-server', - } -> - pacemaker::resource::service { $::neutron::params::server_service: - clone_params => 'interleave=true', - require => Pacemaker::Resource::Ocf['openstack-core'] - } - } else { - pacemaker::resource::service { $::neutron::params::server_service: - clone_params => 'interleave=true', - require => Pacemaker::Resource::Ocf['openstack-core'] - } - } - if hiera('neutron::enable_l3_agent', true) { - pacemaker::resource::service { $::neutron::params::l3_agent_service: - clone_params => 'interleave=true', - } - } - if hiera('neutron::enable_dhcp_agent', true) { - pacemaker::resource::service { $::neutron::params::dhcp_agent_service: - clone_params => 'interleave=true', - } - } - if hiera('neutron::enable_ovs_agent', true) { - pacemaker::resource::service { $::neutron::params::ovs_agent_service: - clone_params => 'interleave=true', - } - } - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - pacemaker::resource::service {'tomcat': - clone_params => 'interleave=true', - } - } - if hiera('neutron::enable_metadata_agent', true) { - pacemaker::resource::service { $::neutron::params::metadata_agent_service: - clone_params => 'interleave=true', - } - } - if hiera('neutron::enable_ovs_agent', true) { - pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: - ocf_agent_name => 'neutron:OVSCleanup', - clone_params => 'interleave=true', - } - pacemaker::resource::ocf { 'neutron-netns-cleanup': - ocf_agent_name => 'neutron:NetnsCleanup', - clone_params => 'interleave=true', - } - - # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent - pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::ovs_cleanup_service}-clone", - second_resource => 'neutron-netns-cleanup-clone', - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], - Pacemaker::Resource::Ocf['neutron-netns-cleanup']], - } - pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': - source => 'neutron-netns-cleanup-clone', - target => "${::neutron::params::ovs_cleanup_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], - Pacemaker::Resource::Ocf['neutron-netns-cleanup']], - } - pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': - constraint_type => 'order', - first_resource => 'neutron-netns-cleanup-clone', - second_resource => "${::neutron::params::ovs_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], - Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], - } - pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': - source => "${::neutron::params::ovs_agent_service}-clone", - target => 'neutron-netns-cleanup-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], - Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], - } - } - pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': - constraint_type => 'order', - 
first_resource => 'openstack-core-clone', - second_resource => "${::neutron::params::server_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Ocf['openstack-core'], - Pacemaker::Resource::Service[$::neutron::params::server_service]], - } - if hiera('neutron::enable_ovs_agent',true) { - pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::ovs_agent_service}-clone", - second_resource => "${::neutron::params::dhcp_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], - Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], - } - } - if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) { - pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::server_service}-clone", - second_resource => "${::neutron::params::ovs_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::server_service], - Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], - } - - pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': - source => "${::neutron::params::dhcp_agent_service}-clone", - target => "${::neutron::params::ovs_agent_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], - Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], - } - } - if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_l3_agent',true) { - pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::dhcp_agent_service}-clone", - second_resource => "${::neutron::params::l3_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]] - } - pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation': - source => "${::neutron::params::l3_agent_service}-clone", - target => "${::neutron::params::dhcp_agent_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]] - } - } - if hiera('neutron::enable_l3_agent',true) and hiera('neutron::enable_metadata_agent',true) { - pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::l3_agent_service}-clone", - second_resource => "${::neutron::params::metadata_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]] - } - pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation': - source => "${::neutron::params::metadata_agent_service}-clone", - target => "${::neutron::params::l3_agent_service}-clone", - score => 'INFINITY', - require => 
[Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]] - } - } - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat - pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::server_service}-clone", - second_resource => "${::neutron::params::dhcp_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::server_service], - Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], - } - pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::dhcp_agent_service}-clone", - second_resource => "${::neutron::params::metadata_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], - } - pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::metadata_agent_service}-clone", - second_resource => 'tomcat-clone', - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service], - Pacemaker::Resource::Service['tomcat']], - } - pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation': - source => "${::neutron::params::metadata_agent_service}-clone", - target => "${::neutron::params::dhcp_agent_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], - } - } - # Nova - pacemaker::resource::service { $::nova::params::api_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::nova::params::conductor_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::nova::params::consoleauth_service_name : - clone_params => 'interleave=true', - require => Pacemaker::Resource::Ocf['openstack-core'], - } - pacemaker::resource::service { $::nova::params::vncproxy_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::nova::params::scheduler_service_name : - clone_params => 'interleave=true', - } - pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint': constraint_type => 'order', first_resource => 'openstack-core-clone', @@ -1389,6 +422,13 @@ if hiera('step') >= 5 { require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], Pacemaker::Resource::Ocf['openstack-core']], } + pacemaker::constraint::colocation { 'nova-consoleauth-with-openstack-core': + source => "${::nova::params::consoleauth_service_name}-clone", + target => 'openstack-core-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], + Pacemaker::Resource::Ocf['openstack-core']], + } pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint': constraint_type => 'order', first_resource => "${::nova::params::consoleauth_service_name}-clone", @@ 
-1454,54 +494,12 @@ if hiera('step') >= 5 { Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], } - # Ceilometer and Aodh - case downcase(hiera('ceilometer_backend')) { - /mysql/: { - pacemaker::resource::service { $::ceilometer::params::agent_central_service_name: - clone_params => 'interleave=true', - require => Pacemaker::Resource::Ocf['openstack-core'], - } - } - default: { - pacemaker::resource::service { $::ceilometer::params::agent_central_service_name: - clone_params => 'interleave=true', - require => [Pacemaker::Resource::Ocf['openstack-core'], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], - } - } - } - pacemaker::resource::service { $::ceilometer::params::collector_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::ceilometer::params::api_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::ocf { 'delay' : - ocf_agent_name => 'heartbeat:Delay', - clone_params => 'interleave=true', - resource_params => 'startdelay=10', - } # Fedora doesn't know `require-all` parameter for constraints yet if $::operatingsystem == 'Fedora' { - $redis_ceilometer_constraint_params = undef $redis_aodh_constraint_params = undef } else { - $redis_ceilometer_constraint_params = 'require-all=false' $redis_aodh_constraint_params = 'require-all=false' } - pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint': - constraint_type => 'order', - first_resource => 'redis-master', - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'promote', - second_action => 'start', - constraint_params => $redis_ceilometer_constraint_params, - require => [Pacemaker::Resource::Ocf['redis'], - Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]], - } pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint': constraint_type => 'order', first_resource => 'redis-master', @@ -1512,65 +510,6 @@ if hiera('step') >= 5 { require => [Pacemaker::Resource::Ocf['redis'], Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]], } - pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::ceilometer::params::agent_notification_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::agent_central_service_name}-clone", - second_resource => "${::ceilometer::params::collector_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => 
[Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], - } - pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::collector_service_name}-clone", - second_resource => "${::ceilometer::params::api_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]], - } - pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation': - source => "${::ceilometer::params::api_service_name}-clone", - target => "${::ceilometer::params::collector_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], - } - pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::api_service_name}-clone", - second_resource => 'delay-clone', - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation': - source => 'delay-clone', - target => "${::ceilometer::params::api_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Ocf['delay']], - } # Aodh pacemaker::resource::service { $::aodh::params::evaluator_service_name : clone_params => 'interleave=true', @@ -1581,22 +520,6 @@ if hiera('step') >= 5 { pacemaker::resource::service { $::aodh::params::listener_service_name : clone_params => 'interleave=true', } - pacemaker::constraint::base { 'aodh-delay-then-aodh-evaluator-constraint': - constraint_type => 'order', - first_resource => 'delay-clone', - second_resource => "${::aodh::params::evaluator_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::colocation { 'aodh-evaluator-with-aodh-delay-colocation': - source => "${::aodh::params::evaluator_service_name}-clone", - target => 'delay-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], - Pacemaker::Resource::Ocf['delay']], - } pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint': constraint_type => 'order', first_resource => "${::aodh::params::evaluator_service_name}-clone", @@ -1629,17 +552,6 @@ if hiera('step') >= 5 { require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], Pacemaker::Resource::Service[$::aodh::params::listener_service_name]], } - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint': - constraint_type => 'order', - first_resource => "${::mongodb::params::service_name}-clone", - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => 
[Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], - } - } # gnocchi pacemaker::resource::service { $::gnocchi::params::metricd_service_name : @@ -1665,77 +577,6 @@ if hiera('step') >= 5 { Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]], } - # Heat - pacemaker::resource::service { $::heat::params::api_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::heat::params::api_cfn_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::heat::params::engine_service_name : - clone_params => 'interleave=true', - } - pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint': - constraint_type => 'order', - first_resource => "${::heat::params::api_service_name}-clone", - second_resource => "${::heat::params::api_cfn_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], - } - pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation': - source => "${::heat::params::api_cfn_service_name}-clone", - target => "${::heat::params::api_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name], - Pacemaker::Resource::Service[$::heat::params::api_service_name]], - } - pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint': - constraint_type => 'order', - first_resource => "${::heat::params::api_cfn_service_name}-clone", - second_resource => "${::heat::params::api_cloudwatch_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], - } - pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation': - source => "${::heat::params::api_cloudwatch_service_name}-clone", - target => "${::heat::params::api_cfn_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]], - } - pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint': - constraint_type => 'order', - first_resource => "${::heat::params::api_cloudwatch_service_name}-clone", - second_resource => "${::heat::params::engine_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::engine_service_name]], - } - pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation': - source => "${::heat::params::engine_service_name}-clone", - target => "${::heat::params::api_cloudwatch_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::engine_service_name]], - } - pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint': - constraint_type => 'order', - first_resource 
=> "${::ceilometer::params::agent_notification_service_name}-clone", - second_resource => "${::heat::params::api_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]], - } - # Horizon and Keystone pacemaker::resource::service { $::apache::params::service_name: clone_params => 'interleave=true', diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp index ae074589..1f04c581 100644 --- a/puppet/manifests/overcloud_object.pp +++ b/puppet/manifests/overcloud_object.pp @@ -16,42 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall -create_resources(kmod::load, hiera('kernel_modules'), {}) -create_resources(sysctl::value, hiera('sysctl_settings'), {}) -Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - -if count(hiera('ntp::servers')) > 0 { - include ::ntp -} - -include ::timezone - -include ::swift -class { '::swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')), -} -if(!defined(File['/srv/node'])) { - file { '/srv/node': - ensure => directory, - owner => 'swift', - group => 'swift', - require => Package['openstack-swift'], - } -} - -$swift_components = ['account', 'container', 'object'] -swift::storage::filter::recon { $swift_components : } -swift::storage::filter::healthcheck { $swift_components : } - -$snmpd_user = hiera('snmpd_readonly_user_name') -snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), -} -class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], +if hiera('step') >= 4 { + hiera_include('object_classes') } -hiera_include('object_classes') -package_manifest{'/var/lib/tripleo/installed-packages/overcloud_object': ensure => present} +$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_object', hiera('step')]) +package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp index 134dc43b..7c7da586 100644 --- a/puppet/manifests/overcloud_volume.pp +++ b/puppet/manifests/overcloud_volume.pp @@ -16,46 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall -create_resources(kmod::load, hiera('kernel_modules'), {}) -create_resources(sysctl::value, hiera('sysctl_settings'), {}) -Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - -if count(hiera('ntp::servers')) > 0 { - include ::ntp -} - -include ::timezone - -include ::cinder -include ::cinder::config -include ::cinder::glance -include ::cinder::volume -include ::cinder::setup_test_volume - -$cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true) -if $cinder_enable_iscsi { - $cinder_iscsi_backend = 'tripleo_iscsi' - - cinder::backend::iscsi { $cinder_iscsi_backend : - iscsi_ip_address => hiera('cinder_iscsi_ip_address'), - iscsi_helper => hiera('cinder_iscsi_helper'), - } -} - -$cinder_enabled_backends = any2array($cinder_iscsi_backend) -class { '::cinder::backends' : - enabled_backends => union($cinder_enabled_backends, 
hiera('cinder_user_enabled_backends')), -} - -$snmpd_user = hiera('snmpd_readonly_user_name') -snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), -} -class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], +if hiera('step') >= 4 { + hiera_include('volume_classes') } -hiera_include('volume_classes') -package_manifest{'/var/lib/tripleo/installed-packages/overcloud_volume': ensure => present} +$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_volume', hiera('step')]) +package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp index a623da29..2411ff84 100644 --- a/puppet/manifests/ringbuilder.pp +++ b/puppet/manifests/ringbuilder.pp @@ -89,6 +89,11 @@ class tripleo::ringbuilder ( } } +if hiera('step') >= 2 { + # pre-install swift here so we can build rings + include ::swift +} + if hiera('step') >= 3 { include ::tripleo::ringbuilder } diff --git a/puppet/services/README.rst b/puppet/services/README.rst index 38d2ac64..15c8c1f1 100644 --- a/puppet/services/README.rst +++ b/puppet/services/README.rst @@ -48,3 +48,7 @@ are re-asserted when applying latter ones. 5) Service activation (Pacemaker) 6) Fencing (Pacemaker) + +Note: Not all roles currently support all steps: + + * ObjectStorage role only supports steps 2, 3 and 4 diff --git a/puppet/services/ceilometer-agent-central.yaml b/puppet/services/ceilometer-agent-central.yaml new file mode 100644 index 00000000..294e7dd2 --- /dev/null +++ b/puppet/services/ceilometer-agent-central.yaml @@ -0,0 +1,43 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Central Agent service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + RedisPassword: + description: The password for the redis service account. + type: string + hidden: true + RedisVirtualIPUri: + type: string + default: '' + + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Central Agent role. + value: + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::agent::central::coordination_url: + list_join: + - '' + - - 'redis://:' + - {get_param: RedisPassword} + - '@' + - {get_param: RedisVirtualIPUri} + - ':6379/' + step_config: | + include ::tripleo::profile::base::ceilometer::agent::central diff --git a/puppet/services/ceilometer-agent-notification.yaml b/puppet/services/ceilometer-agent-notification.yaml new file mode 100644 index 00000000..523dabb9 --- /dev/null +++ b/puppet/services/ceilometer-agent-notification.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Notification Agent service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. 
Typically set + via parameter_defaults in the resource registry. + type: json + + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Notification Agent role. + value: + config_settings: + get_attr: [CeilometerServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceilometer::agent::notification diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml new file mode 100644 index 00000000..06c2ed12 --- /dev/null +++ b/puppet/services/ceilometer-api.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer API role. + value: + config_settings: + get_attr: [CeilometerServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceilometer::api diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml new file mode 100644 index 00000000..1dea785f --- /dev/null +++ b/puppet/services/ceilometer-base.yaml @@ -0,0 +1,105 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + CeilometerBackend: + default: 'mongodb' + description: The ceilometer backend type. + type: string + CeilometerMeteringSecret: + description: Secret shared by the ceilometer services. + type: string + hidden: true + CeilometerPassword: + description: The password for the ceilometer service account. + type: string + hidden: true + CeilometerMeterDispatcher: + default: 'gnocchi' + description: Dispatcher to process meter data + type: string + constraints: + - allowed_values: ['gnocchi', 'database'] + CeilometerWorkers: + default: 0 + description: Number of workers for Ceilometer service. + type: number + CeilometerStoreEvents: + default: false + description: Whether to store events in ceilometer. + type: boolean + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + +outputs: + role_data: + description: Role data for the Ceilometer role. 
+ value: + config_settings: + ceilometer::db::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - - '://ceilometer:' + - {get_param: CeilometerPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/ceilometer' + ceilometer_backend: {get_param: CeilometerBackend} + ceilometer::metering_secret: {get_param: CeilometerMeteringSecret} + # we include db_sync class in puppet-tripleo + ceilometer::db::sync_db: false + ceilometer::api::keystone_password: {get_param: CeilometerPassword} + ceilometer::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + ceilometer::api::keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword} + ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } + ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents} + ceilometer::db::mysql::password: {get_param: CeilometerPassword} + ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher} + ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]} + ceilometer::dispatcher::gnocchi::filter_project: 'service' + ceilometer::dispatcher::gnocchi::archive_policy: 'low' + ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml' + ceilometer::keystone::auth::public_url: {get_param: [EndpointMap, CeilometerPublic, uri]} + ceilometer::keystone::auth::internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]} + ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]} + ceilometer::keystone::auth::password: {get_param: CeilometerPassword} + ceilometer::keystone::auth::region: {get_param: KeystoneRegion} + ceilometer::rabbit_userid: {get_param: RabbitUserName} + ceilometer::rabbit_password: {get_param: RabbitPassword} + ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + ceilometer::rabbit_port: {get_param: RabbitClientPort} + ceilometer::db::mysql::user: ceilometer + ceilometer::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]} + ceilometer::db::mysql::dbname: ceilometer + ceilometer::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" diff --git a/puppet/services/ceilometer-collector.yaml b/puppet/services/ceilometer-collector.yaml new file mode 100644 index 00000000..29627210 --- /dev/null +++ b/puppet/services/ceilometer-collector.yaml @@ -0,0 +1,26 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Collector service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Collector role. 
+ value: + config_settings: + get_attr: [CeilometerServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceilometer::collector diff --git a/puppet/services/ceilometer-expirer.yaml b/puppet/services/ceilometer-expirer.yaml new file mode 100644 index 00000000..796abe1f --- /dev/null +++ b/puppet/services/ceilometer-expirer.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Expirer service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Expirer role. + value: + config_settings: + get_attr: [CeilometerServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceilometer::expirer diff --git a/puppet/services/ceph-base.yaml b/puppet/services/ceph-base.yaml new file mode 100644 index 00000000..065901b8 --- /dev/null +++ b/puppet/services/ceph-base.yaml @@ -0,0 +1,94 @@ +heat_template_version: 2016-04-08 + +description: > + Ceph base service. Shared by all Ceph services. + +parameters: + CephAdminKey: + default: '' + description: The Ceph admin client key. Can be created with ceph-authtool --gen-print-key. + type: string + hidden: true + CephClientKey: + default: '' + description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring. + type: string + hidden: true + CephClientUserName: + default: openstack + type: string + CephClusterFSID: + default: '' + type: string + description: The Ceph cluster FSID. Must be a UUID. + CephIPv6: + default: False + type: boolean + CinderRbdPoolName: + default: volumes + type: string + CinderBackupRbdPoolName: + default: backups + type: string + GlanceRbdPoolName: + default: images + type: string + GnocchiRbdPoolName: + default: metrics + type: string + NovaRbdPoolName: + default: vms + type: string + # DEPRECATED options for compatibility with overcloud.yaml + # This should be removed and manipulation of the ControllerServices list + # used instead, but we need client support for that first + ControllerEnableCephStorage: + default: false + description: Whether to deploy Ceph Storage (OSD) on the Controller + type: boolean + +parameter_groups: +- label: deprecated + description: Do not use deprecated params, they will be removed. + parameters: + - ControllerEnableCephStorage + +outputs: + role_data: + description: Role data for the Ceph base service. 
+ value: + config_settings: + tripleo::profile::base::ceph::ceph_ipv6: {get_param: CephIPv6} + tripleo::profile::base::ceph::enable_ceph_storage: {get_param: ControllerEnableCephStorage} + ceph::profile::params::fsid: {get_param: CephClusterFSID} + ceph::profile::params::client_keys: + str_replace: + template: "{ + client.admin: { + secret: 'ADMIN_KEY', + mode: '0600', + cap_mon: 'allow *', + cap_osd: 'allow *', + cap_mds: 'allow *' + }, + client.bootstrap-osd: { + secret: 'ADMIN_KEY', + keyring_path: '/var/lib/ceph/bootstrap-osd/ceph.keyring', + cap_mon: 'allow profile bootstrap-osd' + }, + client.CLIENT_USER: { + secret: 'CLIENT_KEY', + mode: '0644', + cap_mon: 'allow r', + cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL' + } + }" + params: + CLIENT_USER: {get_param: CephClientUserName} + CLIENT_KEY: {get_param: CephClientKey} + ADMIN_KEY: {get_param: CephAdminKey} + NOVA_POOL: {get_param: NovaRbdPoolName} + CINDER_POOL: {get_param: CinderRbdPoolName} + CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName} + GLANCE_POOL: {get_param: GlanceRbdPoolName} + GNOCCHI_POOL: {get_param: GnocchiRbdPoolName} diff --git a/puppet/services/ceph-client.yaml b/puppet/services/ceph-client.yaml new file mode 100644 index 00000000..ca920a5f --- /dev/null +++ b/puppet/services/ceph-client.yaml @@ -0,0 +1,24 @@ +heat_template_version: 2016-04-08 + +description: > + Ceph Client service. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CephBase: + type: ./ceph-base.yaml + +outputs: + role_data: + description: Role data for the Cinder OSD service. + value: + config_settings: + get_attr: [CephBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceph::client diff --git a/puppet/services/ceph-external.yaml b/puppet/services/ceph-external.yaml new file mode 100644 index 00000000..4522f416 --- /dev/null +++ b/puppet/services/ceph-external.yaml @@ -0,0 +1,65 @@ +heat_template_version: 2016-04-08 + +description: > + Ceph External service. + +parameters: + CephClientKey: + default: '' + description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring. + type: string + hidden: true + CephClientUserName: + default: openstack + type: string + CephClusterFSID: + default: '' + type: string + description: The Ceph cluster FSID. Must be a UUID. + CephExternalMonHost: + default: '' + type: string + description: List of externally managed Ceph Mon Host IPs. Only used for external Ceph deployments. + CinderRbdPoolName: + default: volumes + type: string + CinderBackupRbdPoolName: + default: backups + type: string + GlanceRbdPoolName: + default: images + type: string + GnocchiRbdPoolName: + default: metrics + type: string + NovaRbdPoolName: + default: vms + type: string + +outputs: + role_data: + description: Role data for the Ceph External service. 
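+    # NOTE (editorial, illustrative): unlike ceph-base.yaml, the external variant
+    # carries no CephAdminKey or CephMonKey; an externally managed cluster only
+    # needs the FSID, the mon host list and the single client keyring used by the
+    # OpenStack services.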
+ value: + config_settings: + tripleo::profile::base::ceph::ceph_mon_host: {get_param: CephExternalMonHost} + ceph::profile::params::fsid: {get_param: CephClusterFSID} + ceph::profile::params::client_keys: + str_replace: + template: "{ + client.CLIENT_USER: { + secret: 'CLIENT_KEY', + mode: '0644', + cap_mon: 'allow r', + cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL' + } + }" + params: + CLIENT_USER: {get_param: CephClientUserName} + CLIENT_KEY: {get_param: CephClientKey} + NOVA_POOL: {get_param: NovaRbdPoolName} + CINDER_POOL: {get_param: CinderRbdPoolName} + CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName} + GLANCE_POOL: {get_param: GlanceRbdPoolName} + GNOCCHI_POOL: {get_param: GnocchiRbdPoolName} + step_config: | + include ::tripleo::profile::base::ceph::client diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml new file mode 100644 index 00000000..d6e3aa70 --- /dev/null +++ b/puppet/services/ceph-mon.yaml @@ -0,0 +1,56 @@ +heat_template_version: 2016-04-08 + +description: > + Ceph Monitor service. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + CephIPv6: + default: False + type: boolean + CephMonKey: + default: '' + description: The Ceph monitors key. Can be created with ceph-authtool --gen-print-key. + type: string + hidden: true + CinderRbdPoolName: + default: volumes + type: string + CinderBackupRbdPoolName: + default: backups + type: string + GlanceRbdPoolName: + default: images + type: string + GnocchiRbdPoolName: + default: metrics + type: string + NovaRbdPoolName: + default: vms + type: string + +resources: + CephBase: + type: ./ceph-base.yaml + +outputs: + role_data: + description: Role data for the Ceph Monitor service. + value: + config_settings: + map_merge: + - get_attr: [CephBase, role_data, config_settings] + - ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6} + ceph::profile::params::mon_key: {get_param: CephMonKey} + tripleo::profile::base::ceph::mon::ceph_pools: + - {get_param: CinderRbdPoolName} + - {get_param: CinderBackupRbdPoolName} + - {get_param: NovaRbdPoolName} + - {get_param: GlanceRbdPoolName} + - {get_param: GnocchiRbdPoolName} + step_config: | + include ::tripleo::profile::base::ceph::mon diff --git a/puppet/services/ceph-osd.yaml b/puppet/services/ceph-osd.yaml new file mode 100644 index 00000000..24f60283 --- /dev/null +++ b/puppet/services/ceph-osd.yaml @@ -0,0 +1,24 @@ +heat_template_version: 2016-04-08 + +description: > + Ceph OSD service. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CephBase: + type: ./ceph-base.yaml + +outputs: + role_data: + description: Role data for the Cinder OSD service. 
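+    # NOTE (editorial, illustrative): the OSD role adds no settings of its own; it
+    # re-exports the shared ceph-base.yaml hieradata and includes the osd profile.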
+ value: + config_settings: + get_attr: [CephBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceph::osd diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml new file mode 100644 index 00000000..c53bef6f --- /dev/null +++ b/puppet/services/cinder-api.yaml @@ -0,0 +1,42 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Cinder API service configured with Puppet + +parameters: + CinderEnableDBPurge: + default: true + description: | + Whether to create cron job for purging soft deleted rows in Cinder database. + type: boolean + CinderPassword: + description: The password for the cinder service account, used by cinder-api. + type: string + hidden: true + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + CinderBase: + type: ./cinder-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Cinder API role. + value: + config_settings: + map_merge: + - get_attr: [CinderBase, role_data, config_settings] + - cinder::api::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + cinder::api::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + cinder::api::keystone_password: {get_param: CinderPassword} + cinder::glance::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} + tripleo::profile::base::cinder::cinder_enable_db_purge: {get_param: CinderEnableDBPurge} + step_config: | + include ::tripleo::profile::base::cinder::api diff --git a/puppet/services/cinder-base.yaml b/puppet/services/cinder-base.yaml new file mode 100644 index 00000000..f6d2b645 --- /dev/null +++ b/puppet/services/cinder-base.yaml @@ -0,0 +1,64 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Cinder base service. Shared by all Cinder services. + +parameters: + CinderPassword: + description: The password for the cinder service account, used by cinder-api. + type: string + hidden: true + Debug: + default: '' + description: Set to True to enable debugging on all services. + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + +outputs: + role_data: + description: Role data for the Cinder base service. 
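+    # NOTE (editorial, illustrative): database_connection below is assembled with
+    # list_join from the EndpointMap; with the usual MysqlInternal protocol it
+    # renders to roughly 'mysql+pymysql://cinder:<CinderPassword>@<mysql host>/cinder'.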
+ value: + config_settings: + cinder::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://cinder:' + - {get_param: CinderPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/cinder' + cinder::db::mysql::password: {get_param: CinderPassword} + cinder::debug: {get_param: Debug} + cinder::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + cinder::rabbit_userid: {get_param: RabbitUserName} + cinder::rabbit_password: {get_param: RabbitPassword} + cinder::rabbit_port: {get_param: RabbitClientPort} + cinder::db::mysql::user: cinder + cinder::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]} + cinder::db::mysql::dbname: cinder + cinder::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" diff --git a/puppet/services/cinder-scheduler.yaml b/puppet/services/cinder-scheduler.yaml new file mode 100644 index 00000000..6bdf86bc --- /dev/null +++ b/puppet/services/cinder-scheduler.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Cinder Scheduler service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + CinderBase: + type: ./cinder-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Cinder Scheduler role. + value: + config_settings: + get_attr: [CinderBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::cinder::scheduler diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml new file mode 100644 index 00000000..41f3827d --- /dev/null +++ b/puppet/services/cinder-volume.yaml @@ -0,0 +1,79 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Cinder Volume service configured with Puppet + +parameters: + CinderEnableNfsBackend: + default: false + description: Whether to enable or not the NFS backend for Cinder + type: boolean + CinderEnableIscsiBackend: + default: true + description: Whether to enable or not the Iscsi backend for Cinder + type: boolean + CinderEnableRbdBackend: + default: false + description: Whether to enable or not the Rbd backend for Cinder + type: boolean + CinderISCSIHelper: + default: lioadm + description: The iSCSI helper to use with cinder. + type: string + CinderLVMLoopDeviceSize: + default: 10280 + description: The size of the loopback file used by the cinder LVM driver. + type: number + CinderNfsMountOptions: + default: '' + description: > + Mount options for NFS mounts used by Cinder NFS backend. Effective + when CinderEnableNfsBackend is true. + type: string + CinderNfsServers: + default: '' + description: > + NFS servers used by Cinder NFS backend. Effective when + CinderEnableNfsBackend is true. + type: comma_delimited_list + CinderRbdPoolName: + default: volumes + type: string + CephClientUserName: + default: openstack + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + CinderBase: + type: ./cinder-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Cinder Volume role. 
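+    # NOTE (editorial, illustrative): the backend toggles below map one-to-one to
+    # tripleo::profile::base::cinder::volume hieradata; CinderNfsServers is a
+    # comma_delimited_list, and the str_replace wrapper effectively coerces it
+    # into the plain string form expected by hiera.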
+ value: + config_settings: + map_merge: + - get_attr: [CinderBase, role_data, config_settings] + - tripleo::profile::base::cinder::volume::cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend} + tripleo::profile::base::cinder::volume::cinder_enable_nfs_backend: {get_param: CinderEnableNfsBackend} + tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend} + tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: {get_param: CinderNfsMountOptions} + tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: + str_replace: + template: SERVERS + params: + SERVERS: {get_param: CinderNfsServers} + tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize} + tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper} + tripleo::profile::base::cinder::volume::rbd::cinder_rbd_pool_name: {get_param: CinderRbdPoolName} + tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name: {get_param: CephClientUserName} + step_config: | + include ::tripleo::profile::base::cinder::volume diff --git a/puppet/services/database/mongodb-base.yaml b/puppet/services/database/mongodb-base.yaml new file mode 100644 index 00000000..ecd1d319 --- /dev/null +++ b/puppet/services/database/mongodb-base.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + Configuration details for MongoDB service using composable roles + +parameters: + MongoDbNoJournal: + default: false + description: Should MongoDb journaling be disabled + type: boolean + MongoDbIPv6: + default: false + description: Enable IPv6 if MongoDB VIP is IPv6 + type: boolean + MongoDbReplset: + type: string + default: "tripleo" + +outputs: + aux_parameters: + description: Additional parameters referenced outside the base file + value: + rplset_name: {get_param: MongoDbReplset} + role_data: + description: Role data for the MongoDB base service. + value: + config_settings: + mongodb::server::nojournal: {get_param: MongoDbNoJournal} + mongodb::server::ipv6: {get_param: MongoDbIPv6} + mongodb::server::replset: {get_param: MongoDbReplset}
\ No newline at end of file diff --git a/puppet/services/database/mongodb.yaml b/puppet/services/database/mongodb.yaml new file mode 100644 index 00000000..c0488700 --- /dev/null +++ b/puppet/services/database/mongodb.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2016-04-08 + +description: > + MongoDb service deployment using puppet + +parameters: + #Parameters not used EndpointMap + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + MongoDbBase: + type: ./mongodb-base.yaml + +outputs: + role_data: + description: Service mongodb using composable services. + value: + config_settings: + map_merge: + - get_attr: [MongoDbBase, role_data, config_settings] + - tripleo::profile::base::database::mongodb::mongodb_replset: {get_attr: [MongoDbBase, aux_parameters, rplset_name]} + mongodb::server::service_manage: True + step_config: | + include ::tripleo::profile::base::database::mongodb
\ No newline at end of file diff --git a/puppet/services/database/redis-base.yaml b/puppet/services/database/redis-base.yaml new file mode 100644 index 00000000..77b3c9f0 --- /dev/null +++ b/puppet/services/database/redis-base.yaml @@ -0,0 +1,21 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Redis service configured with Puppet + +parameters: + RedisPassword: + description: The password for Redis + type: string + hidden: true + +outputs: + role_data: + description: Role data for the redis role. + value: + config_settings: + redis::requirepass: {get_param: RedisPassword} + redis::masterauth: {get_param: RedisPassword} + redis::sentinel_auth_pass: {get_param: RedisPassword} + tripleo::loadbalancer::redis_password: {get_param: RedisPassword} + diff --git a/puppet/services/database/redis.yaml b/puppet/services/database/redis.yaml new file mode 100644 index 00000000..2669592a --- /dev/null +++ b/puppet/services/database/redis.yaml @@ -0,0 +1,25 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Redis service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + RedisBase: + type: ./redis-base.yaml + +outputs: + role_data: + description: Role data for the redis role. + value: + config_settings: + map_merge: + - get_attr: [RedisBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::database::redis diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml index 3e8784b7..f1f98a8e 100644 --- a/puppet/services/glance-api.yaml +++ b/puppet/services/glance-api.yaml @@ -9,9 +9,9 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - MysqlVirtualIPUri: + CephClientUserName: + default: openstack type: string - default: '' Debug: default: '' description: Set to True to enable debugging on all services. @@ -39,6 +39,9 @@ parameters: default: 0 description: Number of workers for Glance service. type: number + GlanceRbdPoolName: + default: images + type: string RabbitPassword: description: The password for RabbitMQ type: string @@ -63,13 +66,14 @@ outputs: description: Role data for the Glance API role. 
value: config_settings: - glance_dsn: &glance_dsn + glance::api::database_connection: list_join: - '' - - - 'mysql+pymysql://glance:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://glance:' - {get_param: GlancePassword} - '@' - - {get_param: MysqlVirtualIPUri} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/glance' glance::api::bind_port: {get_param: [EndpointMap, GlanceInternal, port]} glance::api::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } @@ -84,15 +88,20 @@ outputs: glance::api::workers: {get_param: GlanceWorkers} glance_notifier_strategy: {get_param: GlanceNotifierStrategy} glance_log_file: {get_param: GlanceLogFile} - glance::api::database_connection: *glance_dsn glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] } glance::backend::swift::swift_store_user: service:glance glance::backend::swift::swift_store_key: {get_param: GlancePassword} + glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} + glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} glance_backend: {get_param: GlanceBackend} glance::db::mysql::password: {get_param: GlancePassword} glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName} glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort} glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword} glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]} + glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]} + glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]} + glance::keystone::auth::password: {get_param: GlancePassword } step_config: | include ::tripleo::profile::base::glance::api diff --git a/puppet/services/glance-registry.yaml b/puppet/services/glance-registry.yaml index 1a1a515a..d71157f9 100644 --- a/puppet/services/glance-registry.yaml +++ b/puppet/services/glance-registry.yaml @@ -9,9 +9,6 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - MysqlVirtualIPUri: - type: string - default: '' Debug: default: '' description: Set to True to enable debugging on all services. @@ -30,19 +27,26 @@ outputs: description: Role data for the Glance Registry role. 
value: config_settings: - glance_dsn: &glance_dsn + glance::registry::database_connection: list_join: - '' - - - 'mysql+pymysql://glance:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://glance:' - {get_param: GlancePassword} - '@' - - {get_param: MysqlVirtualIPUri} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/glance' glance::registry::keystone_password: {get_param: GlancePassword} - glance::registry::database_connection: *glance_dsn glance::registry::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } glance::registry::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } glance::registry::debug: {get_param: Debug} glance::registry::workers: {get_param: GlanceWorkers} + glance::db::mysql::user: glance + glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]} + glance::db::mysql::dbname: glance + glance::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + step_config: | include ::tripleo::profile::base::glance::registry diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml new file mode 100644 index 00000000..844637bc --- /dev/null +++ b/puppet/services/haproxy.yaml @@ -0,0 +1,18 @@ +heat_template_version: 2016-04-08 + +description: > + HAproxy service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Role data for the HAproxy role. + value: + step_config: | + include ::tripleo::profile::base::haproxy diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml new file mode 100644 index 00000000..c1f26c15 --- /dev/null +++ b/puppet/services/heat-api-cfn.yaml @@ -0,0 +1,43 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat CloudFormation API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + HeatWorkers: + default: 0 + description: Number of workers for Heat service. + type: number + HeatPassword: + description: The password for the Heat service and db account, used by the Heat services. + type: string + hidden: true + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + +resources: + HeatBase: + type: ./heat-base.yaml + +outputs: + role_data: + description: Role data for the Heat CloudFormation API role. 
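+    # NOTE (editorial, illustrative): besides the worker count, this role registers
+    # the CloudFormation endpoints in Keystone; the public/internal/admin URLs are
+    # all resolved from the EndpointMap (HeatCfnPublic/Internal/Admin).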
+ value: + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - heat::api_cfn::workers: {get_param: HeatWorkers} + heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]} + heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]} + heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]} + heat::keystone::auth_cfn::password: {get_param: HeatPassword} + heat::keystone::auth::region: {get_param: KeystoneRegion} + step_config: | + include ::tripleo::profile::base::heat::api_cfn diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml new file mode 100644 index 00000000..2c56951b --- /dev/null +++ b/puppet/services/heat-api-cloudwatch.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat CloudWatch API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + HeatWorkers: + default: 0 + description: Number of workers for Heat service. + type: number + +resources: + HeatBase: + type: ./heat-base.yaml + +outputs: + role_data: + description: Role data for the Heat Cloudwatch API role. + value: + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - heat::api_cloudwatch::workers: {get_param: HeatWorkers} + step_config: | + include ::tripleo::profile::base::heat::api_cloudwatch diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml new file mode 100644 index 00000000..d3461e63 --- /dev/null +++ b/puppet/services/heat-api.yaml @@ -0,0 +1,43 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + HeatWorkers: + default: 0 + description: Number of workers for Heat service. + type: number + HeatPassword: + description: The password for the Heat service and db account, used by the Heat services. + type: string + hidden: true + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + +resources: + HeatBase: + type: ./heat-base.yaml + +outputs: + role_data: + description: Role data for the Heat API role. + value: + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - heat::api::workers: {get_param: HeatWorkers} + heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]} + heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]} + heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]} + heat::keystone::auth::password: {get_param: HeatPassword} + heat::keystone::auth::region: {get_param: KeystoneRegion} + step_config: | + include ::tripleo::profile::base::heat::api diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml new file mode 100644 index 00000000..88e27945 --- /dev/null +++ b/puppet/services/heat-base.yaml @@ -0,0 +1,46 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat base service. Shared for all Heat services. + +parameters: + Debug: + default: '' + description: Set to True to enable debugging on all services. 
+ type: string + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + +outputs: + role_data: + description: Shared role data for the Heat services. + value: + config_settings: + heat::rabbit_userid: {get_param: RabbitUserName} + heat::rabbit_password: {get_param: RabbitPassword} + heat::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + heat::rabbit_port: {get_param: RabbitClientPort} + heat::debug: {get_param: Debug} + heat::enable_proxy_headers_parsing: true + # We need this because the default heat policy.json no longer works on TripleO + # https://git.openstack.org/cgit/openstack/heat/commit/?id=ac86702172ddf01f5bdc3f3cd99d2e32ad9b7024 + heat::policy::policies: + context_is_admin: + key: 'context_is_admin' + value: 'role:admin' diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml new file mode 100644 index 00000000..77af55ef --- /dev/null +++ b/puppet/services/heat-engine.yaml @@ -0,0 +1,64 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat Engine service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + HeatEnableDBPurge: + type: boolean + default: true + description: | + Whether to create cron job for purging soft deleted rows in the Heat database. + HeatWorkers: + default: 0 + description: Number of workers for Heat service. + type: number + HeatPassword: + description: The password for the Heat service and db account, used by the Heat services. + type: string + hidden: true + HeatStackDomainAdminPassword: + description: Password for heat_stack_domain_admin user. + type: string + hidden: true + +resources: + HeatBase: + type: ./heat-base.yaml + +outputs: + role_data: + description: Role data for the Heat Engine role. 
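+    # NOTE (editorial, illustrative): allowed_hosts below mixes a literal '%' with
+    # the string "%{hiera('mysql_bind_host')}"; the latter is intentionally left
+    # for hiera to interpolate at catalog compile time, which is why it is quoted
+    # rather than resolved in the template.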
+ value: + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - heat::engine::num_engine_workers: {get_param: HeatWorkers} + tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge} + heat::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://heat:' + - {get_param: HeatPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/heat' + heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]} + heat::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + heat::keystone_password: {get_param: HeatPassword} + heat::db::mysql::password: {get_param: HeatPassword} + heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword} + heat::db::mysql::user: heat + heat::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]} + heat::db::mysql::dbname: heat + heat::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + step_config: | + include ::tripleo::profile::base::heat::engine diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml new file mode 100644 index 00000000..5ab03fcb --- /dev/null +++ b/puppet/services/ironic-api.yaml @@ -0,0 +1,42 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ironic API configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + IronicPassword: + description: The password for the Ironic service and db account, used by the Ironic services + type: string + hidden: true + +resources: + IronicBase: + type: ./ironic-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ironic API role. + value: + config_settings: + map_merge: + - get_attr: [IronicBase, role_data, config_settings] + # NOTE(dtantsur): the my_ip parameter is heavily overloaded in + # ironic. It's used as a default value for e.g. TFTP server IP, + # glance and neutron endpoints, virtual console IP. We override + # the TFTP server IP in ironic-conductor.yaml as it should not be + # the VIP, but rather a real IP of the controller. + - ironic::my_ip: {get_param: [EndpointMap, MysqlInternal, host]} + ironic::api::admin_password: {get_param: IronicPassword} + ironic::keystone::auth::public_url: {get_param: [EndpointMap, IronicPublic, uri]} + ironic::keystone::auth::internal_url: {get_param: [EndpointMap, IronicInternal, uri]} + ironic::keystone::auth::admin_url: {get_param: [EndpointMap, IronicAdmin, uri]} + ironic::keystone::auth::password: {get_param: IronicPassword } + step_config: | + include ::tripleo::profile::base::ironic::api diff --git a/puppet/services/ironic-base.yaml b/puppet/services/ironic-base.yaml new file mode 100644 index 00000000..df82bb6c --- /dev/null +++ b/puppet/services/ironic-base.yaml @@ -0,0 +1,69 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ironic services configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + Debug: + default: '' + description: Set to True to enable debugging on all services. 
+ type: string + IronicPassword: + description: The password for the Ironic service and db account, used by the Ironic services + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + +outputs: + role_data: + description: Role data for the Ironic role. + value: + config_settings: + ironic::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://ironic:' + - {get_param: IronicPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/ironic' + ironic::admin_tenant_name: 'service' + ironic::debug: {get_param: Debug} + ironic::rabbit_userid: {get_param: RabbitUserName} + ironic::rabbit_password: {get_param: RabbitPassword} + ironic::rabbit_port: {get_param: RabbitClientPort} + ironic::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + ironic::db::mysql::password: {get_param: IronicPassword} + ironic::db::mysql::user: ironic + ironic::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + ironic::db::mysql::dbname: ironic + ironic::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + + ironic::keystone::auth::tenant: 'service' + step_config: | + include ::tripleo::profile::base::ironic diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml new file mode 100644 index 00000000..26d4e0ed --- /dev/null +++ b/puppet/services/ironic-conductor.yaml @@ -0,0 +1,35 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ironic conductor configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + IronicEnabledDrivers: + default: ['pxe_ipmitool', 'agent_ipmitool'] + description: Enabled Ironic drivers + type: comma_delimited_list + +resources: + IronicBase: + type: ./ironic-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ironic conductor role. + value: + config_settings: + map_merge: + - get_attr: [IronicBase, role_data, config_settings] + - ironic::enabled_drivers: {get_param: IronicEnabledDrivers} + # Prevent tftp_server from defaulting to my_ip setting, which is + # controller VIP, not a real IP. + ironic::drivers::pxe::tftp_server: {get_input: ironic_api_network} + step_config: | + include ::tripleo::profile::base::ironic::conductor diff --git a/puppet/services/keepalived.yaml b/puppet/services/keepalived.yaml new file mode 100644 index 00000000..09ce26b5 --- /dev/null +++ b/puppet/services/keepalived.yaml @@ -0,0 +1,18 @@ +heat_template_version: 2016-04-08 + +description: > + Keepalived service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Role data for the Keepalived role. 
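+    # NOTE (editorial, illustrative): like haproxy.yaml, this role only emits a
+    # step_config include and no config_settings hieradata.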
+ value: + step_config: | + include ::tripleo::profile::base::keepalived diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml new file mode 100644 index 00000000..b429c5ea --- /dev/null +++ b/puppet/services/kernel.yaml @@ -0,0 +1,18 @@ +heat_template_version: 2016-04-08 + +description: > + Load kernel modules with kmod and configure kernel options with sysctl. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Role data for the Kernel modules + value: + step_config: | + include ::tripleo::profile::base::kernel diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml index 1654f0e7..0ad6025c 100644 --- a/puppet/services/keystone.yaml +++ b/puppet/services/keystone.yaml @@ -54,9 +54,6 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - MysqlVirtualIPUri: - type: string - default: '' Debug: type: string default: '' @@ -97,15 +94,15 @@ outputs: description: Role data for the Keystone role. value: config_settings: - keystone_dsn: &keystone_dsn + keystone::database_connection: list_join: - '' - - - 'mysql+pymysql://keystone:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://keystone:' - {get_param: AdminToken} - '@' - - {get_param: MysqlVirtualIPUri} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/keystone' - keystone::database_connection: *keystone_dsn keystone::admin_token: {get_param: AdminToken} keystone::roles::admin::password: {get_param: AdminPassword} keystone_ca_certificate: {get_param: KeystoneCACertificate} @@ -131,5 +128,12 @@ outputs: keystone::public_workers: {get_param: KeystoneWorkers} keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge} keystone::public_endpoint: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]} + keystone::db::mysql::user: keystone + keystone::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + keystone::db::mysql::dbname: keystone + keystone::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + step_config: | include ::tripleo::profile::base::keystone diff --git a/puppet/services/memcached.yaml b/puppet/services/memcached.yaml new file mode 100644 index 00000000..fcd0adca --- /dev/null +++ b/puppet/services/memcached.yaml @@ -0,0 +1,19 @@ +heat_template_version: 2016-04-08 + +description: > + Memcached service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Role data for the Memcached role. + value: + config_settings: + step_config: | + include ::tripleo::profile::base::memcached diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml new file mode 100644 index 00000000..8bd8d989 --- /dev/null +++ b/puppet/services/neutron-base.yaml @@ -0,0 +1,62 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron base service. Shared for all Neutron agents. 
+ +parameters: + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + NeutronDhcpAgentsPerNetwork: + type: number + default: 3 + description: The number of neutron dhcp agents to schedule per network + NeutronCorePlugin: + default: 'ml2' + description: | + The core plugin for Neutron. The value should be the entrypoint to be loaded + from neutron.core_plugins namespace. + type: string + NeutronServicePlugins: + default: "router,qos" + description: | + Comma-separated list of service plugin entrypoints to be loaded from the + neutron.service_plugins namespace. + type: comma_delimited_list + Debug: + type: string + default: '' + description: Set to True to enable debugging on all services. + +outputs: + role_data: + description: Role data for the Neutron base service. + value: + config_settings: + neutron::rabbit_password: {get_param: RabbitPassword} + neutron::rabbit_user: {get_param: RabbitUserName} + neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + neutron::rabbit_port: {get_param: RabbitClientPort} + neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} + neutron::core_plugin: {get_param: NeutronCorePlugin} + neutron::service_plugins: + str_replace: + template: PLUGINS + params: + PLUGINS: {get_param: NeutronServicePlugins} + neutron::debug: {get_param: Debug} diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml new file mode 100644 index 00000000..5d02bc90 --- /dev/null +++ b/puppet/services/neutron-dhcp.yaml @@ -0,0 +1,31 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron DHCP agent configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NeutronEnableIsolatedMetadata: + default: 'False' + description: If True, DHCP provide metadata route to VM. + type: string + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron DHCP agent service. + value: + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + - neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata} + step_config: | + include tripleo::profile::base::neutron::dhcp diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml new file mode 100644 index 00000000..20c82dc1 --- /dev/null +++ b/puppet/services/neutron-l3.yaml @@ -0,0 +1,34 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron L3 agent configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + Debug: + type: string + default: '' + NeutronExternalNetworkBridge: + description: Name of bridge used for external network traffic. + type: string + default: 'br-ex' + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron L3 agent service. 
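+    # NOTE (editorial, illustrative): only the external network bridge is layered
+    # on top of the shared neutron-base.yaml settings for the L3 agent.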
+ value: + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge} + step_config: | + include tripleo::profile::base::neutron::l3 diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml new file mode 100644 index 00000000..e221b3a1 --- /dev/null +++ b/puppet/services/neutron-metadata.yaml @@ -0,0 +1,42 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Metadata agent configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NeutronMetadataProxySharedSecret: + description: Shared secret to prevent spoofing + type: string + hidden: true + NeutronWorkers: + default: 0 + description: Number of workers for Neutron service. + type: number + NeutronPassword: + description: The password for the neutron service and db account, used by neutron agents. + type: string + hidden: true + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron Metadata agent service. + value: + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + - neutron::agents::metadata::shared_secret: {get_param: NeutronMetadataProxySharedSecret} + neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers} + neutron::agents::metadata::auth_password: {get_param: NeutronPassword} + neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + step_config: | + include tripleo::profile::base::neutron::metadata diff --git a/puppet/services/neutron-midonet.yaml b/puppet/services/neutron-midonet.yaml new file mode 100644 index 00000000..736c01c3 --- /dev/null +++ b/puppet/services/neutron-midonet.yaml @@ -0,0 +1,48 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Midonet plugin and services + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NeutronPassword: + description: The password for the neutron service and db account, used by neutron agents. + type: string + hidden: true + AdminPassword: + description: The password for the keystone admin account, used for monitoring, querying neutron etc. + type: string + hidden: true + AdminToken: + description: The keystone auth secret and db password. 
+ type: string + hidden: true + EnableZookeeperOnController: + label: Enable Zookeeper On Controller + description: 'Whether enable Zookeeper cluster on Controller' + type: boolean + default: false + EnableCassandraOnController: + label: Enable Cassandra On Controller + description: 'Whether enable Cassandra cluster on Controller' + type: boolean + default: false + +outputs: + role_data: + description: Role data for the Neutron Midonet plugin and services + value: + config_settings: + tripleo::profile::base::neutron::midonet::admin_password: {get_param: AdminPassword} + tripleo::profile::base::neutron::midonet::keystone_admin_token: {get_param: AdminToken} + tripleo::profile::base::neutron::midonet::neutron_auth_password: {get_param: NeutronPassword} + tripleo::profile::base::neutron::midonet::zk_on_controller: {get_param: EnableZookeeperOnController} + tripleo::profile::base::neutron::midonet::neutron_auth_tenant: 'service' + enable_cassandra_on_controller: {get_param: EnableCassandraOnController} + neutron::service_plugins: [] + step_config: | + include tripleo::profile::base::neutron::plugins::midonet diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml new file mode 100644 index 00000000..0e1dbb29 --- /dev/null +++ b/puppet/services/neutron-ovs-agent.yaml @@ -0,0 +1,71 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron OVS agent configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NeutronEnableTunnelling: + type: string + default: "True" + NeutronEnableL2Pop: + type: string + description: > + Enable/disable the L2 population feature in the Neutron agents. + default: "False" + NeutronBridgeMappings: + description: > + The OVS logical->physical bridge mappings to use. See the Neutron + documentation for details. Defaults to mapping br-ex - the external + bridge on hosts - to a physical name 'datacentre' which can be used + to create provider networks (and we use this for the default floating + network) - if changing this either use different post-install network + scripts or be sure to keep 'datacentre' as a mapping network name. + type: comma_delimited_list + default: "datacentre:br-ex" + NeutronTunnelTypes: + default: 'vxlan' + description: | + The tunnel types for the Neutron tenant network. + type: comma_delimited_list + NeutronAgentExtensions: + default: "qos" + description: | + Comma-separated list of extensions enabled for the Neutron agents. + type: comma_delimited_list + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron OVS agent service. 
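+    # NOTE (editorial, illustrative): bridge mappings, tunnel types and agent
+    # extensions are comma_delimited_list parameters; the str_replace wrappers
+    # below pass them through to hiera as plain strings, e.g. the default
+    # NeutronBridgeMappings renders as 'datacentre:br-ex'.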
+ value: + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + neutron::agents::ml2::ovs::enable_tunneling: {get_param: NeutronEnableTunnelling} + neutron::agents::ml2::ovs::l2_population: {get_param: NeutronEnableL2Pop} + neutron::agents::ml2::ovs::bridge_mappings: + str_replace: + template: MAPPINGS + params: + MAPPINGS: {get_param: NeutronBridgeMappings} + neutron::agents::ml2::ovs::tunnel_types: + str_replace: + template: TYPES + params: + TYPES: {get_param: NeutronTunnelTypes} + neutron::agents::ml2::ovs::extensions: + str_replace: + template: AGENT_EXTENSIONS + params: + AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} + step_config: | + include ::tripleo::profile::base::neutron::ovs diff --git a/puppet/services/neutron-plugin-ml2.yaml b/puppet/services/neutron-plugin-ml2.yaml new file mode 100644 index 00000000..ff13d5d8 --- /dev/null +++ b/puppet/services/neutron-plugin-ml2.yaml @@ -0,0 +1,109 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron ML2 Plugin configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NeutronMechanismDrivers: + default: 'openvswitch' + description: | + The mechanism drivers for the Neutron tenant network. + type: comma_delimited_list + NeutronTypeDrivers: + default: "vxlan,vlan,flat,gre" + description: | + Comma-separated list of network type driver entrypoints to be loaded. + type: comma_delimited_list + NeutronFlatNetworks: + type: comma_delimited_list + default: 'datacentre' + description: If set, flat networks to configure in neutron plugins. + NeutronPluginExtensions: + default: "qos,port_security" + description: | + Comma-separated list of extensions enabled for the Neutron plugin. + type: comma_delimited_list + NeutronNetworkVLANRanges: + default: 'datacentre:1:1000' + description: > + The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the + Neutron documentation for permitted values. Defaults to permitting any + VLAN on the 'datacentre' physical network (See NeutronBridgeMappings). + type: comma_delimited_list + NeutronTunnelIdRanges: + description: | + Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges + of GRE tunnel IDs that are available for tenant network allocation + default: ["1:4094", ] + type: comma_delimited_list + NeutronVniRanges: + description: | + Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges + of VXLAN VNI IDs that are available for tenant network allocation + default: ["1:4094", ] + type: comma_delimited_list + NeutronNetworkType: + default: 'vxlan' + description: The tenant network type for Neutron. + type: comma_delimited_list + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron ML2 plugin. 
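+    # NOTE (editorial, illustrative): the same str_replace idiom as in
+    # neutron-ovs-agent.yaml is applied to each list parameter below (mechanism
+    # drivers, type drivers, flat networks, plugin extensions, VLAN/VNI/tunnel-id
+    # ranges and tenant network types).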
+ value: + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + - neutron::plugins::ml2::mechanism_drivers: + str_replace: + template: MECHANISMS + params: + MECHANISMS: {get_param: NeutronMechanismDrivers} + neutron::plugins::ml2::type_drivers: + str_replace: + template: DRIVERS + params: + DRIVERS: {get_param: NeutronTypeDrivers} + neutron::plugins::ml2::flat_networks: + str_replace: + template: NETWORKS + params: + NETWORKS: {get_param: NeutronFlatNetworks} + neutron::plugins::ml2::extension_drivers: + str_replace: + template: PLUGIN_EXTENSIONS + params: + PLUGIN_EXTENSIONS: {get_param: NeutronPluginExtensions} + neutron::plugins::ml2::network_vlan_ranges: + str_replace: + template: RANGES + params: + RANGES: {get_param: NeutronNetworkVLANRanges} + neutron::plugins::ml2::tunnel_id_ranges: + str_replace: + template: RANGES + params: + RANGES: {get_param: NeutronTunnelIdRanges} + neutron::plugins::ml2::vni_ranges: + str_replace: + template: RANGES + params: + RANGES: {get_param: NeutronVniRanges} + neutron::plugins::ml2::tenant_network_types: + str_replace: + template: TYPES + params: + TYPES: {get_param: NeutronNetworkType} + + step_config: | + include ::tripleo::profile::base::neutron::plugins::ml2 diff --git a/puppet/services/neutron-plugin-nuage.yaml b/puppet/services/neutron-plugin-nuage.yaml new file mode 100644 index 00000000..3c3d8b63 --- /dev/null +++ b/puppet/services/neutron-plugin-nuage.yaml @@ -0,0 +1,75 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Nuage plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + # Config specific parameters, to be provided via parameter_defaults + NeutronNuageOSControllerIp: + description: IP address of the OpenStack Controller + type: string + + NeutronNuageNetPartitionName: + description: Specifies the title that you will see on the VSD + type: string + default: 'default_name' + + NeutronNuageVSDIp: + description: IP address and port of the Virtual Services Directory + type: string + + NeutronNuageVSDUsername: + description: Username to be used to log into VSD + type: string + + NeutronNuageVSDPassword: + description: Password to be used to log into VSD + type: string + + NeutronNuageVSDOrganization: + description: Organization parameter required to log into VSD + type: string + default: 'organization' + + NeutronNuageBaseURIVersion: + description: URI version to be used based on the VSD release + type: string + default: 'default_uri_version' + + NeutronNuageCMSId: + description: Cloud Management System ID (CMS ID) to distinguish between OS instances on the same VSD + type: string + + UseForwardedFor: + description: Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. 
+ type: boolean + default: false + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron Nuage plugin + value: + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + - neutron::plugins::nuage::nuage_oscontroller_ip: {get_param: NeutronNuageOSControllerIp} + neutron::plugins::nuage::nuage_net_partition_name: {get_param: NeutronNuageNetPartitionName} + neutron::plugins::nuage::nuage_vsd_ip: {get_param: NeutronNuageVSDIp} + neutron::plugins::nuage::nuage_vsd_username: {get_param: NeutronNuageVSDUsername} + neutron::plugins::nuage::nuage_vsd_password: {get_param: NeutronNuageVSDPassword} + neutron::plugins::nuage::nuage_vsd_organization: {get_param: NeutronNuageVSDOrganization} + neutron::plugins::nuage::nuage_base_uri_version: {get_param: NeutronNuageBaseURIVersion} + neutron::plugins::nuage::nuage_cms_id: {get_param: NeutronNuageCMSId} + nova::api::use_forwarded_for: {get_param: UseForwardedFor} + step_config: | + include tripleo::profile::base::neutron::plugins::nuage diff --git a/puppet/services/neutron-plugin-opencontrail.yaml b/puppet/services/neutron-plugin-opencontrail.yaml new file mode 100644 index 00000000..9c58c03c --- /dev/null +++ b/puppet/services/neutron-plugin-opencontrail.yaml @@ -0,0 +1,60 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Opencontrail plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + AdminPassword: + description: The password for the keystone admin account, used for monitoring, querying neutron etc. + type: string + hidden: true + AdminToken: + description: The keystone auth secret and db password. 
+ type: string + hidden: true + ContrailApiServerIp: + description: IP address of the OpenContrail API server + type: string + ContrailApiServerPort: + description: Port of the OpenContrail API + type: string + default: 8082 + ContrailMultiTenancy: + description: Whether to enable multi tenancy + type: boolean + default: false + ContrailExtensions: + description: List of OpenContrail extensions to be enabled + type: comma_delimited_list + default: '' + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron Opencontrail plugin + value: + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + - neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions + + neutron::plugins::opencontrail::api_server_ip: {get_param: ContrailApiServerIp} + neutron::plugins::opencontrail::api_server_port: {get_param: ContrailApiServerPort} + neutron::plugins::opencontrail::multi_tenancy: {get_param: ContrailMultiTenancy} + neutron::plugins::opencontrail::contrail_extensions: {get_param: ContrailExtensions} + neutron::plugins::opencontrail::keystone_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri] } + neutron::plugins::opencontrail::keystone_admin_user: admin + neutron::plugins::opencontrail::keystone_admin_tenant_name: admin + neutron::plugins::opencontrail::keystone_admin_password: {get_param: AdminPassword} + neutron::plugins::opencontrail::keystone_admin_token: {get_param: AdminToken} + step_config: | + include tripleo::profile::base::neutron::plugins::opencontrail diff --git a/puppet/services/neutron-plugin-plumgrid.yaml b/puppet/services/neutron-plugin-plumgrid.yaml new file mode 100644 index 00000000..a0ac46ef --- /dev/null +++ b/puppet/services/neutron-plugin-plumgrid.yaml @@ -0,0 +1,111 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Plumgrid plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NeutronPassword: + description: The password for the neutron service and db account, used by neutron agents. + type: string + hidden: true + NeutronMetadataProxySharedSecret: + description: Shared secret to prevent spoofing + type: string + hidden: true + AdminPassword: + description: The password for the keystone admin account, used for monitoring, querying neutron etc. 
+ type: string + hidden: true + + # PLUMgrid specific settings + PLUMgridDirectorServer: + description: IP address of the PLUMgrid Director Server + type: string + default: 127.0.0.1 + PLUMgridDirectorServerPort: + description: Port of the PLUMgrid Director Server + type: string + default: 443 + PLUMgridUsername: + description: Username for PLUMgrid platform + type: string + PLUMgridPassword: + description: Password for PLUMgrid platform + type: string + hidden: true + PLUMgridNovaMetadataIP: + description: IP address of Nova Metadata + type: string + default: 169.254.169.254 + PLUMgridNovaMetadataPort: + description: Port of Nova Metadata + type: string + default: 8775 + PLUMgridL2GatewayVendor: + description: Vendor for L2 Gateway Switch + type: string + default: vendor + PLUMgridL2GatewayUsername: + description: Username for L2 Gateway Switch + type: string + default: username + PLUMgridL2GatewayPassword: + description: Password for L2 Gateway Switch + type: string + hidden: true + PLUMgridIdentityVersion: + description: Keystone Identity version + type: string + default: v2.0 + PLUMgridConnectorType: + description: Neutron Network Connector Type + type: string + default: distributed + PLUMgridNeutronPluginVersion: + description: PLUMgrid Neutron Plugin version + type: string + default: present + PLUMgridPlumlibVersion: + description: PLUMgrid Plumlib version + type: string + default: present + + +outputs: + role_data: + description: Role data for the Neutron Plumgrid plugin + value: + config_settings: + neutron::plugins::plumgrid::connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://neutron:' + - {get_param: NeutronPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/ovs_neutron?charset=utf8' + neutron::plugins::plumgrid::controller_priv_host: {get_param: [EndpointMap, KeystoneAdmin, host]} + neutron::plugins::plumgrid::admin_password: {get_param: AdminPassword} + neutron::plugins::plumgrid::metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} + neutron::plugins::plumgrid::director_server: {get_param: PLUMgridDirectorServer} + neutron::plugins::plumgrid::director_server_port: {get_param: PLUMgridDirectorServerPort} + neutron::plugins::plumgrid::username: {get_param: PLUMgridUsername} + neutron::plugins::plumgrid::password: {get_param: PLUMgridPassword} + neutron::plugins::plumgrid::nova_metadata_ip: {get_param: PLUMgridNovaMetadataIP} + neutron::plugins::plumgrid::nova_metadata_port: {get_param: PLUMgridNovaMetadataPort} + neutron::plugins::plumgrid::l2gateway_vendor: {get_param: PLUMgridL2GatewayVendor} + neutron::plugins::plumgrid::l2gateway_sw_username: {get_param: PLUMgridL2GatewayUsername} + neutron::plugins::plumgrid::l2gateway_sw_password: {get_param: PLUMgridL2GatewayPassword} + neutron::plugins::plumgrid::connector_type: {get_param: PLUMgridConnectorType} + neutron::plugins::plumgrid::identity_version: {get_param: PLUMgridIdentityVersion} + neutron::plugins::plumgrid::package_ensure: {get_param: PLUMgridNeutronPluginVersion} + neutron::plugins::plumgrid::plumlib_package_ensure: {get_param: PLUMgridPlumlibVersion} + + step_config: | + include tripleo::profile::base::neutron::plugins::plumgrid diff --git a/puppet/services/neutron-server.yaml b/puppet/services/neutron-server.yaml new file mode 100644 index 00000000..d759d420 --- /dev/null +++ b/puppet/services/neutron-server.yaml @@ -0,0 +1,75 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Server configured with 
Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NeutronWorkers: + default: 0 + description: Number of workers for Neutron service. + type: number + NeutronPassword: + description: The password for the neutron service and db account, used by neutron agents. + type: string + hidden: true + NeutronAllowL3AgentFailover: + default: 'True' + description: Allow automatic l3-agent failover + type: string + NeutronL3HA: + default: 'False' + description: Whether to enable l3-agent HA + type: string + NovaPassword: + description: The password for the nova service and db account, used by nova-api. + type: string + hidden: true + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron Server agent service. + value: + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + neutron::server::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://neutron:' + - {get_param: NeutronPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/ovs_neutron?charset=utf8' + neutron::server::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + neutron::server::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + neutron::server::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + neutron::server::api_workers: {get_param: NeutronWorkers} + neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} + neutron::server::l3_ha: {get_param: NeutronL3HA} + neutron::server::auth_password: {get_param: NeutronPassword} + + neutron::server::notifications::nova_url: { get_param: [ EndpointMap, NovaInternal, uri ] } + neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] } + neutron::server::notifications::tenant_name: 'service' + neutron::server::notifications::project_name: 'service' + neutron::server::notifications::password: {get_param: NovaPassword} + neutron::db::mysql::password: {get_param: NeutronPassword} + neutron::db::mysql::user: neutron + neutron::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]} + neutron::db::mysql::dbname: ovs_neutron + neutron::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + step_config: | + include tripleo::profile::base::neutron::server diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml new file mode 100644 index 00000000..f31df371 --- /dev/null +++ b/puppet/services/nova-api.yaml @@ -0,0 +1,31 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Nova API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NovaWorkers: + default: 0 + description: Number of workers for Nova API service. + type: number + +resources: + NovaBase: + type: ./nova-base.yaml + +outputs: + role_data: + description: Role data for the Nova API service. 
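# Illustrative note (example values only): the neutron::server::database_connection
# string built with list_join in the neutron-server hunk above resolves to a plain
# SQLAlchemy URL once the MysqlInternal endpoint and NeutronPassword are substituted,
# e.g. assuming a protocol of mysql+pymysql, a DB VIP of 192.0.2.10 and the password
# 'secret':
#
#   neutron::server::database_connection: mysql+pymysql://neutron:secret@192.0.2.10/ovs_neutron?charset=utf8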
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [NovaBase, role_data, config_settings]
+          - nova::api::osapi_compute_workers: {get_param: NovaWorkers}
+          - nova::api::metadata_workers: {get_param: NovaWorkers}
+      step_config: |
+        include tripleo::profile::base::nova::api
diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml
new file mode 100644
index 00000000..7de14f68
--- /dev/null
+++ b/puppet/services/nova-base.yaml
@@ -0,0 +1,39 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Nova base service. Shared for all Nova services.
+
+parameters:
+  RabbitPassword:
+    description: The password for RabbitMQ
+    type: string
+    hidden: true
+  RabbitUserName:
+    default: guest
+    description: The username for RabbitMQ
+    type: string
+  RabbitClientUseSSL:
+    default: false
+    description: >
+        Rabbit client subscriber parameter to specify
+        an SSL connection to the RabbitMQ host.
+    type: string
+  RabbitClientPort:
+    default: 5672
+    description: Set rabbit subscriber port, change this if using SSL
+    type: number
+  Debug:
+    type: string
+    default: ''
+    description: Set to True to enable debugging on all services.
+
+outputs:
+  role_data:
+    description: Role data for the Nova base service.
+    value:
+      config_settings:
+        nova::rabbit_password: {get_param: RabbitPassword}
+        nova::rabbit_user: {get_param: RabbitUserName}
+        nova::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+        nova::rabbit_port: {get_param: RabbitClientPort}
+        nova::debug: {get_param: Debug}
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
new file mode 100644
index 00000000..679586f7
--- /dev/null
+++ b/puppet/services/nova-compute.yaml
@@ -0,0 +1,33 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Nova Compute service configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+
+resources:
+  NovaBase:
+    type: ./nova-base.yaml
+
+outputs:
+  role_data:
+    description: Role data for the Nova Compute service.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [NovaBase, role_data, config_settings]
+          - nova::compute::libvirt::manage_libvirt_services: false
+            # we manage migration in nova common puppet profile
+            nova::compute::libvirt::migration_support: false
+            tripleo::profile::base::nova::manage_migration: true
+            tripleo::profile::base::nova::nova_compute_enabled: true
+      step_config: |
+        # TODO(emilien): figure out how to deal with the libvirt profile.
+        # We'll probably treat it like we do with Neutron plugins.
+        # Until then, just include it in the default nova-compute role.
+        include tripleo::profile::base::nova::compute::libvirt
diff --git a/puppet/services/nova-conductor.yaml b/puppet/services/nova-conductor.yaml
new file mode 100644
index 00000000..412dd275
--- /dev/null
+++ b/puppet/services/nova-conductor.yaml
@@ -0,0 +1,30 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Nova Conductor service configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  NovaWorkers:
+    default: 0
+    description: Number of workers for Nova Conductor service.
+    type: number
+
+resources:
+  NovaBase:
+    type: ./nova-base.yaml
+
+outputs:
+  role_data:
+    description: Role data for the Nova Conductor service.
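# Illustrative note (hypothetical values): NovaWorkers, used by the nova-api
# template above and by nova-conductor below, defaults to 0 and is typically
# overridden from an environment file, e.g.:
#
#   parameter_defaults:
#     NovaWorkers: 4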
+ value: + config_settings: + map_merge: + - get_attr: [NovaBase, role_data, config_settings] + - nova::conductor::workers: {get_param: NovaWorkers} + step_config: | + include tripleo::profile::base::nova::conductor diff --git a/puppet/services/nova-consoleauth.yaml b/puppet/services/nova-consoleauth.yaml new file mode 100644 index 00000000..791c5449 --- /dev/null +++ b/puppet/services/nova-consoleauth.yaml @@ -0,0 +1,24 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Nova Consoleauth service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + NovaBase: + type: ./nova-base.yaml + +outputs: + role_data: + description: Role data for the Nova Consoleauth service. + value: + config_settings: + get_attr: [NovaBase, role_data, config_settings] + step_config: | + include tripleo::profile::base::nova::consoleauth diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml new file mode 100644 index 00000000..e3309c32 --- /dev/null +++ b/puppet/services/nova-libvirt.yaml @@ -0,0 +1,31 @@ +heat_template_version: 2016-04-08 + +description: > + Libvirt service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + NovaBase: + type: ./nova-base.yaml + +outputs: + role_data: + description: Role data for the Libvirt service. + value: + config_settings: + map_merge: + - get_attr: [NovaBase, role_data, config_settings] + # we include ::nova::compute::libvirt::services in nova/libvirt profile + - nova::compute::libvirt::manage_libvirt_services: false + # we manage migration in nova common puppet profile + nova::compute::libvirt::migration_support: false + tripleo::profile::base::nova::manage_migration: true + tripleo::profile::base::nova::libvirt_enabled: true + step_config: | + include tripleo::profile::base::nova::libvirt diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml new file mode 100644 index 00000000..65ed6643 --- /dev/null +++ b/puppet/services/nova-scheduler.yaml @@ -0,0 +1,26 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Nova Scheduler service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + NovaBase: + type: ./nova-base.yaml + +outputs: + role_data: + description: Role data for the Nova Scheduler service. + value: + config_settings: + map_merge: + - get_attr: [NovaBase, role_data, config_settings] + - nova::scheduler::filter::ram_allocation_ratio: '1.0' + step_config: | + include tripleo::profile::base::nova::scheduler diff --git a/puppet/services/nova-vncproxy.yaml b/puppet/services/nova-vncproxy.yaml new file mode 100644 index 00000000..93a25ab2 --- /dev/null +++ b/puppet/services/nova-vncproxy.yaml @@ -0,0 +1,24 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Nova Vncproxy service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + +resources: + NovaBase: + type: ./nova-base.yaml + +outputs: + role_data: + description: Role data for the Nova Vncproxy service. + value: + config_settings: + get_attr: [NovaBase, role_data, config_settings] + step_config: | + include tripleo::profile::base::nova::vncproxy diff --git a/puppet/services/pacemaker/ceilometer-agent-central.yaml b/puppet/services/pacemaker/ceilometer-agent-central.yaml new file mode 100644 index 00000000..8fb7bd23 --- /dev/null +++ b/puppet/services/pacemaker/ceilometer-agent-central.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Central Agent service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ../ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Central Agent pacemaker role. + value: + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::agent::central::manage_service: false + ceilometer::agent::central::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::ceilometer::agent::central diff --git a/puppet/services/pacemaker/ceilometer-agent-notification.yaml b/puppet/services/pacemaker/ceilometer-agent-notification.yaml new file mode 100644 index 00000000..54709783 --- /dev/null +++ b/puppet/services/pacemaker/ceilometer-agent-notification.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Notification Agent service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ../ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Notification Agent pacemaker role. + value: + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::agent::notification::manage_service: false + ceilometer::agent::notification::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::ceilometer::agent::notification diff --git a/puppet/services/pacemaker/ceilometer-api.yaml b/puppet/services/pacemaker/ceilometer-api.yaml new file mode 100644 index 00000000..d45b1578 --- /dev/null +++ b/puppet/services/pacemaker/ceilometer-api.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer API service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ../ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer API pacemaker role. 
+ value: + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::api::manage_service: false + ceilometer::api::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::ceilometer::api diff --git a/puppet/services/pacemaker/ceilometer-collector.yaml b/puppet/services/pacemaker/ceilometer-collector.yaml new file mode 100644 index 00000000..487a557c --- /dev/null +++ b/puppet/services/pacemaker/ceilometer-collector.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Collector service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ../ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Collector pacemaker role. + value: + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::collector::manage_service: false + ceilometer::collector::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::ceilometer::collector diff --git a/puppet/services/pacemaker/cinder-api.yaml b/puppet/services/pacemaker/cinder-api.yaml new file mode 100644 index 00000000..0f66cc06 --- /dev/null +++ b/puppet/services/pacemaker/cinder-api.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Cinder API service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + CinderApiBase: + type: ../cinder-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Cinder API role. + value: + config_settings: + map_merge: + - get_attr: [CinderApiBase, role_data, config_settings] + - cinder::api::manage_service: false + cinder::api::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::cinder::api diff --git a/puppet/services/pacemaker/cinder-scheduler.yaml b/puppet/services/pacemaker/cinder-scheduler.yaml new file mode 100644 index 00000000..d1472c00 --- /dev/null +++ b/puppet/services/pacemaker/cinder-scheduler.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Cinder Scheduler service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + CinderSchedulerBase: + type: ../cinder-scheduler.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Cinder Scheduler role. 
+ value: + config_settings: + map_merge: + - get_attr: [CinderSchedulerBase, role_data, config_settings] + - cinder::scheduler::manage_service: false + cinder::scheduler::enabled: false + step_config: + include ::tripleo::profile::pacemaker::cinder::scheduler diff --git a/puppet/services/pacemaker/cinder-volume.yaml b/puppet/services/pacemaker/cinder-volume.yaml new file mode 100644 index 00000000..ee4e6cea --- /dev/null +++ b/puppet/services/pacemaker/cinder-volume.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Cinder Volume service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + CinderVolumeBase: + type: ../cinder-volume.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Cinder Volume role. + value: + config_settings: + map_merge: + - get_attr: [CinderVolumeBase, role_data, config_settings] + - cinder::volume::manage_service: false + cinder::volume::enabled: false + step_config: + include ::tripleo::profile::pacemaker::cinder::volume diff --git a/puppet/services/pacemaker/database/mongodb.yaml b/puppet/services/pacemaker/database/mongodb.yaml new file mode 100644 index 00000000..b2e9e0bb --- /dev/null +++ b/puppet/services/pacemaker/database/mongodb.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2016-04-08 + +description: > + MongoDb service deployment using puppet + +parameters: + #Parameters not used EndpointMap + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + MongoDbBase: + type: ../../database/mongodb-base.yaml + +outputs: + role_data: + description: Service mongodb using composable services. + value: + config_settings: + map_merge: + - get_attr: [MongoDbBase, role_data, config_settings] + - tripleo::profile::pacemaker::database::mongodb::mongodb_replset: {get_attr: [MongoDbBase, aux_parameters, rplset_name]} + mongodb::server::service_manage: False + step_config: | + include ::tripleo::profile::pacemaker::database::mongodb diff --git a/puppet/services/pacemaker/database/redis.yaml b/puppet/services/pacemaker/database/redis.yaml new file mode 100644 index 00000000..0e46f8a3 --- /dev/null +++ b/puppet/services/pacemaker/database/redis.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Redis service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + RedisBase: + type: ../../database/redis-base.yaml + +outputs: + role_data: + description: Role data for the Redis pacemaker role. 
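# Illustrative note (abridged sketch): each pacemaker variant in this series wraps
# its base template and only overlays the keys that hand service control to the
# cluster manager. For the Cinder volume role above, the merged config_settings
# therefore amount to roughly:
#
#   <all settings from ../cinder-volume.yaml>
#   cinder::volume::manage_service: false
#   cinder::volume::enabled: false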
+ value: + config_settings: + map_merge: + - get_attr: [RedisBase, role_data, config_settings] + - tripleo::profile::pacemaker::database::redis::redis_vip: {get_input: redis_vip} + redis::service_manage: false + redis::notify_service: false + step_config: | + include ::tripleo::profile::pacemaker::database::redis diff --git a/puppet/services/pacemaker/glance-api.yaml b/puppet/services/pacemaker/glance-api.yaml index 815eb5bf..5a581dca 100644 --- a/puppet/services/pacemaker/glance-api.yaml +++ b/puppet/services/pacemaker/glance-api.yaml @@ -9,9 +9,6 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - MysqlVirtualIPUri: - type: string - default: '' GlanceFilePcmkDevice: default: '' description: > @@ -43,7 +40,6 @@ resources: type: ../glance-api.yaml properties: EndpointMap: {get_param: EndpointMap} - MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} outputs: role_data: @@ -56,5 +52,7 @@ outputs: glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype} glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage} glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions} + glance::api::manage_service: false + glance::api::enabled: false step_config: | include ::tripleo::profile::pacemaker::glance diff --git a/puppet/services/pacemaker/glance-registry.yaml b/puppet/services/pacemaker/glance-registry.yaml index 56353459..8b88cb93 100644 --- a/puppet/services/pacemaker/glance-registry.yaml +++ b/puppet/services/pacemaker/glance-registry.yaml @@ -9,9 +9,6 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - MysqlVirtualIPUri: - type: string - default: '' resources: @@ -19,14 +16,16 @@ resources: type: ../glance-registry.yaml properties: EndpointMap: {get_param: EndpointMap} - MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} outputs: role_data: description: Role data for the Glance role. value: config_settings: - get_attr: [GlanceRegistryBase, role_data, config_settings] + map_merge: + - get_attr: [GlanceRegistryBase, role_data, config_settings] + - glance::registry::manage_service: false + glance::registry::enabled: false # No puppet manifests since glance-registry is included in # ::tripleo::profile::pacemaker::glance which is maintained alongside of # pacemaker/glance-api.yaml. diff --git a/puppet/services/pacemaker/haproxy.yaml b/puppet/services/pacemaker/haproxy.yaml new file mode 100644 index 00000000..c2ca2816 --- /dev/null +++ b/puppet/services/pacemaker/haproxy.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + HAproxy service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + LoadbalancerServiceBase: + type: ../haproxy.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the HAproxy with pacemaker role. 
+ value: + config_settings: + map_merge: + - get_attr: [LoadbalancerServiceBase, role_data, config_settings] + - tripleo::haproxy::haproxy_service_manage: false + tripleo::haproxy::mysql_clustercheck: true + enable_keepalived: false + step_config: | + include ::tripleo::profile::pacemaker::haproxy diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml new file mode 100644 index 00000000..780c295e --- /dev/null +++ b/puppet/services/pacemaker/heat-api-cfn.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat CloudFormation API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + HeatApiCfnBase: + type: ../heat-api-cfn.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Heat CloudFormation API role. + value: + config_settings: + map_merge: + - get_attr: [HeatApiCfnBase, role_data, config_settings] + - heat::api_cfn::manage_service: false + heat::api_cfn::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::heat::api_cfn diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml new file mode 100644 index 00000000..2fa82fe7 --- /dev/null +++ b/puppet/services/pacemaker/heat-api-cloudwatch.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat CloudWatch API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + HeatApiCloudwatchBase: + type: ../heat-api-cloudwatch.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Heat Cloudwatch API role. + value: + config_settings: + map_merge: + - get_attr: [HeatApiCloudwatchBase, role_data, config_settings] + - heat::api_cloudwatch::manage_service: false + heat::api_cloudwatch::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::heat::api_cloudwatch diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml new file mode 100644 index 00000000..be897a55 --- /dev/null +++ b/puppet/services/pacemaker/heat-api.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + HeatApiBase: + type: ../heat-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Heat API role. 
+ value: + config_settings: + map_merge: + - get_attr: [HeatApiBase, role_data, config_settings] + - heat::api::manage_service: false + heat::api::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::heat::api diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml new file mode 100644 index 00000000..a8ed5c0c --- /dev/null +++ b/puppet/services/pacemaker/heat-engine.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat Engine service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + HeatEngineBase: + type: ../heat-engine.yaml + properties: + EndpointMap: {get_param: EndpointMap} + + +outputs: + role_data: + description: Role data for the Heat engine role. + value: + config_settings: + map_merge: + - get_attr: [HeatEngineBase, role_data, config_settings] + - heat::engine::manage_service: false + heat::engine::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::heat::engine diff --git a/puppet/services/pacemaker/keystone.yaml b/puppet/services/pacemaker/keystone.yaml index 8fcab15f..04e90368 100644 --- a/puppet/services/pacemaker/keystone.yaml +++ b/puppet/services/pacemaker/keystone.yaml @@ -9,9 +9,6 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - MysqlVirtualIPUri: - type: string - default: '' resources: @@ -19,7 +16,6 @@ resources: type: ../keystone.yaml properties: EndpointMap: {get_param: EndpointMap} - MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} outputs: role_data: @@ -28,7 +24,7 @@ outputs: config_settings: map_merge: - get_attr: [KeystoneServiceBase, role_data, config_settings] - #- - # custom keystone hiera goes here if we need it!? + - keystone::manage_service: false + keystone::enabled: false step_config: | include ::tripleo::profile::pacemaker::keystone diff --git a/puppet/services/pacemaker/memcached.yaml b/puppet/services/pacemaker/memcached.yaml new file mode 100644 index 00000000..9a11855e --- /dev/null +++ b/puppet/services/pacemaker/memcached.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2016-04-08 + +description: > + Mecached service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + MemcachedServiceBase: + type: ../memcached.yaml + +outputs: + role_data: + description: Role data for the Memcached pacemaker role. + value: + config_settings: + map_merge: + - get_attr: [MemcachedServiceBase, role_data, config_settings] + - memcached::service_manage: false + step_config: | + include ::tripleo::profile::pacemaker::memcached diff --git a/puppet/services/pacemaker/neutron-dhcp.yaml b/puppet/services/pacemaker/neutron-dhcp.yaml new file mode 100644 index 00000000..6f514379 --- /dev/null +++ b/puppet/services/pacemaker/neutron-dhcp.yaml @@ -0,0 +1,31 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron DHCP service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + +resources: + + NeutronDhcpBase: + type: ../neutron-dhcp.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Neutron DHCP role. + value: + config_settings: + map_merge: + - get_attr: [NeutronDhcpBase, role_data, config_settings] + - tripleo::profile::pacemaker::neutron::enable_dhcp: True + neutron::agents::dhcp::enabled: false + neutron::agents::dhcp::manage_service: false + step_config: | + include ::tripleo::profile::pacemaker::neutron::dhcp diff --git a/puppet/services/pacemaker/neutron-l3.yaml b/puppet/services/pacemaker/neutron-l3.yaml new file mode 100644 index 00000000..cb9c32d9 --- /dev/null +++ b/puppet/services/pacemaker/neutron-l3.yaml @@ -0,0 +1,31 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron L3 service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NeutronL3Base: + type: ../neutron-l3.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Neutron L3 role. + value: + config_settings: + map_merge: + - get_attr: [NeutronL3Base, role_data, config_settings] + - tripleo::profile::pacemaker::neutron::enable_l3: True + neutron::agents::l3::enabled: false + neutron::agents::l3::manage_service: false + step_config: | + include ::tripleo::profile::pacemaker::neutron::l3 diff --git a/puppet/services/pacemaker/neutron-metadata.yaml b/puppet/services/pacemaker/neutron-metadata.yaml new file mode 100644 index 00000000..1c74b26f --- /dev/null +++ b/puppet/services/pacemaker/neutron-metadata.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Metadata service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NeutronMetadataBase: + type: ../neutron-metadata.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Neutron Metadata role. + value: + config_settings: + map_merge: + - get_attr: [NeutronMetadataBase, role_data, config_settings] + - tripleo::profile::pacemaker::neutron::enable_metadata: True + step_config: | + include ::tripleo::profile::pacemaker::neutron::metadata diff --git a/puppet/services/pacemaker/neutron-midonet.yaml b/puppet/services/pacemaker/neutron-midonet.yaml new file mode 100644 index 00000000..f9fd992c --- /dev/null +++ b/puppet/services/pacemaker/neutron-midonet.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Midonet with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NeutronMidonetBase: + type: ../neutron-midonet.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Neutron Midonet plugin. 
+ value: + config_settings: + map_merge: + - get_attr: [NeutronMidonetBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::pacemaker::neutron::plugins::midonet diff --git a/puppet/services/pacemaker/neutron-ovs-agent.yaml b/puppet/services/pacemaker/neutron-ovs-agent.yaml new file mode 100644 index 00000000..a17d7a61 --- /dev/null +++ b/puppet/services/pacemaker/neutron-ovs-agent.yaml @@ -0,0 +1,25 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron OVS agent with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NeutronOvsBase: + type: ../neutron-ovs-agent.yaml + +outputs: + role_data: + description: Role data for the Neutron OVS agent service. + value: + config_settings: + get_attr: [NeutronOvsBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::pacemaker::neutron::ovs diff --git a/puppet/services/pacemaker/neutron-plugin-ml2.yaml b/puppet/services/pacemaker/neutron-plugin-ml2.yaml new file mode 100644 index 00000000..9091b5b9 --- /dev/null +++ b/puppet/services/pacemaker/neutron-plugin-ml2.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron ML2 Plugin with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NeutronMl2Base: + type: ../neutron-plugin-ml2.yaml + +outputs: + role_data: + description: Role data for the Neutron ML2 plugin. + value: + config_settings: + map_merge: + - get_attr: [NeutronMl2Base, role_data, config_settings] + - neutron::agents::ml2::ovs::enabled: false + neutron::agents::ml2::ovs::manage_service: false + step_config: | + include ::tripleo::profile::pacemaker::neutron::plugins::ml2 diff --git a/puppet/services/pacemaker/neutron-plugin-nuage.yaml b/puppet/services/pacemaker/neutron-plugin-nuage.yaml new file mode 100644 index 00000000..704d922a --- /dev/null +++ b/puppet/services/pacemaker/neutron-plugin-nuage.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Nuage Plugin with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NeutronPluginNuageBase: + type: ../neutron-plugin-nuage.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Neutron Nuage plugin. + value: + config_settings: + map_merge: + - get_attr: [NeutronPluginNuageBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::pacemaker::neutron::plugins::nuage diff --git a/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml b/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml new file mode 100644 index 00000000..d8c75509 --- /dev/null +++ b/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron OpenContrail Plugin with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+    type: json
+
+resources:
+
+  NeutronPluginOpenContrail:
+    type: ../neutron-plugin-opencontrail.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+
+outputs:
+  role_data:
+    description: Role data for the Neutron OpenContrail plugin.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronPluginOpenContrail, role_data, config_settings]
+      step_config: |
+        include ::tripleo::profile::pacemaker::neutron::plugins::opencontrail
diff --git a/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml b/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml
new file mode 100644
index 00000000..c2e8eaac
--- /dev/null
+++ b/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml
@@ -0,0 +1,28 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Neutron PLUMgrid Plugin with Pacemaker configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+
+resources:
+
+  NeutronPluginPlumgridBase:
+    type: ../neutron-plugin-plumgrid.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+
+outputs:
+  role_data:
+    description: Role data for the Neutron PLUMgrid plugin.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronPluginPlumgridBase, role_data, config_settings]
+      step_config: |
+        include ::tripleo::profile::pacemaker::neutron::plugins::plumgrid
diff --git a/puppet/services/pacemaker/neutron-server.yaml b/puppet/services/pacemaker/neutron-server.yaml
new file mode 100644
index 00000000..60599e7e
--- /dev/null
+++ b/puppet/services/pacemaker/neutron-server.yaml
@@ -0,0 +1,30 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Neutron Server with Pacemaker configured with Puppet.
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+
+resources:
+
+  NeutronServerBase:
+    type: ../neutron-server.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+
+outputs:
+  role_data:
+    description: Role data for the Neutron Server.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronServerBase, role_data, config_settings]
+          - neutron::server::enabled: false
+            neutron::server::manage_service: false
+      step_config: |
+        include ::tripleo::profile::pacemaker::neutron::server
diff --git a/puppet/services/pacemaker/nova-api.yaml b/puppet/services/pacemaker/nova-api.yaml
new file mode 100644
index 00000000..1b5011b6
--- /dev/null
+++ b/puppet/services/pacemaker/nova-api.yaml
@@ -0,0 +1,30 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Nova API service with Pacemaker configured with Puppet.
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+
+resources:
+
+  NovaApiBase:
+    type: ../nova-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+
+outputs:
+  role_data:
+    description: Role data for the Nova API role.
+ value: + config_settings: + map_merge: + - get_attr: [NovaApiBase, role_data, config_settings] + - nova::api::manage_service: false + nova::api::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::nova::api diff --git a/puppet/services/pacemaker/nova-conductor.yaml b/puppet/services/pacemaker/nova-conductor.yaml new file mode 100644 index 00000000..a484f0df --- /dev/null +++ b/puppet/services/pacemaker/nova-conductor.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Nova Conductor service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NovaConductorBase: + type: ../nova-conductor.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Nova Conductor role. + value: + config_settings: + map_merge: + - get_attr: [NovaConductorBase, role_data, config_settings] + - nova::conductor::manage_service: false + nova::conductor::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::nova::conductor diff --git a/puppet/services/pacemaker/nova-consoleauth.yaml b/puppet/services/pacemaker/nova-consoleauth.yaml new file mode 100644 index 00000000..f9b6b058 --- /dev/null +++ b/puppet/services/pacemaker/nova-consoleauth.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Nova Consoleauth service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NovaConsoleauthBase: + type: ../nova-consoleauth.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Nova Consoleauth role. + value: + config_settings: + map_merge: + - get_attr: [NovaConsoleauthBase, role_data, config_settings] + - nova::consoleauth::manage_service: false + nova::consoleauth::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::nova::consoleauth diff --git a/puppet/services/pacemaker/nova-scheduler.yaml b/puppet/services/pacemaker/nova-scheduler.yaml new file mode 100644 index 00000000..0032cbe6 --- /dev/null +++ b/puppet/services/pacemaker/nova-scheduler.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Nova Scheduler service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NovaSchedulerBase: + type: ../nova-scheduler.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Nova Scheduler role. 
+ value: + config_settings: + map_merge: + - get_attr: [NovaSchedulerBase, role_data, config_settings] + - nova::scheduler::manage_service: false + nova::scheduler::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::nova::scheduler diff --git a/puppet/services/pacemaker/nova-vncproxy.yaml b/puppet/services/pacemaker/nova-vncproxy.yaml new file mode 100644 index 00000000..52395240 --- /dev/null +++ b/puppet/services/pacemaker/nova-vncproxy.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Nova Vncproxy service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NovaVncproxyBase: + type: ../nova-vncproxy.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Nova Vncproxy role. + value: + config_settings: + map_merge: + - get_attr: [NovaVncproxyBase, role_data, config_settings] + - nova::vncproxy::manage_service: false + nova::vncproxy::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::nova::vncproxy diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml new file mode 100644 index 00000000..20fb2e40 --- /dev/null +++ b/puppet/services/pacemaker/rabbitmq.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2016-04-08 + +description: > + RabbitMQ service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + RabbitMQServiceBase: + type: ../rabbitmq.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the RabbitMQ pacemaker role. + value: + config_settings: + map_merge: + - get_attr: [RabbitMQServiceBase, role_data, config_settings] + - rabbitmq::service_manage: false + step_config: | + include ::tripleo::profile::pacemaker::rabbitmq diff --git a/puppet/services/pacemaker/sahara-api.yaml b/puppet/services/pacemaker/sahara-api.yaml new file mode 100644 index 00000000..a5db77c4 --- /dev/null +++ b/puppet/services/pacemaker/sahara-api.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Sahara API service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + SaharaApiBase: + type: ../sahara-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Sahara API role. 
+ value: + config_settings: + map_merge: + - get_attr: [SaharaApiBase, role_data, config_settings] + - sahara::service::api::manage_service: false + sahara::service::api::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::sahara diff --git a/puppet/services/pacemaker/sahara-engine.yaml b/puppet/services/pacemaker/sahara-engine.yaml new file mode 100644 index 00000000..129f88bf --- /dev/null +++ b/puppet/services/pacemaker/sahara-engine.yaml @@ -0,0 +1,32 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Sahara Engine service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + SaharaEngineBase: + type: ../sahara-engine.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Sahara Engine role. + value: + config_settings: + map_merge: + - get_attr: [SaharaEngineBase, role_data, config_settings] + - sahara::service::engine::manage_service: false + sahara::service::engine::enabled: false + # No puppet manifests since sahara-engine is included in + # ::tripleo::profile::pacemaker::sahara which is maintained alongside of + # pacemaker/sahara-api.yaml. + step_config: diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml new file mode 100644 index 00000000..3688c4a8 --- /dev/null +++ b/puppet/services/rabbitmq.yaml @@ -0,0 +1,39 @@ +heat_template_version: 2016-04-08 + +description: > + RabbitMQ service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitFDLimit: + default: 65536 + description: Configures RabbitMQ FD limit + type: string + RabbitIPv6: + default: false + description: Enable IPv6 in RabbitMQ + type: boolean + +outputs: + role_data: + description: Role data for the RabbitMQ role. + value: + config_settings: + rabbitmq::file_limit: {get_param: RabbitFDLimit} + rabbitmq::default_user: {get_param: RabbitUserName} + rabbitmq::default_pass: {get_param: RabbitPassword} + rabbit_ipv6: {get_param: RabbitIPv6} + step_config: | + include ::tripleo::profile::base::rabbitmq diff --git a/puppet/services/sahara-api.yaml b/puppet/services/sahara-api.yaml new file mode 100644 index 00000000..93bf7385 --- /dev/null +++ b/puppet/services/sahara-api.yaml @@ -0,0 +1,52 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Sahara API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + SaharaPassword: + default: unset + description: The password for the sahara service account, used by sahara-api. + type: string + hidden: true + SaharaWorkers: + default: 0 + description: The number of workers for the sahara-api. + type: number + SaharaApiVirtualIP: + type: string + default: '' + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + +resources: + SaharaBase: + type: ./sahara-base.yaml + +outputs: + role_data: + description: Role data for the Sahara API role. 
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [SaharaBase, role_data, config_settings]
+          - sahara::host: {get_param: SaharaApiVirtualIP}
+            sahara::port: {get_param: [EndpointMap, SaharaInternal, port]}
+            sahara::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+            sahara::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+            sahara::admin_password: {get_param: SaharaPassword}
+            sahara::service::api::api_workers: {get_param: SaharaWorkers}
+            sahara::keystone::auth::public_url: {get_param: [EndpointMap, SaharaPublic, uri]}
+            sahara::keystone::auth::internal_url: {get_param: [EndpointMap, SaharaInternal, uri]}
+            sahara::keystone::auth::admin_url: {get_param: [EndpointMap, SaharaAdmin, uri]}
+            sahara::keystone::auth::password: {get_param: SaharaPassword }
+            sahara::keystone::auth::region: {get_param: KeystoneRegion}
+      step_config: |
+        include ::tripleo::profile::base::sahara::api
diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml
new file mode 100644
index 00000000..275d7536
--- /dev/null
+++ b/puppet/services/sahara-base.yaml
@@ -0,0 +1,48 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Sahara base service. Shared for all Sahara services.
+
+parameters:
+  RabbitPassword:
+    description: The password for RabbitMQ
+    type: string
+    hidden: true
+  RabbitUserName:
+    default: guest
+    description: The username for RabbitMQ
+    type: string
+  RabbitClientUseSSL:
+    default: false
+    description: >
+        Rabbit client subscriber parameter to specify
+        an SSL connection to the RabbitMQ host.
+    type: string
+  RabbitClientPort:
+    default: 5672
+    description: Set rabbit subscriber port, change this if using SSL
+    type: number
+  Debug:
+    type: string
+    default: ''
+    description: Set to True to enable debugging on all services.
+
+outputs:
+  role_data:
+    description: Role data for the Sahara base service.
+    value:
+      config_settings:
+        sahara::rabbit_password: {get_param: RabbitPassword}
+        sahara::rabbit_user: {get_param: RabbitUserName}
+        sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+        sahara::rabbit_port: {get_param: RabbitClientPort}
+        sahara::debug: {get_param: Debug}
+        sahara::use_neutron: true
+        sahara::plugins:
+          - cdh
+          - hdp
+          - mapr
+          - vanilla
+          - spark
+          - storm
+        sahara::rpc_backend: rabbit
diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml
new file mode 100644
index 00000000..f0411a35
--- /dev/null
+++ b/puppet/services/sahara-engine.yaml
@@ -0,0 +1,47 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Sahara Engine service configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  SaharaPassword:
+    default: unset
+    description: The password for the sahara service account, used by sahara-api.
+    type: string
+    hidden: true
+
+resources:
+  SaharaBase:
+    type: ./sahara-base.yaml
+
+outputs:
+  role_data:
+    description: Role data for the Sahara Engine role.
+ value: + config_settings: + map_merge: + - get_attr: [SaharaBase, role_data, config_settings] + - sahara_dsn: &sahara_dsn + list_join: + - '' + - - {get_param: [EndpointMap, MysqlVirtual, protocol]} + - '://sahara:' + - {get_param: SaharaPassword} + - '@' + - {get_param: [EndpointMap, MysqlVirtual, host]} + - '/sahara' + sahara::database_connection: *sahara_dsn + sahara::db::mysql::password: {get_param: SaharaPassword} + sahara::db::mysql::user: sahara + sahara::db::mysql::host: {get_param: [EndpointMap, MysqlVirtual, host]} + sahara::db::mysql::dbname: sahara + sahara::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + step_config: | + include ::tripleo::profile::base::sahara::engine diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml index f9681634..91f0e049 100644 --- a/puppet/services/services.yaml +++ b/puppet/services/services.yaml @@ -15,10 +15,6 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - MysqlVirtualIPUri: - default: '' - type: string - description: The URI virtual IP for the MySQL service. resources: @@ -29,12 +25,10 @@ resources: concurrent: true resource_properties: EndpointMap: {get_param: EndpointMap} - MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} outputs: - config_settings: - description: Configuration settings. - value: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}} - step_config: - description: Step configuration. - value: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]} + role_data: + description: Combined Role data for this set of services. + value: + config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}} + step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]} diff --git a/puppet/services/snmp.yaml b/puppet/services/snmp.yaml new file mode 100644 index 00000000..24ee2933 --- /dev/null +++ b/puppet/services/snmp.yaml @@ -0,0 +1,31 @@ +heat_template_version: 2016-04-08 + +description: > + SNMP client configured with Puppet, to facilitate Ceilometer Hardware + monitoring in the undercloud. This service is required to enable hardware + monitoring. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + SnmpdReadonlyUserName: + default: ro_snmp_user + description: The user name for SNMPd with readonly rights running on all Overcloud nodes + type: string + SnmpdReadonlyUserPassword: + description: The user password for SNMPd with readonly rights running on all Overcloud nodes + type: string + hidden: true + +outputs: + role_data: + description: Role data for the SNMP services + value: + config_settings: + snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} + snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} + step_config: | + include ::tripleo::profile::base::snmp diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml new file mode 100644 index 00000000..930b9e3d --- /dev/null +++ b/puppet/services/swift-proxy.yaml @@ -0,0 +1,54 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Swift Proxy service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
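# Illustrative note (hypothetical resource name): with the services.yaml change
# above, a role now consumes one aggregated role_data output. A role template can
# forward that map to its post-deploy template, which splices the joined manifest
# in via {get_param: [RoleData, step_config]} (see the swift-storage-post.yaml hunk
# further below), while the merged config_settings map is written out as hieradata,
# e.g. roughly:
#
#   RoleData: {get_attr: [ObjectStorageServiceChain, role_data]}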
+ type: json + Debug: + default: '' + description: Set to True to enable debugging on all services. + type: string + SwiftPassword: + description: The password for the swift service account, used by the swift proxy services. + type: string + hidden: true + SwiftProxyNodeTimeout: + default: 60 + description: Timeout for requests going from swift-proxy to swift a/c/o services. + type: number + SwiftWorkers: + default: 0 + description: Number of workers for Swift service. + type: number + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + + +outputs: + role_data: + description: Role data for the Swift proxy service. + value: + config_settings: + # Swift + swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + swift::proxy::authtoken::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + swift::proxy::authtoken::admin_password: {get_param: SwiftPassword} + swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout} + swift::proxy::workers: {get_param: SwiftWorkers} + swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]} + swift::keystone::auth::internal_url: {get_param: [EndpointMap, SwiftInternal, uri]} + swift::keystone::auth::admin_url: {get_param: [EndpointMap, SwiftAdmin, uri]} + swift::keystone::auth::public_url_s3: {get_param: [EndpointMap, SwiftS3Public, uri]} + swift::keystone::auth::internal_url_s3: {get_param: [EndpointMap, SwiftS3Internal, uri]} + swift::keystone::auth::admin_url_s3: {get_param: [EndpointMap, SwiftS3Admin, uri]} + swift::keystone::auth::password: {get_param: SwiftPassword} + swift::keystone::auth::region: {get_param: KeystoneRegion} + step_config: | + include ::tripleo::profile::base::swift::proxy diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml new file mode 100644 index 00000000..980c95f5 --- /dev/null +++ b/puppet/services/swift-storage.yaml @@ -0,0 +1,44 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Swift Storage service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + Debug: + default: '' + description: Set to True to enable debugging on all services. + type: string + SwiftMountCheck: + default: false + description: Value of mount_check in Swift account/container/object -server.conf + type: boolean + + # DEPRECATED options for compatibility with overcloud.yaml + # This should be removed and manipulation of the ControllerServices list + # used instead, but we need client support for that first + ControllerEnableSwiftStorage: + default: true + description: Whether to enable Swift Storage on the Controller + type: boolean + +parameter_groups: +- label: deprecated + description: Do not use deprecated params, they will be removed. + parameters: + - ControllerEnableSwiftStorage + +outputs: + role_data: + description: Role data for the Swift Proxy role. 
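Because these are ordinary template parameters, the proxy tuning knobs can be overridden from an environment file rather than by editing the template. A hypothetical snippet (parameter names come from the template above; values are examples only):

    parameter_defaults:
      SwiftWorkers: 4
      SwiftProxyNodeTimeout: 120
      KeystoneRegion: regionOne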
+ value: + config_settings: + # Swift + swift::storage::all::mount_check: {get_param: SwiftMountCheck} + tripleo::profile::base::swift::storage::enable_swift_storage: {get_param: ControllerEnableSwiftStorage} + step_config: | + include ::tripleo::profile::base::swift::storage diff --git a/puppet/services/time/ntp.yaml b/puppet/services/time/ntp.yaml new file mode 100644 index 00000000..930dca41 --- /dev/null +++ b/puppet/services/time/ntp.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2016-04-08 + +description: > + NTP service deployment using puppet, this YAML file + creates the interface between the HOT template + and the puppet manifest that actually installs + and configure NTP. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NtpServer: + default: [] + description: NTP servers + type: comma_delimited_list + +outputs: + role_data: + description: Role ntp using composable services. + value: + config_settings: + ntp::ntpservers: {get_param: NtpServer} + step_config: | + include ::ntp diff --git a/puppet/services/time/timezone.yaml b/puppet/services/time/timezone.yaml new file mode 100644 index 00000000..13fda986 --- /dev/null +++ b/puppet/services/time/timezone.yaml @@ -0,0 +1,24 @@ +heat_template_version: 2016-04-08 + +description: > + Composable Timezone service + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + TimeZone: + default: 'UTC' + description: The timezone to be set on the overcloud. + type: string + +outputs: + role_data: + description: Timezone role using composable services. + value: + config_settings: + timezone::timezone: {get_param: TimeZone} + step_config: | + include ::timezone diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml index eb06b241..1c36a047 100644 --- a/puppet/swift-storage-post.yaml +++ b/puppet/swift-storage-post.yaml @@ -8,8 +8,11 @@ parameters: type: boolean servers: type: json - NodeConfigIdentifiers: - type: json + RoleData: + type: json + default: {} + DeployIdentifier: + type: string description: Value which changes if the node configuration may need to be re-applied resources: @@ -23,7 +26,7 @@ resources: servers: {get_param: servers} config: {get_resource: StorageArtifactsConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} StoragePuppetConfig: type: OS::Heat::SoftwareConfig @@ -31,46 +34,56 @@ resources: group: puppet options: enable_debug: {get_param: ConfigDebug} + enable_hiera: True + enable_facter: False + inputs: + - name: step outputs: - name: result config: - get_file: manifests/overcloud_object.pp + list_join: + - '' + - - get_file: manifests/overcloud_object.pp + - get_file: manifests/ringbuilder.pp + - {get_param: [RoleData, step_config]} - StorageDeployment_Step1: + StorageRingbuilderDeployment_Step2: type: OS::Heat::StructuredDeployments depends_on: StorageArtifactsDeploy properties: - name: StorageDeployment_Step1 + name: StorageRingbuilderDeployment_Step2 servers: {get_param: servers} config: {get_resource: StoragePuppetConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + step: 2 + update_identifier: {get_param: DeployIdentifier} - StorageRingbuilderPuppetConfig: - type: OS::Heat::SoftwareConfig + StorageRingbuilderDeployment_Step3: + type: 
OS::Heat::StructuredDeployments + depends_on: StorageRingbuilderDeployment_Step2 properties: - group: puppet - options: - enable_debug: {get_param: ConfigDebug} - outputs: - - name: result - config: - get_file: manifests/ringbuilder.pp + name: StorageRingbuilderDeployment_Step3 + servers: {get_param: servers} + config: {get_resource: StoragePuppetConfig} + input_values: + step: 3 + update_identifier: {get_param: DeployIdentifier} - StorageRingbuilderDeployment_Step2: + StorageDeployment_Step4: type: OS::Heat::StructuredDeployments - depends_on: StorageDeployment_Step1 + depends_on: StorageRingbuilderDeployment_Step3 properties: - name: StorageRingbuilderDeployment_Step2 + name: StorageDeployment_Step4 servers: {get_param: servers} - config: {get_resource: StorageRingbuilderPuppetConfig} + config: {get_resource: StoragePuppetConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + step: 4 + update_identifier: {get_param: DeployIdentifier} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. ExtraConfig: - depends_on: StorageRingbuilderDeployment_Step2 + depends_on: StorageDeployment_Step4 type: OS::TripleO::NodeExtraConfigPost properties: servers: {get_param: servers} diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml index 296428db..dc274dcd 100644 --- a/puppet/swift-storage.yaml +++ b/puppet/swift-storage.yaml @@ -18,10 +18,6 @@ parameters: default: default description: Name of an existing Nova key pair to enable SSH access to the instances type: string - MountCheck: - default: 'false' - description: Value of mount_check in Swift account/container/object -server.conf - type: boolean MinPartHours: type: number default: 1 @@ -46,10 +42,6 @@ parameters: description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true - NtpServer: - default: '' - description: Comma-separated list of ntp servers - type: comma_delimited_list EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -65,10 +57,6 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. type: json - TimeZone: - default: 'UTC' - description: The timezone to be set on Ceph nodes. 
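Stepping back to the swift-storage-post change above: instead of one SoftwareConfig per manifest, a single Puppet config is now built once and replayed with an increasing step input, which is how the concatenated composable step_config fragments get a chance to run at each step. A stripped-down sketch of that pattern (resource names and the manifest path are placeholders, not part of this change):

    ExamplePuppetConfig:
      type: OS::Heat::SoftwareConfig
      properties:
        group: puppet
        options:
          enable_debug: {get_param: ConfigDebug}
          enable_hiera: True
          enable_facter: False
        inputs:
          - name: step
        outputs:
          - name: result
        config: {get_file: manifests/example.pp}   # placeholder manifest

    ExampleDeployment_Step2:
      type: OS::Heat::StructuredDeployments
      depends_on: ExampleArtifactsDeploy           # placeholder dependency
      properties:
        name: ExampleDeployment_Step2
        servers: {get_param: servers}
        config: {get_resource: ExamplePuppetConfig}
        input_values:
          step: 2
          update_identifier: {get_param: DeployIdentifier}

Later steps reuse the same config resource with step: 3, step: 4, and so on, each depending on the previous deployment.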
- type: string Hostname: type: string default: '' # Defaults to Heat created hostname @@ -121,11 +109,21 @@ parameters: NodeIndex: type: number default: 0 + ServiceConfigSettings: + type: json + default: {} + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 resources: SwiftStorage: type: OS::Nova::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} flavor: {get_param: Flavor} @@ -220,16 +218,22 @@ resources: properties: ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkDeployment: @@ -251,15 +255,24 @@ resources: - heat_config_%{::deploy_config_name} - object_extraconfig - extraconfig + - service_configs - object - swift_devices_and_proxy # provided by SwiftDevicesAndProxyConfig - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common + - network merge_behavior: deeper datafiles: + service_configs: + mapped_data: {get_param: ServiceConfigSettings} common: raw_data: {get_file: hieradata/common.yaml} + network: + mapped_data: + net_ip_map: {get_attr: [NetIpMap, net_ip_map]} + net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]} + net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]} object_extraconfig: mapped_data: {get_param: ObjectStorageExtraConfig} extraconfig: @@ -267,15 +280,12 @@ resources: object: raw_data: {get_file: hieradata/object.yaml} mapped_data: # data supplied directly to this deployment configuration, etc - swift::swift_hash_suffix: { get_input: swift_hash_suffix } + swift::swift_hash_path_suffix: { get_input: swift_hash_suffix } tripleo::ringbuilder::build_ring: { get_input: swift_ring_build } tripleo::ringbuilder::part_power: { get_input: swift_part_power } tripleo::ringbuilder::replicas: {get_input: swift_replicas } swift::storage::all::storage_local_net_ip: {get_input: swift_management_network} - swift_mount_check: {get_input: swift_mount_check } tripleo::ringbuilder::min_part_hours: { get_input: swift_min_part_hours } - ntp::servers: {get_input: ntp_servers} - timezone::timezone: {get_input: timezone} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} tripleo::packages::enable_install: {get_input: enable_package_install} @@ -294,13 +304,10 @@ resources: snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: 
SnmpdReadonlyUserPassword} swift_hash_suffix: {get_param: HashSuffix} - swift_mount_check: {get_param: MountCheck} swift_min_part_hours: {get_param: MinPartHours} swift_ring_build: {get_param: RingBuild} swift_part_power: {get_param: PartPower} swift_replicas: { get_param: Replicas} - ntp_servers: {get_param: NtpServer} - timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} @@ -413,11 +420,3 @@ outputs: management_ip_address: description: IP address of the server in the management network value: {get_attr: [ManagementPort, ip_address]} - config_identifier: - description: identifier which changes if the node configuration may need re-applying - value: - list_join: - - ',' - - - {get_attr: [SwiftStorageHieraDeploy, deploy_stdout]} - - {get_attr: [NodeTLSCAData, deploy_stdout]} - - {get_param: UpdateIdentifier} diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml index 3e8e9182..51129053 100644 --- a/puppet/vip-config.yaml +++ b/puppet/vip-config.yaml @@ -32,11 +32,21 @@ resources: horizon_vip: {get_input: horizon_vip} redis_vip: {get_input: redis_vip} mysql_vip: {get_input: mysql_vip} - tripleo::loadbalancer::public_virtual_ip: {get_input: public_virtual_ip} - tripleo::loadbalancer::controller_virtual_ip: {get_input: control_virtual_ip} - tripleo::loadbalancer::internal_api_virtual_ip: {get_input: internal_api_virtual_ip} - tripleo::loadbalancer::storage_virtual_ip: {get_input: storage_virtual_ip} - tripleo::loadbalancer::storage_mgmt_virtual_ip: {get_input: storage_mgmt_virtual_ip} + public_virtual_ip: {get_input: public_virtual_ip} + controller_virtual_ip: {get_input: control_virtual_ip} + internal_api_virtual_ip: {get_input: internal_api_virtual_ip} + storage_virtual_ip: {get_input: storage_virtual_ip} + storage_mgmt_virtual_ip: {get_input: storage_mgmt_virtual_ip} + ironic_api_vip: {get_input: ironic_api_vip} + # public_virtual_ip and controller_virtual_ip are needed in + # both HAproxy & keepalived. + tripleo::haproxy::public_virtual_ip: {get_input: public_virtual_ip} + tripleo::haproxy::controller_virtual_ip: {get_input: control_virtual_ip} + tripleo::keepalived::public_virtual_ip: {get_input: public_virtual_ip} + tripleo::keepalived::controller_virtual_ip: {get_input: control_virtual_ip} + tripleo::keepalived::internal_api_virtual_ip: {get_input: internal_api_virtual_ip} + tripleo::keepalived::storage_virtual_ip: {get_input: storage_virtual_ip} + tripleo::keepalived::storage_mgmt_virtual_ip: {get_input: storage_mgmt_virtual_ip} tripleo::redis_notification::haproxy_monitor_ip: {get_input: control_virtual_ip} |
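With the old loadbalancer class keys gone from vip-config, the same VIP inputs are now published under both tripleo::haproxy:: and tripleo::keepalived:: hiera keys, so each profile reads only what it needs. Using invented documentation-range addresses (203.0.113.5 public, 192.0.2.5 control plane), the rendered hieradata would look roughly like:

    # Example rendering only; addresses are placeholders.
    tripleo::haproxy::public_virtual_ip: 203.0.113.5
    tripleo::haproxy::controller_virtual_ip: 192.0.2.5
    tripleo::keepalived::public_virtual_ip: 203.0.113.5
    tripleo::keepalived::controller_virtual_ip: 192.0.2.5
    tripleo::redis_notification::haproxy_monitor_ip: 192.0.2.5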