Diffstat (limited to 'puppet')
-rw-r--r--  puppet/ceph-cluster-config.yaml | 21
-rw-r--r--  puppet/controller.yaml | 318
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml | 10
-rw-r--r--  puppet/extraconfig/ceph/ceph-external-config.yaml | 22
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml | 5
-rw-r--r--  puppet/hieradata/ceph.yaml | 5
-rw-r--r--  puppet/hieradata/common.yaml | 2
-rw-r--r--  puppet/hieradata/compute.yaml | 6
-rw-r--r--  puppet/hieradata/controller.yaml | 21
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp | 8
-rw-r--r--  puppet/manifests/overcloud_compute.pp | 26
-rw-r--r--  puppet/manifests/overcloud_controller.pp | 108
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp | 648
-rw-r--r--  puppet/manifests/overcloud_object.pp | 8
-rw-r--r--  puppet/manifests/overcloud_volume.pp | 4
-rw-r--r--  puppet/manifests/ringbuilder.pp | 22
16 files changed, 833 insertions, 401 deletions
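One theme of this change is that the Ceph pool names used by Nova, Cinder and Glance are no longer hard-coded as vms, volumes and images: puppet/ceph-cluster-config.yaml and puppet/extraconfig/ceph/ceph-external-config.yaml now expose them as the NovaRbdPoolName, CinderRbdPoolName and GlanceRbdPoolName parameters and feed them into hiera (nova::compute::rbd::libvirt_images_rbd_pool, cinder_rbd_pool_name, glance::backend::rbd::rbd_store_pool and the ceph_pools list). As a rough sketch, assuming the usual Heat environment-file mechanism, an operator could then override the defaults along these lines (the file name and pool names below are invented examples, not part of this change):

# rbd-pools.yaml -- hypothetical Heat environment file
parameter_defaults:
  NovaRbdPoolName: prod-vms        # default: vms
  CinderRbdPoolName: prod-volumes  # default: volumes
  GlanceRbdPoolName: prod-images   # default: images

The controller manifests then create whatever pools are listed in ceph_pools and point the cinder RBD backend at hiera('cinder_rbd_pool_name') instead of the literal 'volumes'.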
diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml index 99265493..5e54a621 100644 --- a/puppet/ceph-cluster-config.yaml +++ b/puppet/ceph-cluster-config.yaml @@ -27,6 +27,15 @@ parameters: type: comma_delimited_list ceph_mon_ips: type: comma_delimited_list + NovaRbdPoolName: + default: vms + type: string + CinderRbdPoolName: + default: volumes + type: string + GlanceRbdPoolName: + default: images + type: string resources: CephClusterConfigImpl: @@ -69,11 +78,21 @@ resources: secret: 'ADMIN_KEY', mode: '0644', cap_mon: 'allow r', - cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images' + cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL' } }" params: ADMIN_KEY: {get_param: ceph_admin_key} + NOVA_POOL: {get_param: NovaRbdPoolName} + CINDER_POOL: {get_param: CinderRbdPoolName} + GLANCE_POOL: {get_param: GlanceRbdPoolName} + nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName} + cinder_rbd_pool_name: {get_param: CinderRbdPoolName} + glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} + ceph_pools: + - {get_param: CinderRbdPoolName} + - {get_param: NovaRbdPoolName} + - {get_param: GlanceRbdPoolName} outputs: config_id: diff --git a/puppet/controller.yaml b/puppet/controller.yaml index 0bb8035b..009199d4 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -4,6 +4,11 @@ description: > OpenStack controller node configured by Puppet. parameters: + AdminEmail: + default: 'admin@example.com' + description: The email for the keystone admin account. + type: string + hidden: true AdminPassword: default: unset description: The password for the keystone admin account, used for monitoring, querying neutron etc. @@ -180,6 +185,30 @@ parameters: type: string constraints: - allowed_values: ['swift', 'file', 'rbd'] + GlanceFilePcmkDevice: + default: '' + description: > + An exported storage device that should be mounted by Pacemaker + as Glance storage. Effective when GlanceFilePcmkManage is true. + type: string + GlanceFilePcmkFstype: + default: 'nfs' + description: > + Filesystem type for Pacemaker mount used as Glance storage. + Effective when GlanceFilePcmkManage is true. + type: string + GlanceFilePcmkManage: + default: false + description: > + Whether to make Glance file backend a mount managed by Pacemaker. + Effective when GlanceBackend is 'file'. + type: boolean + GlanceFilePcmkOptions: + default: '' + description: > + Mount options for Pacemaker mount used as Glance storage. + Effective when GlanceFilePcmkManage is true. + type: string HAProxySyslogAddress: default: /dev/log description: Syslog address where HAproxy will send its log @@ -253,6 +282,10 @@ parameters: type: string constraints: - allowed_values: [ 'basic', 'cadf' ] + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint MysqlClusterUniquePart: description: A unique identifier of the MySQL cluster the controller is in. type: string @@ -437,7 +470,7 @@ parameters: Specifies the interface where the public-facing virtual ip will be assigned. This should be int_public when a VLAN is being used. 
type: string - PublicVirtualIP: # DEPRECATED: use per service settings instead + PublicVirtualIP: type: string default: '' # Has to be here because of the ignored empty value bug RabbitCookie: @@ -694,9 +727,28 @@ resources: - - 'http://' - {get_param: HeatApiVirtualIP} - ':8000/v1/waitcondition' + heat_public_url: + list_join: + - '' + - - 'http://' + - {get_param: PublicVirtualIP} + - ':8004/v1/%(tenant_id)s' + heat_internal_url: + list_join: + - '' + - - 'http://' + - {get_param: HeatApiVirtualIP} + - ':8004/v1/%(tenant_id)s' + heat_admin_url: + list_join: + - '' + - - 'http://' + - {get_param: HeatApiVirtualIP} + - ':8004/v1/%(tenant_id)s' heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey} horizon_allowed_hosts: {get_param: HorizonAllowedHosts} horizon_secret: {get_param: HorizonSecret} + admin_email: {get_param: AdminEmail} admin_password: {get_param: AdminPassword} admin_token: {get_param: AdminToken} neutron_public_interface_ip: {get_param: NeutronPublicInterfaceIP} @@ -725,9 +777,49 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/cinder' + cinder_public_url: + list_join: + - '' + - - 'http://' + - {get_param: PublicVirtualIP} + - ':8776/v1/%(tenant_id)s' + cinder_internal_url: + list_join: + - '' + - - 'http://' + - {get_param: CinderApiVirtualIP} + - ':8776/v1/%(tenant_id)s' + cinder_admin_url: + list_join: + - '' + - - 'http://' + - {get_param: CinderApiVirtualIP} + - ':8776/v1/%(tenant_id)s' + cinder_public_url_v2: + list_join: + - '' + - - 'http://' + - {get_param: PublicVirtualIP} + - ':8776/v2/%(tenant_id)s' + cinder_internal_url_v2: + list_join: + - '' + - - 'http://' + - {get_param: CinderApiVirtualIP} + - ':8776/v2/%(tenant_id)s' + cinder_admin_url_v2: + list_join: + - '' + - - 'http://' + - {get_param: CinderApiVirtualIP} + - ':8776/v2/%(tenant_id)s' glance_port: {get_param: GlancePort} glance_password: {get_param: GlancePassword} glance_backend: {get_param: GlanceBackend} + glance_file_pcmk_device: {get_param: GlanceFilePcmkDevice} + glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype} + glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage} + glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions} glance_notifier_strategy: {get_param: GlanceNotifierStrategy} glance_log_file: {get_param: GlanceLogFile} glance_dsn: @@ -769,13 +861,25 @@ resources: - '' - - 'http://' - {get_param: KeystoneAdminApiVirtualIP} - - ':35357/' + - ':35357' keystone_auth_uri: list_join: - '' - - 'http://' - {get_param: KeystonePublicApiVirtualIP} - ':5000/v2.0/' + keystone_public_url: + list_join: + - '' + - - 'http://' + - {get_param: PublicVirtualIP} + - ':5000' + keystone_internal_url: + list_join: + - '' + - - 'http://' + - {get_param: KeystonePublicApiVirtualIP} + - ':5000' keystone_ec2_uri: list_join: - '' @@ -861,7 +965,19 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/ovs_neutron?charset=utf8' - neutron_url: + neutron_internal_url: + list_join: + - '' + - - 'http://' + - {get_param: NeutronApiVirtualIP} + - ':9696' + neutron_public_url: + list_join: + - '' + - - 'http://' + - {get_param: PublicVirtualIP} + - ':9696' + neutron_admin_url: list_join: - '' - - 'http://' @@ -885,9 +1001,29 @@ resources: ceilometer_dsn: list_join: - '' - - - 'mysql://ceilometer:unset@' + - - 'mysql://ceilometer:' + - {get_param: CeilometerPassword} + - '@' - {get_param: MysqlVirtualIP} - '/ceilometer' + ceilometer_public_url: + list_join: + - '' + - - 'http://' + - {get_param: PublicVirtualIP} + - ':8777' + ceilometer_internal_url: + list_join: + - '' + - - 'http://' + - 
{get_param: CeilometerApiVirtualIP} + - ':8777' + ceilometer_admin_url: + list_join: + - '' + - - 'http://' + - {get_param: CeilometerApiVirtualIP} + - ':8777' snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} nova_password: {get_param: NovaPassword} @@ -899,6 +1035,60 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/nova' + nova_public_url: + list_join: + - '' + - - 'http://' + - {get_param: PublicVirtualIP} + - ':8774/v2/%(tenant_id)s' + nova_internal_url: + list_join: + - '' + - - 'http://' + - {get_param: NovaApiVirtualIP} + - ':8774/v2/%(tenant_id)s' + nova_admin_url: + list_join: + - '' + - - 'http://' + - {get_param: NovaApiVirtualIP} + - ':8774/v2/%(tenant_id)s' + nova_v3_public_url: + list_join: + - '' + - - 'http://' + - {get_param: PublicVirtualIP} + - ':8774/v3' + nova_v3_internal_url: + list_join: + - '' + - - 'http://' + - {get_param: NovaApiVirtualIP} + - ':8774/v3' + nova_v3_admin_url: + list_join: + - '' + - - 'http://' + - {get_param: NovaApiVirtualIP} + - ':8774/v3' + nova_ec2_public_url: + list_join: + - '' + - - 'http://' + - {get_param: PublicVirtualIP} + - ':8773/services/Cloud' + nova_ec2_internal_url: + list_join: + - '' + - - 'http://' + - {get_param: NovaApiVirtualIP} + - ':8773/services/Cloud' + nova_ec2_admin_url: + list_join: + - '' + - - 'http://' + - {get_param: NovaApiVirtualIP} + - ':8773/services/Admin' fencing_config: {get_param: FencingConfig} pcsd_password: {get_param: PcsdPassword} rabbit_username: {get_param: RabbitUserName} @@ -928,6 +1118,42 @@ resources: swift_replicas: {get_param: SwiftReplicas} swift_min_part_hours: {get_param: SwiftMinPartHours} swift_mount_check: {get_param: SwiftMountCheck} + swift_public_url: + list_join: + - '' + - - 'http://' + - {get_param: PublicVirtualIP} + - ':8080/v1/AUTH_%(tenant_id)s' + swift_internal_url: + list_join: + - '' + - - 'http://' + - {get_param: SwiftProxyVirtualIP} + - ':8080/v1/AUTH_%(tenant_id)s' + swift_admin_url: + list_join: + - '' + - - 'http://' + - {get_param: SwiftProxyVirtualIP} + - ':8080' + swift_public_url_s3: + list_join: + - '' + - - 'http://' + - {get_param: PublicVirtualIP} + - ':8080' + swift_internal_url_s3: + list_join: + - '' + - - 'http://' + - {get_param: SwiftProxyVirtualIP} + - ':8080' + swift_admin_url_s3: + list_join: + - '' + - - 'http://' + - {get_param: SwiftProxyVirtualIP} + - ':8080' enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} @@ -945,9 +1171,34 @@ resources: - ':' - {get_param: GlancePort} glance_registry_host: {get_param: GlanceRegistryVirtualIP} + glance_public_url: + list_join: + - '' + - - {get_param: GlanceProtocol} + - '://' + - {get_param: PublicVirtualIP} + - ':' + - {get_param: GlancePort} + glance_internal_url: + list_join: + - '' + - - {get_param: GlanceProtocol} + - '://' + - {get_param: GlanceApiVirtualIP} + - ':' + - {get_param: GlancePort} + glance_admin_url: + list_join: + - '' + - - {get_param: GlanceProtocol} + - '://' + - {get_param: GlanceApiVirtualIP} + - ':' + - {get_param: GlancePort} heat_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} keystone_public_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} keystone_admin_api_network: {get_attr: [NetIpMap, net_ip_map, 
{get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} + keystone_region: {get_param: KeystoneRegion} mongo_db_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MongoDbNetwork]}]} neutron_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]} @@ -1030,6 +1281,14 @@ resources: tripleo::ringbuilder::replicas: {get_input: swift_replicas} tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours} swift_mount_check: {get_input: swift_mount_check} + swift::keystone::auth::public_url: {get_input: swift_public_url } + swift::keystone::auth::internal_url: {get_input: swift_internal_url } + swift::keystone::auth::admin_url: {get_input: swift_admin_url } + swift::keystone::auth::public_url_s3: {get_input: swift_public_url_v3 } + swift::keystone::auth::internal_url_s3: {get_input: swift_internal_url_v3 } + swift::keystone::auth::admin_url_s3: {get_input: swift_admin_url_v3 } + swift::keystone::auth::password: {get_input: swift_password } + swift::keystone::auth::region: {get_input: keystone_region} # NOTE(dprince): build_ring support is currently not wired in. # See: https://review.openstack.org/#/c/109225/ @@ -1057,6 +1316,14 @@ resources: cinder::glance::glance_api_servers: {get_input: glance_api_servers} cinder_backend_config: {get_input: CinderBackendConfig} cinder::db::mysql::password: {get_input: cinder_password} + cinder::keystone::auth::public_url: {get_input: cinder_public_url } + cinder::keystone::auth::internal_url: {get_input: cinder_internal_url } + cinder::keystone::auth::admin_url: {get_input: cinder_admin_url } + cinder::keystone::auth::public_url_v2: {get_input: cinder_public_url_v2 } + cinder::keystone::auth::internal_url_v2: {get_input: cinder_internal_url_v2 } + cinder::keystone::auth::admin_url_v2: {get_input: cinder_admin_url_v2 } + cinder::keystone::auth::password: {get_input: cinder_password } + cinder::keystone::auth::region: {get_input: keystone_region} # Glance glance::api::bind_port: {get_input: glance_port} @@ -1081,6 +1348,15 @@ resources: glance::backend::swift::swift_store_key: {get_input: glance_password} glance_backend: {get_input: glance_backend} glance::db::mysql::password: {get_input: glance_password} + glance::keystone::auth::public_url: {get_input: glance_public_url } + glance::keystone::auth::internal_url: {get_input: glance_internal_url } + glance::keystone::auth::admin_url: {get_input: glance_admin_url } + glance::keystone::auth::password: {get_input: glance_password } + glance::keystone::auth::region: {get_input: keystone_region} + glance_file_pcmk_device: {get_input: glance_file_pcmk_device} + glance_file_pcmk_fstype: {get_input: glance_file_pcmk_fstype} + glance_file_pcmk_manage: {get_input: glance_file_pcmk_manage} + glance_file_pcmk_options: {get_input: glance_file_pcmk_options} # Heat heat_stack_domain_admin_password: {get_input: heat_stack_domain_admin_password} @@ -1102,6 +1378,11 @@ resources: heat::database_connection: {get_input: heat_dsn} heat::debug: {get_input: debug} heat::db::mysql::password: {get_input: heat_password} + heat::keystone::auth::public_url: {get_input: heat_public_url } + heat::keystone::auth::internal_url: {get_input: heat_internal_url } + heat::keystone::auth::admin_url: {get_input: heat_admin_url } + heat::keystone::auth::password: {get_input: heat_password } + heat::keystone::auth::region: {get_input: keystone_region} # Keystone 
keystone::admin_token: {get_input: admin_token} @@ -1121,6 +1402,12 @@ resources: keystone::rabbit_port: {get_input: rabbit_client_port} keystone::notification_driver: {get_input: keystone_notification_driver} keystone::notification_format: {get_input: keystone_notification_format} + keystone::roles::admin::email: {get_input: admin_email} + keystone::roles::admin::password: {get_input: admin_password} + keystone::endpoint::public_url: {get_input: keystone_public_url} + keystone::endpoint::internal_url: {get_input: keystone_internal_url} + keystone::endpoint::admin_url: {get_input: keystone_identity_uri} + keystone::endpoint::region: {get_input: keystone_region} # MongoDB mongodb::server::bind_ip: {get_input: mongo_db_network} mongodb::server::nojournal: {get_input: mongodb_no_journal} @@ -1177,6 +1464,11 @@ resources: neutron_dsn: {get_input: neutron_dsn} neutron::agents::metadata::auth_url: {get_input: keystone_identity_uri} neutron::db::mysql::password: {get_input: neutron_password} + neutron::keystone::auth::public_url: {get_input: neutron_public_url } + neutron::keystone::auth::internal_url: {get_input: neutron_internal_url } + neutron::keystone::auth::admin_url: {get_input: neutron_admin_url } + neutron::keystone::auth::password: {get_input: neutron_password } + neutron::keystone::auth::region: {get_input: keystone_region} # Ceilometer ceilometer_backend: {get_input: ceilometer_backend} @@ -1195,6 +1487,11 @@ resources: ceilometer::agent::auth::auth_url: {get_input: keystone_auth_address} ceilometer::agent::central::coordination_url: {get_input: ceilometer_coordination_url} ceilometer::db::mysql::password: {get_input: ceilometer_password} + ceilometer::keystone::auth::public_url: {get_input: ceilometer_public_url } + ceilometer::keystone::auth::internal_url: {get_input: ceilometer_internal_url } + ceilometer::keystone::auth::admin_url: {get_input: ceilometer_admin_url } + ceilometer::keystone::auth::password: {get_input: ceilometer_password } + ceilometer::keystone::auth::region: {get_input: keystone_region} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} @@ -1213,10 +1510,21 @@ resources: nova::glance_api_servers: {get_input: glance_api_servers} nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} nova::network::neutron::neutron_admin_password: {get_input: neutron_password} - nova::network::neutron::neutron_url: {get_input: neutron_url} + nova::network::neutron::neutron_url: {get_input: neutron_internal_url} nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} nova::vncproxy::host: {get_input: nova_api_network} nova::db::mysql::password: {get_input: nova_password} + nova::keystone::auth::public_url: {get_input: nova_public_url} + nova::keystone::auth::internal_url: {get_input: nova_internal_url} + nova::keystone::auth::admin_url: {get_input: nova_admin_url} + nova::keystone::auth::public_url_v3: {get_input: nova_v3_public_url} + nova::keystone::auth::internal_url_v3: {get_input: nova_v3_internal_url} + nova::keystone::auth::admin_url_v3: {get_input: nova_v3_admin_url} + nova::keystone::auth::ec2_public_url: {get_input: nova_ec2_public_url} + nova::keystone::auth::ec2_internal_url: {get_input: nova_ec2_internal_url} + nova::keystone::auth::ec2_admin_url: {get_input: nova_ec2_admin_url} + nova::keystone::auth::password: {get_input: nova_password } + nova::keystone::auth::region: {get_input: keystone_region} # Horizon 
apache::ip: {get_input: horizon_network} diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml index d08a1692..2413f5a4 100644 --- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml +++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml @@ -265,10 +265,14 @@ resources: for (mac,swport) in nexus[nexus_switch]['servers'].iteritems(): lmac=mac.lower() if lmac in mac2host: - if mac2host[lmac] in nexus_cp[nexus_switch]['servers']: - nexus_cp[nexus_switch]['servers'][mac2host[lmac]]['ports'] += ',' + swport['ports'] + hostname = mac2host[lmac] + # for puppet we need a unique title even at the 2nd key level + serv_key = nexus_switch + "::" + hostname + if serv_key in nexus_cp[nexus_switch]['servers']: + nexus_cp[nexus_switch]['servers'][serv_key]['ports'] += ',' + swport['ports'] else: - nexus_cp[nexus_switch]['servers'][mac2host[lmac]] = swport + nexus_cp[nexus_switch]['servers'][serv_key] = swport + nexus_cp[nexus_switch]['servers'][serv_key]['hostname'] = hostname del nexus_cp[nexus_switch]['servers'][mac] # Note this echo means you can view the data via heat deployment-show print json.dumps(nexus_cp) diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml index 62907104..fadc8a00 100644 --- a/puppet/extraconfig/ceph/ceph-external-config.yaml +++ b/puppet/extraconfig/ceph/ceph-external-config.yaml @@ -29,6 +29,15 @@ parameters: type: comma_delimited_list ceph_mon_ips: type: comma_delimited_list + NovaRbdPoolName: + default: vms + type: string + CinderRbdPoolName: + default: volumes + type: string + GlanceRbdPoolName: + default: images + type: string resources: CephClusterConfigImpl: @@ -51,12 +60,21 @@ resources: secret: 'CLIENT_KEY', mode: '0644', cap_mon: 'allow r', - cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images' + cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL' } }" params: CLIENT_KEY: {get_param: ceph_client_key} - + NOVA_POOL: {get_param: NovaRbdPoolName} + CINDER_POOL: {get_param: CinderRbdPoolName} + GLANCE_POOL: {get_param: GlanceRbdPoolName} + nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName} + cinder_rbd_pool_name: {get_param: CinderRbdPoolName} + glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} + ceph_pools: + - {get_param: CinderRbdPoolName} + - {get_param: NovaRbdPoolName} + - {get_param: GlanceRbdPoolName} outputs: config_id: diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml index 5985116b..6730ddf1 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml @@ -69,6 +69,9 @@ parameters: N1000vExistingBridge: type: boolean default: true + N1000vVSMHostMgmtIntfVlan: + type: number + default: 0 #Plugin Parameters N1000vVSMUser: type: string @@ -125,6 +128,7 @@ resources: n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask} n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip} n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip} + n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan} # Cisco N1KV driver Parameters neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: 
n1kv_vsm_ip} neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username} @@ -159,6 +163,7 @@ resources: n1kv_vsm_password: {get_param: N1000vVSMPassword} n1kv_vsm_mgmt_netmask: {get_param: N1000vMgmtNetmask} n1kv_vsm_gateway_ip: {get_param: N1000vMgmtGatewayIP} + n1kv_phy_brige_vlan: {get_param: N1000vVSMHostMgmtIntfVlan} n1kv_vsm_pacemaker_ctrl: {get_param: N1000vPacemakerControl} n1kv_vsm_existing_br: {get_param: N1000vExistingBridge} n1kv_vsm_username: {get_param: N1000vVSMUser} diff --git a/puppet/hieradata/ceph.yaml b/puppet/hieradata/ceph.yaml index 18a48622..ca6d3954 100644 --- a/puppet/hieradata/ceph.yaml +++ b/puppet/hieradata/ceph.yaml @@ -7,11 +7,6 @@ ceph::profile::params::osds: {/srv/data: {}} ceph::profile::params::manage_repo: false ceph::profile::params::authentication_type: cephx -ceph_pools: - - volumes - - vms - - images - ceph_classes: [] ceph_osd_selinux_permissive: true diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index 4a872d57..030f661d 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -29,3 +29,5 @@ cinder::rabbit_heartbeat_timeout_threshold: 60 ceilometer::rabbit_heartbeat_timeout_threshold: 60 heat::rabbit_heartbeat_timeout_threshold: 60 keystone::rabbit_heartbeat_timeout_threshold: 60 + +nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL' diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml index d7c1f879..659008a5 100644 --- a/puppet/hieradata/compute.yaml +++ b/puppet/hieradata/compute.yaml @@ -12,13 +12,9 @@ nova::compute::libvirt::migration_support: true nova::compute::rbd::libvirt_rbd_user: 'openstack' nova::compute::rbd::rbd_keyring: 'client.openstack' -nova::compute::rbd::libvirt_images_rbd_pool: 'vms' nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}" -nova::config::nova_config: - cinder/catalog_info: - value: 'volumev2:cinderv2:internalURL' - ceilometer::agent::auth::auth_tenant_name: 'service' +ceilometer::agent::auth::auth_endpoint_type: 'internalURL' compute_classes: [] diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index b659ed78..07bfe543 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -39,9 +39,18 @@ cinder::api::keystone_tenant: 'service' swift::proxy::authtoken::admin_tenant_name: 'service' ceilometer::api::keystone_tenant: 'service' heat::keystone_tenant: 'service' +glance::keystone::auth::tenant: 'service' +nova::keystone::auth::tenant: 'service' +neutron::keystone::auth::tenant: 'service' +cinder::keystone::auth::tenant: 'service' +swift::keystone::auth::tenant: 'service' +ceilometer::keystone::auth::tenant: 'service' +heat::keystone::auth::tenant: 'service' # keystone keystone::cron::token_flush::maxdelay: 3600 +keystone::roles::admin::service_tenant: 'service' +keystone::roles::admin::admin_tenant: 'admin' #swift swift::proxy::pipeline: @@ -58,12 +67,17 @@ swift::proxy::pipeline: - 'proxy-server' swift::proxy::account_autocreate: true +swift::keystone::auth::configure_s3_endpoint: false +swift::keystone::auth::operator_roles: + - admin + - swiftoperator # glance glance::api::pipeline: 'keystone' glance::registry::pipeline: 'keystone' glance::backend::swift::swift_store_create_container_on_put: true glance::backend::rbd::rbd_store_user: 'openstack' +glance_file_pcmk_directory: '/var/lib/glance/images' # neutron neutron::server::sync_db: true @@ -74,6 +88,10 @@ nova::notify_on_state_change: 'vm_and_task_state' 
nova::api::default_floating_pool: 'public' nova::api::osapi_v3: true nova::scheduler::filter::ram_allocation_ratio: '1.0' +nova::keystone::auth::configure_ec2_endpoint: false + +# ceilometer +ceilometer::agent::auth::auth_endpoint_type: 'internalURL' # cinder cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler @@ -92,6 +110,9 @@ pacemaker::resource_defaults::defaults: # horizon horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache horizon::django_session_engine: 'django.contrib.sessions.backends.cache' +horizon::vhost_extra_params: + add_listen: false + priority: 10 # mysql mysql::server::manage_config_file: true diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index a88ca2d9..51f5e88d 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -include tripleo::packages +include ::tripleo::packages create_resources(sysctl::value, hiera('sysctl_settings'), {}) @@ -25,13 +25,13 @@ if str2bool(hiera('ceph_osd_selinux_permissive', true)) { exec { 'set selinux to permissive on boot': command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", - path => ["/usr/bin", "/usr/sbin"], + path => ['/usr/bin', '/usr/sbin'], } exec { 'set selinux to permissive': - command => "setenforce 0", + command => 'setenforce 0', onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ["/usr/bin", "/usr/sbin"], + path => ['/usr/bin', '/usr/sbin'], } -> Class['ceph::profile::osd'] } diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index 2150bab8..4c927569 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-include tripleo::packages +include ::tripleo::packages create_resources(sysctl::value, hiera('sysctl_settings'), {}) @@ -24,14 +24,14 @@ if count(hiera('ntp::servers')) > 0 { file { ['/etc/libvirt/qemu/networks/autostart/default.xml', '/etc/libvirt/qemu/networks/default.xml']: ensure => absent, - before => Service['libvirt'] + before => Service['libvirt'], } # in case libvirt has been already running before the Puppet run, make # sure the default network is destroyed exec { 'libvirt-default-net-destroy': command => '/usr/bin/virsh net-destroy default', - onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"', - before => Service['libvirt'], + onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"', + before => Service['libvirt'], } include ::nova @@ -55,10 +55,10 @@ if $rbd_ephemeral_storage or $rbd_persistent_storage { } if hiera('cinder_enable_nfs_backend', false) { - if ($::selinux != "false") { + if str2bool($::selinux) { selboolean { 'virt_use_nfs': - value => on, - persistent => true, + value => on, + persistent => true, } -> Package['nfs-utils'] } @@ -69,20 +69,20 @@ include ::nova::compute::libvirt include ::nova::network::neutron include ::neutron -class { 'neutron::plugins::ml2': +class { '::neutron::plugins::ml2': flat_networks => split(hiera('neutron_flat_networks'), ','), tenant_network_types => [hiera('neutron_tenant_network_type')], } -class { 'neutron::agents::ml2::ovs': +class { '::neutron::agents::ml2::ovs': bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), tunnel_types => split(hiera('neutron_tunnel_types'), ','), } if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { - class { 'neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), } } @@ -97,7 +97,7 @@ snmp::snmpv3_user { $snmpd_user: authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } -class { 'snmp': +class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index c3302362..06fffdca 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-include tripleo::packages +include ::tripleo::packages if hiera('step') >= 1 { @@ -70,18 +70,18 @@ if hiera('step') >= 2 { include ::tripleo::redis_notification } - if str2bool(hiera('enable_galera', 'true')) { + if str2bool(hiera('enable_galera', true)) { $mysql_config_file = '/etc/my.cnf.d/galera.cnf' } else { $mysql_config_file = '/etc/my.cnf.d/server.cnf' } # TODO Galara - class { 'mysql::server': - config_file => $mysql_config_file, - override_options => { + class { '::mysql::server': + config_file => $mysql_config_file, + override_options => { 'mysqld' => { - 'bind-address' => hiera('mysql_bind_host'), - 'max_connections' => hiera('mysql_max_connections'), + 'bind-address' => hiera('mysql_bind_host'), + 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', }, }, @@ -126,31 +126,31 @@ if hiera('step') >= 2 { $enable_ceph = hiera('ceph_storage_count', 0) > 0 if $enable_ceph { - class { 'ceph::profile::params': - mon_initial_members => downcase(hiera('ceph_mon_initial_members')) + class { '::ceph::profile::params': + mon_initial_members => downcase(hiera('ceph_mon_initial_members')), } include ::ceph::profile::mon } - if str2bool(hiera('enable_ceph_storage', 'false')) { + if str2bool(hiera('enable_ceph_storage', false)) { if str2bool(hiera('ceph_osd_selinux_permissive', true)) { exec { 'set selinux to permissive on boot': command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", - path => ["/usr/bin", "/usr/sbin"], + path => ['/usr/bin', '/usr/sbin'], } exec { 'set selinux to permissive': - command => "setenforce 0", + command => 'setenforce 0', onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ["/usr/bin", "/usr/sbin"], + path => ['/usr/bin', '/usr/sbin'], } -> Class['ceph::profile::osd'] } include ::ceph::profile::osd } - if str2bool(hiera('enable_external_ceph', 'false')) { + if str2bool(hiera('enable_external_ceph', false)) { include ::ceph::profile::client } @@ -159,6 +159,8 @@ if hiera('step') >= 2 { if hiera('step') >= 3 { include ::keystone + include ::keystone::roles::admin + include ::keystone::endpoint #TODO: need a cleanup-keystone-tokens.sh solution here keystone_config { @@ -194,9 +196,9 @@ if hiera('step') >= 3 { $glance_backend = downcase(hiera('glance_backend', 'swift')) case $glance_backend { - swift: { $backend_store = 'glance.store.swift.Store' } - file: { $backend_store = 'glance.store.filesystem.Store' } - rbd: { $backend_store = 'glance.store.rbd.Store' } + 'swift': { $backend_store = 'glance.store.swift.Store' } + 'file': { $backend_store = 'glance.store.filesystem.Store' } + 'rbd': { $backend_store = 'glance.store.rbd.Store' } default: { fail('Unrecognized glance_backend parameter.') } } $http_store = ['glance.store.http.Store'] @@ -204,8 +206,8 @@ if hiera('step') >= 3 { # TODO: notifications, scrubber, etc. 
include ::glance - class { 'glance::api': - known_stores => $glance_store + class { '::glance::api': + known_stores => $glance_store, } include ::glance::registry include join(['::glance::backend::', $glance_backend]) @@ -237,24 +239,24 @@ if hiera('step') >= 3 { require => Package['neutron'], } - class { 'neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), + class { '::neutron::plugins::ml2': + flat_networks => split(hiera('neutron_flat_networks'), ','), tenant_network_types => [hiera('neutron_tenant_network_type')], - mechanism_drivers => [hiera('neutron_mechanism_drivers')], + mechanism_drivers => [hiera('neutron_mechanism_drivers')], } - class { 'neutron::agents::ml2::ovs': + class { '::neutron::agents::ml2::ovs': bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), + tunnel_types => split(hiera('neutron_tunnel_types'), ','), } if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { - include neutron::plugins::ml2::cisco::nexus1000v + include ::neutron::plugins::ml2::cisco::nexus1000v - class { 'neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), } - class { 'n1k_vsm': + class { '::n1k_vsm': n1kv_source => hiera('n1kv_vsm_source', undef), n1kv_version => hiera('n1kv_vsm_version', undef), pacemaker_control => false, @@ -270,7 +272,7 @@ if hiera('step') >= 3 { } if hiera('neutron_enable_bigswitch_ml2', false) { - include neutron::plugins::ml2::bigswitch::restproxy + include ::neutron::plugins::ml2::bigswitch::restproxy } neutron_l3_agent_config { 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); @@ -289,7 +291,7 @@ if hiera('step') >= 3 { include ::cinder::glance include ::cinder::scheduler include ::cinder::volume - class {'cinder::setup_test_volume': + class { '::cinder::setup_test_volume': size => join([hiera('cinder_lvm_loop_device_size'), 'M']), } @@ -314,7 +316,7 @@ if hiera('step') >= 3 { $ceph_pools = hiera('ceph_pools') ceph::pool { $ceph_pools : } - $cinder_pool_requires = [Ceph::Pool['volumes']] + $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]] } else { $cinder_pool_requires = [] @@ -324,7 +326,7 @@ if hiera('step') >= 3 { $cinder_rbd_backend = 'tripleo_ceph' cinder::backend::rbd { $cinder_rbd_backend : - rbd_pool => 'volumes', + rbd_pool => hiera('cinder_rbd_pool_name'), rbd_user => 'openstack', rbd_secret_uuid => hiera('ceph::profile::params::fsid'), require => $cinder_pool_requires, @@ -369,18 +371,18 @@ if hiera('step') >= 3 { if hiera('cinder_enable_nfs_backend', false) { $cinder_nfs_backend = 'tripleo_nfs' - if ($::selinux != "false") { + if str2bool($::selinux) { selboolean { 'virt_use_nfs': - value => on, - persistent => true, + value => on, + persistent => true, } -> Package['nfs-utils'] } package {'nfs-utils': } -> cinder::backend::nfs { $cinder_nfs_backend : - nfs_servers => hiera('cinder_nfs_servers'), - nfs_mount_options => hiera('cinder_nfs_mount_options'), - nfs_shares_config => '/etc/cinder/shares-nfs.conf', + nfs_servers => hiera('cinder_nfs_servers'), + nfs_mount_options => hiera('cinder_nfs_mount_options'), + nfs_shares_config => '/etc/cinder/shares-nfs.conf', } } @@ -404,9 +406,9 @@ if hiera('step') >= 3 { include ::swift::proxy::formpost # swift storage - if 
str2bool(hiera('enable_swift_storage', 'true')) { - class {'swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')) + if str2bool(hiera('enable_swift_storage', true)) { + class { '::swift::storage::all': + mount_check => str2bool(hiera('swift_mount_check')), } if(!defined(File['/srv/node'])) { file { '/srv/node': @@ -440,7 +442,7 @@ if hiera('step') >= 3 { include ::ceilometer::alarm::evaluator include ::ceilometer::expirer include ::ceilometer::collector - include ceilometer::agent::auth + include ::ceilometer::agent::auth class { '::ceilometer::db' : database_connection => $ceilometer_database_connection, } @@ -461,11 +463,10 @@ if hiera('step') >= 3 { $_profile_support = 'None' } $neutron_options = {'profile_support' => $_profile_support } - $vhost_params = { add_listen => false } - class { 'horizon': - cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), - vhost_extra_params => $vhost_params, - neutron_options => $neutron_options, + + class { '::horizon': + cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), + neutron_options => $neutron_options, } $snmpd_user = hiera('snmpd_readonly_user_name') @@ -473,7 +474,7 @@ if hiera('step') >= 3 { authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } - class { 'snmp': + class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } @@ -484,6 +485,15 @@ if hiera('step') >= 3 { if hiera('step') >= 4 { include ::keystone::cron::token_flush + + include ::ceilometer::keystone::auth + include ::cinder::keystone::auth + include ::glance::keystone::auth + include ::heat::keystone::auth + include ::neutron::keystone::auth + include ::nova::keystone::auth + include ::swift::keystone::auth + } #END STEP 4 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')]) diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index b8fa89f8..b80f2c79 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -18,7 +18,7 @@ Pcmk_resource <| |> { try_sleep => 3, } -include tripleo::packages +include ::tripleo::packages if $::hostname == downcase(hiera('bootstrap_nodeid')) { $pacemaker_master = true @@ -28,7 +28,7 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) { $sync_db = false } -$enable_fencing = str2bool(hiera('enable_fencing', 'false')) and hiera('step') >= 5 +$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5 # When to start and enable services which haven't been Pacemakerized # FIXME: remove when we start all OpenStack services using Pacemaker @@ -55,7 +55,7 @@ if hiera('step') >= 1 { $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G')) user { 'hacluster': - ensure => present, + ensure => present, } -> class { '::pacemaker': hacluster_pwd => hiera('hacluster_pwd'), @@ -68,7 +68,7 @@ if hiera('step') >= 1 { disable => !$enable_fencing, } if $enable_fencing { - include tripleo::fencing + include ::tripleo::fencing # enable stonith after all fencing devices have been created Class['tripleo::fencing'] -> Class['pacemaker::stonith'] @@ -93,7 +93,7 @@ if hiera('step') >= 1 { environment_variables => 
hiera('rabbitmq_environment'), } -> file { '/var/lib/rabbitmq/.erlang.cookie': - ensure => 'present', + ensure => file, owner => 'rabbitmq', group => 'rabbitmq', mode => '0400', @@ -120,7 +120,7 @@ if hiera('step') >= 1 { } # Galera - if str2bool(hiera('enable_galera', 'true')) { + if str2bool(hiera('enable_galera', true)) { $mysql_config_file = '/etc/my.cnf.d/galera.cnf' } else { $mysql_config_file = '/etc/my.cnf.d/server.cnf' @@ -154,7 +154,7 @@ if hiera('step') >= 1 { 'wsrep_causal_reads' => '0', 'wsrep_notify_cmd' => '', 'wsrep_sst_method' => 'rsync', - } + }, } class { '::mysql::server': @@ -178,7 +178,7 @@ if hiera('step') >= 2 { if $pacemaker_master { - include pacemaker::resource_defaults + include ::pacemaker::resource_defaults # FIXME: we should not have to access tripleo::loadbalancer class # parameters here to configure pacemaker VIPs. The configuration @@ -199,8 +199,8 @@ if hiera('step') >= 2 { first_action => 'start', second_action => 'start', constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['control_vip']], + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['control_vip']], } pacemaker::constraint::colocation { 'control_vip-with-haproxy': source => "ip-${control_vip}", @@ -222,8 +222,8 @@ if hiera('step') >= 2 { first_action => 'start', second_action => 'start', constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['public_vip']], + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['public_vip']], } pacemaker::constraint::colocation { 'public_vip-with-haproxy': source => "ip-${public_vip}", @@ -246,8 +246,8 @@ if hiera('step') >= 2 { first_action => 'start', second_action => 'start', constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['redis_vip']], + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['redis_vip']], } pacemaker::constraint::colocation { 'redis_vip-with-haproxy': source => "ip-${redis_vip}", @@ -270,8 +270,8 @@ if hiera('step') >= 2 { first_action => 'start', second_action => 'start', constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['internal_api_vip']], + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['internal_api_vip']], } pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy': source => "ip-${internal_api_vip}", @@ -294,8 +294,8 @@ if hiera('step') >= 2 { first_action => 'start', second_action => 'start', constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['storage_vip']], + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_vip']], } pacemaker::constraint::colocation { 'storage_vip-with-haproxy': source => "ip-${storage_vip}", @@ -318,8 +318,8 @@ if hiera('step') >= 2 { first_action => 'start', second_action => 'start', constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['storage_mgmt_vip']], + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_mgmt_vip']], } pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy': source => "ip-${storage_mgmt_vip}", @@ -331,7 +331,7 @@ if hiera('step') >= 2 { } pacemaker::resource::service { 
$::memcached::params::service_name : - clone_params => true, + clone_params => 'interleave=true', require => Class['::memcached'], } @@ -385,7 +385,7 @@ if hiera('step') >= 2 { timeout => 30, tries => 180, try_sleep => 10, - environment => ["AVAILABLE_WHEN_READONLY=0"], + environment => ['AVAILABLE_WHEN_READONLY=0'], require => File['/etc/sysconfig/clustercheck'], } @@ -411,28 +411,28 @@ MYSQL_HOST=localhost\n", # Create all the database schemas if $sync_db { - class { 'keystone::db::mysql': - require => Exec['galera-ready'], + class { '::keystone::db::mysql': + require => Exec['galera-ready'], } - class { 'glance::db::mysql': - require => Exec['galera-ready'], + class { '::glance::db::mysql': + require => Exec['galera-ready'], } - class { 'nova::db::mysql': - require => Exec['galera-ready'], + class { '::nova::db::mysql': + require => Exec['galera-ready'], } - class { 'neutron::db::mysql': - require => Exec['galera-ready'], + class { '::neutron::db::mysql': + require => Exec['galera-ready'], } - class { 'cinder::db::mysql': - require => Exec['galera-ready'], + class { '::cinder::db::mysql': + require => Exec['galera-ready'], } - class { 'heat::db::mysql': - require => Exec['galera-ready'], + class { '::heat::db::mysql': + require => Exec['galera-ready'], } if downcase(hiera('ceilometer_backend')) == 'mysql' { - class { 'ceilometer::db::mysql': - require => Exec['galera-ready'], + class { '::ceilometer::db::mysql': + require => Exec['galera-ready'], } } } @@ -444,31 +444,31 @@ MYSQL_HOST=localhost\n", $enable_ceph = hiera('ceph_storage_count', 0) > 0 if $enable_ceph { - class { 'ceph::profile::params': - mon_initial_members => downcase(hiera('ceph_mon_initial_members')) + class { '::ceph::profile::params': + mon_initial_members => downcase(hiera('ceph_mon_initial_members')), } include ::ceph::profile::mon } - if str2bool(hiera('enable_ceph_storage', 'false')) { + if str2bool(hiera('enable_ceph_storage', false)) { if str2bool(hiera('ceph_osd_selinux_permissive', true)) { exec { 'set selinux to permissive on boot': command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", onlyif => "test -f /etc/selinux/config && ! 
grep '^SELINUX=permissive' /etc/selinux/config", - path => ["/usr/bin", "/usr/sbin"], + path => ['/usr/bin', '/usr/sbin'], } exec { 'set selinux to permissive': - command => "setenforce 0", + command => 'setenforce 0', onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ["/usr/bin", "/usr/sbin"], + path => ['/usr/bin', '/usr/sbin'], } -> Class['ceph::profile::osd'] } include ::ceph::profile::osd } - if str2bool(hiera('enable_external_ceph', 'false')) { + if str2bool(hiera('enable_external_ceph', false)) { include ::ceph::profile::client } @@ -478,9 +478,9 @@ MYSQL_HOST=localhost\n", if hiera('step') >= 3 { class { '::keystone': - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } #TODO: need a cleanup-keystone-tokens.sh solution here @@ -517,25 +517,35 @@ if hiera('step') >= 3 { $glance_backend = downcase(hiera('glance_backend', 'swift')) case $glance_backend { - swift: { $backend_store = 'glance.store.swift.Store' } - file: { $backend_store = 'glance.store.filesystem.Store' } - rbd: { $backend_store = 'glance.store.rbd.Store' } + 'swift': { $backend_store = 'glance.store.swift.Store' } + 'file': { $backend_store = 'glance.store.filesystem.Store' } + 'rbd': { $backend_store = 'glance.store.rbd.Store' } default: { fail('Unrecognized glance_backend parameter.') } } $http_store = ['glance.store.http.Store'] $glance_store = concat($http_store, $backend_store) + if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) { + pacemaker::resource::filesystem { 'glance-fs': + device => hiera('glance_file_pcmk_device'), + directory => hiera('glance_file_pcmk_directory'), + fstype => hiera('glance_file_pcmk_fstype'), + fsoptions => hiera('glance_file_pcmk_options', ''), + clone_params => '', + } + } + # TODO: notifications, scrubber, etc. 
include ::glance - class { 'glance::api': - known_stores => $glance_store, + class { '::glance::api': + known_stores => $glance_store, manage_service => false, - enabled => false, + enabled => false, } class { '::glance::registry' : - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } include join(['::glance::backend::', $glance_backend]) @@ -546,51 +556,51 @@ if hiera('step') >= 3 { include ::nova::config class { '::nova::api' : - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } class { '::nova::cert' : manage_service => false, - enabled => false, + enabled => false, } class { '::nova::conductor' : manage_service => false, - enabled => false, + enabled => false, } class { '::nova::consoleauth' : manage_service => false, - enabled => false, + enabled => false, } class { '::nova::vncproxy' : manage_service => false, - enabled => false, + enabled => false, } include ::nova::scheduler::filter class { '::nova::scheduler' : manage_service => false, - enabled => false, + enabled => false, } include ::nova::network::neutron # Neutron class definitions include ::neutron class { '::neutron::server' : - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } class { '::neutron::agents::dhcp' : manage_service => false, - enabled => false, + enabled => false, } class { '::neutron::agents::l3' : manage_service => false, - enabled => false, + enabled => false, } - class { 'neutron::agents::metadata': + class { '::neutron::agents::metadata': manage_service => false, - enabled => false, + enabled => false, } file { '/etc/neutron/dnsmasq-neutron.conf': content => hiera('neutron_dnsmasq_options'), @@ -599,16 +609,16 @@ if hiera('step') >= 3 { notify => Service['neutron-dhcp-service'], require => Package['neutron'], } - class { 'neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), + class { '::neutron::plugins::ml2': + flat_networks => split(hiera('neutron_flat_networks'), ','), tenant_network_types => [hiera('neutron_tenant_network_type')], - mechanism_drivers => [hiera('neutron_mechanism_drivers')], + mechanism_drivers => [hiera('neutron_mechanism_drivers')], } - class { 'neutron::agents::ml2::ovs': - manage_service => false, - enabled => false, - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), + class { '::neutron::agents::ml2::ovs': + manage_service => false, + enabled => false, + bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), + tunnel_types => split(hiera('neutron_tunnel_types'), ','), } if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') { @@ -619,21 +629,21 @@ if hiera('step') >= 3 { include ::neutron::plugins::ml2::cisco::type_nexus_vxlan } if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { - include neutron::plugins::ml2::cisco::nexus1000v + include ::neutron::plugins::ml2::cisco::nexus1000v - class { 'neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), } - class { 'n1k_vsm': - n1kv_source => hiera('n1kv_vsm_source', undef), - n1kv_version => hiera('n1kv_vsm_version', undef), + class { '::n1k_vsm': + n1kv_source => hiera('n1kv_vsm_source', undef), + n1kv_version => 
hiera('n1kv_vsm_version', undef), } } if hiera('neutron_enable_bigswitch_ml2', false) { - include neutron::plugins::ml2::bigswitch::restproxy + include ::neutron::plugins::ml2::bigswitch::restproxy } neutron_l3_agent_config { 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); @@ -644,20 +654,20 @@ if hiera('step') >= 3 { include ::cinder class { '::cinder::api': - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } class { '::cinder::scheduler' : manage_service => false, - enabled => false, + enabled => false, } class { '::cinder::volume' : manage_service => false, - enabled => false, + enabled => false, } include ::cinder::glance - class {'cinder::setup_test_volume': + class { '::cinder::setup_test_volume': size => join([hiera('cinder_lvm_loop_device_size'), 'M']), } @@ -682,7 +692,7 @@ if hiera('step') >= 3 { $ceph_pools = hiera('ceph_pools') ceph::pool { $ceph_pools : } - $cinder_pool_requires = [Ceph::Pool['volumes']] + $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]] } else { $cinder_pool_requires = [] @@ -692,7 +702,7 @@ if hiera('step') >= 3 { $cinder_rbd_backend = 'tripleo_ceph' cinder::backend::rbd { $cinder_rbd_backend : - rbd_pool => 'volumes', + rbd_pool => hiera('cinder_rbd_pool_name'), rbd_user => 'openstack', rbd_secret_uuid => hiera('ceph::profile::params::fsid'), require => $cinder_pool_requires, @@ -737,18 +747,18 @@ if hiera('step') >= 3 { if hiera('cinder_enable_nfs_backend', false) { $cinder_nfs_backend = 'tripleo_nfs' - if ($::selinux != "false") { + if str2bool($::selinux) { selboolean { 'virt_use_nfs': - value => on, - persistent => true, + value => on, + persistent => true, } -> Package['nfs-utils'] } - package {'nfs-utils': } -> + package { 'nfs-utils': } -> cinder::backend::nfs { $cinder_nfs_backend: - nfs_servers => hiera('cinder_nfs_servers'), - nfs_mount_options => hiera('cinder_nfs_mount_options'), - nfs_shares_config => '/etc/cinder/shares-nfs.conf', + nfs_servers => hiera('cinder_nfs_servers'), + nfs_mount_options => hiera('cinder_nfs_mount_options'), + nfs_shares_config => '/etc/cinder/shares-nfs.conf', } } @@ -760,7 +770,7 @@ if hiera('step') >= 3 { # swift proxy class { '::swift::proxy' : manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + enabled => $non_pcmk_start, } include ::swift::proxy::proxy_logging include ::swift::proxy::healthcheck @@ -774,21 +784,21 @@ if hiera('step') >= 3 { include ::swift::proxy::formpost # swift storage - if str2bool(hiera('enable_swift_storage', 'true')) { + if str2bool(hiera('enable_swift_storage', true)) { class {'::swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')) + mount_check => str2bool(hiera('swift_mount_check')), } class {'::swift::storage::account': manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + enabled => $non_pcmk_start, } class {'::swift::storage::container': manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + enabled => $non_pcmk_start, } class {'::swift::storage::object': manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + enabled => $non_pcmk_start, } if(!defined(File['/srv/node'])) { file { '/srv/node': @@ -804,12 +814,11 @@ if hiera('step') >= 3 { } # Ceilometer - $ceilometer_backend = downcase(hiera('ceilometer_backend')) - case $ceilometer_backend { - /mysql/ : { + case downcase(hiera('ceilometer_backend')) { + /mysql/: { $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') } - default : { + 
default: { $mongo_node_string = join($mongo_node_ips_with_port, ',') $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" } @@ -818,34 +827,34 @@ if hiera('step') >= 3 { include ::ceilometer::config class { '::ceilometer::api' : manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::agent::notification' : manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::agent::central' : manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::alarm::notifier' : manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::alarm::evaluator' : manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::collector' : manage_service => false, - enabled => false, + enabled => false, } include ::ceilometer::expirer class { '::ceilometer::db' : database_connection => $ceilometer_database_connection, sync_db => $sync_db, } - include ceilometer::agent::auth + include ::ceilometer::agent::auth Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } @@ -855,19 +864,19 @@ if hiera('step') >= 3 { } class { '::heat::api' : manage_service => false, - enabled => false, + enabled => false, } class { '::heat::api_cfn' : manage_service => false, - enabled => false, + enabled => false, } class { '::heat::api_cloudwatch' : manage_service => false, - enabled => false, + enabled => false, } class { '::heat::engine' : manage_service => false, - enabled => false, + enabled => false, } # httpd/apache and horizon @@ -883,15 +892,9 @@ if hiera('step') >= 3 { $_profile_support = 'None' } $neutron_options = {'profile_support' => $_profile_support } - $vhost_params = { - add_listen => false, - priority => 10, - } - class { 'horizon': - cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), - vhost_extra_params => $vhost_params, - server_aliases => $::hostname, - neutron_options => $neutron_options, + class { '::horizon': + cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), + neutron_options => $neutron_options, } $snmpd_user = hiera('snmpd_readonly_user_name') @@ -899,7 +902,7 @@ if hiera('step') >= 3 { authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } - class { 'snmp': + class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } @@ -915,12 +918,16 @@ if hiera('step') >= 4 { # Keystone pacemaker::resource::service { $::keystone::params::service_name : - clone_params => "interleave=true", + clone_params => 'interleave=true', + verify_on_create => true, + require => [File['/etc/keystone/ssl/certs/ca.pem'], + File['/etc/keystone/ssl/private/signing_key.pem'], + File['/etc/keystone/ssl/certs/signing_cert.pem']], } pacemaker::constraint::base { 'haproxy-then-keystone-constraint': constraint_type => 'order', - first_resource => "haproxy-clone", + first_resource => 'haproxy-clone', second_resource => "${::keystone::params::service_name}-clone", first_action => 'start', second_action => 'start', @@ -929,7 +936,7 @@ if hiera('step') >= 4 { } pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint': constraint_type => 
'order', - first_resource => "rabbitmq-clone", + first_resource => 'rabbitmq-clone', second_resource => "${::keystone::params::service_name}-clone", first_action => 'start', second_action => 'start', @@ -938,7 +945,7 @@ if hiera('step') >= 4 { } pacemaker::constraint::base { 'memcached-then-keystone-constraint': constraint_type => 'order', - first_resource => "memcached-clone", + first_resource => 'memcached-clone', second_resource => "${::keystone::params::service_name}-clone", first_action => 'start', second_action => 'start', @@ -947,7 +954,7 @@ if hiera('step') >= 4 { } pacemaker::constraint::base { 'galera-then-keystone-constraint': constraint_type => 'order', - first_resource => "galera-master", + first_resource => 'galera-master', second_resource => "${::keystone::params::service_name}-clone", first_action => 'promote', second_action => 'start', @@ -957,11 +964,11 @@ if hiera('step') >= 4 { # Cinder pacemaker::resource::service { $::cinder::params::api_service : - clone_params => "interleave=true", + clone_params => 'interleave=true', require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::cinder::params::scheduler_service : - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::resource::service { $::cinder::params::volume_service : } @@ -975,45 +982,45 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::keystone::params::service_name]], } pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint': - constraint_type => "order", - first_resource => "${::cinder::params::api_service}-clone", + constraint_type => 'order', + first_resource => "${::cinder::params::api_service}-clone", second_resource => "${::cinder::params::scheduler_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::cinder::params::api_service], - Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::cinder::params::api_service], + Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], } pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation': - source => "${::cinder::params::scheduler_service}-clone", - target => "${::cinder::params::api_service}-clone", - score => "INFINITY", + source => "${::cinder::params::scheduler_service}-clone", + target => "${::cinder::params::api_service}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::cinder::params::api_service], Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], } pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint': - constraint_type => "order", - first_resource => "${::cinder::params::scheduler_service}-clone", - second_resource => "${::cinder::params::volume_service}", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], - Pacemaker::Resource::Service[$::cinder::params::volume_service]], + constraint_type => 'order', + first_resource => "${::cinder::params::scheduler_service}-clone", + second_resource => $::cinder::params::volume_service, + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], + Pacemaker::Resource::Service[$::cinder::params::volume_service]], } pacemaker::constraint::colocation { 
'cinder-volume-with-cinder-scheduler-colocation': - source => "${::cinder::params::volume_service}", - target => "${::cinder::params::scheduler_service}-clone", - score => "INFINITY", + source => $::cinder::params::volume_service, + target => "${::cinder::params::scheduler_service}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], Pacemaker::Resource::Service[$::cinder::params::volume_service]], } # Glance pacemaker::resource::service { $::glance::params::registry_service_name : - clone_params => "interleave=true", + clone_params => 'interleave=true', require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::glance::params::api_service_name : - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::constraint::base { 'keystone-then-glance-registry-constraint': @@ -1026,18 +1033,18 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::keystone::params::service_name]], } pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::glance::params::registry_service_name}-clone", second_resource => "${::glance::params::api_service_name}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], - Pacemaker::Resource::Service[$::glance::params::api_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], + Pacemaker::Resource::Service[$::glance::params::api_service_name]], } pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation': source => "${::glance::params::api_service_name}-clone", target => "${::glance::params::registry_service_name}-clone", - score => "INFINITY", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], Pacemaker::Resource::Service[$::glance::params::api_service_name]], } @@ -1049,155 +1056,154 @@ if hiera('step') >= 4 { # https://bugzilla.redhat.com/show_bug.cgi?id=1233061 exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } -> pacemaker::resource::service { $::neutron::params::server_service: - op_params => "start timeout=90", - clone_params => "interleave=true", - require => Pacemaker::Resource::Service[$::keystone::params::service_name] + clone_params => 'interleave=true', + require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::neutron::params::l3_agent_service: - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::resource::service { $::neutron::params::dhcp_agent_service: - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::resource::service { $::neutron::params::ovs_agent_service: - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::resource::service { $::neutron::params::metadata_agent_service: - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: - ocf_agent_name => "neutron:OVSCleanup", - clone_params => "interleave=true", + ocf_agent_name => 'neutron:OVSCleanup', + clone_params => 'interleave=true', } pacemaker::resource::ocf { 'neutron-netns-cleanup': - ocf_agent_name => "neutron:NetnsCleanup", 
- clone_params => "interleave=true", + ocf_agent_name => 'neutron:NetnsCleanup', + clone_params => 'interleave=true', } # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::ovs_cleanup_service}-clone", - second_resource => "neutron-netns-cleanup-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], - Pacemaker::Resource::Ocf['neutron-netns-cleanup']], + constraint_type => 'order', + first_resource => "${::neutron::params::ovs_cleanup_service}-clone", + second_resource => 'neutron-netns-cleanup-clone', + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], + Pacemaker::Resource::Ocf['neutron-netns-cleanup']], } pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': - source => "neutron-netns-cleanup-clone", - target => "${::neutron::params::ovs_cleanup_service}-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], + source => 'neutron-netns-cleanup-clone', + target => "${::neutron::params::ovs_cleanup_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], Pacemaker::Resource::Ocf['neutron-netns-cleanup']], } pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': - constraint_type => "order", - first_resource => "neutron-netns-cleanup-clone", + constraint_type => 'order', + first_resource => 'neutron-netns-cleanup-clone', second_resource => "${::neutron::params::ovs_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], - Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], + Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], } pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': - source => "${::neutron::params::ovs_agent_service}-clone", - target => "neutron-netns-cleanup-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], - Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], + source => "${::neutron::params::ovs_agent_service}-clone", + target => 'neutron-netns-cleanup-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], + Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], } #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3 pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': - constraint_type => "order", - first_resource => "${::keystone::params::service_name}-clone", + constraint_type => 'order', + first_resource => "${::keystone::params::service_name}-clone", second_resource => "${::neutron::params::server_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::keystone::params::service_name], - Pacemaker::Resource::Service[$::neutron::params::server_service]], + first_action => 'start', + second_action => 'start', + require => 
[Pacemaker::Resource::Service[$::keystone::params::service_name], + Pacemaker::Resource::Service[$::neutron::params::server_service]], } pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::server_service}-clone", + constraint_type => 'order', + first_resource => "${::neutron::params::server_service}-clone", second_resource => "${::neutron::params::ovs_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::neutron::params::server_service], - Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::server_service], + Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], } pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::ovs_agent_service}-clone", + constraint_type => 'order', + first_resource => "${::neutron::params::ovs_agent_service}-clone", second_resource => "${::neutron::params::dhcp_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], + Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], } pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': - source => "${::neutron::params::dhcp_agent_service}-clone", - target => "${::neutron::params::ovs_agent_service}-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], + source => "${::neutron::params::dhcp_agent_service}-clone", + target => "${::neutron::params::ovs_agent_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], + Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], } pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::dhcp_agent_service}-clone", + constraint_type => 'order', + first_resource => "${::neutron::params::dhcp_agent_service}-clone", second_resource => "${::neutron::params::l3_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]] + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], + Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]], } pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation': - source => "${::neutron::params::l3_agent_service}-clone", - target => "${::neutron::params::dhcp_agent_service}-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]] + source => 
"${::neutron::params::l3_agent_service}-clone", + target => "${::neutron::params::dhcp_agent_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], + Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]], } pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::l3_agent_service}-clone", + constraint_type => 'order', + first_resource => "${::neutron::params::l3_agent_service}-clone", second_resource => "${::neutron::params::metadata_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]] + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], + Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], } pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation': - source => "${::neutron::params::metadata_agent_service}-clone", - target => "${::neutron::params::l3_agent_service}-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]] + source => "${::neutron::params::metadata_agent_service}-clone", + target => "${::neutron::params::l3_agent_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], + Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], } # Nova pacemaker::resource::service { $::nova::params::api_service_name : - clone_params => "interleave=true", - op_params => "start timeout=90s monitor start-delay=10s", + clone_params => 'interleave=true', + op_params => 'start timeout=90s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::conductor_service_name : - clone_params => "interleave=true", - op_params => "start timeout=90s monitor start-delay=10s", + clone_params => 'interleave=true', + op_params => 'start timeout=90s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::consoleauth_service_name : - clone_params => "interleave=true", - op_params => "start timeout=90s monitor start-delay=10s", - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + clone_params => 'interleave=true', + op_params => 'start timeout=90s monitor start-delay=10s', + require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::nova::params::vncproxy_service_name : - clone_params => "interleave=true", - op_params => "start timeout=90s monitor start-delay=10s", + clone_params => 'interleave=true', + op_params => 'start timeout=90s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::scheduler_service_name : - clone_params => "interleave=true", - op_params => "start timeout=90s monitor start-delay=10s", + clone_params => 'interleave=true', + op_params => 'start timeout=90s monitor start-delay=10s', } pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint': @@ -1210,75 +1216,85 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::keystone::params::service_name]], } pacemaker::constraint::base { 
'nova-consoleauth-then-nova-vncproxy-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::nova::params::consoleauth_service_name}-clone", second_resource => "${::nova::params::vncproxy_service_name}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], + Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], } pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation': - source => "${::nova::params::vncproxy_service_name}-clone", - target => "${::nova::params::consoleauth_service_name}-clone", - score => "INFINITY", + source => "${::nova::params::vncproxy_service_name}-clone", + target => "${::nova::params::consoleauth_service_name}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], } pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::nova::params::vncproxy_service_name}-clone", second_resource => "${::nova::params::api_service_name}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], - Pacemaker::Resource::Service[$::nova::params::api_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], + Pacemaker::Resource::Service[$::nova::params::api_service_name]], } pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation': - source => "${::nova::params::api_service_name}-clone", - target => "${::nova::params::vncproxy_service_name}-clone", - score => "INFINITY", + source => "${::nova::params::api_service_name}-clone", + target => "${::nova::params::vncproxy_service_name}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], Pacemaker::Resource::Service[$::nova::params::api_service_name]], } pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::nova::params::api_service_name}-clone", second_resource => "${::nova::params::scheduler_service_name}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], - Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], + Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], } pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation': - source => "${::nova::params::scheduler_service_name}-clone", - target => "${::nova::params::api_service_name}-clone", - score => "INFINITY", + source => "${::nova::params::scheduler_service_name}-clone", + target => "${::nova::params::api_service_name}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], 
Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], } pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::nova::params::scheduler_service_name}-clone", second_resource => "${::nova::params::conductor_service_name}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], - Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], + Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], } pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation': - source => "${::nova::params::conductor_service_name}-clone", - target => "${::nova::params::scheduler_service_name}-clone", - score => "INFINITY", + source => "${::nova::params::conductor_service_name}-clone", + target => "${::nova::params::scheduler_service_name}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], } # Ceilometer - pacemaker::resource::service { $::ceilometer::params::agent_central_service_name : - clone_params => 'interleave=true', - require => [Pacemaker::Resource::Service[$::keystone::params::service_name], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], + case downcase(hiera('ceilometer_backend')) { + /mysql/: { + pacemaker::resource::service { $::ceilometer::params::agent_central_service_name : + clone_params => 'interleave=true', + require => Pacemaker::Resource::Service[$::keystone::params::service_name], + } + } + default: { + pacemaker::resource::service { $::ceilometer::params::agent_central_service_name : + clone_params => 'interleave=true', + require => [Pacemaker::Resource::Service[$::keystone::params::service_name], + Pacemaker::Resource::Service[$::mongodb::params::service_name]], + } + } } pacemaker::resource::service { $::ceilometer::params::collector_service_name : clone_params => 'interleave=true', @@ -1308,7 +1324,7 @@ if hiera('step') >= 4 { } pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint': constraint_type => 'order', - first_resource => "redis-master", + first_resource => 'redis-master', second_resource => "${::ceilometer::params::agent_central_service_name}-clone", first_action => 'promote', second_action => 'start', @@ -1454,8 +1470,8 @@ if hiera('step') >= 4 { second_resource => "${::heat::params::api_cfn_service_name}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], + require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], + Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], } pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation': source => "${::heat::params::api_cfn_service_name}-clone", @@ -1470,8 +1486,8 @@ if hiera('step') >= 4 { second_resource => "${::heat::params::api_cloudwatch_service_name}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - 
Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
+ require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
+ Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
 }
 pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
 source => "${::heat::params::api_cloudwatch_service_name}-clone",
@@ -1486,8 +1502,8 @@ if hiera('step') >= 4 {
 second_resource => "${::heat::params::engine_service_name}-clone",
 first_action => 'start',
 second_action => 'start',
- require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
- Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
+ require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
+ Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
 }
 pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
 source => "${::heat::params::engine_service_name}-clone",
@@ -1508,7 +1524,7 @@ if hiera('step') >= 4 {
 # Horizon
 pacemaker::resource::service { $::horizon::params::http_service:
- clone_params => "interleave=true",
+ clone_params => 'interleave=true',
 }
 #VSM
@@ -1519,7 +1535,7 @@ if hiera('step') >= 4 {
 require => Class['n1k_vsm'],
 meta_params => 'resource-stickiness=INFINITY',
 }
- if str2bool(hiera('n1k_vsm::pacemaker_control', 'true')) {
+ if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
 pacemaker::resource::ocf { 'vsm-s' :
 ocf_agent_name => 'heartbeat:VirtualDomain',
 resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
@@ -1527,9 +1543,9 @@ if hiera('step') >= 4 {
 meta_params => 'resource-stickiness=INFINITY',
 }
 pacemaker::constraint::colocation { 'vsm-colocation-contraint':
- source => "vsm-p",
- target => "vsm-s",
- score => "-INFINITY",
+ source => 'vsm-p',
+ target => 'vsm-s',
+ score => '-INFINITY',
 require => [Pacemaker::Resource::Ocf['vsm-p'],
 Pacemaker::Resource::Ocf['vsm-s']],
 }
@@ -1540,5 +1556,41 @@ if hiera('step') >= 4 {
 } #END STEP 4
+if hiera('step') >= 5 {
+
+ if $pacemaker_master {
+
+ class {'::keystone::roles::admin' :
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ } ->
+ class {'::keystone::endpoint' :
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ } ->
+ class { '::ceilometer::keystone::auth' :
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ } ->
+ class { '::cinder::keystone::auth' :
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ } ->
+ class { '::glance::keystone::auth' :
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ } ->
+ class { '::heat::keystone::auth' :
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ } ->
+ class { '::neutron::keystone::auth' :
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ } ->
+ class { '::nova::keystone::auth' :
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ } ->
+ class { '::swift::keystone::auth' :
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ }
+
+ }
+
+} #END STEP 5
+
 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
 package_manifest{$package_manifest_name: ensure => present}
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
index 5f4b070d..5f0b4c82 100644
--- a/puppet/manifests/overcloud_object.pp
+++ b/puppet/manifests/overcloud_object.pp
@@ -13,7 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-include tripleo::packages
+include ::tripleo::packages
 create_resources(sysctl::value, hiera('sysctl_settings'), {})
@@ -22,8 +22,8 @@ if count(hiera('ntp::servers')) > 0 {
 }
 include ::swift
-class {'swift::storage::all':
- mount_check => str2bool(hiera('swift_mount_check'))
+class { '::swift::storage::all':
+ mount_check => str2bool(hiera('swift_mount_check')),
 }
 if(!defined(File['/srv/node'])) {
 file { '/srv/node':
@@ -43,7 +43,7 @@ snmp::snmpv3_user { $snmpd_user:
 authtype => 'MD5',
 authpass => hiera('snmpd_readonly_user_password'),
 }
-class { 'snmp':
+class { '::snmp':
 agentaddress => ['udp:161','udp6:[::1]:161'],
 snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
 }
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index eaaed66e..7f24959a 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -13,7 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-include tripleo::packages
+include ::tripleo::packages
 create_resources(sysctl::value, hiera('sysctl_settings'), {})
@@ -47,7 +47,7 @@ snmp::snmpv3_user { $snmpd_user:
 authtype => 'MD5',
 authpass => hiera('snmpd_readonly_user_password'),
 }
-class { 'snmp':
+class { '::snmp':
 agentaddress => ['udp:161','udp6:[::1]:161'],
 snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
 }
diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp
index 1897dcd0..4296208b 100644
--- a/puppet/manifests/ringbuilder.pp
+++ b/puppet/manifests/ringbuilder.pp
@@ -13,7 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-include tripleo::packages
+include ::tripleo::packages
 define add_devices(
 $swift_zones = '1'
@@ -37,31 +37,33 @@ define add_devices(
 $base = regsubst($name,'^r1.*-(.*)$','\1')
 $object = regsubst($base, '%PORT%', '6000')
 ring_object_device { $object:
- zone => '1',
- weight => 100,
+ zone => '1',
+ weight => 100,
 }
 $container = regsubst($base, '%PORT%', '6001')
 ring_container_device { $container:
- zone => '1',
- weight => 100,
+ zone => '1',
+ weight => 100,
 }
 $account = regsubst($base, '%PORT%', '6002')
 ring_account_device { $account:
- zone => '1',
- weight => 100,
+ zone => '1',
+ weight => 100,
 }
 }
 class tripleo::ringbuilder (
 $swift_zones = '1',
 $devices = '',
- $build_ring = 'True',
+ $build_ring = true,
 $part_power,
 $replicas,
 $min_part_hours,
 ) {
- if str2bool(downcase("$build_ring")) {
+ validate_bool($build_ring)
+
+ if $build_ring {
 $device_array = strip(split(rstrip($devices), ','))
@@ -74,7 +76,7 @@ class tripleo::ringbuilder (
 # add all other devices
 add_devices {$device_array:
- swift_zones => $swift_zones
+ swift_zones => $swift_zones,
 } ->
 # rebalance