Diffstat (limited to 'puppet')
50 files changed, 544 insertions, 558 deletions
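A pattern that recurs throughout the changes below is the removal of the MysqlVirtualIPUri parameter in favour of database connection strings assembled from the EndpointMap. As an illustration (not part of the diff itself), the cinder DSN is now built roughly as follows, and should render to the same mysql+pymysql://cinder:<password>@<mysql-vip>/cinder URI that was previously hard-coded, assuming the MysqlInternal endpoint resolves to that protocol and host:

    cinder_dsn:
      list_join:
        - ''
        - - {get_param: [EndpointMap, MysqlInternal, protocol]}
          - '://cinder:'
          - {get_param: CinderPassword}
          - '@'
          - {get_param: [EndpointMap, MysqlInternal, host]}
          - '/cinder'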
diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml index fd161886..f5873ddb 100644 --- a/puppet/ceph-cluster-config.yaml +++ b/puppet/ceph-cluster-config.yaml @@ -123,8 +123,10 @@ resources: gnocchi::storage::ceph::ceph_keyring: list_join: - '.' - - - 'client' + - - '/etc/ceph/ceph' + - 'client' - {get_param: CephClientUserName} + - 'keyring' ceph_client_user_name: {get_param: CephClientUserName} ceph_pools: - {get_param: CinderRbdPoolName} diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml index e90710c7..2b9ae751 100644 --- a/puppet/ceph-storage-post.yaml +++ b/puppet/ceph-storage-post.yaml @@ -13,6 +13,10 @@ parameters: NodeConfigIdentifiers: type: json description: Value which changes if the node configuration may need to be re-applied + StepConfig: + type: string + description: Config manifests that will be used to step through the deployment. + default: '' resources: @@ -33,26 +37,44 @@ resources: group: puppet options: enable_debug: {get_param: ConfigDebug} + enable_hiera: True + enable_facter: False + inputs: + - name: step outputs: - name: result config: - get_file: manifests/overcloud_cephstorage.pp + list_join: + - '' + - - get_file: manifests/overcloud_cephstorage.pp + - {get_param: StepConfig} - CephStorageDeployment_Step1: + CephStorageDeployment_Step2: type: OS::Heat::StructuredDeployments depends_on: CephStorageArtifactsDeploy properties: - name: CephStorageDeployment_Step1 + name: CephStorageDeployment_Step2 servers: {get_param: servers} config: {get_resource: CephStoragePuppetConfig} input_values: + step: 2 + update_identifier: {get_param: NodeConfigIdentifiers} + + CephStorageDeployment_Step3: + type: OS::Heat::StructuredDeployments + depends_on: CephStorageDeployment_Step2 + properties: + name: CephStorageDeployment_Step3 + servers: {get_param: servers} + config: {get_resource: CephStoragePuppetConfig} + input_values: + step: 3 update_identifier: {get_param: NodeConfigIdentifiers} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. 
ExtraConfig: - depends_on: CephStorageDeployment_Step1 + depends_on: CephStorageDeployment_Step3 type: OS::TripleO::NodeExtraConfigPost properties: servers: {get_param: servers} - diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml index d2b90c59..eedb35e4 100644 --- a/puppet/ceph-storage.yaml +++ b/puppet/ceph-storage.yaml @@ -96,6 +96,9 @@ parameters: NodeIndex: type: number default: 0 + ServiceConfigSettings: + type: json + default: {} resources: CephStorage: @@ -247,6 +250,7 @@ resources: - heat_config_%{::deploy_config_name} - ceph_extraconfig - extraconfig + - service_configs - ceph_cluster # provided by CephClusterConfig - ceph - '"%{::osfamily}"' @@ -254,6 +258,8 @@ resources: - network merge_behavior: deeper datafiles: + service_configs: + mapped_data: {get_param: ServiceConfigSettings} common: raw_data: {get_file: hieradata/common.yaml} network: diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml index 13914878..d760de5e 100644 --- a/puppet/cinder-storage.yaml +++ b/puppet/cinder-storage.yaml @@ -112,9 +112,6 @@ parameters: GlanceApiVirtualIP: type: string default: '' - MysqlVirtualIPUri: - type: string - default: '' NetworkDeploymentActions: type: comma_delimited_list description: > @@ -281,7 +278,15 @@ resources: config: {get_resource: BlockStorageConfig} input_values: debug: {get_param: Debug} - cinder_dsn: {list_join: ['', ['mysql+pymysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIPUri} , '/cinder']]} + cinder_dsn: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://cinder:' + - {get_param: CinderPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/cinder' snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} cinder_lvm_loop_device_size: diff --git a/puppet/compute-post.yaml b/puppet/compute-post.yaml index a122df0e..698cadba 100644 --- a/puppet/compute-post.yaml +++ b/puppet/compute-post.yaml @@ -13,7 +13,10 @@ parameters: NodeConfigIdentifiers: type: json description: Value which changes if the node configuration may need to be re-applied - + StepConfig: + type: string + description: Config manifests that will be used to step through the deployment. 
+ default: '' resources: @@ -34,25 +37,55 @@ resources: group: puppet options: enable_debug: {get_param: ConfigDebug} + enable_hiera: True + enable_facter: False + inputs: + - name: step outputs: - name: result config: - get_file: manifests/overcloud_compute.pp + list_join: + - '' + - - get_file: manifests/overcloud_compute.pp + - {get_param: StepConfig} + + ComputeServicesBaseDeployment_Step2: + type: OS::Heat::StructuredDeployments + depends_on: [ComputeArtifactsDeploy] + properties: + name: ComputeServicesBaseDeployment_Step2 + servers: {get_param: servers} + config: {get_resource: ComputePuppetConfig} + input_values: + step: 2 + update_identifier: {get_param: NodeConfigIdentifiers} + + ComputeOvercloudServicesDeployment_Step3: + type: OS::Heat::StructuredDeployments + depends_on: ComputeServicesBaseDeployment_Step2 + properties: + name: ComputeOvercloudServicesDeployment_Step3 + servers: {get_param: servers} + config: {get_resource: ComputePuppetConfig} + input_values: + step: 3 + update_identifier: {get_param: NodeConfigIdentifiers} - ComputePuppetDeployment: + ComputeOvercloudServicesDeployment_Step4: type: OS::Heat::StructuredDeployments - depends_on: ComputeArtifactsDeploy + depends_on: ComputeOvercloudServicesDeployment_Step3 properties: - name: ComputePuppetDeployment + name: ComputeOvercloudServicesDeployment_Step4 servers: {get_param: servers} config: {get_resource: ComputePuppetConfig} input_values: + step: 4 update_identifier: {get_param: NodeConfigIdentifiers} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. ExtraConfig: - depends_on: ComputePuppetDeployment + depends_on: ComputeOvercloudServicesDeployment_Step4 type: OS::TripleO::NodeExtraConfigPost properties: servers: {get_param: servers} diff --git a/puppet/compute.yaml b/puppet/compute.yaml index e56deefd..b7f7f4a5 100644 --- a/puppet/compute.yaml +++ b/puppet/compute.yaml @@ -339,6 +339,9 @@ parameters: type: json description: Optional scheduler hints to pass to nova default: {} + ServiceConfigSettings: + type: json + default: {} resources: @@ -481,6 +484,7 @@ resources: - heat_config_%{::deploy_config_name} - compute_extraconfig - extraconfig + - service_configs - compute - ceph_cluster # provided by CephClusterConfig - ceph @@ -495,6 +499,8 @@ resources: - neutron_opencontrail_data # Optionally provided by ComputeExtraConfigPre merge_behavior: deeper datafiles: + service_configs: + mapped_data: {get_param: ServiceConfigSettings} compute_extraconfig: mapped_data: {get_param: NovaComputeExtraConfig} extraconfig: @@ -525,6 +531,12 @@ resources: nova_api_host: {get_input: nova_api_host} nova::compute::vncproxy_host: {get_input: nova_public_ip} nova::compute::rbd::ephemeral_storage: {get_input: nova_enable_rbd_backend} + # TUNNELLED mode provides a security enhancement when using shared storage but is not + # supported when not using shared storage. + # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12 + # In future versions of QEMU (2.6, mostly), Dan's native encryption + # work will obsolete the need to use TUNNELLED transport mode. 
+ nova::migration::live_migration_tunnelled: {get_input: nova_enable_rbd_backend} rbd_persistent_storage: {get_input: cinder_enable_rbd_backend} nova_password: {get_input: nova_password} nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} @@ -539,7 +551,7 @@ resources: ceilometer::rabbit_password: {get_input: rabbit_password} ceilometer::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} ceilometer::rabbit_port: {get_input: rabbit_client_port} - ceilometer::metering_secret: {get_input: ceilometer_metering_secret} + ceilometer::telemetry_secret: {get_input: ceilometer_metering_secret} ceilometer::agent::auth::auth_password: {get_input: ceilometer_password} ceilometer::agent::auth::auth_url: {get_input: ceilometer_agent_auth_url} ceilometer_compute_agent: {get_input: ceilometer_compute_agent} diff --git a/puppet/controller.yaml b/puppet/controller.yaml index 3aa0df14..9c0d8e82 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -560,11 +560,6 @@ parameters: default: true description: Whether to manage Swift rings or not type: boolean - SwiftPassword: - description: The password for the swift service account, used by the swift proxy - services. - hidden: true - type: string SwiftProxyVirtualIP: type: string default: '' @@ -572,10 +567,6 @@ parameters: type: number default: 3 description: How many replicas to use in the swift rings. - SwiftWorkers: - default: 0 - description: Number of workers for Swift service. - type: number TimeZone: default: 'UTC' description: The timezone to be set on controller nodes. @@ -596,9 +587,6 @@ parameters: MysqlVirtualIP: type: string default: '' - MysqlVirtualIPUri: - type: string - default: '' NeutronApiVirtualIP: type: string default: '' @@ -827,7 +815,6 @@ resources: cinder_workers: {get_param: CinderWorkers} nova_workers: {get_param: NovaWorkers} neutron_workers: {get_param: NeutronWorkers} - swift_workers: {get_param: SwiftWorkers} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} haproxy_log_address: {get_param: HAProxySyslogAddress} @@ -856,10 +843,11 @@ resources: cinder_dsn: list_join: - '' - - - 'mysql+pymysql://cinder:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://cinder:' - {get_param: CinderPassword} - '@' - - {get_param: MysqlVirtualIPUri} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/cinder' cinder_public_url: {get_param: [EndpointMap, CinderPublic, uri]} cinder_internal_url: {get_param: [EndpointMap, CinderInternal, uri]} @@ -962,10 +950,11 @@ resources: neutron_dsn: list_join: - '' - - - 'mysql+pymysql://neutron:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://neutron:' - {get_param: NeutronPassword} - '@' - - {get_param: MysqlVirtualIPUri} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/ovs_neutron?charset=utf8' neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] } neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] } @@ -995,18 +984,20 @@ resources: ceilometer_dsn: list_join: - '' - - - 'mysql+pymysql://ceilometer:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://ceilometer:' - {get_param: CeilometerPassword} - '@' - - {get_param: MysqlVirtualIPUri} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/ceilometer' gnocchi_dsn: list_join: - '' - - - 'mysql+pymysql://gnocchi:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://gnocchi:' - {get_param: GnocchiPassword} - '@' - - {get_param: MysqlVirtualIPUri} + - 
{get_param: [EndpointMap, MysqlInternal, host]} - '/gnocchi' gnocchi_internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]} gnocchi_public_url: { get_param: [ EndpointMap, GnocchiPublic, uri ] } @@ -1024,18 +1015,20 @@ resources: nova_dsn: list_join: - '' - - - 'mysql+pymysql://nova:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://nova:' - {get_param: NovaPassword} - '@' - - {get_param: MysqlVirtualIPUri} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/nova' nova_api_dsn: list_join: - '' - - - 'mysql+pymysql://nova_api:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://nova_api:' - {get_param: NovaPassword} - '@' - - {get_param: MysqlVirtualIPUri} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/nova_api' upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute} instance_name_template: {get_param: InstanceNameTemplate} @@ -1056,18 +1049,11 @@ resources: control_virtual_interface: {get_param: ControlVirtualInterface} public_virtual_interface: {get_param: PublicVirtualInterface} swift_hash_suffix: {get_param: SwiftHashSuffix} - swift_password: {get_param: SwiftPassword} swift_part_power: {get_param: SwiftPartPower} swift_ring_build: {get_param: SwiftRingBuild} swift_replicas: {get_param: SwiftReplicas} swift_min_part_hours: {get_param: SwiftMinPartHours} swift_mount_check: {get_param: SwiftMountCheck} - swift_public_url: {get_param: [EndpointMap, SwiftPublic, uri]} - swift_internal_url: {get_param: [EndpointMap, SwiftInternal, uri]} - swift_admin_url: {get_param: [EndpointMap, SwiftAdmin, uri]} - swift_public_url_s3: {get_param: [EndpointMap, SwiftS3Public, uri]} - swift_internal_url_s3: {get_param: [EndpointMap, SwiftS3Internal, uri]} - swift_admin_url_s3: {get_param: [EndpointMap, SwiftS3Admin, uri]} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} sahara_password: {get_param: SaharaPassword} @@ -1077,10 +1063,11 @@ resources: sahara_dsn: list_join: - '' - - - 'mysql://sahara:' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://sahara:' - {get_param: SaharaPassword} - '@' - - {get_param: MysqlVirtualIPUri} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/sahara' swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} @@ -1199,26 +1186,15 @@ resources: tripleo::fencing::config: {get_input: fencing_config} # Swift + # FIXME: need to move proxy_local_net_ip into swift-proxy.yaml swift::proxy::proxy_local_net_ip: {get_input: swift_proxy_network} - swift::proxy::authtoken::auth_uri: {get_input: keystone_auth_uri} - swift::proxy::authtoken::identity_uri: {get_input: keystone_identity_uri} swift::storage::all::storage_local_net_ip: {get_input: swift_management_network} - swift::swift_hash_suffix: {get_input: swift_hash_suffix} - swift::proxy::authtoken::admin_password: {get_input: swift_password} - swift::proxy::workers: {get_input: swift_workers} + swift::swift_hash_path_suffix: {get_input: swift_hash_suffix} tripleo::ringbuilder::build_ring: { get_input: swift_ring_build } tripleo::ringbuilder::part_power: {get_input: swift_part_power} tripleo::ringbuilder::replicas: {get_input: swift_replicas} tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours} swift_mount_check: {get_input: swift_mount_check} - swift::keystone::auth::public_url: {get_input: 
swift_public_url } - swift::keystone::auth::internal_url: {get_input: swift_internal_url } - swift::keystone::auth::admin_url: {get_input: swift_admin_url } - swift::keystone::auth::public_url_s3: {get_input: swift_public_url_v3 } - swift::keystone::auth::internal_url_s3: {get_input: swift_internal_url_v3 } - swift::keystone::auth::admin_url_s3: {get_input: swift_admin_url_v3 } - swift::keystone::auth::password: {get_input: swift_password } - swift::keystone::auth::region: {get_input: keystone_region} # Cinder cinder_enable_db_purge: {get_input: cinder_enable_db_purge} @@ -1288,7 +1264,7 @@ resources: # Neutron neutron::bind_host: {get_input: neutron_api_network} neutron::server::auth_uri: {get_input: keystone_auth_uri} - neutron::server::identity_uri: {get_input: keystone_identity_uri} + neutron::server::auth_url: {get_input: keystone_identity_uri} neutron::server::database_connection: {get_input: neutron_dsn} neutron::server::api_workers: {get_input: neutron_workers} neutron::network_device_mtu: {get_input: neutron_tenant_mtu} @@ -1318,7 +1294,7 @@ resources: neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types} neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types} neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions} - neutron::server::auth_password: {get_input: neutron_password} + neutron::server::password: {get_input: neutron_password} neutron_dsn: {get_input: neutron_dsn} neutron::db::mysql::password: {get_input: neutron_password} neutron::keystone::auth::public_url: {get_input: neutron_public_url } @@ -1326,7 +1302,6 @@ resources: neutron::keystone::auth::admin_url: {get_input: neutron_admin_url } neutron::keystone::auth::password: {get_input: neutron_password } neutron::keystone::auth::region: {get_input: keystone_region} - neutron::server::notifications::nova_url: {get_input: nova_internal_url} neutron::server::notifications::auth_url: {get_input: neutron_auth_url} neutron::server::notifications::tenant_name: 'service' neutron::server::notifications::project_name: 'service' @@ -1335,7 +1310,7 @@ resources: # Ceilometer ceilometer_backend: {get_input: ceilometer_backend} ceilometer_mysql_conn_string: {get_input: ceilometer_dsn} - ceilometer::metering_secret: {get_input: ceilometer_metering_secret} + ceilometer::telemetry_secret: {get_input: ceilometer_metering_secret} ceilometer::rabbit_userid: {get_input: rabbit_username} ceilometer::rabbit_password: {get_input: rabbit_password} ceilometer::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} @@ -1343,8 +1318,8 @@ resources: ceilometer::debug: {get_input: debug} ceilometer::api::host: {get_input: ceilometer_api_network} ceilometer::api::keystone_password: {get_input: ceilometer_password} - ceilometer::api::keystone_auth_uri: {get_input: keystone_auth_uri} - ceilometer::api::keystone_identity_uri: {get_input: keystone_identity_uri} + ceilometer::api::auth_uri: {get_input: keystone_auth_uri} + ceilometer::api::identity_uri: {get_input: keystone_identity_uri} ceilometer::agent::auth::auth_password: {get_input: ceilometer_password} ceilometer::agent::auth::auth_url: {get_input: keystone_auth_uri} ceilometer::agent::central::coordination_url: {get_input: ceilometer_coordination_url} @@ -1497,13 +1472,15 @@ resources: timezone::timezone: {get_input: timezone} control_virtual_interface: {get_input: control_virtual_interface} public_virtual_interface: {get_input: public_virtual_interface} - tripleo::loadbalancer::control_virtual_interface: {get_input: 
control_virtual_interface} - tripleo::loadbalancer::public_virtual_interface: {get_input: public_virtual_interface} - tripleo::loadbalancer::haproxy_log_address: {get_input: haproxy_log_address} - tripleo::loadbalancer::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]} - tripleo::loadbalancer::haproxy_stats_user: {get_input: haproxy_stats_user} - tripleo::loadbalancer::haproxy_stats_password: {get_input: haproxy_stats_password} - tripleo::loadbalancer::redis_password: {get_input: redis_password} + tripleo::keepalived::control_virtual_interface: {get_input: control_virtual_interface} + tripleo::keepalived::public_virtual_interface: {get_input: public_virtual_interface} + tripleo::haproxy::control_virtual_interface: {get_input: control_virtual_interface} + tripleo::haproxy::public_virtual_interface: {get_input: public_virtual_interface} + tripleo::haproxy::haproxy_log_address: {get_input: haproxy_log_address} + tripleo::haproxy::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]} + tripleo::haproxy::haproxy_stats_user: {get_input: haproxy_stats_user} + tripleo::haproxy::haproxy_stats_password: {get_input: haproxy_stats_password} + tripleo::haproxy::redis_password: {get_input: redis_password} tripleo::packages::enable_install: {get_input: enable_package_install} tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml index 26ce7138..3e455347 100644 --- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml +++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml @@ -85,7 +85,7 @@ resources: tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort} tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort} tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift} - tripleo::loadbalancer::midonet_api: true + tripleo::haproxy::midonet_api: true # Missed Neutron Puppet data neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver' neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver' diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml index 5942088c..308c609a 100644 --- a/puppet/extraconfig/ceph/ceph-external-config.yaml +++ b/puppet/extraconfig/ceph/ceph-external-config.yaml @@ -97,8 +97,10 @@ resources: gnocchi::storage::ceph::ceph_keyring: list_join: - '.' 
- - - 'client' + - - '/etc/ceph/ceph' + - 'client' - {get_param: CephClientUserName} + - 'keyring' ceph_client_user_name: {get_param: CephClientUserName} ceph_pools: - {get_param: CinderRbdPoolName} diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index 34965959..65cf9577 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -17,6 +17,7 @@ nova::network::neutron::neutron_username: 'neutron' nova::network::neutron::dhcp_domain: '' neutron::allow_overlapping_ips: true +neutron::server::project_name: 'service' kernel_modules: nf_conntrack: {} diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index 0b3b8fe7..de6e3db1 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -115,6 +115,7 @@ neutron::server::sync_db: true nova::notify_on_state_change: 'vm_and_task_state' nova::api::default_floating_pool: 'public' nova::api::sync_db_api: true +nova::api::enable_proxy_headers_parsing: true nova::scheduler::filter::ram_allocation_ratio: '1.0' nova::cron::archive_deleted_rows::hour: '*/12' nova::cron::archive_deleted_rows::destination: '/dev/null' @@ -129,6 +130,10 @@ cinder::cron::db_purge::destination: '/dev/null' cinder::host: hostgroup cinder_user_enabled_backends: [] +# TODO(jaosorior): Move to cinder profile once cinder is moved as a composable +# service. +cinder::api::enable_proxy_headers_parsing: true + # heat heat::engine::configure_delegated_roles: false heat::engine::trusts_delegated_roles: [] @@ -140,6 +145,7 @@ heat::cron::purge_deleted::destination: '/dev/null' heat::keystone::domain::domain_name: 'heat_stack' heat::keystone::domain::domain_admin: 'heat_stack_domain_admin' heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost' +heat::auth_plugin: 'password' # pacemaker pacemaker::corosync::cluster_name: 'tripleo_cluster' @@ -160,26 +166,26 @@ horizon::vhost_extra_params: mysql::server::manage_config_file: true -tripleo::loadbalancer::keystone_admin: true -tripleo::loadbalancer::keystone_public: true -tripleo::loadbalancer::neutron: true -tripleo::loadbalancer::cinder: true -tripleo::loadbalancer::glance_api: true -tripleo::loadbalancer::glance_registry: true -tripleo::loadbalancer::nova_osapi: true -tripleo::loadbalancer::nova_metadata: true -tripleo::loadbalancer::nova_novncproxy: true -tripleo::loadbalancer::mysql: true -tripleo::loadbalancer::redis: true -tripleo::loadbalancer::sahara: true -tripleo::loadbalancer::swift_proxy_server: true -tripleo::loadbalancer::ceilometer: true -tripleo::loadbalancer::aodh: true -tripleo::loadbalancer::gnocchi: true -tripleo::loadbalancer::heat_api: true -tripleo::loadbalancer::heat_cloudwatch: true -tripleo::loadbalancer::heat_cfn: true -tripleo::loadbalancer::horizon: true +tripleo::haproxy::keystone_admin: true +tripleo::haproxy::keystone_public: true +tripleo::haproxy::neutron: true +tripleo::haproxy::cinder: true +tripleo::haproxy::glance_api: true +tripleo::haproxy::glance_registry: true +tripleo::haproxy::nova_osapi: true +tripleo::haproxy::nova_metadata: true +tripleo::haproxy::nova_novncproxy: true +tripleo::haproxy::mysql: true +tripleo::haproxy::redis: true +tripleo::haproxy::sahara: true +tripleo::haproxy::swift_proxy_server: true +tripleo::haproxy::ceilometer: true +tripleo::haproxy::aodh: true +tripleo::haproxy::gnocchi: true +tripleo::haproxy::heat_api: true +tripleo::haproxy::heat_cloudwatch: true +tripleo::haproxy::heat_cfn: true +tripleo::haproxy::horizon: true controller_classes: [] # firewall 
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index fd7faff1..4add2f02 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -16,41 +16,46 @@ include ::tripleo::packages include ::tripleo::firewall -create_resources(kmod::load, hiera('kernel_modules'), {}) -create_resources(sysctl::value, hiera('sysctl_settings'), {}) -Exec <| tag == 'kmod::load' |> -> Sysctl <| |> +if hiera('step') >= 1 { -if count(hiera('ntp::servers')) > 0 { - include ::ntp -} + create_resources(kmod::load, hiera('kernel_modules'), {}) + create_resources(sysctl::value, hiera('sysctl_settings'), {}) + Exec <| tag == 'kmod::load' |> -> Sysctl <| |> -include ::timezone + include ::timezone -if str2bool(hiera('ceph_osd_selinux_permissive', true)) { - exec { 'set selinux to permissive on boot': - command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", - onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", - path => ['/usr/bin', '/usr/sbin'], + if count(hiera('ntp::servers')) > 0 { + include ::ntp } - - exec { 'set selinux to permissive': - command => 'setenforce 0', - onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ['/usr/bin', '/usr/sbin'], - } -> Class['ceph::profile::osd'] } -if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') -} else { - $mon_host = hiera('ceph_mon_host') -} -class { '::ceph::profile::params': - mon_host => $mon_host, -} -include ::ceph::conf -include ::ceph::profile::client -include ::ceph::profile::osd +if hiera('step') >= 3 { + if str2bool(hiera('ceph_osd_selinux_permissive', true)) { + exec { 'set selinux to permissive on boot': + command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", + onlyif => "test -f /etc/selinux/config && ! 
grep '^SELINUX=permissive' /etc/selinux/config", + path => ['/usr/bin', '/usr/sbin'], + } + + exec { 'set selinux to permissive': + command => 'setenforce 0', + onlyif => "which setenforce && getenforce | grep -i 'enforcing'", + path => ['/usr/bin', '/usr/sbin'], + } -> Class['ceph::profile::osd'] + } -hiera_include('ceph_classes') -package_manifest{'/var/lib/tripleo/installed-packages/overcloud_ceph': ensure => present} + if str2bool(hiera('ceph_ipv6', false)) { + $mon_host = hiera('ceph_mon_host_v6') + } else { + $mon_host = hiera('ceph_mon_host') + } + class { '::ceph::profile::params': + mon_host => $mon_host, + } + include ::ceph::conf + include ::ceph::profile::client + include ::ceph::profile::osd + + hiera_include('ceph_classes') + package_manifest{'/var/lib/tripleo/installed-packages/overcloud_ceph': ensure => present} +} diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index 43e87789..cf20c0ca 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -16,8 +16,8 @@ include ::tripleo::packages include ::tripleo::firewall -create_resources(kmod::load, hiera('kernel_modules'), {}) -create_resources(sysctl::value, hiera('sysctl_settings'), {}) +create_resources(kmod::load, hiera('kernel_modules'), { }) +create_resources(sysctl::value, hiera('sysctl_settings'), { }) Exec <| tag == 'kmod::load' |> -> Sysctl <| |> if count(hiera('ntp::servers')) > 0 { @@ -26,200 +26,198 @@ if count(hiera('ntp::servers')) > 0 { include ::timezone -file { ['/etc/libvirt/qemu/networks/autostart/default.xml', - '/etc/libvirt/qemu/networks/default.xml']: - ensure => absent, - before => Service['libvirt'], -} -# in case libvirt has been already running before the Puppet run, make -# sure the default network is destroyed -exec { 'libvirt-default-net-destroy': - command => '/usr/bin/virsh net-destroy default', - onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"', - before => Service['libvirt'], -} - -# When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique -exec { 'reset-iscsi-initiator-name': - command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi', - onlyif => '/usr/bin/test ! 
-f /etc/iscsi/.initiator_reset', -}-> - -file { '/etc/iscsi/.initiator_reset': - ensure => present, -} - -include ::nova -include ::nova::config -include ::nova::compute +if hiera('step') >= 4 { -$rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false) -$rbd_persistent_storage = hiera('rbd_persistent_storage', false) -if $rbd_ephemeral_storage or $rbd_persistent_storage { - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') + file { ['/etc/libvirt/qemu/networks/autostart/default.xml', + '/etc/libvirt/qemu/networks/default.xml']: + ensure => absent, + before => Service['libvirt'], } - class { '::ceph::profile::params': - mon_host => $mon_host, + # in case libvirt has been already running before the Puppet run, make + # sure the default network is destroyed + exec { 'libvirt-default-net-destroy': + command => '/usr/bin/virsh net-destroy default', + onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"', + before => Service['libvirt'], } - include ::ceph::conf - include ::ceph::profile::client - $client_keys = hiera('ceph::profile::params::client_keys') - $client_user = join(['client.', hiera('ceph_client_user_name')]) - class { '::nova::compute::rbd': - libvirt_rbd_secret_key => $client_keys[$client_user]['secret'], + # When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique + exec { 'reset-iscsi-initiator-name': + command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi', + onlyif => '/usr/bin/test ! -f /etc/iscsi/.initiator_reset', + }-> + + file { '/etc/iscsi/.initiator_reset': + ensure => present, } -} -if hiera('cinder_enable_nfs_backend', false) { - if str2bool($::selinux) { - selboolean { 'virt_use_nfs': - value => on, - persistent => true, - } -> Package['nfs-utils'] + include ::nova + include ::nova::config + include ::nova::compute + + $rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false) + $rbd_persistent_storage = hiera('rbd_persistent_storage', false) + if $rbd_ephemeral_storage or $rbd_persistent_storage { + if str2bool(hiera('ceph_ipv6', false)) { + $mon_host = hiera('ceph_mon_host_v6') + } else { + $mon_host = hiera('ceph_mon_host') + } + class { '::ceph::profile::params': + mon_host => $mon_host, + } + include ::ceph::conf + include ::ceph::profile::client + + $client_keys = hiera('ceph::profile::params::client_keys') + $client_user = join(['client.', hiera('ceph_client_user_name')]) + class { '::nova::compute::rbd': + libvirt_rbd_secret_key => $client_keys[$client_user]['secret'], + } } - package {'nfs-utils': } -> Service['nova-compute'] -} + if hiera('cinder_enable_nfs_backend', false) { + if str2bool($::selinux) { + selboolean { 'virt_use_nfs': + value => on, + persistent => true, + } -> Package['nfs-utils'] + } -if str2bool(hiera('nova::use_ipv6', false)) { - $vncserver_listen = '::0' -} else { - $vncserver_listen = '0.0.0.0' -} + package { 'nfs-utils': } -> Service['nova-compute'] + } -if $rbd_ephemeral_storage { - class { '::nova::compute::libvirt': - libvirt_disk_cachemodes => ['network=writeback'], - libvirt_hw_disk_discard => 'unmap', - vncserver_listen => $vncserver_listen, + if str2bool(hiera('nova::use_ipv6', false)) { + $vncserver_listen = '::0' + } else { + $vncserver_listen = '0.0.0.0' } -} else { - class { '::nova::compute::libvirt' : - vncserver_listen => $vncserver_listen, + + if $rbd_ephemeral_storage { + class { 
'::nova::compute::libvirt': + libvirt_disk_cachemodes => ['network=writeback'], + libvirt_hw_disk_discard => 'unmap', + vncserver_listen => $vncserver_listen, + } + } else { + class { '::nova::compute::libvirt' : + vncserver_listen => $vncserver_listen, + } } -} -nova_config { - 'DEFAULT/my_ip': value => $ipaddress; - 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver'; - 'DEFAULT/host': value => $fqdn; - # TUNNELLED mode provides a security enhancement when using shared storage but is not - # supported when not using shared storage. - # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12 - # In future versions of QEMU (2.6, mostly), Dan's native encryption - # work will obsolete the need to use TUNNELLED transport mode. - 'libvirt/live_migration_tunnelled': value => $rbd_ephemeral_storage; -} + nova_config { + 'DEFAULT/my_ip': value => $ipaddress; + 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver'; + 'DEFAULT/host': value => $fqdn; + } -if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - file {'/etc/libvirt/qemu.conf': - ensure => present, - content => hiera('midonet_libvirt_qemu_data') + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + file { '/etc/libvirt/qemu.conf': + ensure => present, + content => hiera('midonet_libvirt_qemu_data') + } } -} -include ::nova::network::neutron -include ::neutron -include ::neutron::config - -# If the value of core plugin is set to 'nuage', -# include nuage agent, -# If the value of core plugin is set to 'midonet', -# include midonet agent, -# else use the default value of 'ml2' -if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { - include ::nuage::vrs - include ::nova::compute::neutron - - class { '::nuage::metadataagent': - nova_os_tenant_name => hiera('nova::api::admin_tenant_name'), - nova_os_password => hiera('nova_password'), - nova_metadata_ip => hiera('nova_metadata_node_ips'), - nova_auth_ip => hiera('keystone_public_api_virtual_ip'), + include ::nova::network::neutron + include ::neutron + include ::neutron::config + + # If the value of core plugin is set to 'nuage', + # include nuage agent, + # If the value of core plugin is set to 'midonet', + # include midonet agent, + # else use the default value of 'ml2' + if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { + include ::nuage::vrs + include ::nova::compute::neutron + + class { '::nuage::metadataagent': + nova_os_tenant_name => hiera('nova::api::admin_tenant_name'), + nova_os_password => hiera('nova_password'), + nova_metadata_ip => hiera('nova_metadata_node_ips'), + nova_auth_ip => hiera('keystone_public_api_virtual_ip'), + } } -} -elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - # TODO(devvesa) provide non-controller ips for these services - $zookeeper_node_ips = hiera('neutron_api_node_ips') - $cassandra_node_ips = hiera('neutron_api_node_ips') + # TODO(devvesa) provide non-controller ips for these services + $zookeeper_node_ips = hiera('neutron_api_node_ips') + $cassandra_node_ips = hiera('neutron_api_node_ips') - class {'::tripleo::network::midonet::agent': - zookeeper_servers => $zookeeper_node_ips, - cassandra_seeds => $cassandra_node_ips + class { '::tripleo::network::midonet::agent': + zookeeper_servers => $zookeeper_node_ips, + 
cassandra_seeds => $cassandra_node_ips + } } -} -elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { - - include ::contrail::vrouter - # NOTE: it's not possible to use this class without a functional - # contrail controller up and running - #class {'::contrail::vrouter::provision_vrouter': - # require => Class['contrail::vrouter'], - #} -} -elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' { - # forward all ipv4 traffic - # this is required for the vms to pass through the gateways public interface - sysctl::value { 'net.ipv4.ip_forward': value => '1' } - - # ifc_ctl_pp needs to be invoked by root as part of the vif.py when a VM is powered on - file { '/etc/sudoers.d/ifc_ctl_sudoers': - ensure => file, - owner => root, - group => root, - mode => '0440', - content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n", + elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { + + include ::contrail::vrouter + # NOTE: it's not possible to use this class without a functional + # contrail controller up and running + #class {'::contrail::vrouter::provision_vrouter': + # require => Class['contrail::vrouter'], + #} } -} -else { - - # NOTE: this code won't live in puppet-neutron until Neutron OVS agent - # can be gracefully restarted. See https://review.openstack.org/#/c/297211 - # In the meantime, it's safe to restart the agent on each change in neutron.conf, - # because Puppet changes are supposed to be done during bootstrap and upgrades. - # Some resource managed by Neutron_config (like messaging and logging options) require - # a restart of OVS agent. This code does it. - # In Newton, OVS agent will be able to be restarted gracefully so we'll drop the code - # from here and fix it in puppet-neutron. - Neutron_config<||> ~> Service['neutron-ovs-agent-service'] - - include ::neutron::plugins::ml2 - include ::neutron::agents::ml2::ovs - - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - class { '::neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), + elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' { + # forward all ipv4 traffic + # this is required for the vms to pass through the gateways public interface + sysctl::value { 'net.ipv4.ip_forward': value => '1' } + + # ifc_ctl_pp needs to be invoked by root as part of the vif.py when a VM is powered on + file { '/etc/sudoers.d/ifc_ctl_sudoers': + ensure => file, + owner => root, + group => root, + mode => '0440', + content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n", } } + else { + + # NOTE: this code won't live in puppet-neutron until Neutron OVS agent + # can be gracefully restarted. See https://review.openstack.org/#/c/297211 + # In the meantime, it's safe to restart the agent on each change in neutron.conf, + # because Puppet changes are supposed to be done during bootstrap and upgrades. + # Some resource managed by Neutron_config (like messaging and logging options) require + # a restart of OVS agent. This code does it. + # In Newton, OVS agent will be able to be restarted gracefully so we'll drop the code + # from here and fix it in puppet-neutron. 
+ Neutron_config<||> ~> Service['neutron-ovs-agent-service'] + + include ::neutron::plugins::ml2 + include ::neutron::agents::ml2::ovs + + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), + } + } - if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::agents::bigswitch + if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::agents::bigswitch + } } -} -neutron_config { - 'DEFAULT/host': value => $fqdn; -} + neutron_config { + 'DEFAULT/host': value => $fqdn; + } -include ::ceilometer -include ::ceilometer::config -include ::ceilometer::agent::compute -include ::ceilometer::agent::auth + include ::ceilometer + include ::ceilometer::config + include ::ceilometer::agent::compute + include ::ceilometer::agent::auth -$snmpd_user = hiera('snmpd_readonly_user_name') -snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), -} -class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], -} + $snmpd_user = hiera('snmpd_readonly_user_name') + snmp::snmpv3_user { $snmpd_user: + authtype => 'MD5', + authpass => hiera('snmpd_readonly_user_password'), + } + class { '::snmp': + agentaddress => ['udp:161','udp6:[::1]:161'], + snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], + } -hiera_include('compute_classes') -package_manifest{'/var/lib/tripleo/installed-packages/overcloud_compute': ensure => present} + hiera_include('compute_classes') + package_manifest{ '/var/lib/tripleo/installed-packages/overcloud_compute': ensure => present } + +} diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index 8b2dc8b0..53bf62c7 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -117,9 +117,6 @@ if hiera('step') >= 2 { include ::aodh::db::mysql } - # pre-install swift here so we can build rings - include ::swift - $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) if $enable_ceph { @@ -225,8 +222,8 @@ if hiera('step') >= 4 { class {'::tripleo::network::midonet::api': zookeeper_servers => $zookeeper_node_ips, - vip => hiera('tripleo::loadbalancer::public_virtual_ip'), - keystone_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), + vip => hiera('public_virtual_ip'), + keystone_ip => hiera('public_virtual_ip'), keystone_admin_token => hiera('keystone::admin_token'), # TODO: create a 'bind' hiera key for api bind_address => hiera('neutron::bind_host'), @@ -271,9 +268,9 @@ if hiera('step') >= 4 { if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { class {'::neutron::plugins::midonet': 
- midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), + midonet_api_ip => hiera('public_virtual_ip'), keystone_tenant => hiera('neutron::server::auth_tenant'), - keystone_password => hiera('neutron::server::auth_password') + keystone_password => hiera('neutron::server::password') } } else { @@ -315,7 +312,6 @@ if hiera('step') >= 4 { include ::cinder include ::cinder::config - include ::tripleo::ssl::cinder_config include ::cinder::api include ::cinder::glance include ::cinder::scheduler @@ -450,19 +446,6 @@ if hiera('step') >= 4 { enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')), } - # swift proxy - include ::swift::proxy - include ::swift::proxy::proxy_logging - include ::swift::proxy::healthcheck - include ::swift::proxy::cache - include ::swift::proxy::keystone - include ::swift::proxy::authtoken - include ::swift::proxy::staticweb - include ::swift::proxy::ratelimit - include ::swift::proxy::catch_errors - include ::swift::proxy::tempurl - include ::swift::proxy::formpost - # swift storage if str2bool(hiera('enable_swift_storage', true)) { class { '::swift::storage::all': diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index bbcf83d5..d6d14a83 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -318,9 +318,6 @@ if hiera('step') >= 2 { } } - # pre-install swift here so we can build rings - include ::swift - # Ceph $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) @@ -461,8 +458,8 @@ MYSQL_HOST=localhost\n", class {'::tripleo::network::midonet::api': zookeeper_servers => $zookeeper_node_ips, - vip => hiera('tripleo::loadbalancer::public_virtual_ip'), - keystone_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), + vip => hiera('public_virtual_ip'), + keystone_ip => hiera('public_virtual_ip'), keystone_admin_token => hiera('keystone::admin_token'), # TODO: create a 'bind' hiera key for api bind_address => hiera('neutron::bind_host'), @@ -495,9 +492,9 @@ MYSQL_HOST=localhost\n", } if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { class {'::neutron::plugins::midonet': - midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), + midonet_api_ip => hiera('public_virtual_ip'), keystone_tenant => hiera('neutron::server::auth_tenant'), - keystone_password => hiera('neutron::server::auth_password') + keystone_password => hiera('neutron::server::password') } } if hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' { @@ -542,7 +539,6 @@ MYSQL_HOST=localhost\n", include ::cinder include ::cinder::config - include ::tripleo::ssl::cinder_config class { '::cinder::api': sync_db => $sync_db, manage_service => false, @@ -699,22 +695,6 @@ MYSQL_HOST=localhost\n", enabled => false, } - # swift proxy - class { '::swift::proxy' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, - } - include ::swift::proxy::proxy_logging - include ::swift::proxy::healthcheck - include ::swift::proxy::cache - include ::swift::proxy::keystone - include ::swift::proxy::authtoken - include ::swift::proxy::staticweb - include ::swift::proxy::ratelimit - include ::swift::proxy::catch_errors - include ::swift::proxy::tempurl - include ::swift::proxy::formpost - # swift storage if str2bool(hiera('enable_swift_storage', true)) { class {'::swift::storage::all': @@ -1227,11 +1207,6 @@ 
password=\"${mysql_root_password}\"", pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name : clone_params => 'interleave=true', } - pacemaker::resource::ocf { 'delay' : - ocf_agent_name => 'heartbeat:Delay', - clone_params => 'interleave=true', - resource_params => 'startdelay=10', - } # Fedora doesn't know `require-all` parameter for constraints yet if $::operatingsystem == 'Fedora' { $redis_ceilometer_constraint_params = undef @@ -1303,22 +1278,6 @@ password=\"${mysql_root_password}\"", require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], } - pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::api_service_name}-clone", - second_resource => 'delay-clone', - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation': - source => 'delay-clone', - target => "${::ceilometer::params::api_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Ocf['delay']], - } # Aodh pacemaker::resource::service { $::aodh::params::evaluator_service_name : clone_params => 'interleave=true', @@ -1329,22 +1288,6 @@ password=\"${mysql_root_password}\"", pacemaker::resource::service { $::aodh::params::listener_service_name : clone_params => 'interleave=true', } - pacemaker::constraint::base { 'aodh-delay-then-aodh-evaluator-constraint': - constraint_type => 'order', - first_resource => 'delay-clone', - second_resource => "${::aodh::params::evaluator_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::colocation { 'aodh-evaluator-with-aodh-delay-colocation': - source => "${::aodh::params::evaluator_service_name}-clone", - target => 'delay-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], - Pacemaker::Resource::Ocf['delay']], - } pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint': constraint_type => 'order', first_resource => "${::aodh::params::evaluator_service_name}-clone", diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp index ae074589..3585c993 100644 --- a/puppet/manifests/overcloud_object.pp +++ b/puppet/manifests/overcloud_object.pp @@ -16,42 +16,46 @@ include ::tripleo::packages include ::tripleo::firewall -create_resources(kmod::load, hiera('kernel_modules'), {}) -create_resources(sysctl::value, hiera('sysctl_settings'), {}) -Exec <| tag == 'kmod::load' |> -> Sysctl <| |> +if hiera('step') >= 1 { + create_resources(kmod::load, hiera('kernel_modules'), {}) + create_resources(sysctl::value, hiera('sysctl_settings'), {}) + Exec <| tag == 'kmod::load' |> -> Sysctl <| |> -if count(hiera('ntp::servers')) > 0 { - include ::ntp -} - -include ::timezone + include ::timezone -include ::swift -class { '::swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')), -} -if(!defined(File['/srv/node'])) { - file { '/srv/node': - ensure => directory, - owner => 
'swift', - group => 'swift', - require => Package['openstack-swift'], + if count(hiera('ntp::servers')) > 0 { + include ::ntp } } -$swift_components = ['account', 'container', 'object'] -swift::storage::filter::recon { $swift_components : } -swift::storage::filter::healthcheck { $swift_components : } +if hiera('step') >= 4 { + class { '::swift::storage::all': + mount_check => str2bool(hiera('swift_mount_check')), + } + if(!defined(File['/srv/node'])) { + file { '/srv/node': + ensure => directory, + owner => 'swift', + group => 'swift', + require => Package['openstack-swift'], + } + } + + $swift_components = ['account', 'container', 'object'] + swift::storage::filter::recon { $swift_components : } + swift::storage::filter::healthcheck { $swift_components : } -$snmpd_user = hiera('snmpd_readonly_user_name') -snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), -} -class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], + $snmpd_user = hiera('snmpd_readonly_user_name') + snmp::snmpv3_user { $snmpd_user: + authtype => 'MD5', + authpass => hiera('snmpd_readonly_user_password'), + } + class { '::snmp': + agentaddress => ['udp:161','udp6:[::1]:161'], + snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], + } + + hiera_include('object_classes') } -hiera_include('object_classes') package_manifest{'/var/lib/tripleo/installed-packages/overcloud_object': ensure => present} diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp index a623da29..2411ff84 100644 --- a/puppet/manifests/ringbuilder.pp +++ b/puppet/manifests/ringbuilder.pp @@ -89,6 +89,11 @@ class tripleo::ringbuilder ( } } +if hiera('step') >= 2 { + # pre-install swift here so we can build rings + include ::swift +} + if hiera('step') >= 3 { include ::tripleo::ringbuilder } diff --git a/puppet/services/README.rst b/puppet/services/README.rst index 38d2ac64..15c8c1f1 100644 --- a/puppet/services/README.rst +++ b/puppet/services/README.rst @@ -48,3 +48,7 @@ are re-asserted when applying latter ones. 5) Service activation (Pacemaker) 6) Fencing (Pacemaker) + +Note: Not all roles currently support all steps: + + * ObjectStorage role only supports steps 2, 3 and 4 diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml index ca50d91d..89e6ee0f 100644 --- a/puppet/services/glance-api.yaml +++ b/puppet/services/glance-api.yaml @@ -9,9 +9,6 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - MysqlVirtualIPUri: - type: string - default: '' Debug: default: '' description: Set to True to enable debugging on all services. @@ -63,13 +60,14 @@ outputs: description: Role data for the Glance API role. 
     value:
       config_settings:
-        glance_dsn: &glance_dsn
+        glance::api::database_connection:
           list_join:
             - ''
-            - - 'mysql+pymysql://glance:'
+            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+              - '://glance:'
               - {get_param: GlancePassword}
               - '@'
-              - {get_param: MysqlVirtualIPUri}
+              - {get_param: [EndpointMap, MysqlInternal, host]}
               - '/glance'
         glance::api::bind_port: {get_param: [EndpointMap, GlanceInternal, port]}
         glance::api::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
@@ -84,7 +82,6 @@ outputs:
         glance::api::workers: {get_param: GlanceWorkers}
         glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
         glance_log_file: {get_param: GlanceLogFile}
-        glance::api::database_connection: *glance_dsn
         glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
         glance::backend::swift::swift_store_user: service:glance
         glance::backend::swift::swift_store_key: {get_param: GlancePassword}
diff --git a/puppet/services/glance-registry.yaml b/puppet/services/glance-registry.yaml
index 1a1a515a..6f2f0372 100644
--- a/puppet/services/glance-registry.yaml
+++ b/puppet/services/glance-registry.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''
   Debug:
     default: ''
     description: Set to True to enable debugging on all services.
@@ -30,16 +27,16 @@ outputs:
     description: Role data for the Glance Registry role.
     value:
       config_settings:
-        glance_dsn: &glance_dsn
+        glance::registry::database_connection:
           list_join:
             - ''
-            - - 'mysql+pymysql://glance:'
+            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+              - '://glance:'
               - {get_param: GlancePassword}
               - '@'
-              - {get_param: MysqlVirtualIPUri}
+              - {get_param: [EndpointMap, MysqlInternal, host]}
               - '/glance'
         glance::registry::keystone_password: {get_param: GlancePassword}
-        glance::registry::database_connection: *glance_dsn
         glance::registry::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
         glance::registry::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
         glance::registry::debug: {get_param: Debug}
diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml
index 99eb1074..c1f26c15 100644
--- a/puppet/services/heat-api-cfn.yaml
+++ b/puppet/services/heat-api-cfn.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''
   HeatWorkers:
     default: 0
     description: Number of workers for Heat service.
diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml
index f3d68042..2c56951b 100644
--- a/puppet/services/heat-api-cloudwatch.yaml
+++ b/puppet/services/heat-api-cloudwatch.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''
   HeatWorkers:
     default: 0
     description: Number of workers for Heat service.
diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml
index 4fc259ac..d3461e63 100644
--- a/puppet/services/heat-api.yaml
+++ b/puppet/services/heat-api.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''
   HeatWorkers:
     default: 0
     description: Number of workers for Heat service.
diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml
index 50fcbf59..8617df27 100644
--- a/puppet/services/heat-base.yaml
+++ b/puppet/services/heat-base.yaml
@@ -37,3 +37,4 @@ outputs:
       heat::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
       heat::rabbit_port: {get_param: RabbitClientPort}
       heat::debug: {get_param: Debug}
+      heat::enable_proxy_headers_parsing: true
diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml
index 143d24bb..4a5ec2c0 100644
--- a/puppet/services/heat-engine.yaml
+++ b/puppet/services/heat-engine.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''
   HeatEnableDBPurge:
     type: boolean
     default: true
@@ -43,19 +40,18 @@ outputs:
         - get_attr: [HeatBase, role_data, config_settings]
         - heat::engine::num_engine_workers: {get_param: HeatWorkers}
           tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge}
-          heat_dsn: &heat_dsn
+          heat::database_connection:
             list_join:
               - ''
-              - - 'mysql+pymysql://heat:'
+              - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+                - '://heat:'
                 - {get_param: HeatPassword}
                 - '@'
-                - {get_param: MysqlVirtualIPUri}
+                - {get_param: [EndpointMap, MysqlInternal, host]}
                 - '/heat'
-          heat::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
           heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
           heat::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
           heat::keystone_password: {get_param: HeatPassword}
-          heat::database_connection: *heat_dsn
           heat::db::mysql::password: {get_param: HeatPassword}
           heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
       step_config: |
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index 1654f0e7..25d92d4a 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -54,9 +54,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''
   Debug:
     type: string
     default: ''
@@ -97,15 +94,15 @@ outputs:
     description: Role data for the Keystone role.
     value:
       config_settings:
-        keystone_dsn: &keystone_dsn
+        keystone::database_connection:
           list_join:
             - ''
-            - - 'mysql+pymysql://keystone:'
+            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+              - '://keystone:'
              - {get_param: AdminToken}
              - '@'
-              - {get_param: MysqlVirtualIPUri}
+              - {get_param: [EndpointMap, MysqlInternal, host]}
              - '/keystone'
-        keystone::database_connection: *keystone_dsn
         keystone::admin_token: {get_param: AdminToken}
         keystone::roles::admin::password: {get_param: AdminPassword}
         keystone_ca_certificate: {get_param: KeystoneCACertificate}
diff --git a/puppet/services/loadbalancer.yaml b/puppet/services/loadbalancer.yaml
index 0c1757bf..1b9654fc 100644
--- a/puppet/services/loadbalancer.yaml
+++ b/puppet/services/loadbalancer.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 outputs:
   role_data:
diff --git a/puppet/services/memcached.yaml b/puppet/services/memcached.yaml
index 1833fbff..fcd0adca 100644
--- a/puppet/services/memcached.yaml
+++ b/puppet/services/memcached.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 outputs:
   role_data:
@@ -20,4 +17,3 @@ outputs:
     config_settings:
       step_config: |
         include ::tripleo::profile::base::memcached
-
diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml
index 548b4ba0..80ccf1c2 100644
--- a/puppet/services/neutron-dhcp.yaml
+++ b/puppet/services/neutron-dhcp.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''
   NeutronEnableIsolatedMetadata:
     default: 'False'
     description: If True, DHCP provide metadata route to VM.
diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml
index 2ea1b19d..20c82dc1 100644
--- a/puppet/services/neutron-l3.yaml
+++ b/puppet/services/neutron-l3.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''
   Debug:
     type: string
     default: ''
diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml
index 1fe139f3..e221b3a1 100644
--- a/puppet/services/neutron-metadata.yaml
+++ b/puppet/services/neutron-metadata.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''
   NeutronMetadataProxySharedSecret:
     description: Shared secret to prevent spoofing
     type: string
diff --git a/puppet/services/pacemaker/glance-api.yaml b/puppet/services/pacemaker/glance-api.yaml
index ad964216..5a581dca 100644
--- a/puppet/services/pacemaker/glance-api.yaml
+++ b/puppet/services/pacemaker/glance-api.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''
   GlanceFilePcmkDevice:
     default: ''
     description: >
@@ -43,7 +40,6 @@ resources:
     type: ../glance-api.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
-      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
   role_data:
diff --git a/puppet/services/pacemaker/glance-registry.yaml b/puppet/services/pacemaker/glance-registry.yaml
index 393fbaaf..8b88cb93 100644
--- a/puppet/services/pacemaker/glance-registry.yaml
+++ b/puppet/services/pacemaker/glance-registry.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 resources:
@@ -19,7 +16,6 @@ resources:
     type: ../glance-registry.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
-      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
   role_data:
diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml
index ba620f89..5833c42d 100644
--- a/puppet/services/pacemaker/heat-api-cfn.yaml
+++ b/puppet/services/pacemaker/heat-api-cfn.yaml
@@ -9,16 +9,12 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 resources:

   HeatApiCfnBase:
     type: ../heat-api-cfn.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
-      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
   role_data:
diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
index db71891c..8b67702c 100644
--- a/puppet/services/pacemaker/heat-api-cloudwatch.yaml
+++ b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
@@ -9,16 +9,12 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 resources:

   HeatApiCloudwatchBase:
     type: ../heat-api-cloudwatch.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
-      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
   role_data:
diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml
index b1c37d41..6628e8dd 100644
--- a/puppet/services/pacemaker/heat-api.yaml
+++ b/puppet/services/pacemaker/heat-api.yaml
@@ -9,16 +9,12 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 resources:

   HeatApiBase:
     type: ../heat-api.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
-      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
   role_data:
diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml
index 1e39b363..e1195780 100644
--- a/puppet/services/pacemaker/heat-engine.yaml
+++ b/puppet/services/pacemaker/heat-engine.yaml
@@ -9,16 +9,12 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 resources:

   HeatEngineBase:
     type: ../heat-engine.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
-      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
diff --git a/puppet/services/pacemaker/keystone.yaml b/puppet/services/pacemaker/keystone.yaml
index db52cae7..04e90368 100644
--- a/puppet/services/pacemaker/keystone.yaml
+++ b/puppet/services/pacemaker/keystone.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 resources:
@@ -19,7 +16,6 @@ resources:
     type: ../keystone.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
-      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
   role_data:
diff --git a/puppet/services/pacemaker/loadbalancer.yaml b/puppet/services/pacemaker/loadbalancer.yaml
index 771b3d9b..ce67e925 100644
--- a/puppet/services/pacemaker/loadbalancer.yaml
+++ b/puppet/services/pacemaker/loadbalancer.yaml
@@ -9,16 +9,12 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 resources:

   LoadbalancerServiceBase:
     type: ../loadbalancer.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
-      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
   role_data:
@@ -27,8 +23,8 @@ outputs:
     config_settings:
       map_merge:
         - get_attr: [LoadbalancerServiceBase, role_data, config_settings]
-        - tripleo::loadbalancer::haproxy_service_manage: false
-          tripleo::loadbalancer::mysql_clustercheck: true
-          tripleo::loadbalancer::manage_vip: false
+        - tripleo::haproxy::haproxy_service_manage: false
+          tripleo::haproxy::mysql_clustercheck: true
+          tripleo::haproxy::keepalived: false
     step_config: |
       include ::tripleo::profile::pacemaker::loadbalancer
diff --git a/puppet/services/pacemaker/memcached.yaml b/puppet/services/pacemaker/memcached.yaml
index 306f805e..9a11855e 100644
--- a/puppet/services/pacemaker/memcached.yaml
+++ b/puppet/services/pacemaker/memcached.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 resources:
@@ -28,4 +25,3 @@ outputs:
         - memcached::service_manage: false
       step_config: |
         include ::tripleo::profile::pacemaker::memcached
-
diff --git a/puppet/services/pacemaker/neutron-dhcp.yaml b/puppet/services/pacemaker/neutron-dhcp.yaml
index 0e972b28..6f514379 100644
--- a/puppet/services/pacemaker/neutron-dhcp.yaml
+++ b/puppet/services/pacemaker/neutron-dhcp.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 resources:
@@ -19,7 +16,6 @@ resources:
     type: ../neutron-dhcp.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
-      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
   role_data:
diff --git a/puppet/services/pacemaker/neutron-l3.yaml b/puppet/services/pacemaker/neutron-l3.yaml
index 84bff808..cb9c32d9 100644
--- a/puppet/services/pacemaker/neutron-l3.yaml
+++ b/puppet/services/pacemaker/neutron-l3.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 resources:
@@ -19,7 +16,6 @@ resources:
     type: ../neutron-l3.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
-      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
   role_data:
@@ -29,5 +25,7 @@ outputs:
       map_merge:
         - get_attr: [NeutronL3Base, role_data, config_settings]
         - tripleo::profile::pacemaker::neutron::enable_l3: True
+          neutron::agents::l3::enabled: false
+          neutron::agents::l3::manage_service: false
     step_config: |
       include ::tripleo::profile::pacemaker::neutron::l3
diff --git a/puppet/services/pacemaker/neutron-metadata.yaml b/puppet/services/pacemaker/neutron-metadata.yaml
index 79baf1ea..1c74b26f 100644
--- a/puppet/services/pacemaker/neutron-metadata.yaml
+++ b/puppet/services/pacemaker/neutron-metadata.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 resources:
@@ -19,7 +16,6 @@ resources:
     type: ../neutron-metadata.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
-      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
   role_data:
diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml
index 613db449..20fb2e40 100644
--- a/puppet/services/pacemaker/rabbitmq.yaml
+++ b/puppet/services/pacemaker/rabbitmq.yaml
@@ -9,16 +9,12 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''

 resources:

   RabbitMQServiceBase:
     type: ../rabbitmq.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
-      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
   role_data:
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index ae5678a3..581b4ba4 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -9,9 +9,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    type: string
-    default: ''
   RabbitUserName:
     default: guest
     description: The username for RabbitMQ
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
index f9681634..7ed880fc 100644
--- a/puppet/services/services.yaml
+++ b/puppet/services/services.yaml
@@ -15,10 +15,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  MysqlVirtualIPUri:
-    default: ''
-    type: string
-    description: The URI virtual IP for the MySQL service.

 resources:
@@ -29,7 +25,6 @@ resources:
       concurrent: true
       resource_properties:
         EndpointMap: {get_param: EndpointMap}
-        MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}

 outputs:
   config_settings:
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
new file mode 100644
index 00000000..a86aeaf5
--- /dev/null
+++ b/puppet/services/swift-proxy.yaml
@@ -0,0 +1,49 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Swift Proxy service configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  Debug:
+    default: ''
+    description: Set to True to enable debugging on all services.
+    type: string
+  SwiftPassword:
+    description: The password for the swift service account, used by the swift proxy services.
+    type: string
+    hidden: true
+  SwiftWorkers:
+    default: 0
+    description: Number of workers for Swift service.
+    type: number
+  KeystoneRegion:
+    type: string
+    default: 'regionOne'
+    description: Keystone region for endpoint
+
+
+outputs:
+  role_data:
+    description: Role data for the Swift proxy service.
+    value:
+      config_settings:
+        # Swift
+        swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+        swift::proxy::authtoken::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+        swift::proxy::authtoken::admin_password: {get_param: SwiftPassword}
+        swift::proxy::workers: {get_param: SwiftWorkers}
+        swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]}
+        swift::keystone::auth::internal_url: {get_param: [EndpointMap, SwiftInternal, uri]}
+        swift::keystone::auth::admin_url: {get_param: [EndpointMap, SwiftAdmin, uri]}
+        swift::keystone::auth::public_url_s3: {get_param: [EndpointMap, SwiftS3Public, uri]}
+        swift::keystone::auth::internal_url_s3: {get_param: [EndpointMap, SwiftS3Internal, uri]}
+        swift::keystone::auth::admin_url_s3: {get_param: [EndpointMap, SwiftS3Admin, uri]}
+        swift::keystone::auth::password: {get_param: SwiftPassword}
+        swift::keystone::auth::region: {get_param: KeystoneRegion}
+      step_config: |
+        include ::tripleo::profile::base::swift::proxy
diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml
index b262f947..1aba2bb4 100644
--- a/puppet/swift-storage-post.yaml
+++ b/puppet/swift-storage-post.yaml
@@ -11,6 +11,10 @@ parameters:
   NodeConfigIdentifiers:
     type: json
     description: Value which changes if the node configuration may need to be re-applied
+  StepConfig:
+    type: string
+    description: Config manifests that will be used to step through the deployment.
+    default: ''

 resources:
@@ -31,51 +35,56 @@ resources:
       group: puppet
       options:
         enable_debug: {get_param: ConfigDebug}
+        enable_hiera: True
+        enable_facter: False
+      inputs:
+      - name: step
       outputs:
       - name: result
       config:
-        get_file: manifests/overcloud_object.pp
+        list_join:
+          - ''
+          - - get_file: manifests/overcloud_object.pp
+            - get_file: manifests/ringbuilder.pp
+            - {get_param: StepConfig}

-  StorageDeployment_Step1:
+  StorageRingbuilderDeployment_Step2:
     type: OS::Heat::StructuredDeployments
     depends_on: StorageArtifactsDeploy
     properties:
-      name: StorageDeployment_Step1
+      name: StorageRingbuilderDeployment_Step2
       servers: {get_param: servers}
       config: {get_resource: StoragePuppetConfig}
       input_values:
+        step: 2
         update_identifier: {get_param: NodeConfigIdentifiers}

-  StorageRingbuilderPuppetConfig:
-    type: OS::Heat::SoftwareConfig
+  StorageRingbuilderDeployment_Step3:
+    type: OS::Heat::StructuredDeployments
+    depends_on: StorageRingbuilderDeployment_Step2
     properties:
-      group: puppet
-      options:
-        enable_debug: {get_param: ConfigDebug}
-        enable_hiera: True
-        enable_facter: False
-      inputs:
-      - name: step
-      outputs:
-      - name: result
-      config:
-        get_file: manifests/ringbuilder.pp
+      name: StorageRingbuilderDeployment_Step3
+      servers: {get_param: servers}
+      config: {get_resource: StoragePuppetConfig}
+      input_values:
+        step: 3
+        update_identifier: {get_param: NodeConfigIdentifiers}

-  StorageRingbuilderDeployment_Step2:
+  StorageDeployment_Step4:
     type: OS::Heat::StructuredDeployments
-    depends_on: StorageDeployment_Step1
+    depends_on: StorageRingbuilderDeployment_Step3
     properties:
-      name: StorageRingbuilderDeployment_Step2
+      name: StorageDeployment_Step4
       servers: {get_param: servers}
-      config: {get_resource: StorageRingbuilderPuppetConfig}
+      config: {get_resource: StoragePuppetConfig}
       input_values:
-        step: 3 # Note ringbuilder.pp expects >=3
+        step: 4
         update_identifier: {get_param: NodeConfigIdentifiers}

   # Note, this should come last, so use depends_on to ensure
   # this is created after any other resources.
   ExtraConfig:
-    depends_on: StorageRingbuilderDeployment_Step2
+    depends_on: StorageDeployment_Step4
     type: OS::TripleO::NodeExtraConfigPost
     properties:
       servers: {get_param: servers}
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 3f6f4733..ed52f928 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -121,6 +121,9 @@ parameters:
   NodeIndex:
     type: number
     default: 0
+  ServiceConfigSettings:
+    type: json
+    default: {}

 resources:
@@ -257,6 +260,7 @@ resources:
             - heat_config_%{::deploy_config_name}
             - object_extraconfig
             - extraconfig
+            - service_configs
             - object
             - swift_devices_and_proxy # provided by SwiftDevicesAndProxyConfig
             - all_nodes # provided by allNodesConfig
@@ -265,6 +269,8 @@ resources:
             - network
           merge_behavior: deeper
          datafiles:
+            service_configs:
+              mapped_data: {get_param: ServiceConfigSettings}
            common:
              raw_data: {get_file: hieradata/common.yaml}
            network:
@@ -279,7 +285,7 @@ resources:
            object:
              raw_data: {get_file: hieradata/object.yaml}
              mapped_data: # data supplied directly to this deployment configuration, etc
-                swift::swift_hash_suffix: { get_input: swift_hash_suffix }
+                swift::swift_hash_path_suffix: { get_input: swift_hash_suffix }
                tripleo::ringbuilder::build_ring: { get_input: swift_ring_build }
                tripleo::ringbuilder::part_power: { get_input: swift_part_power }
                tripleo::ringbuilder::replicas: {get_input: swift_replicas }
diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml
index 3e8e9182..92234b6c 100644
--- a/puppet/vip-config.yaml
+++ b/puppet/vip-config.yaml
@@ -32,11 +32,20 @@ resources:
         horizon_vip: {get_input: horizon_vip}
         redis_vip: {get_input: redis_vip}
         mysql_vip: {get_input: mysql_vip}
-        tripleo::loadbalancer::public_virtual_ip: {get_input: public_virtual_ip}
-        tripleo::loadbalancer::controller_virtual_ip: {get_input: control_virtual_ip}
-        tripleo::loadbalancer::internal_api_virtual_ip: {get_input: internal_api_virtual_ip}
-        tripleo::loadbalancer::storage_virtual_ip: {get_input: storage_virtual_ip}
-        tripleo::loadbalancer::storage_mgmt_virtual_ip: {get_input: storage_mgmt_virtual_ip}
+        public_virtual_ip: {get_input: public_virtual_ip}
+        controller_virtual_ip: {get_input: control_virtual_ip}
+        internal_api_virtual_ip: {get_input: internal_api_virtual_ip}
+        storage_virtual_ip: {get_input: storage_virtual_ip}
+        storage_mgmt_virtual_ip: {get_input: storage_mgmt_virtual_ip}
+        # public_virtual_ip and controller_virtual_ip are needed in
+        # both HAproxy & keepalived.
+        tripleo::haproxy::public_virtual_ip: {get_input: public_virtual_ip}
+        tripleo::haproxy::controller_virtual_ip: {get_input: control_virtual_ip}
+        tripleo::keepalived::public_virtual_ip: {get_input: public_virtual_ip}
+        tripleo::keepalived::controller_virtual_ip: {get_input: control_virtual_ip}
+        tripleo::keepalived::internal_api_virtual_ip: {get_input: internal_api_virtual_ip}
+        tripleo::keepalived::storage_virtual_ip: {get_input: storage_virtual_ip}
+        tripleo::keepalived::storage_mgmt_virtual_ip: {get_input: storage_mgmt_virtual_ip}
         tripleo::redis_notification::haproxy_monitor_ip: {get_input: control_virtual_ip}
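Reviewer note on the recurring database_connection change: each service template above drops its standalone MysqlVirtualIPUri parameter and instead builds the DSN from the MysqlInternal entry of the EndpointMap. As a rough sketch of what the new list_join resolves to (the endpoint values below are hypothetical examples, not taken from this change), an endpoint map carrying

    parameter_defaults:
      EndpointMap:
        MysqlInternal: {protocol: mysql+pymysql, host: 192.0.2.10, port: 3306}

would yield hiera data of the form

    glance::api::database_connection: mysql+pymysql://glance:<GlancePassword>@192.0.2.10/glance

which is the same style of connection string the removed glance_dsn/heat_dsn/keystone_dsn anchors used to assemble from MysqlVirtualIPUri, only now derived per endpoint.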