Diffstat (limited to 'puppet')
49 files changed, 725 insertions, 1007 deletions
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml index 6dca9df7..b2948144 100644 --- a/puppet/ceph-storage.yaml +++ b/puppet/ceph-storage.yaml @@ -91,10 +91,17 @@ parameters: ServiceConfigSettings: type: json default: {} + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 resources: CephStorage: - type: OS::Nova::Server + type: OS::TripleO::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} image_update_policy: {get_param: ImageUpdatePolicy} diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml index 22778820..05918026 100644 --- a/puppet/cinder-storage.yaml +++ b/puppet/cinder-storage.yaml @@ -91,10 +91,17 @@ parameters: ServiceConfigSettings: type: json default: {} + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 resources: BlockStorage: - type: OS::Nova::Server + type: OS::TripleO::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} diff --git a/puppet/compute.yaml b/puppet/compute.yaml index db2d7465..d7cf7787 100644 --- a/puppet/compute.yaml +++ b/puppet/compute.yaml @@ -69,43 +69,6 @@ parameters: KeystonePublicApiVirtualIP: type: string default: '' - NeutronBridgeMappings: - description: > - The OVS logical->physical bridge mappings to use. See the Neutron - documentation for details. Defaults to mapping br-ex - the external - bridge on hosts - to a physical name 'datacentre' which can be used - to create provider networks (and we use this for the default floating - network) - if changing this either use different post-install network - scripts or be sure to keep 'datacentre' as a mapping network name. - type: comma_delimited_list - default: "datacentre:br-ex" - NeutronEnableTunnelling: - type: string - default: "True" - NeutronEnableL2Pop: - type: string - description: > - Enable/disable the L2 population feature in the Neutron agents. - default: "False" - NeutronFlatNetworks: - type: comma_delimited_list - default: 'datacentre' - description: > - If set, flat networks to configure in neutron plugins. - NeutronHost: - type: string - default: '' # Has to be here because of the ignored empty value bug - NeutronNetworkType: - type: comma_delimited_list - description: The tenant network type for Neutron. - default: 'vxlan' - NeutronNetworkVLANRanges: - default: 'datacentre:1:1000' - description: > - The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the - Neutron documentation for permitted values. Defaults to permitting any - VLAN on the 'datacentre' physical network (See NeutronBridgeMappings). - type: comma_delimited_list NeutronPassword: description: The password for the neutron service account, used by neutron agents. type: string @@ -118,73 +81,6 @@ parameters: default: nic1 description: A port to add to the NeutronPhysicalBridge. type: string - NeutronTenantMtu: - description: > - The default MTU for tenant networks. For VXLAN/GRE tunneling, this should - be at least 50 bytes smaller than the MTU on the physical network. This - value will be used to set the MTU on the virtual Ethernet device. - This number is related to the value of NeutronDnsmasqOptions, since that - will determine the MTU that is assigned to the VM host through DHCP. 
- default: 1400 - type: number - NeutronTunnelTypes: - type: comma_delimited_list - description: | - The tunnel types for the Neutron tenant network. - default: 'vxlan' - NeutronTunnelIdRanges: - description: | - Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges - of GRE tunnel IDs that are available for tenant network allocation - default: ["1:4094", ] - type: comma_delimited_list - NeutronVniRanges: - description: | - Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges - of VXLAN VNI IDs that are available for tenant network allocation - default: ["1:4094", ] - type: comma_delimited_list - NeutronMetadataProxySharedSecret: - description: Shared secret to prevent spoofing - type: string - hidden: true - NeutronCorePlugin: - default: 'ml2' - description: | - The core plugin for Neutron. The value should be the entrypoint to be loaded - from neutron.core_plugins namespace. - type: string - NeutronServicePlugins: - default: "router,qos" - description: | - Comma-separated list of service plugin entrypoints to be loaded from the - neutron.service_plugins namespace. - type: comma_delimited_list - NeutronTypeDrivers: - default: "vxlan,vlan,flat,gre" - description: | - Comma-separated list of network type driver entrypoints to be loaded. - type: comma_delimited_list - NeutronMechanismDrivers: - default: 'openvswitch' - description: | - The mechanism drivers for the Neutron tenant network. - type: comma_delimited_list - NeutronAgentExtensions: - default: "qos" - description: | - Comma-separated list of extensions enabled for the Neutron agents. - type: comma_delimited_list - # Not relevant for Computes, should be removed - NeutronAllowL3AgentFailover: - default: 'True' - description: Allow automatic l3-agent failover - type: string - # Not relevant for Computes, should be removed - NeutronL3HA: - default: 'False' - description: Whether to enable l3-agent HA - type: string NodeIndex: type: number default: 0 @@ -316,11 +212,18 @@ parameters: ServiceConfigSettings: type: json default: {} + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 resources: NovaCompute: - type: OS::Nova::Server + type: OS::TripleO::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} @@ -517,7 +420,6 @@ resources: nova::migration::live_migration_tunnelled: {get_input: nova_enable_rbd_backend} rbd_persistent_storage: {get_input: cinder_enable_rbd_backend} nova_password: {get_input: nova_password} - nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address} nova::vncproxy::common::vncproxy_protocol: {get_input: nova_vncproxy_protocol} nova::vncproxy::common::vncproxy_host: {get_input: nova_vncproxy_host} @@ -539,30 +441,11 @@ resources: neutron::rabbit_user: {get_input: rabbit_username} neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} neutron::rabbit_port: {get_input: rabbit_client_port} - neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks} - neutron_host: {get_input: neutron_host} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} - neutron::network_device_mtu: {get_input: neutron_tenant_mtu} - neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types} - neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types} - 
neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions} - neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} - neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} - neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges} - neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings} - neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} - neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} - neutron_physical_bridge: {get_input: neutron_physical_bridge} - neutron_public_interface: {get_input: neutron_public_interface} nova::network::neutron::neutron_password: {get_input: neutron_password} nova::network::neutron::neutron_url: {get_input: neutron_internal_url} nova::network::neutron::neutron_auth_url: {get_input: neutron_auth_url} - neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} - neutron::core_plugin: {get_input: neutron_core_plugin} - neutron::service_plugins: {get_input: neutron_service_plugins} - neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers} - neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers} keystone_public_api_virtual_ip: {get_input: keystone_vip} admin_password: {get_input: admin_password} tripleo::packages::enable_install: {get_input: enable_package_install} @@ -606,71 +489,8 @@ resources: ceilometer_compute_agent: {get_param: CeilometerComputeAgent} ceilometer_agent_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]} glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} - neutron_flat_networks: - str_replace: - template: NETWORKS - params: - NETWORKS: {get_param: NeutronFlatNetworks} - neutron_host: {get_param: NeutronHost} neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]} - neutron_tunnel_id_ranges: - str_replace: - template: RANGES - params: - RANGES: {get_param: NeutronTunnelIdRanges} - neutron_vni_ranges: - str_replace: - template: RANGES - params: - RANGES: {get_param: NeutronVniRanges} - neutron_tenant_network_types: - str_replace: - template: TYPES - params: - TYPES: {get_param: NeutronNetworkType} - neutron_tunnel_types: - str_replace: - template: TYPES - params: - TYPES: {get_param: NeutronTunnelTypes} - neutron_network_vlan_ranges: - str_replace: - template: RANGES - params: - RANGES: {get_param: NeutronNetworkVLANRanges} - neutron_bridge_mappings: - str_replace: - template: MAPPINGS - params: - MAPPINGS: {get_param: NeutronBridgeMappings} - neutron_tenant_mtu: {get_param: NeutronTenantMtu} - neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} - neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} - neutron_physical_bridge: {get_param: NeutronPhysicalBridge} - neutron_public_interface: {get_param: NeutronPublicInterface} neutron_password: {get_param: NeutronPassword} - neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} - neutron_core_plugin: {get_param: NeutronCorePlugin} - neutron_service_plugins: - str_replace: - template: PLUGINS - params: - PLUGINS: {get_param: NeutronServicePlugins} - neutron_type_drivers: - str_replace: - template: DRIVERS - params: - DRIVERS: {get_param: NeutronTypeDrivers} - neutron_mechanism_drivers: - str_replace: - template: MECHANISMS - params: - MECHANISMS: {get_param: NeutronMechanismDrivers} - neutron_agent_extensions: - str_replace: - template: AGENT_EXTENSIONS - 
params: - AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]} neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]} keystone_vip: {get_param: KeystonePublicApiVirtualIP} diff --git a/puppet/controller.yaml b/puppet/controller.yaml index 101d971e..dfa63a90 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -12,34 +12,6 @@ parameters: description: The password for the aodh services. type: string hidden: true - #TODO(composable Redis): Remove the Redis password param - #As is used by ceilometer - CeilometerBackend: - default: 'mongodb' - description: The ceilometer backend type. - type: string - CeilometerMeteringSecret: - description: Secret shared by the ceilometer services. - type: string - hidden: true - CeilometerPassword: - description: The password for the ceilometer service and db account. - type: string - hidden: true - CeilometerStoreEvents: - default: false - description: Whether to store events in ceilometer. - type: boolean - CeilometerMeterDispatcher: - default: 'database' - description: Dispatcher to process meter data - type: string - constraints: - - allowed_values: ['gnocchi', 'database'] - CeilometerWorkers: - default: 0 - description: Number of workers for Ceilometer service. - type: number controllerExtraConfig: default: {} description: | @@ -156,10 +128,6 @@ parameters: description: Auth encryption key for heat-engine type: string hidden: true - HorizonAllowedHosts: - default: '*' - description: A list of IP/Hostname allowed to connect to horizon - type: comma_delimited_list HorizonSecret: description: Secret key for Django type: string @@ -236,15 +204,6 @@ parameters: default: nic1 description: What interface to bridge onto br-ex for network nodes. type: string - NeutronTenantMtu: - description: > - The default MTU for tenant networks. For VXLAN/GRE tunneling, this should - be at least 50 bytes smaller than the MTU on the physical network. This - value will be used to set the MTU on the virtual Ethernet device. - This number is related to the value of NeutronDnsmasqOptions, since that - will determine the MTU that is assigned to the VM host through DHCP. 
- default: 1400 - type: number NovaEnableDBPurge: default: true description: | @@ -390,6 +349,10 @@ parameters: ServiceConfigSettings: type: json default: {} + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 parameter_groups: - label: deprecated @@ -400,7 +363,10 @@ parameter_groups: resources: Controller: - type: OS::Nova::Server + type: OS::TripleO::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} image_update_policy: {get_param: ImageUpdatePolicy} @@ -550,12 +516,10 @@ resources: server: {get_resource: Controller} input_values: bootstack_nodeid: {get_attr: [Controller, name]} - ceilometer_workers: {get_param: CeilometerWorkers} haproxy_log_address: {get_param: HAProxySyslogAddress} haproxy_stats_password: {get_param: HAProxyStatsPassword} haproxy_stats_user: {get_param: HAProxyStatsUser} heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey} - horizon_allowed_hosts: {get_param: HorizonAllowedHosts} horizon_secret: {get_param: HorizonSecret} admin_password: {get_param: AdminPassword} debug: {get_param: Debug} @@ -584,21 +548,15 @@ resources: CLUSTER: {get_param: MysqlClusterUniquePart} neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} neutron_password: {get_param: NeutronPassword} - neutron_tenant_mtu: {get_param: NeutronTenantMtu} neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] } neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] } neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] } neutron_auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] } nova_internal_url: { get_param: [ EndpointMap, NovaInternal, uri ] } - ceilometer_backend: {get_param: CeilometerBackend} - ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} - ceilometer_password: {get_param: CeilometerPassword} - ceilometer_store_events: {get_param: CeilometerStoreEvents} aodh_password: {get_param: AodhPassword} aodh_internal_url: { get_param: [ EndpointMap, AodhInternal, uri ] } aodh_public_url: { get_param: [ EndpointMap, AodhPublic, uri ] } aodh_admin_url: { get_param: [ EndpointMap, AodhAdmin, uri ] } - ceilometer_meter_dispatcher: {get_param: CeilometerMeterDispatcher} gnocchi_password: {get_param: GnocchiPassword} gnocchi_backend: {get_param: GnocchiBackend} gnocchi_indexer_backend: {get_param: GnocchiIndexerBackend} @@ -610,15 +568,6 @@ resources: - '@' - {get_param: RedisVirtualIPUri} - ':6379/' - ceilometer_dsn: - list_join: - - '' - - - {get_param: [EndpointMap, MysqlInternal, protocol]} - - '://ceilometer:' - - {get_param: CeilometerPassword} - - '@' - - {get_param: [EndpointMap, MysqlInternal, host]} - - '/ceilometer' gnocchi_dsn: list_join: - '' @@ -640,10 +589,6 @@ resources: gnocchi_internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]} gnocchi_public_url: { get_param: [ EndpointMap, GnocchiPublic, uri ] } gnocchi_admin_url: { get_param: [ EndpointMap, GnocchiAdmin, uri ] } - ceilometer_public_url: {get_param: [EndpointMap, CeilometerPublic, uri]} - ceilometer_internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]} - ceilometer_admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]} - ceilometer_agent_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} nova_enable_db_purge: {get_param: NovaEnableDBPurge} nova_ipv6: {get_param: NovaIPv6} corosync_ipv6: {get_param: CorosyncIPv6} @@ 
-856,7 +801,6 @@ resources: # Neutron neutron::bind_host: {get_input: neutron_api_network} - neutron::network_device_mtu: {get_input: neutron_tenant_mtu} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network} neutron::keystone::auth::public_url: {get_input: neutron_public_url } @@ -866,33 +810,7 @@ resources: neutron::keystone::auth::region: {get_input: keystone_region} # Ceilometer - ceilometer_backend: {get_input: ceilometer_backend} - ceilometer_mysql_conn_string: {get_input: ceilometer_dsn} - ceilometer::telemetry_secret: {get_input: ceilometer_metering_secret} - ceilometer::rabbit_userid: {get_input: rabbit_username} - ceilometer::rabbit_password: {get_input: rabbit_password} - ceilometer::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - ceilometer::rabbit_port: {get_input: rabbit_client_port} - ceilometer::debug: {get_input: debug} ceilometer::api::host: {get_input: ceilometer_api_network} - ceilometer::api::keystone_password: {get_input: ceilometer_password} - ceilometer::api::auth_uri: {get_input: keystone_auth_uri} - ceilometer::api::identity_uri: {get_input: keystone_identity_uri} - ceilometer::agent::auth::auth_password: {get_input: ceilometer_password} - ceilometer::agent::auth::auth_url: {get_input: ceilometer_agent_auth_url} - ceilometer::agent::central::coordination_url: {get_input: ceilometer_coordination_url} - ceilometer::agent::notification::store_events: {get_input: ceilometer_store_events} - ceilometer::db::mysql::password: {get_input: ceilometer_password} - ceilometer::collector::meter_dispatcher: {get_input: ceilometer_meter_dispatcher} - ceilometer::dispatcher::gnocchi::url: {get_input: gnocchi_internal_url } - ceilometer::dispatcher::gnocchi::filter_project: 'service' - ceilometer::dispatcher::gnocchi::archive_policy: 'low' - ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml' - ceilometer::keystone::auth::public_url: {get_input: ceilometer_public_url } - ceilometer::keystone::auth::internal_url: {get_input: ceilometer_internal_url } - ceilometer::keystone::auth::admin_url: {get_input: ceilometer_admin_url } - ceilometer::keystone::auth::password: {get_input: ceilometer_password } - ceilometer::keystone::auth::region: {get_input: keystone_region} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} @@ -950,7 +868,6 @@ resources: nova::api::api_bind_address: {get_input: nova_api_network} nova::api::metadata_listen: {get_input: nova_metadata_network} nova::api::admin_password: {get_input: nova_password} - nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} nova::database_connection: {get_input: nova_dsn} nova::api_database_connection: {get_input: nova_api_dsn} nova::glance_api_servers: {get_input: glance_api_servers} @@ -972,7 +889,6 @@ resources: # Horizon apache::mod::remoteip::proxy_ips: {get_input: horizon_subnet} apache::ip: {get_input: horizon_network} - horizon::allowed_hosts: {get_input: horizon_allowed_hosts} horizon::django_debug: {get_input: debug} horizon::secret_key: {get_input: horizon_secret} horizon::bind_address: {get_input: horizon_network} diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml index 3e455347..aa5c3c43 100644 --- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml +++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml 
@@ -109,11 +109,3 @@ resources: properties: config: {get_resource: NetworkMidoNetConfig} servers: {get_param: compute_servers} - -outputs: - config_identifier: - value: - list_join: - - ' ' - - - {get_attr: [NetworkMidonetDeploymentControllers, deploy_stdouts]} - - {get_attr: [NetworkMidonetDeploymentComputes, deploy_stdouts]} diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml index 71445800..e924fc87 100644 --- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml +++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml @@ -343,11 +343,3 @@ resources: input_values: ucsm_config: {get_param: NetworkUCSMHostList} actions: ['CREATE'] # Only do this on CREATE - -outputs: - # The Deployment applying the hieradata outputs the derived config-id, which - # changes if the input_values change, so if the stdouts from - # NetworkCiscoDeployment change, we need to reapply puppet (which will - # happen if we return a different config_identifier) - config_identifier: - value: {get_attr: [NetworkCiscoDeployment, deploy_stdouts]} diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index f84f7049..860c8fb5 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -4,7 +4,7 @@ nova::api::enabled: true nova::vncproxy::enabled: true # gnocchi -gnocchi::db::sync::extra_opts: '--skip-storage' +gnocchi::db::sync::extra_opts: '--skip-storage --create-legacy-resource-types' gnocchi::storage::swift::swift_user: 'service:gnocchi' gnocchi::storage::swift::swift_auth_version: 2 gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26' @@ -82,8 +82,10 @@ keystone::wsgi::apache::ssl: false swift::proxy::pipeline: - 'catch_errors' - 'healthcheck' + - 'proxy-logging' - 'cache' - 'ratelimit' + - 'bulk' - 'tempurl' - 'formpost' - 'authtoken' diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml index 9cbff586..9b2ea4f4 100644 --- a/puppet/hieradata/database.yaml +++ b/puppet/hieradata/database.yaml @@ -13,54 +13,6 @@ nova::db::mysql_api::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" -# Glance -glance::db::mysql::user: glance -glance::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -glance::db::mysql::dbname: glance -glance::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Keystone -keystone::db::mysql::user: keystone -keystone::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -keystone::db::mysql::dbname: keystone -keystone::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Neutron -neutron::db::mysql::user: neutron -neutron::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -neutron::db::mysql::dbname: ovs_neutron -neutron::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Cinder -cinder::db::mysql::user: cinder -cinder::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -cinder::db::mysql::dbname: cinder -cinder::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Heat -heat::db::mysql::user: heat -heat::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -heat::db::mysql::dbname: heat -heat::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Ironic -ironic::db::mysql::user: ironic -ironic::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -ironic::db::mysql::dbname: ironic -ironic::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - # Ceilometer ceilometer::db::mysql::user: ceilometer 
ceilometer::db::mysql::host: "%{hiera('mysql_virtual_ip')}" @@ -84,11 +36,3 @@ aodh::db::mysql::dbname: aodh aodh::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" - - -sahara::db::mysql::user: sahara -sahara::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -sahara::db::mysql::dbname: sahara -sahara::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index af6b0960..152694d9 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -16,14 +16,6 @@ include ::tripleo::packages include ::tripleo::firewall -if hiera('step') >= 1 { - - create_resources(kmod::load, hiera('kernel_modules'), {}) - create_resources(sysctl::value, hiera('sysctl_settings'), {}) - Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - -} - if hiera('step') >= 4 { hiera_include('ceph_classes') } diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index b8e267fc..6e446fd8 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -16,114 +16,13 @@ include ::tripleo::packages include ::tripleo::firewall -create_resources(kmod::load, hiera('kernel_modules'), { }) -create_resources(sysctl::value, hiera('sysctl_settings'), { }) -Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - if hiera('step') >= 4 { - # When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique - exec { 'reset-iscsi-initiator-name': - command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi', - onlyif => '/usr/bin/test ! -f /etc/iscsi/.initiator_reset', - }-> - - file { '/etc/iscsi/.initiator_reset': - ensure => present, - } - nova_config { 'DEFAULT/my_ip': value => $ipaddress; 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver'; } - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - file { '/etc/libvirt/qemu.conf': - ensure => present, - content => hiera('midonet_libvirt_qemu_data') - } - } - - include ::neutron - include ::neutron::config - - # If the value of core plugin is set to 'nuage', - # include nuage agent, - # If the value of core plugin is set to 'midonet', - # include midonet agent, - # else use the default value of 'ml2' - if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { - include ::nuage::vrs - include ::nova::compute::neutron - - class { '::nuage::metadataagent': - nova_os_tenant_name => hiera('nova::api::admin_tenant_name'), - nova_os_password => hiera('nova_password'), - nova_metadata_ip => hiera('nova_metadata_node_ips'), - nova_auth_ip => hiera('keystone_public_api_virtual_ip'), - } - } - elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - # TODO(devvesa) provide non-controller ips for these services - $zookeeper_node_ips = hiera('neutron_api_node_ips') - $cassandra_node_ips = hiera('neutron_api_node_ips') - - class { '::tripleo::network::midonet::agent': - zookeeper_servers => $zookeeper_node_ips, - cassandra_seeds => $cassandra_node_ips - } - } - elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { - - include ::contrail::vrouter - # NOTE: it's not possible to use this class without a functional - # contrail controller up and running - #class {'::contrail::vrouter::provision_vrouter': - # 
require => Class['contrail::vrouter'], - #} - } - elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' { - # forward all ipv4 traffic - # this is required for the vms to pass through the gateways public interface - sysctl::value { 'net.ipv4.ip_forward': value => '1' } - - # ifc_ctl_pp needs to be invoked by root as part of the vif.py when a VM is powered on - file { '/etc/sudoers.d/ifc_ctl_sudoers': - ensure => file, - owner => root, - group => root, - mode => '0440', - content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n", - } - } - else { - - # NOTE: this code won't live in puppet-neutron until Neutron OVS agent - # can be gracefully restarted. See https://review.openstack.org/#/c/297211 - # In the meantime, it's safe to restart the agent on each change in neutron.conf, - # because Puppet changes are supposed to be done during bootstrap and upgrades. - # Some resource managed by Neutron_config (like messaging and logging options) require - # a restart of OVS agent. This code does it. - # In Newton, OVS agent will be able to be restarted gracefully so we'll drop the code - # from here and fix it in puppet-neutron. - Neutron_config<||> ~> Service['neutron-ovs-agent-service'] - - include ::neutron::plugins::ml2 - include ::neutron::agents::ml2::ovs - - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - class { '::neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), - } - } - - if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::agents::bigswitch - } - } - include ::ceilometer include ::ceilometer::config include ::ceilometer::agent::compute diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index 72c31a31..9cdbda0f 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -18,35 +18,7 @@ include ::tripleo::firewall $enable_load_balancer = hiera('enable_load_balancer', true) -if hiera('step') >= 1 { - - create_resources(kmod::load, hiera('kernel_modules'), {}) - create_resources(sysctl::value, hiera('sysctl_settings'), {}) - Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - -} - if hiera('step') >= 2 { - - # MongoDB - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - # NOTE(gfidente): We need to pass the list of IPv6 addresses *with* port and - # without the brackets as 'members' argument for the 'mongodb_replset' - # resource. 
- if str2bool(hiera('mongodb::server::ipv6', false)) { - $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[') - $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017') - $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') - } else { - $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') - $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') - } - $mongo_node_string = join($mongo_node_ips_with_port, ',') - - $mongodb_replset = hiera('mongodb::server::replset') - $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" - } - if str2bool(hiera('enable_galera', true)) { $mysql_config_file = '/etc/my.cnf.d/galera.cnf' } else { @@ -74,9 +46,6 @@ if hiera('step') >= 2 { if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' { include ::gnocchi::db::mysql } - if downcase(hiera('ceilometer_backend')) == 'mysql' { - include ::ceilometer::db::mysql - } include ::aodh::db::mysql } #END STEP 2 @@ -95,91 +64,6 @@ if hiera('step') >= 4 { } include ::nova::config - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - # TODO(devvesa) provide non-controller ips for these services - $zookeeper_node_ips = hiera('neutron_api_node_ips') - $cassandra_node_ips = hiera('neutron_api_node_ips') - - # Run zookeeper in the controller if configured - if hiera('enable_zookeeper_on_controller') { - class {'::tripleo::cluster::zookeeper': - zookeeper_server_ips => $zookeeper_node_ips, - # TODO: create a 'bind' hiera key for zookeeper - zookeeper_client_ip => hiera('neutron::bind_host'), - zookeeper_hostnames => hiera('controller_node_names') - } - } - - # Run cassandra in the controller if configured - if hiera('enable_cassandra_on_controller') { - class {'::tripleo::cluster::cassandra': - cassandra_servers => $cassandra_node_ips, - # TODO: create a 'bind' hiera key for cassandra - cassandra_ip => hiera('neutron::bind_host'), - } - } - - class {'::tripleo::network::midonet::agent': - zookeeper_servers => $zookeeper_node_ips, - cassandra_seeds => $cassandra_node_ips - } - - class {'::tripleo::network::midonet::api': - zookeeper_servers => $zookeeper_node_ips, - vip => hiera('public_virtual_ip'), - keystone_ip => hiera('public_virtual_ip'), - keystone_admin_token => hiera('keystone::admin_token'), - # TODO: create a 'bind' hiera key for api - bind_address => hiera('neutron::bind_host'), - admin_password => hiera('admin_password') - } - - # TODO: find a way to get an empty list from hiera - # TODO: when doing the composable midonet plugin, don't forget to - # set service_plugins to an empty array in Hiera. 
- class {'::neutron': - service_plugins => [] - } - - } - - # If the value of core plugin is set to 'midonet', - # skip all the ML2 configuration - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - class {'::neutron::plugins::midonet': - midonet_api_ip => hiera('public_virtual_ip'), - keystone_tenant => hiera('neutron::server::auth_tenant'), - keystone_password => hiera('neutron::server::password') - } - } - - # Ceilometer - $ceilometer_backend = downcase(hiera('ceilometer_backend')) - case $ceilometer_backend { - /mysql/ : { - $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') - } - default : { - $ceilometer_database_connection = $ceilometer_mongodb_conn_string - } - } - include ::ceilometer - include ::ceilometer::config - include ::ceilometer::api - include ::ceilometer::agent::notification - include ::ceilometer::agent::central - include ::ceilometer::expirer - include ::ceilometer::collector - include ::ceilometer::agent::auth - include ::ceilometer::dispatcher::gnocchi - class { '::ceilometer::db' : - database_connection => $ceilometer_database_connection, - } - - Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } - # Aodh class { '::aodh' : database_connection => hiera('aodh_mysql_conn_string'), @@ -193,27 +77,6 @@ if hiera('step') >= 4 { include ::aodh::listener include ::aodh::client - # Horizon - include ::apache::mod::remoteip - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - $_profile_support = 'cisco' - } else { - $_profile_support = 'None' - } - $neutron_options = {'profile_support' => $_profile_support } - - $memcached_ipv6 = hiera('memcached_ipv6', false) - if $memcached_ipv6 { - $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]') - } else { - $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1') - } - - class { '::horizon': - cache_server_ip => $horizon_memcached_servers, - neutron_options => $neutron_options, - } - # Gnocchi $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string') class { '::gnocchi': diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 7205002a..cfa693be 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -21,10 +21,7 @@ Pcmk_resource <| |> { # TODO(jistr): use pcs resource provider instead of just no-ops Service <| tag == 'aodh-service' or - tag == 'ceilometer-service' or - tag == 'gnocchi-service' or - tag == 'neutron-service' or - tag == 'nova-service' + tag == 'gnocchi-service' |> { hasrestart => true, restart => '/bin/true', @@ -53,10 +50,6 @@ $non_pcmk_start = hiera('step') >= 5 if hiera('step') >= 1 { - create_resources(kmod::load, hiera('kernel_modules'), {}) - create_resources(sysctl::value, hiera('sysctl_settings'), {}) - Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G')) $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false)) if $corosync_ipv6 { @@ -93,10 +86,6 @@ if hiera('step') >= 1 { op_params => 'start timeout=200s stop timeout=200s', } - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - include ::mongodb::params - } - # Galera if str2bool(hiera('enable_galera', true)) { $mysql_config_file = '/etc/my.cnf.d/galera.cnf' @@ -192,48 +181,25 @@ if hiera('step') >= 2 { require => 
Class['::mysql::server'], before => Exec['galera-ready'], } - } - $mysql_root_password = hiera('mysql::server::root_password') - $mysql_clustercheck_password = hiera('mysql_clustercheck_password') - # This step is to create a sysconfig clustercheck file with the root user and empty password - # on the first install only (because later on the clustercheck db user will be used) - # We are using exec and not file in order to not have duplicate definition errors in puppet - # when we later set the the file to contain the clustercheck data - exec { 'create-root-sysconfig-clustercheck': - command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck", - unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck', - } - exec { 'galera-ready' : - command => '/usr/bin/clustercheck >/dev/null', - timeout => 30, - tries => 180, - try_sleep => 10, - environment => ['AVAILABLE_WHEN_READONLY=0'], - require => Exec['create-root-sysconfig-clustercheck'], - } + exec { 'galera-ready' : + command => '/usr/bin/clustercheck >/dev/null', + timeout => 30, + tries => 180, + try_sleep => 10, + environment => ['AVAILABLE_WHEN_READONLY=0'], + require => Exec['create-root-sysconfig-clustercheck'], + } - xinetd::service { 'galera-monitor' : - port => '9200', - server => '/usr/bin/clustercheck', - per_source => 'UNLIMITED', - log_on_success => '', - log_on_failure => 'HOST', - flags => 'REUSE', - service_type => 'UNLISTED', - user => 'root', - group => 'root', - require => Exec['create-root-sysconfig-clustercheck'], - } - # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck - # to it in a later step. We do this only on one node as it will replicate on - # the other members. We also make sure that the permissions are the minimum necessary - if $pacemaker_master { + # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck + # to it in a later step. We do this only on one node as it will replicate on + # the other members. 
We also make sure that the permissions are the minimum necessary mysql_user { 'clustercheck@localhost': ensure => 'present', - password_hash => mysql_password($mysql_clustercheck_password), + password_hash => mysql_password(hiera('mysql_clustercheck_password')), require => Exec['galera-ready'], } + mysql_grant { 'clustercheck@localhost/*.*': ensure => 'present', options => ['GRANT'], @@ -241,15 +207,6 @@ if hiera('step') >= 2 { table => '*.*', user => 'clustercheck@localhost', } - } - - # Create all the database schemas - if $sync_db { - if downcase(hiera('ceilometer_backend')) == 'mysql' { - class { '::ceilometer::db::mysql': - require => Exec['galera-ready'], - } - } if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' { class { '::gnocchi::db::mysql': @@ -259,7 +216,28 @@ if hiera('step') >= 2 { class { '::aodh::db::mysql': require => Exec['galera-ready'], - } + } + } + # This step is to create a sysconfig clustercheck file with the root user and empty password + # on the first install only (because later on the clustercheck db user will be used) + # We are using exec and not file in order to not have duplicate definition errors in puppet + # when we later set the the file to contain the clustercheck data + exec { 'create-root-sysconfig-clustercheck': + command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck", + unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck', + } + + xinetd::service { 'galera-monitor' : + port => '9200', + server => '/usr/bin/clustercheck', + per_source => 'UNLIMITED', + log_on_success => '', + log_on_failure => 'HOST', + flags => 'REUSE', + service_type => 'UNLISTED', + user => 'root', + group => 'root', + require => Exec['create-root-sysconfig-clustercheck'], } } #END STEP 2 @@ -267,6 +245,7 @@ if hiera('step') >= 2 { if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { # At this stage we are guaranteed that the clustercheck db user exists # so we switch the resource agent to use it. 
+ $mysql_clustercheck_password = hiera('mysql_clustercheck_password') file { '/etc/sysconfig/clustercheck' : ensure => file, mode => '0600', @@ -290,128 +269,6 @@ MYSQL_HOST=localhost\n", include ::nova::config - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - # TODO(devvesa) provide non-controller ips for these services - $zookeeper_node_ips = hiera('neutron_api_node_ips') - $cassandra_node_ips = hiera('neutron_api_node_ips') - - # Run zookeeper in the controller if configured - if hiera('enable_zookeeper_on_controller') { - class {'::tripleo::cluster::zookeeper': - zookeeper_server_ips => $zookeeper_node_ips, - # TODO: create a 'bind' hiera key for zookeeper - zookeeper_client_ip => hiera('neutron::bind_host'), - zookeeper_hostnames => split(hiera('controller_node_names'), ',') - } - } - - # Run cassandra in the controller if configured - if hiera('enable_cassandra_on_controller') { - class {'::tripleo::cluster::cassandra': - cassandra_servers => $cassandra_node_ips, - # TODO: create a 'bind' hiera key for cassandra - cassandra_ip => hiera('neutron::bind_host'), - } - } - - class {'::tripleo::network::midonet::agent': - zookeeper_servers => $zookeeper_node_ips, - cassandra_seeds => $cassandra_node_ips - } - - class {'::tripleo::network::midonet::api': - zookeeper_servers => $zookeeper_node_ips, - vip => hiera('public_virtual_ip'), - keystone_ip => hiera('public_virtual_ip'), - keystone_admin_token => hiera('keystone::admin_token'), - # TODO: create a 'bind' hiera key for api - bind_address => hiera('neutron::bind_host'), - admin_password => hiera('admin_password') - } - - # Configure Neutron - # TODO: when doing the composable midonet plugin, don't forget to - # set service_plugins to an empty array in Hiera. - class {'::neutron': - service_plugins => [] - } - - } - - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - class {'::neutron::plugins::midonet': - midonet_api_ip => hiera('public_virtual_ip'), - keystone_tenant => hiera('neutron::server::auth_tenant'), - keystone_password => hiera('neutron::server::password') - } - } - - # Ceilometer - case downcase(hiera('ceilometer_backend')) { - /mysql/: { - $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') - } - default: { - $mongo_node_string = join($mongo_node_ips_with_port, ',') - $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" - } - } - include ::ceilometer - include ::ceilometer::config - class { '::ceilometer::api' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::agent::notification' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::agent::central' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::collector' : - manage_service => false, - enabled => false, - } - include ::ceilometer::expirer - class { '::ceilometer::db' : - database_connection => $ceilometer_database_connection, - sync_db => $sync_db, - } - include ::ceilometer::agent::auth - include ::ceilometer::dispatcher::gnocchi - - Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } - - # httpd/apache and horizon - # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent - class { '::apache' : - service_enable => false, - # service_manage => false, # <-- not supported with horizon&apache mod_wsgi? 
- } - include ::apache::mod::remoteip - include ::apache::mod::status - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - $_profile_support = 'cisco' - } else { - $_profile_support = 'None' - } - $neutron_options = {'profile_support' => $_profile_support } - - $memcached_ipv6 = hiera('memcached_ipv6', false) - if $memcached_ipv6 { - $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]') - } else { - $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1') - } - - class { '::horizon': - cache_server_ip => $horizon_memcached_servers, - neutron_options => $neutron_options, - } - # Aodh class { '::aodh' : database_connection => hiera('aodh_mysql_conn_string'), @@ -482,6 +339,7 @@ if hiera('step') >= 5 { # password. On second runs or updates /root/.my.cnf will already be populated # with proper credentials. This step happens on every node because this sql # statement does not automatically replicate across nodes. + $mysql_root_password = hiera('mysql::server::root_password') exec { 'galera-set-root-password': command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root", } @@ -527,49 +385,6 @@ password=\"${mysql_root_password}\"", Pacemaker::Resource::Ocf['openstack-core']], } - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - pacemaker::resource::service {'tomcat': - clone_params => 'interleave=true', - } - } - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat - pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::server_service}-clone", - second_resource => "${::neutron::params::dhcp_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::server_service], - Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], - } - pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::dhcp_agent_service}-clone", - second_resource => "${::neutron::params::metadata_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], - } - pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::metadata_agent_service}-clone", - second_resource => 'tomcat-clone', - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service], - Pacemaker::Resource::Service['tomcat']], - } - pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation': - source => "${::neutron::params::metadata_agent_service}-clone", - target => "${::neutron::params::dhcp_agent_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], - } - } - # Nova pacemaker::constraint::base { 
'keystone-then-nova-consoleauth-constraint': constraint_type => 'order', @@ -652,49 +467,12 @@ password=\"${mysql_root_password}\"", Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], } - # Ceilometer and Aodh - case downcase(hiera('ceilometer_backend')) { - /mysql/: { - pacemaker::resource::service { $::ceilometer::params::agent_central_service_name: - clone_params => 'interleave=true', - require => Pacemaker::Resource::Ocf['openstack-core'], - } - } - default: { - pacemaker::resource::service { $::ceilometer::params::agent_central_service_name: - clone_params => 'interleave=true', - require => [Pacemaker::Resource::Ocf['openstack-core'], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], - } - } - } - pacemaker::resource::service { $::ceilometer::params::collector_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::ceilometer::params::api_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name : - clone_params => 'interleave=true', - } # Fedora doesn't know `require-all` parameter for constraints yet if $::operatingsystem == 'Fedora' { - $redis_ceilometer_constraint_params = undef $redis_aodh_constraint_params = undef } else { - $redis_ceilometer_constraint_params = 'require-all=false' $redis_aodh_constraint_params = 'require-all=false' } - pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint': - constraint_type => 'order', - first_resource => 'redis-master', - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'promote', - second_action => 'start', - constraint_params => $redis_ceilometer_constraint_params, - require => [Pacemaker::Resource::Ocf['redis'], - Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]], - } pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint': constraint_type => 'order', first_resource => 'redis-master', @@ -705,49 +483,6 @@ password=\"${mysql_root_password}\"", require => [Pacemaker::Resource::Ocf['redis'], Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]], } - pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::ceilometer::params::agent_notification_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::agent_central_service_name}-clone", - second_resource => "${::ceilometer::params::collector_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - 
Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], - } - pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::collector_service_name}-clone", - second_resource => "${::ceilometer::params::api_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]], - } - pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation': - source => "${::ceilometer::params::api_service_name}-clone", - target => "${::ceilometer::params::collector_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], - } # Aodh pacemaker::resource::service { $::aodh::params::evaluator_service_name : clone_params => 'interleave=true', @@ -790,17 +525,6 @@ password=\"${mysql_root_password}\"", require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], Pacemaker::Resource::Service[$::aodh::params::listener_service_name]], } - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint': - constraint_type => 'order', - first_resource => "${::mongodb::params::service_name}-clone", - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], - } - } # gnocchi pacemaker::resource::service { $::gnocchi::params::metricd_service_name : diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp index 418c56e6..1f04c581 100644 --- a/puppet/manifests/overcloud_object.pp +++ b/puppet/manifests/overcloud_object.pp @@ -16,12 +16,6 @@ include ::tripleo::packages include ::tripleo::firewall -if hiera('step') >= 1 { - create_resources(kmod::load, hiera('kernel_modules'), {}) - create_resources(sysctl::value, hiera('sysctl_settings'), {}) - Exec <| tag == 'kmod::load' |> -> Sysctl <| |> -} - if hiera('step') >= 4 { hiera_include('object_classes') } diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp index 7fc27d60..7c7da586 100644 --- a/puppet/manifests/overcloud_volume.pp +++ b/puppet/manifests/overcloud_volume.pp @@ -16,47 +16,7 @@ include ::tripleo::packages include ::tripleo::firewall -if hiera('step') >= 1 { - - create_resources(kmod::load, hiera('kernel_modules'), {}) - create_resources(sysctl::value, hiera('sysctl_settings'), {}) - Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - -} - if hiera('step') >= 4 { - - include ::cinder - include ::cinder::config - include ::cinder::glance - include ::cinder::volume - include ::cinder::setup_test_volume - - $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true) - if $cinder_enable_iscsi { - $cinder_iscsi_backend = 'tripleo_iscsi' - - cinder::backend::iscsi { $cinder_iscsi_backend : - iscsi_ip_address => hiera('cinder_iscsi_ip_address'), - iscsi_helper => hiera('cinder_iscsi_helper'), - } - } - - $cinder_enabled_backends = any2array($cinder_iscsi_backend) - class { '::cinder::backends' : - 
enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')), - } - - $snmpd_user = hiera('snmpd_readonly_user_name') - snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), - } - class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], - } - hiera_include('volume_classes') } diff --git a/puppet/services/ceilometer-agent-central.yaml b/puppet/services/ceilometer-agent-central.yaml new file mode 100644 index 00000000..294e7dd2 --- /dev/null +++ b/puppet/services/ceilometer-agent-central.yaml @@ -0,0 +1,43 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Central Agent service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + RedisPassword: + description: The password for the redis service account. + type: string + hidden: true + RedisVirtualIPUri: + type: string + default: '' + + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Central Agent role. + value: + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::agent::central::coordination_url: + list_join: + - '' + - - 'redis://:' + - {get_param: RedisPassword} + - '@' + - {get_param: RedisVirtualIPUri} + - ':6379/' + step_config: | + include ::tripleo::profile::base::ceilometer::agent::central diff --git a/puppet/services/ceilometer-agent-notification.yaml b/puppet/services/ceilometer-agent-notification.yaml new file mode 100644 index 00000000..523dabb9 --- /dev/null +++ b/puppet/services/ceilometer-agent-notification.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Notification Agent service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Notification Agent role. + value: + config_settings: + get_attr: [CeilometerServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceilometer::agent::notification diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml new file mode 100644 index 00000000..06c2ed12 --- /dev/null +++ b/puppet/services/ceilometer-api.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer API role. + value: + config_settings: + get_attr: [CeilometerServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceilometer::api diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml new file mode 100644 index 00000000..1dea785f --- /dev/null +++ b/puppet/services/ceilometer-base.yaml @@ -0,0 +1,105 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + CeilometerBackend: + default: 'mongodb' + description: The ceilometer backend type. + type: string + CeilometerMeteringSecret: + description: Secret shared by the ceilometer services. + type: string + hidden: true + CeilometerPassword: + description: The password for the ceilometer service account. + type: string + hidden: true + CeilometerMeterDispatcher: + default: 'gnocchi' + description: Dispatcher to process meter data + type: string + constraints: + - allowed_values: ['gnocchi', 'database'] + CeilometerWorkers: + default: 0 + description: Number of workers for Ceilometer service. + type: number + CeilometerStoreEvents: + default: false + description: Whether to store events in ceilometer. + type: boolean + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + +outputs: + role_data: + description: Role data for the Ceilometer role. 
+ value: + config_settings: + ceilometer::db::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - - '://ceilometer:' + - {get_param: CeilometerPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/ceilometer' + ceilometer_backend: {get_param: CeilometerBackend} + ceilometer::metering_secret: {get_param: CeilometerMeteringSecret} + # we include db_sync class in puppet-tripleo + ceilometer::db::sync_db: false + ceilometer::api::keystone_password: {get_param: CeilometerPassword} + ceilometer::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + ceilometer::api::keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword} + ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } + ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents} + ceilometer::db::mysql::password: {get_param: CeilometerPassword} + ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher} + ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]} + ceilometer::dispatcher::gnocchi::filter_project: 'service' + ceilometer::dispatcher::gnocchi::archive_policy: 'low' + ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml' + ceilometer::keystone::auth::public_url: {get_param: [EndpointMap, CeilometerPublic, uri]} + ceilometer::keystone::auth::internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]} + ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]} + ceilometer::keystone::auth::password: {get_param: CeilometerPassword} + ceilometer::keystone::auth::region: {get_param: KeystoneRegion} + ceilometer::rabbit_userid: {get_param: RabbitUserName} + ceilometer::rabbit_password: {get_param: RabbitPassword} + ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + ceilometer::rabbit_port: {get_param: RabbitClientPort} + ceilometer::db::mysql::user: ceilometer + ceilometer::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]} + ceilometer::db::mysql::dbname: ceilometer + ceilometer::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" diff --git a/puppet/services/ceilometer-collector.yaml b/puppet/services/ceilometer-collector.yaml new file mode 100644 index 00000000..29627210 --- /dev/null +++ b/puppet/services/ceilometer-collector.yaml @@ -0,0 +1,26 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Collector service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Collector role. 
+ value: + config_settings: + get_attr: [CeilometerServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceilometer::collector diff --git a/puppet/services/ceilometer-expirer.yaml b/puppet/services/ceilometer-expirer.yaml new file mode 100644 index 00000000..796abe1f --- /dev/null +++ b/puppet/services/ceilometer-expirer.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Expirer service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Expirer role. + value: + config_settings: + get_attr: [CeilometerServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceilometer::expirer diff --git a/puppet/services/cinder-base.yaml b/puppet/services/cinder-base.yaml index 85682448..f6d2b645 100644 --- a/puppet/services/cinder-base.yaml +++ b/puppet/services/cinder-base.yaml @@ -56,3 +56,9 @@ outputs: cinder::rabbit_userid: {get_param: RabbitUserName} cinder::rabbit_password: {get_param: RabbitPassword} cinder::rabbit_port: {get_param: RabbitClientPort} + cinder::db::mysql::user: cinder + cinder::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]} + cinder::db::mysql::dbname: cinder + cinder::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" diff --git a/puppet/services/glance-registry.yaml b/puppet/services/glance-registry.yaml index 6f2f0372..d71157f9 100644 --- a/puppet/services/glance-registry.yaml +++ b/puppet/services/glance-registry.yaml @@ -41,5 +41,12 @@ outputs: glance::registry::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } glance::registry::debug: {get_param: Debug} glance::registry::workers: {get_param: GlanceWorkers} + glance::db::mysql::user: glance + glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]} + glance::db::mysql::dbname: glance + glance::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + step_config: | include ::tripleo::profile::base::glance::registry diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml index 4a5ec2c0..77af55ef 100644 --- a/puppet/services/heat-engine.yaml +++ b/puppet/services/heat-engine.yaml @@ -54,5 +54,11 @@ outputs: heat::keystone_password: {get_param: HeatPassword} heat::db::mysql::password: {get_param: HeatPassword} heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword} + heat::db::mysql::user: heat + heat::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]} + heat::db::mysql::dbname: heat + heat::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" step_config: | include ::tripleo::profile::base::heat::engine diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml new file mode 100644 index 00000000..01cf5791 --- /dev/null +++ b/puppet/services/horizon.yaml @@ -0,0 +1,34 @@ +heat_template_version: 2016-04-08 + +description: > + Horizon service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + HorizonAllowedHosts: + default: '*' + description: A list of IP/Hostname allowed to connect to horizon + type: comma_delimited_list + NeutronMechanismDrivers: + default: 'openvswitch' + description: | + The mechanism drivers for the Neutron tenant network. + type: comma_delimited_list + +outputs: + role_data: + description: Role data for the Horizon role. + value: + config_settings: + horizon::allowed_hosts: {get_param: HorizonAllowedHosts} + neutron::plugins::ml2::mechanism_drivers: + str_replace: + template: MECHANISMS + params: + MECHANISMS: {get_param: NeutronMechanismDrivers} + step_config: | + include ::tripleo::profile::base::horizon diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml index e1626d5b..5ab03fcb 100644 --- a/puppet/services/ironic-api.yaml +++ b/puppet/services/ironic-api.yaml @@ -38,6 +38,5 @@ outputs: ironic::keystone::auth::internal_url: {get_param: [EndpointMap, IronicInternal, uri]} ironic::keystone::auth::admin_url: {get_param: [EndpointMap, IronicAdmin, uri]} ironic::keystone::auth::password: {get_param: IronicPassword } - step_config: | include ::tripleo::profile::base::ironic::api diff --git a/puppet/services/ironic-base.yaml b/puppet/services/ironic-base.yaml index 0eaa53cb..df82bb6c 100644 --- a/puppet/services/ironic-base.yaml +++ b/puppet/services/ironic-base.yaml @@ -41,7 +41,7 @@ outputs: description: Role data for the Ironic role. value: config_settings: - ironic_dsn: &ironic_dsn + ironic::database_connection: list_join: - '' - - {get_param: [EndpointMap, MysqlInternal, protocol]} @@ -51,14 +51,19 @@ outputs: - {get_param: [EndpointMap, MysqlInternal, host]} - '/ironic' ironic::admin_tenant_name: 'service' - ironic::database_connection: *ironic_dsn ironic::debug: {get_param: Debug} ironic::rabbit_userid: {get_param: RabbitUserName} ironic::rabbit_password: {get_param: RabbitPassword} ironic::rabbit_port: {get_param: RabbitClientPort} ironic::rabbit_use_ssl: {get_param: RabbitClientUseSSL} ironic::db::mysql::password: {get_param: IronicPassword} - ironic::keystone::auth::tenant: 'service' + ironic::db::mysql::user: ironic + ironic::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + ironic::db::mysql::dbname: ironic + ironic::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + ironic::keystone::auth::tenant: 'service' step_config: | include ::tripleo::profile::base::ironic diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml index 3fb3d9fd..26d4e0ed 100644 --- a/puppet/services/ironic-conductor.yaml +++ b/puppet/services/ironic-conductor.yaml @@ -31,6 +31,5 @@ outputs: # Prevent tftp_server from defaulting to my_ip setting, which is # controller VIP, not a real IP. ironic::drivers::pxe::tftp_server: {get_input: ironic_api_network} - step_config: | include ::tripleo::profile::base::ironic::conductor diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml new file mode 100644 index 00000000..b429c5ea --- /dev/null +++ b/puppet/services/kernel.yaml @@ -0,0 +1,18 @@ +heat_template_version: 2016-04-08 + +description: > + Load kernel modules with kmod and configure kernel options with sysctl. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + +outputs: + role_data: + description: Role data for the Kernel modules + value: + step_config: | + include ::tripleo::profile::base::kernel diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml index 25d92d4a..0ad6025c 100644 --- a/puppet/services/keystone.yaml +++ b/puppet/services/keystone.yaml @@ -128,5 +128,12 @@ outputs: keystone::public_workers: {get_param: KeystoneWorkers} keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge} keystone::public_endpoint: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]} + keystone::db::mysql::user: keystone + keystone::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + keystone::db::mysql::dbname: keystone + keystone::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + step_config: | include ::tripleo::profile::base::keystone diff --git a/puppet/services/neutron-compute-plugin-midonet.yaml b/puppet/services/neutron-compute-plugin-midonet.yaml new file mode 100644 index 00000000..c3b65c49 --- /dev/null +++ b/puppet/services/neutron-compute-plugin-midonet.yaml @@ -0,0 +1,19 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Compute Midonet plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Role data for the Neutron Compute Plumgrid plugin + value: + config_settings: + step_config: | + include ::tripleo::profile::base::neutron::agents::midonet diff --git a/puppet/services/neutron-compute-plugin-nuage.yaml b/puppet/services/neutron-compute-plugin-nuage.yaml new file mode 100644 index 00000000..c5fbeeca --- /dev/null +++ b/puppet/services/neutron-compute-plugin-nuage.yaml @@ -0,0 +1,26 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Compute Nuage plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NovaPassword: + description: The password for the nova service account, used by nova-api. + type: string + hidden: true + +outputs: + role_data: + description: Role data for the Neutron Compute Nuage plugin + value: + config_settings: + tripleo::profile::base::neutron::agents::nuage::nova_os_tenant_name: 'service' + tripleo::profile::base::neutron::agents::nuage::nova_os_password: {get_param: NovaPassword} + tripleo::profile::base::neutron::agents::nuage::nova_auth_ip: {get_param: [EndpointMap, KeystoneInternal, host]} + step_config: | + include ::tripleo::profile::base::neutron::agents::nuage diff --git a/puppet/services/neutron-compute-plugin-opencontrail.yaml b/puppet/services/neutron-compute-plugin-opencontrail.yaml new file mode 100644 index 00000000..2c79c56b --- /dev/null +++ b/puppet/services/neutron-compute-plugin-opencontrail.yaml @@ -0,0 +1,19 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Compute OpenContrail plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + +outputs: + role_data: + description: Role data for the Neutron Compute OpenContrail plugin + value: + config_settings: + step_config: | + include ::tripleo::profile::base::neutron::opencontrail::vrouter diff --git a/puppet/services/neutron-compute-plugin-plumgrid.yaml b/puppet/services/neutron-compute-plugin-plumgrid.yaml new file mode 100644 index 00000000..b8ec389e --- /dev/null +++ b/puppet/services/neutron-compute-plugin-plumgrid.yaml @@ -0,0 +1,19 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Compute Plumgrid plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Role data for the Neutron Compute Plumgrid plugin + value: + config_settings: + step_config: | + include tripleo::profile::base::neutron::plumgrid diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml index 80ccf1c2..5d02bc90 100644 --- a/puppet/services/neutron-dhcp.yaml +++ b/puppet/services/neutron-dhcp.yaml @@ -13,22 +13,6 @@ parameters: default: 'False' description: If True, DHCP provide metadata route to VM. type: string - NeutronDnsmasqOptions: - default: 'dhcp-option-force=26,%MTU%' - description: > - Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU - to be set to the value of NeutronTenantMtu, which should be set to account - for tunnel overhead. - type: string - NeutronTenantMtu: - description: > - The default MTU for tenant networks. For VXLAN/GRE tunneling, this should - be at least 50 bytes smaller than the MTU on the physical network. This - value will be used to set the MTU on the virtual Ethernet device. - This value will be used to construct the NeutronDnsmasqOptions, since that - will determine the MTU that is assigned to the VM host through DHCP. - default: "1400" - type: string resources: @@ -42,12 +26,6 @@ outputs: config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] - - neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf - tripleo::profile::base::neutron::dhcp: - str_replace: - template: {get_param: NeutronDnsmasqOptions} - params: - '%MTU%': {get_param: NeutronTenantMtu} - neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata} + - neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata} step_config: | include tripleo::profile::base::neutron::dhcp diff --git a/puppet/services/neutron-midonet.yaml b/puppet/services/neutron-midonet.yaml new file mode 100644 index 00000000..736c01c3 --- /dev/null +++ b/puppet/services/neutron-midonet.yaml @@ -0,0 +1,48 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Midonet plugin and services + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NeutronPassword: + description: The password for the neutron service and db account, used by neutron agents. + type: string + hidden: true + AdminPassword: + description: The password for the keystone admin account, used for monitoring, querying neutron etc. + type: string + hidden: true + AdminToken: + description: The keystone auth secret and db password. 
+ type: string + hidden: true + EnableZookeeperOnController: + label: Enable Zookeeper On Controller + description: 'Whether enable Zookeeper cluster on Controller' + type: boolean + default: false + EnableCassandraOnController: + label: Enable Cassandra On Controller + description: 'Whether enable Cassandra cluster on Controller' + type: boolean + default: false + +outputs: + role_data: + description: Role data for the Neutron Midonet plugin and services + value: + config_settings: + tripleo::profile::base::neutron::midonet::admin_password: {get_param: AdminPassword} + tripleo::profile::base::neutron::midonet::keystone_admin_token: {get_param: AdminToken} + tripleo::profile::base::neutron::midonet::neutron_auth_password: {get_param: NeutronPassword} + tripleo::profile::base::neutron::midonet::zk_on_controller: {get_param: EnableZookeeperOnController} + tripleo::profile::base::neutron::midonet::neutron_auth_tenant: 'service' + enable_cassandra_on_controller: {get_param: EnableCassandraOnController} + neutron::service_plugins: [] + step_config: | + include tripleo::profile::base::neutron::plugins::midonet diff --git a/puppet/services/neutron-server.yaml b/puppet/services/neutron-server.yaml index 6299c39e..d759d420 100644 --- a/puppet/services/neutron-server.yaml +++ b/puppet/services/neutron-server.yaml @@ -42,7 +42,7 @@ outputs: config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] - neutron_dsn: &neutron_dsn + neutron::server::database_connection: list_join: - '' - - {get_param: [EndpointMap, MysqlInternal, protocol]} @@ -54,7 +54,6 @@ outputs: neutron::server::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } neutron::server::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} neutron::server::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } - neutron::server::database_connection: *neutron_dsn neutron::server::api_workers: {get_param: NeutronWorkers} neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} neutron::server::l3_ha: {get_param: NeutronL3HA} @@ -66,5 +65,11 @@ outputs: neutron::server::notifications::project_name: 'service' neutron::server::notifications::password: {get_param: NovaPassword} neutron::db::mysql::password: {get_param: NeutronPassword} + neutron::db::mysql::user: neutron + neutron::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host]} + neutron::db::mysql::dbname: ovs_neutron + neutron::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" step_config: | include tripleo::profile::base::neutron::server diff --git a/puppet/services/pacemaker/ceilometer-agent-central.yaml b/puppet/services/pacemaker/ceilometer-agent-central.yaml new file mode 100644 index 00000000..8fb7bd23 --- /dev/null +++ b/puppet/services/pacemaker/ceilometer-agent-central.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Central Agent service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ../ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Central Agent pacemaker role. 
+ value: + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::agent::central::manage_service: false + ceilometer::agent::central::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::ceilometer::agent::central diff --git a/puppet/services/pacemaker/ceilometer-agent-notification.yaml b/puppet/services/pacemaker/ceilometer-agent-notification.yaml new file mode 100644 index 00000000..54709783 --- /dev/null +++ b/puppet/services/pacemaker/ceilometer-agent-notification.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Notification Agent service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ../ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Notification Agent pacemaker role. + value: + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::agent::notification::manage_service: false + ceilometer::agent::notification::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::ceilometer::agent::notification diff --git a/puppet/services/pacemaker/ceilometer-api.yaml b/puppet/services/pacemaker/ceilometer-api.yaml new file mode 100644 index 00000000..d45b1578 --- /dev/null +++ b/puppet/services/pacemaker/ceilometer-api.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer API service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ../ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer API pacemaker role. + value: + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::api::manage_service: false + ceilometer::api::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::ceilometer::api diff --git a/puppet/services/pacemaker/ceilometer-collector.yaml b/puppet/services/pacemaker/ceilometer-collector.yaml new file mode 100644 index 00000000..487a557c --- /dev/null +++ b/puppet/services/pacemaker/ceilometer-collector.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Collector service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ../ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Collector pacemaker role. 
+ value: + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::collector::manage_service: false + ceilometer::collector::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::ceilometer::collector diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml index 5833c42d..780c295e 100644 --- a/puppet/services/pacemaker/heat-api-cfn.yaml +++ b/puppet/services/pacemaker/heat-api-cfn.yaml @@ -25,7 +25,5 @@ outputs: - get_attr: [HeatApiCfnBase, role_data, config_settings] - heat::api_cfn::manage_service: false heat::api_cfn::enabled: false - step_config: - # No puppet manifests since heat-api-cfn is included in - # ::tripleo::profile::pacemaker::heat which is maintained alongside of - # pacemaker/heat-api.yaml. + step_config: | + include ::tripleo::profile::pacemaker::heat::api_cfn diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml index 8b67702c..2fa82fe7 100644 --- a/puppet/services/pacemaker/heat-api-cloudwatch.yaml +++ b/puppet/services/pacemaker/heat-api-cloudwatch.yaml @@ -25,7 +25,5 @@ outputs: - get_attr: [HeatApiCloudwatchBase, role_data, config_settings] - heat::api_cloudwatch::manage_service: false heat::api_cloudwatch::enabled: false - step_config: - # No puppet manifests since heat-api-cloudwatch is included in - # ::tripleo::profile::pacemaker::heat which is maintained alongside of - # pacemaker/heat-api.yaml. + step_config: | + include ::tripleo::profile::pacemaker::heat::api_cloudwatch diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml index 6628e8dd..be897a55 100644 --- a/puppet/services/pacemaker/heat-api.yaml +++ b/puppet/services/pacemaker/heat-api.yaml @@ -26,4 +26,4 @@ outputs: - heat::api::manage_service: false heat::api::enabled: false step_config: | - include ::tripleo::profile::pacemaker::heat + include ::tripleo::profile::pacemaker::heat::api diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml index e1195780..a8ed5c0c 100644 --- a/puppet/services/pacemaker/heat-engine.yaml +++ b/puppet/services/pacemaker/heat-engine.yaml @@ -26,7 +26,5 @@ outputs: - get_attr: [HeatEngineBase, role_data, config_settings] - heat::engine::manage_service: false heat::engine::enabled: false - step_config: - # No puppet manifests since heat-engine is included in - # ::tripleo::profile::pacemaker::heat which is maintained alongside of - # pacemaker/heat-api.yaml. + step_config: | + include ::tripleo::profile::pacemaker::heat::engine diff --git a/puppet/services/pacemaker/neutron-midonet.yaml b/puppet/services/pacemaker/neutron-midonet.yaml new file mode 100644 index 00000000..f9fd992c --- /dev/null +++ b/puppet/services/pacemaker/neutron-midonet.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Midonet with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NeutronMidonetBase: + type: ../neutron-midonet.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Neutron Midonet plugin. 
+ value: + config_settings: + map_merge: + - get_attr: [NeutronMidonetBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::pacemaker::neutron::plugins::midonet diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml index 17ef49fa..f0411a35 100644 --- a/puppet/services/sahara-engine.yaml +++ b/puppet/services/sahara-engine.yaml @@ -37,5 +37,11 @@ outputs: - '/sahara' sahara::database_connection: *sahara_dsn sahara::db::mysql::password: {get_param: SaharaPassword} + sahara::db::mysql::user: sahara + sahara::db::mysql::host: {get_param: [EndpointMap, MysqlVirtual, host]} + sahara::db::mysql::dbname: sahara + sahara::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" step_config: | include ::tripleo::profile::base::sahara::engine diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml index a86aeaf5..930b9e3d 100644 --- a/puppet/services/swift-proxy.yaml +++ b/puppet/services/swift-proxy.yaml @@ -17,6 +17,10 @@ parameters: description: The password for the swift service account, used by the swift proxy services. type: string hidden: true + SwiftProxyNodeTimeout: + default: 60 + description: Timeout for requests going from swift-proxy to swift a/c/o services. + type: number SwiftWorkers: default: 0 description: Number of workers for Swift service. @@ -36,6 +40,7 @@ outputs: swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} swift::proxy::authtoken::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} swift::proxy::authtoken::admin_password: {get_param: SwiftPassword} + swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout} swift::proxy::workers: {get_param: SwiftWorkers} swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]} swift::keystone::auth::internal_url: {get_param: [EndpointMap, SwiftInternal, uri]} diff --git a/puppet/services/time/ntp.yaml b/puppet/services/time/ntp.yaml index dbef6f91..930dca41 100644 --- a/puppet/services/time/ntp.yaml +++ b/puppet/services/time/ntp.yaml @@ -12,7 +12,7 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - NtpServers: + NtpServer: default: [] description: NTP servers type: comma_delimited_list @@ -22,6 +22,6 @@ outputs: description: Role ntp using composable services. value: config_settings: - ntp::ntpservers: {get_param: NtpServers} + ntp::ntpservers: {get_param: NtpServer} step_config: | - include ::ntp
\ No newline at end of file + include ::ntp diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml index 6257fbe6..9d049bd3 100644 --- a/puppet/swift-storage.yaml +++ b/puppet/swift-storage.yaml @@ -112,11 +112,18 @@ parameters: ServiceConfigSettings: type: json default: {} + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 resources: SwiftStorage: type: OS::Nova::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} flavor: {get_param: Flavor} |
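The new puppet/services/*.yaml files collected above (ceilometer-*, horizon, kernel, the neutron compute plugins) all share one composable-service shape: a base template owns the common config_settings, and each per-service template instantiates it as a nested resource, merges in its own keys with map_merge, and emits the profile to include as step_config. A minimal sketch of that shape, using a hypothetical "example" service and "./example-base.yaml" path rather than any template from this change:

    heat_template_version: 2016-04-08

    description: >
      Sketch of the composable-service layout (hypothetical "example" service).

    parameters:
      EndpointMap:
        default: {}
        description: Mapping of service endpoint -> protocol. Typically set
                     via parameter_defaults in the resource registry.
        type: json

    resources:
      # Shared settings come from a base template, the way
      # ceilometer-collector.yaml reuses ceilometer-base.yaml.
      ExampleServiceBase:
        type: ./example-base.yaml
        properties:
          EndpointMap: {get_param: EndpointMap}

    outputs:
      role_data:
        description: Role data for the hypothetical Example role.
        value:
          config_settings:
            map_merge:
              - get_attr: [ExampleServiceBase, role_data, config_settings]
              - example::api::workers: 2    # service-specific addition
          step_config: |
            include ::tripleo::profile::base::example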
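In ceilometer-agent-central.yaml the Redis coordination URL is assembled with list_join from RedisPassword and RedisVirtualIPUri. The resulting hieradata value has roughly this shape (the password and VIP below are placeholders, not values from the change):

    ceilometer::agent::central::coordination_url: 'redis://:SECRETPASSWORD@192.0.2.11:6379/'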
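Several templates (ceilometer-base, cinder-base, glance-registry, heat-engine, ironic-base, keystone, neutron-server, sahara-engine) gain <service>::db::mysql::* settings. The quoted "%{hiera('mysql_bind_host')}" entry in allowed_hosts is passed through Heat verbatim and only expanded by hiera on the node at lookup time, which is why it is a literal string rather than a get_param. For one service this renders to hieradata along these lines (the host value is illustrative only):

    glance::db::mysql::user: glance
    glance::db::mysql::host: 192.0.2.10           # MysqlInternal host from EndpointMap
    glance::db::mysql::dbname: glance
    glance::db::mysql::allowed_hosts:
      - '%'
      - "%{hiera('mysql_bind_host')}"             # interpolated by hiera, not by Heat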
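The pacemaker/ variants (ceilometer-*, heat-api*, heat-engine, neutron-midonet) wrap the same base templates but flip manage_service and enabled to false so the cluster, not systemd, owns the service, and they include a pacemaker profile instead of the base one. The overriding fragment, again with a hypothetical service name:

    outputs:
      role_data:
        description: Role data for the hypothetical Example pacemaker role.
        value:
          config_settings:
            map_merge:
              - get_attr: [ExampleServiceBase, role_data, config_settings]
              - example::api::manage_service: false
                example::api::enabled: false
          step_config: |
            include ::tripleo::profile::pacemaker::example::api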
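Finally, the swift-storage.yaml hunk above exposes a ConfigCommand parameter and wires it into the server's os-collect-config metadata, defaulting to 'os-refresh-config --timeout 14400'. Because it is an ordinary template parameter it can be overridden per deployment; a hypothetical environment file (the 7200-second timeout is only an example value):

    parameter_defaults:
      ConfigCommand: 'os-refresh-config --timeout 7200'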