Diffstat (limited to 'puppet')
29 files changed, 729 insertions, 488 deletions
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 90eb1b09..b065ddd2 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -67,6 +67,12 @@ parameters:
     description: >
       Setting to a previously unused value during stack-update will trigger
       package update on all nodes
+  StackAction:
+    type: string
+    description: >
+      Heat action performed on the top-level stack.
+    constraints:
+    - allowed_values: ['CREATE', 'UPDATE']
 
 resources:
 
@@ -303,6 +309,7 @@ resources:
                 deploy_identifier: {get_param: DeployIdentifier}
                 update_identifier: {get_param: UpdateIdentifier}
+                stack_action: {get_param: StackAction}
 
 outputs:
   config_id:
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index f0eb71e4..d2b90c59 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -195,28 +195,23 @@ resources:
     properties:
       ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
       ExternalIp: {get_attr: [ExternalPort, ip_address]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
       ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
       InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
       StorageIp: {get_attr: [StoragePort, ip_address]}
+      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
       StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
       StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
       TenantIp: {get_attr: [TenantPort, ip_address]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
       TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
       ManagementIp: {get_attr: [ManagementPort, ip_address]}
-      ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
-
-  NetIpSubnetMap:
-    type: OS::TripleO::Network::Ports::NetIpSubnetMap
-    properties:
-      ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
-      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
-      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
-      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
-      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
-      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
       ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
+      ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -238,8 +233,8 @@ resources:
         timezone: {get_param: TimeZone}
        enable_package_install: {get_param: EnablePackageInstall}
        enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
-        ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
-        ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
+        ceph_cluster_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
+        ceph_public_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
 
   CephStorageConfig:
     type: OS::Heat::StructuredConfig
@@ -256,10 +251,16 @@ resources:
            - ceph
            - '"%{::osfamily}"'
            - common
+           - network
          merge_behavior: deeper
          datafiles:
            common:
              raw_data: {get_file: hieradata/common.yaml}
+           network:
+             mapped_data:
+               net_ip_map: {get_attr: [NetIpMap, net_ip_map]}
+               net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]}
+               net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]}
            ceph_extraconfig:
              mapped_data: {get_param: CephStorageExtraConfig}
            extraconfig:
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index c1a04e24..13914878 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -246,16 +246,22 @@ resources:
     properties:
       ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
       ExternalIp: {get_attr: [ExternalPort, ip_address]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
       ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
       InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
       StorageIp: {get_attr: [StoragePort, ip_address]}
+      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
       StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
       StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
       TenantIp: {get_attr: [TenantPort, ip_address]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
       TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
       ManagementIp: {get_attr: [ManagementPort, ip_address]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
       ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
 
   NetworkDeployment:
@@ -316,10 +322,16 @@ resources:
            - all_nodes # provided by allNodesConfig
            - '"%{::osfamily}"'
            - common
+           - network
          merge_behavior: deeper
          datafiles:
            common:
              raw_data: {get_file: hieradata/common.yaml}
+           network:
+             mapped_data:
+               net_ip_map: {get_attr: [NetIpMap, net_ip_map]}
+               net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]}
+               net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]}
            volume_extraconfig:
              mapped_data: {get_param: BlockStorageExtraConfig}
            extraconfig:
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 4c18067a..e56deefd 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -430,16 +430,22 @@ resources:
     properties:
       ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
       ExternalIp: {get_attr: [ExternalPort, ip_address]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
       ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
       InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
       StorageIp: {get_attr: [StoragePort, ip_address]}
+      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
       StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
       StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
       TenantIp: {get_attr: [TenantPort, ip_address]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
       TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
       ManagementIp: {get_attr: [ManagementPort, ip_address]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
       ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
 
   NetworkConfig:
@@ -481,6 +487,7 @@ resources:
            - all_nodes # provided by allNodesConfig
            - '"%{::osfamily}"'
            - common
+           - network
            - neutron_bigswitch_data # Optionally provided by ComputeExtraConfigPre
            - cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre
            - nova_nuage_data # Optionally provided by ComputeExtraConfigPre
@@ -494,6 +501,11 @@ resources:
              mapped_data: {get_param: ExtraConfig}
            common:
              raw_data: {get_file: hieradata/common.yaml}
+           network:
+             mapped_data:
+               net_ip_map: {get_attr: [NetIpMap, net_ip_map]}
+               net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]}
+               net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]}
            ceph:
              raw_data: {get_file: hieradata/ceph.yaml}
            compute:
diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml
index 80b08a06..36f9b4f8 100644
--- a/puppet/controller-post.yaml
+++ b/puppet/controller-post.yaml
@@ -55,7 +55,6 @@ resources:
       input_values:
         step: 1
         update_identifier: {get_param: NodeConfigIdentifiers}
-      actions: ['CREATE'] # no need for two passes on an UPDATE
 
   ControllerServicesBaseDeployment_Step2:
     type: OS::Heat::StructuredDeployments
@@ -67,7 +66,6 @@ resources:
       input_values:
         step: 2
         update_identifier: {get_param: NodeConfigIdentifiers}
-      actions: ['CREATE'] # no need for two passes on an UPDATE
 
   ControllerOvercloudServicesDeployment_Step3:
     type: OS::Heat::StructuredDeployments
@@ -102,31 +100,9 @@ resources:
         step: 5
         update_identifier: {get_param: NodeConfigIdentifiers}
 
-  ControllerOvercloudServicesDeployment_Step6:
-    type: OS::Heat::StructuredDeployments
-    depends_on: ControllerOvercloudServicesDeployment_Step5
-    properties:
-      name: ControllerOvercloudServicesDeployment_Step6
-      servers: {get_param: servers}
-      config: {get_resource: ControllerPuppetConfig}
-      input_values:
-        step: 6
-        update_identifier: {get_param: NodeConfigIdentifiers}
-
-  ControllerOvercloudServicesDeployment_Step7:
-    type: OS::Heat::StructuredDeployments
-    depends_on: ControllerOvercloudServicesDeployment_Step6
-    properties:
-      name: ControllerOvercloudServicesDeployment_Step7
-      servers: {get_param: servers}
-      config: {get_resource: ControllerPuppetConfig}
-      input_values:
-        step: 7
-        update_identifier: {get_param: NodeConfigIdentifiers}
-
   ControllerPostPuppet:
     type: OS::TripleO::Tasks::ControllerPostPuppet
-    depends_on: ControllerOvercloudServicesDeployment_Step7
+    depends_on: ControllerOvercloudServicesDeployment_Step5
     properties:
       servers: {get_param: servers}
       input_values:
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index bf196d24..f3724764 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -310,14 +310,13 @@ parameters:
     description: Configures MySQL max_connections config setting
     type: number
     default: 4096
+  MysqlClustercheckPassword:
+    type: string
+    hidden: true
   MysqlRootPassword:
     type: string
     hidden: true
     default: ''  # Has to be here because of the ignored empty value bug
-  NeutronExternalNetworkBridge:
-    description: Name of bridge used for external network traffic.
-    type: string
-    default: 'br-ex'
   NeutronBridgeMappings:
     description: >
       The OVS logical->physical bridge mappings to use. See the Neutron
@@ -328,22 +327,6 @@ parameters:
       scripts or be sure to keep 'datacentre' as a mapping network name.
     type: comma_delimited_list
     default: "datacentre:br-ex"
-  NeutronDnsmasqOptions:
-    default: 'dhcp-option-force=26,1400'
-    description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the gre tunnel overhead.
-    type: string
-  NeutronEnableDHCPAgent:
-    description: Knob to enable/disable DHCP Agent
-    type: boolean
-    default: true
-  NeutronEnableL3Agent:
-    description: Knob to enable/disable L3 agent
-    type: boolean
-    default: true
-  NeutronEnableMetadataAgent:
-    description: Knob to enable/disable Metadata agent
-    type: boolean
-    default: true
   NeutronEnableOVSAgent:
     description: Knob to enable/disable OVS Agent
     type: boolean
@@ -356,10 +339,6 @@ parameters:
     default: 'False'
     description: Whether to enable l3-agent HA
     type: string
-  NeutronDhcpAgentsPerNetwork:
-    type: number
-    default: 3
-    description: The number of neutron dhcp agents to schedule per network
   NeutronDVR:
     default: 'False'
     description: Whether to configure Neutron Distributed Virtual Routers
@@ -394,10 +373,6 @@ parameters:
     default: 'True'
     description: Allow automatic l3-agent failover
     type: string
-  NeutronEnableIsolatedMetadata:
-    default: 'False'
-    description: If True, DHCP provide metadata route to VM.
-    type: string
   NeutronEnableTunnelling:
     type: string
     default: "True"
@@ -562,14 +537,6 @@ parameters:
     default: 5672
     description: Set rabbit subscriber port, change this if using SSL
     type: number
-  RabbitFDLimit:
-    default: 16384
-    description: Configures RabbitMQ FD limit
-    type: string
-  RabbitIPv6:
-    default: false
-    description: Enable IPv6 in RabbitMQ
-    type: boolean
   RedisPassword:
     type: string
     description: The password to access the Redis service
@@ -808,28 +775,23 @@ resources:
     properties:
       ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]}
       ExternalIp: {get_attr: [ExternalPort, ip_address]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
       ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
       InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
       StorageIp: {get_attr: [StoragePort, ip_address]}
+      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
       StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
       StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
       TenantIp: {get_attr: [TenantPort, ip_address]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
       TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
       ManagementIp: {get_attr: [ManagementPort, ip_address]}
-      ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
-
-  NetIpSubnetMap:
-    type: OS::TripleO::Network::Ports::NetIpSubnetMap
-    properties:
-      ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]}
-      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
-      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
-      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
-      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
-      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
       ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
+      ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
 
   NetworkConfig:
     type: OS::TripleO::Controller::Net::SoftwareConfig
@@ -886,7 +848,6 @@ resources:
        swift_workers: {get_param: SwiftWorkers}
        neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
        neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
-        neutron_enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
        haproxy_log_address: {get_param: HAProxySyslogAddress}
        haproxy_stats_password: {get_param: HAProxyStatsPassword}
        haproxy_stats_user: {get_param: HAProxyStatsUser}
@@ -908,6 +869,9 @@ resources:
            - - 'http://'
              - {get_param: HeatApiVirtualIPUri}
              - ':8000/v1/waitcondition'
+        heat_public_url: {get_param: [EndpointMap, HeatPublic, uri]}
+        heat_internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
+        heat_admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
        heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey}
        heat_enable_db_purge: {get_param: HeatEnableDBPurge}
        horizon_allowed_hosts: {get_param: HorizonAllowedHosts}
@@ -937,6 +901,12 @@ resources:
            - '@'
            - {get_param: MysqlVirtualIPUri}
            - '/cinder'
+        cinder_public_url: {get_param: [EndpointMap, CinderPublic, uri]}
+        cinder_internal_url: {get_param: [EndpointMap, CinderInternal, uri]}
+        cinder_admin_url: {get_param: [EndpointMap, CinderAdmin, uri]}
+        cinder_public_url_v2: {get_param: [EndpointMap, CinderV2Public, uri]}
+        cinder_internal_url_v2: {get_param: [EndpointMap, CinderV2Internal, uri]}
+        cinder_admin_url_v2: {get_param: [EndpointMap, CinderV2Admin, uri]}
        heat_password: {get_param: HeatPassword}
        heat_stack_domain_admin_password: {get_param: HeatStackDomainAdminPassword}
        heat_dsn:
@@ -960,6 +930,7 @@ resources:
        mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize}
        mysql_max_connections: {get_param: MysqlMaxConnections}
        mysql_root_password: {get_param: MysqlRootPassword}
+        mysql_clustercheck_password: {get_param: MysqlClustercheckPassword}
        mysql_cluster_name:
          str_replace:
            template: tripleo-CLUSTER
@@ -984,9 +955,6 @@ resources:
            template: DRIVERS
            params:
              DRIVERS: {get_param: NeutronTypeDrivers}
-        neutron_enable_dhcp_agent: {get_param: NeutronEnableDHCPAgent}
-        neutron_enable_l3_agent: {get_param: NeutronEnableL3Agent}
-        neutron_enable_metadata_agent: {get_param: NeutronEnableMetadataAgent}
        neutron_enable_ovs_agent: {get_param: NeutronEnableOVSAgent}
        neutron_mechanism_drivers:
          str_replace:
@@ -995,7 +963,6 @@ resources:
              MECHANISMS: {get_param: NeutronMechanismDrivers}
        neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
        neutron_l3_ha: {get_param: NeutronL3HA}
-        neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
        neutron_network_vlan_ranges:
          str_replace:
            template: RANGES
@@ -1006,7 +973,6 @@ resources:
            template: MAPPINGS
            params:
              MAPPINGS: {get_param: NeutronBridgeMappings}
-        neutron_external_network_bridge: {get_param: NeutronExternalNetworkBridge}
        neutron_public_interface: {get_param: NeutronPublicInterface}
        neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
        neutron_public_interface_default_route: {get_param: NeutronPublicInterfaceDefaultRoute}
@@ -1043,7 +1009,6 @@ resources:
              AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions}
        neutron_password: {get_param: NeutronPassword}
        neutron_tenant_mtu: {get_param: NeutronTenantMtu}
-        neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions}
        neutron_dsn:
          list_join:
            - ''
@@ -1062,6 +1027,9 @@ resources:
        ceilometer_password: {get_param: CeilometerPassword}
        ceilometer_store_events: {get_param: CeilometerStoreEvents}
        aodh_password: {get_param: AodhPassword}
+        aodh_internal_url: {get_param: [EndpointMap, AodhInternal, uri]}
+        aodh_public_url: {get_param: [EndpointMap, AodhPublic, uri]}
+        aodh_admin_url: {get_param: [EndpointMap, AodhAdmin, uri]}
        ceilometer_meter_dispatcher: {get_param: CeilometerMeterDispatcher}
        gnocchi_password: {get_param: GnocchiPassword}
        gnocchi_backend: {get_param: GnocchiBackend}
@@ -1091,6 +1059,11 @@ resources:
            - {get_param: MysqlVirtualIPUri}
            - '/gnocchi'
        gnocchi_internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]}
+        gnocchi_public_url: {get_param: [EndpointMap, GnocchiPublic, uri]}
+        gnocchi_admin_url: {get_param: [EndpointMap, GnocchiAdmin, uri]}
+        ceilometer_public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
+        ceilometer_internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
+        ceilometer_admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
        snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
        snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
        nova_enable_db_purge: {get_param: NovaEnableDBPurge}
@@ -1116,6 +1089,9 @@ resources:
            - '/nova_api'
        upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute}
        instance_name_template: {get_param: InstanceNameTemplate}
+        nova_public_url: {get_param: [EndpointMap, NovaPublic, uri]}
+        nova_internal_url: {get_param: [EndpointMap, NovaInternal, uri]}
+        nova_admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
        fencing_config: {get_param: FencingConfig}
        pcsd_password: {get_param: PcsdPassword}
        rabbit_username: {get_param: RabbitUserName}
@@ -1123,8 +1099,6 @@ resources:
        rabbit_cookie: {get_param: RabbitCookie}
        rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
        rabbit_client_port: {get_param: RabbitClientPort}
-        rabbit_ipv6: {get_param: RabbitIPv6}
-        rabbit_fd_limit: {get_param: RabbitFDLimit}
        mongodb_no_journal: {get_param: MongoDbNoJournal}
        mongodb_ipv6: {get_param: MongoDbIPv6}
        ntp_servers: {get_param: NtpServer}
@@ -1138,9 +1112,18 @@ resources:
        swift_replicas: {get_param: SwiftReplicas}
        swift_min_part_hours: {get_param: SwiftMinPartHours}
        swift_mount_check: {get_param: SwiftMountCheck}
+        swift_public_url: {get_param: [EndpointMap, SwiftPublic, uri]}
+        swift_internal_url: {get_param: [EndpointMap, SwiftInternal, uri]}
+        swift_admin_url: {get_param: [EndpointMap, SwiftAdmin, uri]}
+        swift_public_url_s3: {get_param: [EndpointMap, SwiftS3Public, uri]}
+        swift_internal_url_s3: {get_param: [EndpointMap, SwiftS3Internal, uri]}
+        swift_admin_url_s3: {get_param: [EndpointMap, SwiftS3Admin, uri]}
        enable_package_install: {get_param: EnablePackageInstall}
        enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
        sahara_password: {get_param: SaharaPassword}
+        sahara_public_url: {get_param: [EndpointMap, SaharaPublic, uri]}
+        sahara_internal_url: {get_param: [EndpointMap, SaharaInternal, uri]}
+        sahara_admin_url: {get_param: [EndpointMap, SaharaAdmin, uri]}
        sahara_dsn:
          list_join:
            - ''
@@ -1177,7 +1160,7 @@ resources:
          str_replace:
            template: "['SUBNET']"
            params:
-              SUBNET: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
+              SUBNET: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
        rabbitmq_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
        redis_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RedisNetwork]}]}
        redis_password: {get_param: RedisPassword}
@@ -1186,8 +1169,8 @@ resources:
        memcached_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
        mysql_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
        mysql_virtual_ip: {get_param: MysqlVirtualIP}
-        ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
-        ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
+        ceph_cluster_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
+        ceph_public_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
        ceph_public_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
 
  # Map heat metadata into hiera datafiles
@@ -1214,6 +1197,7 @@ resources:
            - vip_data # provided by vip-config
            - '"%{::osfamily}"'
            - common
+           - network
            - cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre
            - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
            - cinder_eqlx_data # Optionally provided by ControllerExtraConfigPre
@@ -1237,6 +1221,11 @@ resources:
              mapped_data: {get_param: ExtraConfig}
            common:
              raw_data: {get_file: hieradata/common.yaml}
+           network:
+             mapped_data:
+               net_ip_map: {get_attr: [NetIpMap, net_ip_map]}
+               net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]}
+               net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]}
            ceph:
              raw_data: {get_file: hieradata/ceph.yaml}
              mapped_data:
@@ -1272,6 +1261,14 @@ resources:
                tripleo::ringbuilder::replicas: {get_input: swift_replicas}
                tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours}
                swift_mount_check: {get_input: swift_mount_check}
+                swift::keystone::auth::public_url: {get_input: swift_public_url}
+                swift::keystone::auth::internal_url: {get_input: swift_internal_url}
+                swift::keystone::auth::admin_url: {get_input: swift_admin_url}
+                swift::keystone::auth::public_url_s3: {get_input: swift_public_url_s3}
+                swift::keystone::auth::internal_url_s3: {get_input: swift_internal_url_s3}
+                swift::keystone::auth::admin_url_s3: {get_input: swift_admin_url_s3}
+                swift::keystone::auth::password: {get_input: swift_password}
+                swift::keystone::auth::region: {get_input: keystone_region}
 
                # Cinder
                cinder_enable_db_purge: {get_input: cinder_enable_db_purge}
@@ -1296,10 +1293,20 @@ resources:
                cinder::glance::glance_api_servers: {get_input: glance_api_servers}
                cinder_backend_config: {get_input: CinderBackendConfig}
                cinder::db::mysql::password: {get_input: cinder_password}
+                cinder::keystone::auth::public_url: {get_input: cinder_public_url}
+                cinder::keystone::auth::internal_url: {get_input: cinder_internal_url}
+                cinder::keystone::auth::admin_url: {get_input: cinder_admin_url}
+                cinder::keystone::auth::public_url_v2: {get_input: cinder_public_url_v2}
+                cinder::keystone::auth::internal_url_v2: {get_input: cinder_internal_url_v2}
+                cinder::keystone::auth::admin_url_v2: {get_input: cinder_admin_url_v2}
+                cinder::keystone::auth::password: {get_input: cinder_password}
+                cinder::keystone::auth::region: {get_input: keystone_region}
 
                # Glance
                glance::api::bind_host: {get_input: glance_api_network}
                glance::registry::bind_host: {get_input: glance_registry_network}
+                glance::keystone::auth::region: {get_input: keystone_region}
+
                # Heat
                heat_stack_domain_admin_password: {get_input: heat_stack_domain_admin_password}
                heat::engine::heat_watch_server_url: {get_input: heat.watch_server_url}
@@ -1326,6 +1333,12 @@ resources:
                heat::db::mysql::password: {get_input: heat_password}
                heat_enable_db_purge: {get_input: heat_enable_db_purge}
                heat::keystone::domain::domain_password: {get_input: heat_stack_domain_admin_password}
+                heat::keystone::auth::public_url: {get_input: heat_public_url}
+                heat::keystone::auth::internal_url: {get_input: heat_internal_url}
+                heat::keystone::auth::admin_url: {get_input: heat_admin_url}
+                heat::keystone::auth::password: {get_input: heat_password}
+                heat::keystone::auth::region: {get_input: keystone_region}
+
                # Keystone
                keystone::admin_bind_host: {get_input: keystone_admin_api_network}
                keystone::public_bind_host: {get_input: keystone_public_api_network}
@@ -1343,45 +1356,33 @@ resources:
                mysql_innodb_buffer_pool_size: {get_input: mysql_innodb_buffer_pool_size}
                mysql_max_connections: {get_input: mysql_max_connections}
                mysql::server::root_password: {get_input: mysql_root_password}
+                mysql_clustercheck_password: {get_input: mysql_clustercheck_password}
                mysql_cluster_name: {get_input: mysql_cluster_name}
                mysql_bind_host: {get_input: mysql_network}
                mysql_virtual_ip: {get_input: mysql_virtual_ip}
 
                # Neutron
                neutron::bind_host: {get_input: neutron_api_network}
-                neutron::rabbit_password: {get_input: rabbit_password}
-                neutron::rabbit_user: {get_input: rabbit_username}
-                neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
-                neutron::rabbit_port: {get_input: rabbit_client_port}
-                neutron::debug: {get_input: debug}
                neutron::server::auth_uri: {get_input: keystone_auth_uri}
                neutron::server::identity_uri: {get_input: keystone_identity_uri}
                neutron::server::database_connection: {get_input: neutron_dsn}
                neutron::server::api_workers: {get_input: neutron_workers}
-                neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge}
                neutron::network_device_mtu: {get_input: neutron_tenant_mtu}
                neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling}
                neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
-                neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata}
                neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
                neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks}
-                neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
                neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network}
-                neutron::agents::metadata::metadata_workers: {get_input: neutron_workers}
                neutron_agent_mode: {get_input: neutron_agent_mode}
                neutron_router_distributed: {get_input: neutron_router_distributed}
                neutron::core_plugin: {get_input: neutron_core_plugin}
                neutron::service_plugins: {get_input: neutron_service_plugins}
-                neutron::enable_dhcp_agent: {get_input: neutron_enable_dhcp_agent}
-                neutron::enable_l3_agent: {get_input: neutron_enable_l3_agent}
-                neutron::enable_metadata_agent: {get_input: neutron_enable_metadata_agent}
                neutron::enable_ovs_agent: {get_input: neutron_enable_ovs_agent}
                neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers}
                neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers}
                neutron::plugins::ml2::extension_drivers: {get_input: neutron_plugin_extensions}
                neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover}
                neutron::server::l3_ha: {get_input: neutron_l3_ha}
-                neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network}
                neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
                neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
                neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
@@ -1394,10 +1395,7 @@ resources:
                neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
                neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions}
                neutron::server::auth_password: {get_input: neutron_password}
-                neutron::agents::metadata::auth_password: {get_input: neutron_password}
-                neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options}
                neutron_dsn: {get_input: neutron_dsn}
-                neutron::agents::metadata::auth_url: {get_input: keystone_identity_uri}
                neutron::db::mysql::password: {get_input: neutron_password}
                neutron::keystone::auth::public_url: {get_input: neutron_public_url}
                neutron::keystone::auth::internal_url: {get_input: neutron_internal_url}
@@ -1433,6 +1431,11 @@ resources:
                ceilometer::dispatcher::gnocchi::filter_project: 'service'
                ceilometer::dispatcher::gnocchi::archive_policy: 'low'
                ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml'
+                ceilometer::keystone::auth::public_url: {get_input: ceilometer_public_url}
+                ceilometer::keystone::auth::internal_url: {get_input: ceilometer_internal_url}
+                ceilometer::keystone::auth::admin_url: {get_input: ceilometer_admin_url}
+                ceilometer::keystone::auth::password: {get_input: ceilometer_password}
+                ceilometer::keystone::auth::region: {get_input: keystone_region}
                snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
                snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
 
@@ -1453,6 +1456,11 @@ resources:
                aodh::db::mysql::password: {get_input: aodh_password}
                # for a migration path from ceilometer-alarm to aodh, we use the same database & coordination
                aodh::evaluator::coordination_url: {get_input: ceilometer_coordination_url}
+                aodh::keystone::auth::public_url: {get_input: aodh_public_url}
+                aodh::keystone::auth::internal_url: {get_input: aodh_internal_url}
+                aodh::keystone::auth::admin_url: {get_input: aodh_admin_url}
+                aodh::keystone::auth::password: {get_input: aodh_password}
+                aodh::keystone::auth::region: {get_input: keystone_region}
 
                # Gnocchi
                gnocchi_backend: {get_input: gnocchi_backend}
@@ -1469,6 +1477,11 @@ resources:
                gnocchi::db::mysql::password: {get_input: gnocchi_password}
                gnocchi::storage::swift::swift_authurl: {get_input: keystone_auth_uri}
                gnocchi::storage::swift::swift_key: {get_input: gnocchi_password}
+                gnocchi::keystone::auth::public_url: {get_input: gnocchi_public_url}
+                gnocchi::keystone::auth::internal_url: {get_input: gnocchi_internal_url}
+                gnocchi::keystone::auth::admin_url: {get_input: gnocchi_admin_url}
+                gnocchi::keystone::auth::password: {get_input: gnocchi_password}
+                gnocchi::keystone::auth::region: {get_input: keystone_region}
 
                # Nova
                nova::rabbit_userid: {get_input: rabbit_username}
@@ -1484,7 +1497,6 @@ resources:
                nova::api::metadata_listen: {get_input: nova_metadata_network}
                nova::api::admin_password: {get_input: nova_password}
                nova::api::osapi_compute_workers: {get_input: nova_workers}
-                nova::api::ec2_workers: {get_input: nova_workers}
                nova::api::metadata_workers: {get_input: nova_workers}
                nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu}
                nova::database_connection: {get_input: nova_dsn}
@@ -1499,6 +1511,11 @@ resources:
                nova::db::mysql::password: {get_input: nova_password}
                nova::db::mysql_api::password: {get_input: nova_password}
                nova_enable_db_purge: {get_input: nova_enable_db_purge}
+                nova::keystone::auth::public_url: {get_input: nova_public_url}
+                nova::keystone::auth::internal_url: {get_input: nova_internal_url}
+                nova::keystone::auth::admin_url: {get_input: nova_admin_url}
+                nova::keystone::auth::password: {get_input: nova_password}
+                nova::keystone::auth::region: {get_input: keystone_region}
 
                # Horizon
                apache::mod::remoteip::proxy_ips: {get_input: horizon_subnet}
@@ -1531,14 +1548,14 @@ resources:
                sahara::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
                sahara::rabbit_port: {get_input: rabbit_client_port}
                sahara::db::mysql::password: {get_input: sahara_password}
-
-                # Rabbit
+                sahara::keystone::auth::public_url: {get_input: sahara_public_url}
+                sahara::keystone::auth::internal_url: {get_input: sahara_internal_url}
+                sahara::keystone::auth::admin_url: {get_input: sahara_admin_url}
+                sahara::keystone::auth::password: {get_input: sahara_password}
+                sahara::keystone::auth::region: {get_input: keystone_region}
+                # RabbitMQ
                rabbitmq::node_ip_address: {get_input: rabbitmq_network}
                rabbitmq::erlang_cookie: {get_input: rabbit_cookie}
-                rabbitmq::file_limit: {get_input: rabbit_fd_limit}
-                rabbitmq::default_user: {get_input: rabbit_username}
-                rabbitmq::default_pass: {get_input: rabbit_password}
-                rabbit_ipv6: {get_input: rabbit_ipv6}
 
                # Redis
                redis::bind: {get_input: redis_network}
                redis::requirepass: {get_input: redis_password}
@@ -1619,13 +1636,6 @@ outputs:
   hostname:
     description: Hostname of the server
     value: {get_attr: [Controller, name]}
-  corosync_node:
-    description: >
-      Node object in the format {ip: ..., name: ...} format that the corosync
-      element expects
-    value:
-      ip: {get_attr: [Controller, networks, ctlplane, 0]}
-      name: {get_attr: [Controller, name]}
   hosts_entry:
     description: >
       Server's IP address and hostname in the /etc/hosts format
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index 865210c9..1e888f39 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -11,6 +11,8 @@ nova::compute::libvirt::migration_support: true
 
 nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
 
+nova::network::neutron::neutron_auth_type: 'v3password'
+
 # Changing the default from 512MB. The current templates can not deploy
 # overclouds with swap. On an idle compute node, we see ~1024MB of RAM
 # used. 2048 is suggested to account for other possible operations for
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index 9316cf17..7a446b50 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -50,13 +50,22 @@ glance::registry::keystone_tenant: 'service'
 neutron::server::auth_tenant: 'service'
 neutron::agents::metadata::auth_tenant: 'service'
 neutron::agents::l3::router_delete_namespaces: True
-neutron::agents::dhcp::dhcp_delete_namespaces: True
 cinder::api::keystone_tenant: 'service'
 swift::proxy::authtoken::admin_tenant_name: 'service'
 ceilometer::api::keystone_tenant: 'service'
 gnocchi::api::keystone_tenant: 'service'
 heat::keystone_tenant: 'service'
 sahara::admin_tenant_name: 'service'
+aodh::keystone::auth::tenant: 'service'
+ceilometer::keystone::auth::tenant: 'service'
+cinder::keystone::auth::tenant: 'service'
+glance::keystone::auth::tenant: 'service'
+gnocchi::keystone::auth::tenant: 'service'
+heat::keystone::auth::tenant: 'service'
+neutron::keystone::auth::tenant: 'service'
+nova::keystone::auth::tenant: 'service'
+sahara::keystone::auth::tenant: 'service'
+swift::keystone::auth::tenant: 'service'
 
 # keystone
 keystone::cron::token_flush::maxdelay: 3600
@@ -86,6 +95,10 @@ swift::proxy::pipeline:
   - 'proxy-server'
 
 swift::proxy::account_autocreate: true
+swift::keystone::auth::configure_s3_endpoint: false
+swift::keystone::auth::operator_roles:
+  - admin
+  - swiftoperator
 
 # glance
 glance::api::pipeline: 'keystone'
@@ -96,7 +109,6 @@ glance_file_pcmk_directory: '/var/lib/glance/images'
 
 # neutron
 neutron::server::sync_db: true
-neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf
 
 # nova
 nova::notify_on_state_change: 'vm_and_task_state'
@@ -153,7 +165,6 @@ tripleo::loadbalancer::neutron: true
 tripleo::loadbalancer::cinder: true
 tripleo::loadbalancer::glance_api: true
 tripleo::loadbalancer::glance_registry: true
-tripleo::loadbalancer::nova_ec2: true
 tripleo::loadbalancer::nova_osapi: true
 tripleo::loadbalancer::nova_metadata: true
 tripleo::loadbalancer::nova_novncproxy: true
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 910617fa..9a2249d7 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -24,15 +24,6 @@ if hiera('step') >= 1 {
   create_resources(sysctl::value, hiera('sysctl_settings'), {})
   Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
 
-  $controller_node_ips = split(hiera('controller_node_ips'), ',')
-
-  if $enable_load_balancer {
-    class { '::tripleo::loadbalancer' :
-      controller_hosts => $controller_node_ips,
-      manage_vip       => true,
-    }
-  }
-
 }
 
 if hiera('step') >= 2 {
@@ -113,8 +104,6 @@ if hiera('step') >= 2 {
 
   # FIXME: this should only occur on the bootstrap host (ditto for db syncs)
   # Create all the database schemas
-  include ::keystone::db::mysql
-  include ::glance::db::mysql
   include ::nova::db::mysql
   include ::nova::db::mysql_api
   include ::neutron::db::mysql
@@ -129,36 +118,6 @@ if hiera('step') >= 2 {
     include ::aodh::db::mysql
   }
 
-  $rabbit_nodes = hiera('rabbit_node_ips')
-  if count($rabbit_nodes) > 1 {
-
-    $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
-    if $rabbit_ipv6 {
-      $rabbit_env = merge(hiera('rabbitmq_environment'), {
-        'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
-      })
-    } else {
-      $rabbit_env = hiera('rabbitmq_environment')
-    }
-
-    class { '::rabbitmq':
-      config_cluster          => true,
-      cluster_nodes           => $rabbit_nodes,
-      tcp_keepalive           => false,
-      config_kernel_variables => hiera('rabbitmq_kernel_variables'),
-      config_variables        => hiera('rabbitmq_config_variables'),
-      environment_variables   => $rabbit_env,
-    }
-    rabbitmq_policy { 'ha-all@/':
-      pattern    => '^(?!amq\.).*',
-      definition => {
-        'ha-mode' => 'all',
-      },
-    }
-  } else {
-    include ::rabbitmq
-  }
-
   # pre-install swift here so we can build rings
   include ::swift
 
@@ -307,17 +266,6 @@ if hiera('step') >= 4 {
       metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'),
     }
   } else {
-    include ::neutron::agents::l3
-    include ::neutron::agents::dhcp
-    include ::neutron::agents::metadata
-
-    file { '/etc/neutron/dnsmasq-neutron.conf':
-      content => hiera('neutron_dnsmasq_options'),
-      owner   => 'neutron',
-      group   => 'neutron',
-      notify  => Service['neutron-dhcp-service'],
-      require => Package['neutron'],
-    }
 
     # If the value of core plugin is set to 'midonet',
     # skip all the ML2 configuration
@@ -360,17 +308,9 @@ if hiera('step') >= 4 {
       include ::neutron::plugins::ml2::bigswitch::restproxy
      include ::neutron::agents::bigswitch
    }
-    neutron_l3_agent_config {
-      'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
-    }
-    neutron_dhcp_agent_config {
-      'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
-    }
 
    Service['neutron-server'] -> Service['neutron-ovs-agent-service']
  }
-  Service['neutron-server'] -> Service['neutron-dhcp-service']
-  Service['neutron-server'] -> Service['neutron-l3']
  Service['neutron-server'] -> Service['neutron-metadata']
 }
 
@@ -512,7 +452,6 @@ if hiera('step') >= 4 {
  }
 
  # swift proxy
-  include ::memcached
  include ::swift::proxy
  include ::swift::proxy::proxy_logging
  include ::swift::proxy::healthcheck
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 0372a56b..95021a74 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -23,10 +23,8 @@ Service <|
   tag == 'aodh-service' or
   tag == 'cinder-service' or
   tag == 'ceilometer-service' or
-  tag == 'glance-service' or
   tag == 'gnocchi-service' or
   tag == 'heat-service' or
-  tag == 'keystone-service' or
   tag == 'neutron-service' or
   tag == 'nova-service' or
   tag == 'sahara-service'
@@ -48,7 +46,7 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) {
   $sync_db = false
 }
 
-$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 6
+$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
 $enable_load_balancer = hiera('enable_load_balancer', true)
 
 # When to start and enable services which haven't been Pacemakerized
@@ -68,18 +66,6 @@ if hiera('step') >= 1 {
     include ::ntp
   }
 
-  $controller_node_ips = split(hiera('controller_node_ips'), ',')
-  $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
-  if $enable_load_balancer {
-    class { '::tripleo::loadbalancer' :
-      controller_hosts       => $controller_node_ips,
-      controller_hosts_names => $controller_node_names,
-      manage_vip             => false,
-      mysql_clustercheck     => true,
-      haproxy_service_manage => false,
-    }
-  }
-
   $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
   $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
   if $corosync_ipv6 {
@@ -101,6 +87,10 @@ if hiera('step') >= 1 {
 
   if $enable_fencing {
     include ::tripleo::fencing
+    # enable stonith after all Pacemaker resources have been created
+    Pcmk_resource<||> -> Class['tripleo::fencing']
+    Pcmk_constraint<||> -> Class['tripleo::fencing']
+    Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing']
     # enable stonith after all fencing devices have been created
     Class['tripleo::fencing'] -> Class['pacemaker::stonith']
   }
@@ -112,35 +102,6 @@ if hiera('step') >= 1 {
     op_params => 'start timeout=200s stop timeout=200s',
   }
 
-  # Only configure RabbitMQ in this step, don't start it yet to
-  # avoid races where non-master nodes attempt to start without
-  # config (eg. binding on 0.0.0.0)
-  # The module ignores erlang_cookie if cluster_config is false
-  $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
-  if $rabbit_ipv6 {
-    $rabbit_env = merge(hiera('rabbitmq_environment'), {
-      'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
-    })
-  } else {
-    $rabbit_env = hiera('rabbitmq_environment')
-  }
-
-  class { '::rabbitmq':
-    service_manage          => false,
-    tcp_keepalive           => false,
-    config_kernel_variables => hiera('rabbitmq_kernel_variables'),
-    config_variables        => hiera('rabbitmq_config_variables'),
-    environment_variables   => $rabbit_env,
-  } ->
-  file { '/var/lib/rabbitmq/.erlang.cookie':
-    ensure  => file,
-    owner   => 'rabbitmq',
-    group   => 'rabbitmq',
-    mode    => '0400',
-    content => hiera('rabbitmq::erlang_cookie'),
-    replace => true,
-  }
-
   if downcase(hiera('ceilometer_backend')) == 'mongodb' {
     include ::mongodb::globals
     include ::mongodb::client
@@ -149,11 +110,6 @@ if hiera('step') >= 1 {
     }
   }
 
-  # Memcached
-  class {'::memcached' :
-    service_manage => false,
-  }
-
   # Redis
   class { '::redis' :
     service_manage => false,
@@ -237,77 +193,12 @@ if hiera('step') >= 2 {
 
   if $pacemaker_master {
 
-    if $enable_load_balancer {
-
-      include ::pacemaker::resource_defaults
-
-      # Create an openstack-core dummy resource. See RHBZ 1290121
-      pacemaker::resource::ocf { 'openstack-core':
-        ocf_agent_name => 'heartbeat:Dummy',
-        clone_params   => true,
-      }
-      # FIXME: we should not have to access tripleo::loadbalancer class
-      # parameters here to configure pacemaker VIPs. The configuration
-      # of pacemaker VIPs could move into puppet-tripleo or we should
-      # make use of less specific hiera parameters here for the settings.
-      pacemaker::resource::service { 'haproxy':
-        clone_params => true,
-      }
-
-      $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
-      tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_control_vip':
-        vip_name   => 'control',
-        ip_address => $control_vip,
-      }
-
-      $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
-      tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_public_vip':
-        ensure     => $public_vip and $public_vip != $control_vip,
-        vip_name   => 'public',
-        ip_address => $public_vip,
-      }
-
-      $redis_vip = hiera('redis_vip')
-      tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_redis_vip':
-        ensure     => $redis_vip and $redis_vip != $control_vip,
-        vip_name   => 'redis',
-        ip_address => $redis_vip,
-      }
-
-
-      $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
-      tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_internal_api_vip':
-        ensure     => $internal_api_vip and $internal_api_vip != $control_vip,
-        vip_name   => 'internal_api',
-        ip_address => $internal_api_vip,
-      }
-
-      $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
-      tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_vip':
-        ensure     => $storage_vip and $storage_vip != $control_vip,
-        vip_name   => 'storage',
-        ip_address => $storage_vip,
-      }
-
-      $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
-      tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_mgmt_vip':
-        ensure     => $storage_mgmt_vip and $storage_mgmt_vip != $control_vip,
-        vip_name   => 'storage_mgmt',
-        ip_address => $storage_mgmt_vip,
-      }
-    }
-
-    pacemaker::resource::service { $::memcached::params::service_name :
-      clone_params => 'interleave=true',
-      require      => Class['::memcached'],
-    }
+    include ::pacemaker::resource_defaults
 
-    pacemaker::resource::ocf { 'rabbitmq':
-      ocf_agent_name  => 'heartbeat:rabbitmq-cluster',
-      resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
-      clone_params    => 'ordered=true interleave=true',
-      meta_params     => 'notify=true',
-      require         => Class['::rabbitmq'],
+    # Create an openstack-core dummy resource. See RHBZ 1290121
+    pacemaker::resource::ocf { 'openstack-core':
+      ocf_agent_name => 'heartbeat:Dummy',
+      clone_params   => true,
     }
 
     if downcase(hiera('ceilometer_backend')) == 'mongodb' {
@@ -347,6 +238,16 @@ if hiera('step') >= 2 {
     }
   }
 
+  $mysql_root_password = hiera('mysql::server::root_password')
+  $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
+  # This step is to create a sysconfig clustercheck file with the root user and empty password
+  # on the first install only (because later on the clustercheck db user will be used)
+  # We are using exec and not file in order to not have duplicate definition errors in puppet
+  # when we later set the file to contain the clustercheck data
+  exec { 'create-root-sysconfig-clustercheck':
+    command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
+    unless  => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
+  }
 
   exec { 'galera-ready' :
     command     => '/usr/bin/clustercheck >/dev/null',
@@ -354,14 +255,7 @@ if hiera('step') >= 2 {
     tries       => 180,
     try_sleep   => 10,
     environment => ['AVAILABLE_WHEN_READONLY=0'],
-    require     => File['/etc/sysconfig/clustercheck'],
-  }
-
-  file { '/etc/sysconfig/clustercheck' :
-    ensure  => file,
-    content => "MYSQL_USERNAME=root\n
-MYSQL_PASSWORD=''\n
-MYSQL_HOST=localhost\n",
+    require     => Exec['create-root-sysconfig-clustercheck'],
   }
 
   xinetd::service { 'galera-monitor' :
@@ -374,17 +268,28 @@ MYSQL_HOST=localhost\n",
     service_type   => 'UNLISTED',
     user           => 'root',
     group          => 'root',
-    require        => File['/etc/sysconfig/clustercheck'],
+    require        => Exec['create-root-sysconfig-clustercheck'],
+  }
+  # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
+  # to it in a later step. We do this only on one node as it will replicate on
+  # the other members. We also make sure that the permissions are the minimum necessary
+  if $pacemaker_master {
+    mysql_user { 'clustercheck@localhost':
+      ensure        => 'present',
+      password_hash => mysql_password($mysql_clustercheck_password),
+      require       => Exec['galera-ready'],
+    }
+    mysql_grant { 'clustercheck@localhost/*.*':
+      ensure     => 'present',
+      options    => ['GRANT'],
+      privileges => ['PROCESS'],
+      table      => '*.*',
+      user       => 'clustercheck@localhost',
+    }
   }
 
   # Create all the database schemas
   if $sync_db {
-    class { '::keystone::db::mysql':
-      require => Exec['galera-ready'],
-    }
-    class { '::glance::db::mysql':
-      require => Exec['galera-ready'],
-    }
     class { '::nova::db::mysql':
       require => Exec['galera-ready'],
     }
@@ -474,6 +379,17 @@ MYSQL_HOST=localhost\n",
 } #END STEP 2
 
 if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
+  # At this stage we are guaranteed that the clustercheck db user exists
+  # so we switch the resource agent to use it.
+  file { '/etc/sysconfig/clustercheck' :
+    ensure  => file,
+    mode    => '0600',
+    owner   => 'root',
+    group   => 'root',
+    content => "MYSQL_USERNAME=clustercheck\n
+MYSQL_PASSWORD='${mysql_clustercheck_password}'\n
+MYSQL_HOST=localhost\n",
+  }
 
   $nova_ipv6 = hiera('nova::use_ipv6', false)
   if $nova_ipv6 {
@@ -596,31 +512,6 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
       metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'),
     }
   }
-  if hiera('neutron::enable_dhcp_agent',true) {
-    class { '::neutron::agents::dhcp' :
-      manage_service => false,
-      enabled        => false,
-    }
-    file { '/etc/neutron/dnsmasq-neutron.conf':
-      content => hiera('neutron_dnsmasq_options'),
-      owner   => 'neutron',
-      group   => 'neutron',
-      notify  => Service['neutron-dhcp-service'],
-      require => Package['neutron'],
-    }
-  }
-  if hiera('neutron::enable_l3_agent',true) {
-    class { '::neutron::agents::l3' :
-      manage_service => false,
-      enabled        => false,
-    }
-  }
-  if hiera('neutron::enable_metadata_agent',true) {
-    class { '::neutron::agents::metadata':
-      manage_service => false,
-      enabled        => false,
-    }
-  }
   include ::neutron::plugins::ml2
   class { '::neutron::agents::ml2::ovs':
     manage_service => false,
@@ -652,15 +543,6 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
     include ::neutron::plugins::ml2::bigswitch::restproxy
     include ::neutron::agents::bigswitch
   }
-  neutron_l3_agent_config {
-    'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
-  }
-  neutron_dhcp_agent_config {
-    'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
-  }
-  neutron_config {
-    'DEFAULT/notification_driver': value => 'messaging';
-  }
 
   include ::cinder
   include ::cinder::config
@@ -1030,6 +912,29 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
 } #END STEP 4
 
 if hiera('step') >= 5 {
+  # We now make sure that the root db password is set to a random one
+  # At first installation /root/.my.cnf will be empty and we connect without a root
+  # password. On second runs or updates /root/.my.cnf will already be populated
+  # with proper credentials. This step happens on every node because this sql
+  # statement does not automatically replicate across nodes.
+  exec { 'galera-set-root-password':
+    command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
+  }
+  file { '/root/.my.cnf' :
+    ensure  => file,
+    mode    => '0600',
+    owner   => 'root',
+    group   => 'root',
+    content => "[client]
+user=root
+password=\"${mysql_root_password}\"
+
+[mysql]
+user=root
+password=\"${mysql_root_password}\"",
+    require => Exec['galera-set-root-password'],
+  }
+
   $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
   $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
   $heat_enable_db_purge = hiera('heat_enable_db_purge', true)
@@ -1055,15 +960,6 @@ if hiera('step') >= 5 {
       require => [Pacemaker::Resource::Service[$::apache::params::service_name],
                   Pacemaker::Resource::Ocf['openstack-core']],
     }
-    pacemaker::constraint::base { 'memcached-then-openstack-core-constraint':
-      constraint_type => 'order',
-      first_resource  => 'memcached-clone',
-      second_resource => 'openstack-core-clone',
-      first_action    => 'start',
-      second_action   => 'start',
-      require         => [Pacemaker::Resource::Service['memcached'],
-                          Pacemaker::Resource::Ocf['openstack-core']],
-    }
     pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
       constraint_type => 'order',
       first_resource  => 'galera-master',
@@ -1153,43 +1049,6 @@ if hiera('step') >= 5 {
                   Pacemaker::Resource::Service[$::sahara::params::engine_service_name]],
     }
 
-    if hiera('step') == 5 {
-      # Neutron
-      # NOTE(gfidente): Neutron will try to populate the database with some data
-      # as soon as neutron-server is started; to avoid races we want to make this
-      # happen only on one node, before normal Pacemaker initialization
-      # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
-      # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec
-      # will try to start the service while it's already started by Pacemaker
-      # It would result to a deployment failure since systemd would return 1 to Puppet
-      # and the overcloud would fail to deploy (6 would be returned).
-      # This conditional prevents from a race condition during the deployment.
-      # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
-      exec { 'neutron-server-systemd-start-sleep' :
-        command => 'systemctl start neutron-server && /usr/bin/sleep 5',
-        path    => '/usr/bin',
-        unless  => '/sbin/pcs resource show neutron-server',
-      } ->
-      pacemaker::resource::service { $::neutron::params::server_service:
-        clone_params => 'interleave=true',
-        require      => Pacemaker::Resource::Ocf['openstack-core']
-      }
-    } else {
-      pacemaker::resource::service { $::neutron::params::server_service:
-        clone_params => 'interleave=true',
-        require      => Pacemaker::Resource::Ocf['openstack-core']
-      }
-    }
-    if hiera('neutron::enable_l3_agent', true) {
-      pacemaker::resource::service { $::neutron::params::l3_agent_service:
-        clone_params => 'interleave=true',
-      }
-    }
-    if hiera('neutron::enable_dhcp_agent', true) {
-      pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
-        clone_params => 'interleave=true',
-      }
-    }
     if hiera('neutron::enable_ovs_agent', true) {
       pacemaker::resource::service { $::neutron::params::ovs_agent_service:
         clone_params => 'interleave=true',
@@ -1200,11 +1059,6 @@ if hiera('step') >= 5 {
         clone_params => 'interleave=true',
       }
     }
-    if hiera('neutron::enable_metadata_agent', true) {
-      pacemaker::resource::service { $::neutron::params::metadata_agent_service:
-        clone_params => 'interleave=true',
-      }
-    }
     if hiera('neutron::enable_ovs_agent', true) {
       pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
         ocf_agent_name => 'neutron:OVSCleanup',
@@ -1249,81 +1103,6 @@ if hiera('step') >= 5 {
                     Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
       }
     }
-    pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
-      constraint_type => 'order',
-      first_resource  => 'openstack-core-clone',
-      second_resource => "${::neutron::params::server_service}-clone",
-      first_action    => 'start',
-      second_action   => 'start',
-      require         => [Pacemaker::Resource::Ocf['openstack-core'],
-                          Pacemaker::Resource::Service[$::neutron::params::server_service]],
-    }
-    if hiera('neutron::enable_ovs_agent',true) {
-      pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
-        constraint_type => 'order',
-        first_resource  => "${::neutron::params::ovs_agent_service}-clone",
-        second_resource => "${::neutron::params::dhcp_agent_service}-clone",
-        first_action    => 'start',
-        second_action   => 'start',
-        require         => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
-                            Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
-      }
-    }
-    if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) {
-      pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
-        constraint_type => 'order',
-        first_resource  => "${::neutron::params::server_service}-clone",
-        second_resource => "${::neutron::params::ovs_agent_service}-clone",
-        first_action    => 'start',
-        second_action   => 'start',
-        require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
-                            Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
-      }
-
-      pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
-        source  => "${::neutron::params::dhcp_agent_service}-clone",
-        target  => "${::neutron::params::ovs_agent_service}-clone",
-        score   => 'INFINITY',
-        require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
-                    Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
-      }
-    }
-    if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_l3_agent',true) {
-      pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
-        constraint_type => 'order',
-        first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
-        second_resource => "${::neutron::params::l3_agent_service}-clone",
-        first_action    => 'start',
-        second_action   => 'start',
-        require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
-                            Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
-      }
-      pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
-        source  => "${::neutron::params::l3_agent_service}-clone",
-        target  => "${::neutron::params::dhcp_agent_service}-clone",
-        score   => 'INFINITY',
-        require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
-                    Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
-      }
-    }
-    if hiera('neutron::enable_l3_agent',true) and hiera('neutron::enable_metadata_agent',true) {
-      pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
-        constraint_type => 'order',
-        first_resource  => "${::neutron::params::l3_agent_service}-clone",
-        second_resource => "${::neutron::params::metadata_agent_service}-clone",
-        first_action    => 'start',
-        second_action   => 'start',
-        require         => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
-                            Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
-      }
-      pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
-        source  => "${::neutron::params::metadata_agent_service}-clone",
-        target  => "${::neutron::params::l3_agent_service}-clone",
-        score   => 'INFINITY',
-        require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
-                    Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
-      }
-    }
     if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
       #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
       pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index 3e8784b7..ca50d91d 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -94,5 +94,9 @@ outputs:
         glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
        glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
        glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+        glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
+        glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
+        glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
+        glance::keystone::auth::password: {get_param: GlancePassword}
      step_config: |
        include ::tripleo::profile::base::glance::api
diff --git a/puppet/services/loadbalancer.yaml b/puppet/services/loadbalancer.yaml
new file mode 100644
index 00000000..0c1757bf
--- /dev/null
+++ b/puppet/services/loadbalancer.yaml
@@ -0,0 +1,21 @@
+heat_template_version: 2016-04-08
+
+description: >
+  Loadbalancer service configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    type: string
+    default: ''
+
+outputs:
+  role_data:
+    description: Role data for the Loadbalancer role.
diff --git a/puppet/services/memcached.yaml b/puppet/services/memcached.yaml
new file mode 100644
index 00000000..1833fbff
--- /dev/null
+++ b/puppet/services/memcached.yaml
@@ -0,0 +1,23 @@
+heat_template_version: 2016-04-08
+
+description: >
+  Memcached service configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    type: string
+    default: ''
+
+outputs:
+  role_data:
+    description: Role data for the Memcached role.
+    value:
+      config_settings:
+      step_config: |
+        include ::tripleo::profile::base::memcached
+
diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml
new file mode 100644
index 00000000..b34bdd22
--- /dev/null
+++ b/puppet/services/neutron-base.yaml
@@ -0,0 +1,44 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Neutron base service. Shared for all Neutron agents.
+
+parameters:
+  RabbitPassword:
+    description: The password for RabbitMQ
+    type: string
+    hidden: true
+  RabbitUserName:
+    default: guest
+    description: The username for RabbitMQ
+    type: string
+  RabbitClientUseSSL:
+    default: false
+    description: >
+      Rabbit client subscriber parameter to specify
+      an SSL connection to the RabbitMQ host.
+    type: string
+  RabbitClientPort:
+    default: 5672
+    description: Set rabbit subscriber port; change this if using SSL
+    type: number
+  NeutronDhcpAgentsPerNetwork:
+    type: number
+    default: 3
+    description: The number of neutron dhcp agents to schedule per network
+  Debug:
+    type: string
+    default: ''
+    description: Set to True to enable debugging on all services.
+
+outputs:
+  role_data:
+    description: Role data for the Neutron base service.
+    value:
+      config_settings:
+        neutron::rabbit_password: {get_param: RabbitPassword}
+        neutron::rabbit_user: {get_param: RabbitUserName}
+        neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+        neutron::rabbit_port: {get_param: RabbitClientPort}
+        neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
+        neutron::debug: {get_param: Debug}
diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml
new file mode 100644
index 00000000..548b4ba0
--- /dev/null
+++ b/puppet/services/neutron-dhcp.yaml
@@ -0,0 +1,56 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Neutron DHCP agent configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    type: string
+    default: ''
+  NeutronEnableIsolatedMetadata:
+    default: 'False'
+    description: If True, the DHCP agent provides a metadata route to the VM.
+    type: string
+  NeutronDnsmasqOptions:
+    default: 'dhcp-option-force=26,%MTU%'
+    description: >
+      Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU
+      to be set to the value of NeutronTenantMtu, which should be set to account
+      for tunnel overhead.
+    type: string
+  NeutronTenantMtu:
+    description: >
+      The default MTU for tenant networks. For VXLAN/GRE tunneling, this should
+      be at least 50 bytes smaller than the MTU on the physical network. This
+      value will be used to set the MTU on the virtual Ethernet device.
+      This value will be used to construct the NeutronDnsmasqOptions, since that
+      will determine the MTU that is assigned to the VM host through DHCP.
+    default: "1400"
+    type: string
+
+resources:
+
+  NeutronBase:
+    type: ./neutron-base.yaml
+
+outputs:
+  role_data:
+    description: Role data for the Neutron DHCP agent service.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronBase, role_data, config_settings]
+          - neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf
+            tripleo::profile::base::neutron::dhcp:
+              str_replace:
+                template: {get_param: NeutronDnsmasqOptions}
+                params:
+                  '%MTU%': {get_param: NeutronTenantMtu}
+            neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
+      step_config: |
+        include ::tripleo::profile::base::neutron::dhcp
diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml
new file mode 100644
index 00000000..2ea1b19d
--- /dev/null
+++ b/puppet/services/neutron-l3.yaml
@@ -0,0 +1,37 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Neutron L3 agent configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    type: string
+    default: ''
+  Debug:
+    type: string
+    default: ''
+  NeutronExternalNetworkBridge:
+    description: Name of bridge used for external network traffic.
+    type: string
+    default: 'br-ex'
+
+resources:
+
+  NeutronBase:
+    type: ./neutron-base.yaml
+
+outputs:
+  role_data:
+    description: Role data for the Neutron L3 agent service.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronBase, role_data, config_settings]
+          - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
+      step_config: |
+        include ::tripleo::profile::base::neutron::l3
diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml
new file mode 100644
index 00000000..1fe139f3
--- /dev/null
+++ b/puppet/services/neutron-metadata.yaml
@@ -0,0 +1,45 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Neutron Metadata agent configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    type: string
+    default: ''
+  NeutronMetadataProxySharedSecret:
+    description: Shared secret to prevent spoofing
+    type: string
+    hidden: true
+  NeutronWorkers:
+    default: 0
+    description: Number of workers for Neutron service.
+    type: number
+  NeutronPassword:
+    description: The password for the neutron service and db account, used by neutron agents.
+    type: string
+    hidden: true
+
+resources:
+
+  NeutronBase:
+    type: ./neutron-base.yaml
+
+outputs:
+  role_data:
+    description: Role data for the Neutron Metadata agent service.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronBase, role_data, config_settings]
+          - neutron::agents::metadata::shared_secret: {get_param: NeutronMetadataProxySharedSecret}
+            neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers}
+            neutron::agents::metadata::auth_password: {get_param: NeutronPassword}
+            neutron::agents::metadata::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+      step_config: |
+        include ::tripleo::profile::base::neutron::metadata
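The str_replace in neutron-dhcp.yaml above renders NeutronDnsmasqOptions before
the value lands in hiera: %MTU% is substituted with NeutronTenantMtu, and
dhcp-option-force=26 is the DHCP interface-MTU option, so guests learn the
tenant MTU from dnsmasq. With the template defaults this resolves to the
following hiera entry (shown only to illustrate the substitution):

  tripleo::profile::base::neutron::dhcp: dhcp-option-force=26,1400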
diff --git a/puppet/services/pacemaker/glance-api.yaml b/puppet/services/pacemaker/glance-api.yaml
index 815eb5bf..ad964216 100644
--- a/puppet/services/pacemaker/glance-api.yaml
+++ b/puppet/services/pacemaker/glance-api.yaml
@@ -56,5 +56,7 @@ outputs:
       glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype}
       glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage}
       glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions}
+      glance::api::manage_service: false
+      glance::api::enabled: false
     step_config: |
       include ::tripleo::profile::pacemaker::glance
diff --git a/puppet/services/pacemaker/glance-registry.yaml b/puppet/services/pacemaker/glance-registry.yaml
index 56353459..393fbaaf 100644
--- a/puppet/services/pacemaker/glance-registry.yaml
+++ b/puppet/services/pacemaker/glance-registry.yaml
@@ -26,7 +26,10 @@ outputs:
     description: Role data for the Glance role.
     value:
       config_settings:
-        get_attr: [GlanceRegistryBase, role_data, config_settings]
+        map_merge:
+          - get_attr: [GlanceRegistryBase, role_data, config_settings]
+          - glance::registry::manage_service: false
+            glance::registry::enabled: false
 # No puppet manifests since glance-registry is included in
 # ::tripleo::profile::pacemaker::glance which is maintained alongside of
 # pacemaker/glance-api.yaml.
diff --git a/puppet/services/pacemaker/keystone.yaml b/puppet/services/pacemaker/keystone.yaml
index 8fcab15f..db52cae7 100644
--- a/puppet/services/pacemaker/keystone.yaml
+++ b/puppet/services/pacemaker/keystone.yaml
@@ -28,7 +28,7 @@ outputs:
     config_settings:
       map_merge:
         - get_attr: [KeystoneServiceBase, role_data, config_settings]
-        #-
-        #  custom keystone hiera goes here if we need it!?
+        - keystone::manage_service: false
+          keystone::enabled: false
     step_config: |
       include ::tripleo::profile::pacemaker::keystone
diff --git a/puppet/services/pacemaker/loadbalancer.yaml b/puppet/services/pacemaker/loadbalancer.yaml
new file mode 100644
index 00000000..771b3d9b
--- /dev/null
+++ b/puppet/services/pacemaker/loadbalancer.yaml
@@ -0,0 +1,34 @@
+heat_template_version: 2016-04-08
+
+description: >
+  Loadbalancer service with Pacemaker configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    type: string
+    default: ''
+
+resources:
+  LoadbalancerServiceBase:
+    type: ../loadbalancer.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+  role_data:
+    description: Role data for the Loadbalancer pacemaker role.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [LoadbalancerServiceBase, role_data, config_settings]
+          - tripleo::loadbalancer::haproxy_service_manage: false
+            tripleo::loadbalancer::mysql_clustercheck: true
+            tripleo::loadbalancer::manage_vip: false
+      step_config: |
+        include ::tripleo::profile::pacemaker::loadbalancer
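All of the pacemaker variants follow the pattern visible in the hunks above:
pull the base template's config_settings in via get_attr, then map_merge
overrides on top that stop puppet from starting the service, so Pacemaker owns
its lifecycle. map_merge is the standard Heat intrinsic in which the last map
wins on duplicate keys; a toy resolution, with placeholder values rather than
anything from this diff:

  config_settings:
    map_merge:
      - {example::service_manage: true}
      - {example::service_manage: false}
  # resolves to {example::service_manage: false}: the later map takes precedence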
diff --git a/puppet/services/pacemaker/memcached.yaml b/puppet/services/pacemaker/memcached.yaml
new file mode 100644
index 00000000..306f805e
--- /dev/null
+++ b/puppet/services/pacemaker/memcached.yaml
@@ -0,0 +1,31 @@
+heat_template_version: 2016-04-08
+
+description: >
+  Memcached service with Pacemaker configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    type: string
+    default: ''
+
+resources:
+
+  MemcachedServiceBase:
+    type: ../memcached.yaml
+
+outputs:
+  role_data:
+    description: Role data for the Memcached pacemaker role.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [MemcachedServiceBase, role_data, config_settings]
+          - memcached::service_manage: false
+      step_config: |
+        include ::tripleo::profile::pacemaker::memcached
+
diff --git a/puppet/services/pacemaker/neutron-dhcp.yaml b/puppet/services/pacemaker/neutron-dhcp.yaml
new file mode 100644
index 00000000..0e972b28
--- /dev/null
+++ b/puppet/services/pacemaker/neutron-dhcp.yaml
@@ -0,0 +1,35 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Neutron DHCP service with Pacemaker configured with Puppet.
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    type: string
+    default: ''
+
+resources:
+
+  NeutronDhcpBase:
+    type: ../neutron-dhcp.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+  role_data:
+    description: Role data for the Neutron DHCP role.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronDhcpBase, role_data, config_settings]
+          - tripleo::profile::pacemaker::neutron::enable_dhcp: True
+            neutron::agents::dhcp::enabled: false
+            neutron::agents::dhcp::manage_service: false
+      step_config: |
+        include ::tripleo::profile::pacemaker::neutron::dhcp
diff --git a/puppet/services/pacemaker/neutron-l3.yaml b/puppet/services/pacemaker/neutron-l3.yaml
new file mode 100644
index 00000000..84bff808
--- /dev/null
+++ b/puppet/services/pacemaker/neutron-l3.yaml
@@ -0,0 +1,33 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Neutron L3 service with Pacemaker configured with Puppet.
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    type: string
+    default: ''
+
+resources:
+
+  NeutronL3Base:
+    type: ../neutron-l3.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+  role_data:
+    description: Role data for the Neutron L3 role.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronL3Base, role_data, config_settings]
+          - tripleo::profile::pacemaker::neutron::enable_l3: True
+      step_config: |
+        include ::tripleo::profile::pacemaker::neutron::l3
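The tripleo::profile::pacemaker::neutron::enable_* keys set by these templates
take over from the hiera('neutron::enable_*') switches deleted from the
monolithic controller manifest at the top of this diff, so each agent's
pacemaker resource is guarded by its own flag (enable_dhcp and enable_l3
above, enable_metadata just below). On a controller running all three agents
the merged hieradata would carry, illustratively:

  tripleo::profile::pacemaker::neutron::enable_dhcp: True
  tripleo::profile::pacemaker::neutron::enable_l3: True
  tripleo::profile::pacemaker::neutron::enable_metadata: True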
diff --git a/puppet/services/pacemaker/neutron-metadata.yaml b/puppet/services/pacemaker/neutron-metadata.yaml
new file mode 100644
index 00000000..79baf1ea
--- /dev/null
+++ b/puppet/services/pacemaker/neutron-metadata.yaml
@@ -0,0 +1,33 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Neutron Metadata service with Pacemaker configured with Puppet.
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    type: string
+    default: ''
+
+resources:
+
+  NeutronMetadataBase:
+    type: ../neutron-metadata.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+  role_data:
+    description: Role data for the Neutron Metadata role.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronMetadataBase, role_data, config_settings]
+          - tripleo::profile::pacemaker::neutron::enable_metadata: True
+      step_config: |
+        include ::tripleo::profile::pacemaker::neutron::metadata
diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml
new file mode 100644
index 00000000..613db449
--- /dev/null
+++ b/puppet/services/pacemaker/rabbitmq.yaml
@@ -0,0 +1,32 @@
+heat_template_version: 2016-04-08
+
+description: >
+  RabbitMQ service with Pacemaker configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    type: string
+    default: ''
+
+resources:
+  RabbitMQServiceBase:
+    type: ../rabbitmq.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+  role_data:
+    description: Role data for the RabbitMQ pacemaker role.
+    value:
+      config_settings:
+        map_merge:
+          - get_attr: [RabbitMQServiceBase, role_data, config_settings]
+          - rabbitmq::service_manage: false
+      step_config: |
+        include ::tripleo::profile::pacemaker::rabbitmq
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
new file mode 100644
index 00000000..ae5678a3
--- /dev/null
+++ b/puppet/services/rabbitmq.yaml
@@ -0,0 +1,42 @@
+heat_template_version: 2016-04-08
+
+description: >
+  RabbitMQ service configured with Puppet
+
+parameters:
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    type: string
+    default: ''
+  RabbitUserName:
+    default: guest
+    description: The username for RabbitMQ
+    type: string
+  RabbitPassword:
+    description: The password for RabbitMQ
+    type: string
+    hidden: true
+  RabbitFDLimit:
+    default: 16384
+    description: Configures RabbitMQ FD limit
+    type: string
+  RabbitIPv6:
+    default: false
+    description: Enable IPv6 in RabbitMQ
+    type: boolean
+
+outputs:
+  role_data:
+    description: Role data for the RabbitMQ role.
+ value: + config_settings: + rabbitmq::file_limit: {get_param: RabbitFDLimit} + rabbitmq::default_user: {get_param: RabbitUserName} + rabbitmq::default_pass: {get_param: RabbitPassword} + rabbit_ipv6: {get_param: RabbitIPv6} + step_config: | + include ::tripleo::profile::base::rabbitmq diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml index eb06b241..b262f947 100644 --- a/puppet/swift-storage-post.yaml +++ b/puppet/swift-storage-post.yaml @@ -52,6 +52,10 @@ resources: group: puppet options: enable_debug: {get_param: ConfigDebug} + enable_hiera: True + enable_facter: False + inputs: + - name: step outputs: - name: result config: @@ -65,6 +69,7 @@ resources: servers: {get_param: servers} config: {get_resource: StorageRingbuilderPuppetConfig} input_values: + step: 3 # Note ringbuilder.pp expects >=3 update_identifier: {get_param: NodeConfigIdentifiers} # Note, this should come last, so use depends_on to ensure diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml index 296428db..3f6f4733 100644 --- a/puppet/swift-storage.yaml +++ b/puppet/swift-storage.yaml @@ -220,16 +220,22 @@ resources: properties: ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkDeployment: @@ -256,10 +262,16 @@ resources: - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common + - network merge_behavior: deeper datafiles: common: raw_data: {get_file: hieradata/common.yaml} + network: + mapped_data: + net_ip_map: {get_attr: [NetIpMap, net_ip_map]} + net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]} + net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]} object_extraconfig: mapped_data: {get_param: ObjectStorageExtraConfig} extraconfig: |