Diffstat (limited to 'puppet')
40 files changed, 1161 insertions, 757 deletions
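The bulk of the changes below rework the post-deployment templates to drive Puppet in numbered steps: each OS::Heat::StructuredDeployments resource now passes a numeric 'step' input, the node manifests gate their work on hiera('step'), and extra per-role manifests can be appended through the new StepConfig parameter. A condensed sketch of that gating pattern, abridged from the overcloud_cephstorage.pp hunk further down (the class names and step numbers are taken from that manifest, not a complete listing):

if hiera('step') >= 1 {
  # node-local setup that can safely run on the first pass
  include ::timezone
  if count(hiera('ntp::servers')) > 0 {
    include ::ntp
  }
}

if hiera('step') >= 3 {
  # service configuration deferred until the later deployment passes
  include ::ceph::conf
  include ::ceph::profile::client
  include ::ceph::profile::osd
  hiera_include('ceph_classes')
}

The remaining changes consolidate the per-network subnet lookups into NetIpMap (dropping the separate NetIpSubnetMap resource), expose net_ip_map/net_ip_subnet_map/net_ip_uri_map as a 'network' hiera datafile, feed keystone endpoint URLs from EndpointMap, and strip Heat, RabbitMQ, memcached and the Neutron DHCP/L3/metadata agents out of the monolithic controller manifests.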
diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml index e90710c7..2b9ae751 100644 --- a/puppet/ceph-storage-post.yaml +++ b/puppet/ceph-storage-post.yaml @@ -13,6 +13,10 @@ parameters: NodeConfigIdentifiers: type: json description: Value which changes if the node configuration may need to be re-applied + StepConfig: + type: string + description: Config manifests that will be used to step through the deployment. + default: '' resources: @@ -33,26 +37,44 @@ resources: group: puppet options: enable_debug: {get_param: ConfigDebug} + enable_hiera: True + enable_facter: False + inputs: + - name: step outputs: - name: result config: - get_file: manifests/overcloud_cephstorage.pp + list_join: + - '' + - - get_file: manifests/overcloud_cephstorage.pp + - {get_param: StepConfig} - CephStorageDeployment_Step1: + CephStorageDeployment_Step2: type: OS::Heat::StructuredDeployments depends_on: CephStorageArtifactsDeploy properties: - name: CephStorageDeployment_Step1 + name: CephStorageDeployment_Step2 servers: {get_param: servers} config: {get_resource: CephStoragePuppetConfig} input_values: + step: 2 + update_identifier: {get_param: NodeConfigIdentifiers} + + CephStorageDeployment_Step3: + type: OS::Heat::StructuredDeployments + depends_on: CephStorageDeployment_Step2 + properties: + name: CephStorageDeployment_Step3 + servers: {get_param: servers} + config: {get_resource: CephStoragePuppetConfig} + input_values: + step: 3 update_identifier: {get_param: NodeConfigIdentifiers} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. ExtraConfig: - depends_on: CephStorageDeployment_Step1 + depends_on: CephStorageDeployment_Step3 type: OS::TripleO::NodeExtraConfigPost properties: servers: {get_param: servers} - diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml index f0eb71e4..eedb35e4 100644 --- a/puppet/ceph-storage.yaml +++ b/puppet/ceph-storage.yaml @@ -96,6 +96,9 @@ parameters: NodeIndex: type: number default: 0 + ServiceConfigSettings: + type: json + default: {} resources: CephStorage: @@ -195,28 +198,23 @@ resources: properties: ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} - ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} - - NetIpSubnetMap: - type: OS::TripleO::Network::Ports::NetIpSubnetMap - properties: - ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} - ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} - InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} - StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} - StorageMgmtIpSubnet: {get_attr: 
[StorageMgmtPort, ip_subnet]} - TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} + ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -238,8 +236,8 @@ resources: timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} - ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} - ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} + ceph_cluster_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} + ceph_public_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} CephStorageConfig: type: OS::Heat::StructuredConfig @@ -252,14 +250,23 @@ resources: - heat_config_%{::deploy_config_name} - ceph_extraconfig - extraconfig + - service_configs - ceph_cluster # provided by CephClusterConfig - ceph - '"%{::osfamily}"' - common + - network merge_behavior: deeper datafiles: + service_configs: + mapped_data: {get_param: ServiceConfigSettings} common: raw_data: {get_file: hieradata/common.yaml} + network: + mapped_data: + net_ip_map: {get_attr: [NetIpMap, net_ip_map]} + net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]} + net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]} ceph_extraconfig: mapped_data: {get_param: CephStorageExtraConfig} extraconfig: diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml index c1a04e24..13914878 100644 --- a/puppet/cinder-storage.yaml +++ b/puppet/cinder-storage.yaml @@ -246,16 +246,22 @@ resources: properties: ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkDeployment: @@ -316,10 +322,16 @@ resources: - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common + - network merge_behavior: deeper datafiles: common: raw_data: {get_file: hieradata/common.yaml} + network: + mapped_data: + net_ip_map: {get_attr: [NetIpMap, net_ip_map]} + net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]} + net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]} volume_extraconfig: mapped_data: {get_param: BlockStorageExtraConfig} extraconfig: diff --git a/puppet/compute.yaml b/puppet/compute.yaml index 4c18067a..e56deefd 100644 --- 
a/puppet/compute.yaml +++ b/puppet/compute.yaml @@ -430,16 +430,22 @@ resources: properties: ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkConfig: @@ -481,6 +487,7 @@ resources: - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common + - network - neutron_bigswitch_data # Optionally provided by ComputeExtraConfigPre - cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre - nova_nuage_data # Optionally provided by ComputeExtraConfigPre @@ -494,6 +501,11 @@ resources: mapped_data: {get_param: ExtraConfig} common: raw_data: {get_file: hieradata/common.yaml} + network: + mapped_data: + net_ip_map: {get_attr: [NetIpMap, net_ip_map]} + net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]} + net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]} ceph: raw_data: {get_file: hieradata/ceph.yaml} compute: diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml index 80b08a06..36f9b4f8 100644 --- a/puppet/controller-post.yaml +++ b/puppet/controller-post.yaml @@ -55,7 +55,6 @@ resources: input_values: step: 1 update_identifier: {get_param: NodeConfigIdentifiers} - actions: ['CREATE'] # no need for two passes on an UPDATE ControllerServicesBaseDeployment_Step2: type: OS::Heat::StructuredDeployments @@ -67,7 +66,6 @@ resources: input_values: step: 2 update_identifier: {get_param: NodeConfigIdentifiers} - actions: ['CREATE'] # no need for two passes on an UPDATE ControllerOvercloudServicesDeployment_Step3: type: OS::Heat::StructuredDeployments @@ -102,31 +100,9 @@ resources: step: 5 update_identifier: {get_param: NodeConfigIdentifiers} - ControllerOvercloudServicesDeployment_Step6: - type: OS::Heat::StructuredDeployments - depends_on: ControllerOvercloudServicesDeployment_Step5 - properties: - name: ControllerOvercloudServicesDeployment_Step6 - servers: {get_param: servers} - config: {get_resource: ControllerPuppetConfig} - input_values: - step: 6 - update_identifier: {get_param: NodeConfigIdentifiers} - - ControllerOvercloudServicesDeployment_Step7: - type: OS::Heat::StructuredDeployments - depends_on: ControllerOvercloudServicesDeployment_Step6 - properties: - name: ControllerOvercloudServicesDeployment_Step7 - servers: {get_param: servers} - config: {get_resource: ControllerPuppetConfig} - input_values: - step: 7 - update_identifier: {get_param: NodeConfigIdentifiers} - ControllerPostPuppet: type: OS::TripleO::Tasks::ControllerPostPuppet - depends_on: ControllerOvercloudServicesDeployment_Step7 + depends_on: 
ControllerOvercloudServicesDeployment_Step5 properties: servers: {get_param: servers} input_values: diff --git a/puppet/controller.yaml b/puppet/controller.yaml index bf196d24..3aa0df14 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -220,14 +220,6 @@ parameters: default: /dev/log description: Syslog address where HAproxy will send its log type: string - HeatPassword: - description: The password for the Heat service and db account, used by the Heat services. - type: string - hidden: true - HeatStackDomainAdminPassword: - description: Password for heat_stack_domain_admin user. - type: string - hidden: true HeatAuthEncryptionKey: description: Auth encryption key for heat-engine type: string @@ -236,15 +228,6 @@ parameters: default: '*' description: A list of IP/Hostname allowed to connect to horizon type: comma_delimited_list - HeatWorkers: - default: 0 - description: Number of workers for Heat service. - type: number - HeatEnableDBPurge: - type: boolean - default: true - description: | - Whether to create cron job for purging soft deleted rows in the Heat database. HorizonSecret: description: Secret key for Django type: string @@ -310,14 +293,13 @@ parameters: description: Configures MySQL max_connections config setting type: number default: 4096 + MysqlClustercheckPassword: + type: string + hidden: true MysqlRootPassword: type: string hidden: true default: '' # Has to be here because of the ignored empty value bug - NeutronExternalNetworkBridge: - description: Name of bridge used for external network traffic. - type: string - default: 'br-ex' NeutronBridgeMappings: description: > The OVS logical->physical bridge mappings to use. See the Neutron @@ -328,22 +310,6 @@ parameters: scripts or be sure to keep 'datacentre' as a mapping network name. type: comma_delimited_list default: "datacentre:br-ex" - NeutronDnsmasqOptions: - default: 'dhcp-option-force=26,1400' - description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the gre tunnel overhead. - type: string - NeutronEnableDHCPAgent: - description: Knob to enable/disable DHCP Agent - type: boolean - default: true - NeutronEnableL3Agent: - description: Knob to enable/disable L3 agent - type: boolean - default: true - NeutronEnableMetadataAgent: - description: Knob to enable/disable Metadata agent - type: boolean - default: true NeutronEnableOVSAgent: description: Knob to enable/disable OVS Agent type: boolean @@ -356,10 +322,6 @@ parameters: default: 'False' description: Whether to enable l3-agent HA type: string - NeutronDhcpAgentsPerNetwork: - type: number - default: 3 - description: The number of neutron dhcp agents to schedule per network NeutronDVR: default: 'False' description: Whether to configure Neutron Distributed Virtual Routers @@ -394,10 +356,6 @@ parameters: default: 'True' description: Allow automatic l3-agent failover type: string - NeutronEnableIsolatedMetadata: - default: 'False' - description: If True, DHCP provide metadata route to VM. 
- type: string NeutronEnableTunnelling: type: string default: "True" @@ -562,14 +520,6 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number - RabbitFDLimit: - default: 16384 - description: Configures RabbitMQ FD limit - type: string - RabbitIPv6: - default: false - description: Enable IPv6 in RabbitMQ - type: boolean RedisPassword: type: string description: The password to access the Redis service @@ -808,28 +758,23 @@ resources: properties: ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} - ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} - - NetIpSubnetMap: - type: OS::TripleO::Network::Ports::NetIpSubnetMap - properties: - ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]} - ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} - InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} - StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} - StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} - TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} + ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkConfig: type: OS::TripleO::Controller::Net::SoftwareConfig @@ -880,36 +825,15 @@ resources: bootstack_nodeid: {get_attr: [Controller, name]} ceilometer_workers: {get_param: CeilometerWorkers} cinder_workers: {get_param: CinderWorkers} - heat_workers: {get_param: HeatWorkers} nova_workers: {get_param: NovaWorkers} neutron_workers: {get_param: NeutronWorkers} swift_workers: {get_param: SwiftWorkers} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} - neutron_enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata} haproxy_log_address: {get_param: HAProxySyslogAddress} haproxy_stats_password: {get_param: HAProxyStatsPassword} haproxy_stats_user: {get_param: HAProxyStatsUser} - heat.watch_server_url: - list_join: - - '' - - - 'http://' - - {get_param: HeatApiVirtualIPUri} - - ':8003' - heat.metadata_server_url: - list_join: - - '' - - - 'http://' - - {get_param: HeatApiVirtualIPUri} - - ':8000' - heat.waitcondition_server_url: - list_join: - - '' - - - 'http://' - - {get_param: HeatApiVirtualIPUri} - - ':8000/v1/waitcondition' heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey} - heat_enable_db_purge: {get_param: HeatEnableDBPurge} horizon_allowed_hosts: {get_param: HorizonAllowedHosts} horizon_secret: {get_param: HorizonSecret} admin_password: {get_param: AdminPassword} @@ -937,16 +861,12 @@ resources: - '@' - {get_param: 
MysqlVirtualIPUri} - '/cinder' - heat_password: {get_param: HeatPassword} - heat_stack_domain_admin_password: {get_param: HeatStackDomainAdminPassword} - heat_dsn: - list_join: - - '' - - - 'mysql+pymysql://heat:' - - {get_param: HeatPassword} - - '@' - - {get_param: MysqlVirtualIPUri} - - '/heat' + cinder_public_url: {get_param: [EndpointMap, CinderPublic, uri]} + cinder_internal_url: {get_param: [EndpointMap, CinderInternal, uri]} + cinder_admin_url: {get_param: [EndpointMap, CinderAdmin, uri]} + cinder_public_url_v2: {get_param: [EndpointMap, CinderV2Public, uri]} + cinder_internal_url_v2: {get_param: [EndpointMap, CinderV2Internal, uri]} + cinder_admin_url_v2: {get_param: [EndpointMap, CinderV2Admin, uri]} keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } keystone_ec2_uri: { get_param: [EndpointMap, KeystoneEC2, uri] } @@ -960,6 +880,7 @@ resources: mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize} mysql_max_connections: {get_param: MysqlMaxConnections} mysql_root_password: {get_param: MysqlRootPassword} + mysql_clustercheck_password: {get_param: MysqlClustercheckPassword} mysql_cluster_name: str_replace: template: tripleo-CLUSTER @@ -984,9 +905,6 @@ resources: template: DRIVERS params: DRIVERS: {get_param: NeutronTypeDrivers} - neutron_enable_dhcp_agent: {get_param: NeutronEnableDHCPAgent} - neutron_enable_l3_agent: {get_param: NeutronEnableL3Agent} - neutron_enable_metadata_agent: {get_param: NeutronEnableMetadataAgent} neutron_enable_ovs_agent: {get_param: NeutronEnableOVSAgent} neutron_mechanism_drivers: str_replace: @@ -995,7 +913,6 @@ resources: MECHANISMS: {get_param: NeutronMechanismDrivers} neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} neutron_l3_ha: {get_param: NeutronL3HA} - neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} neutron_network_vlan_ranges: str_replace: template: RANGES @@ -1006,7 +923,6 @@ resources: template: MAPPINGS params: MAPPINGS: {get_param: NeutronBridgeMappings} - neutron_external_network_bridge: {get_param: NeutronExternalNetworkBridge} neutron_public_interface: {get_param: NeutronPublicInterface} neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} neutron_public_interface_default_route: {get_param: NeutronPublicInterfaceDefaultRoute} @@ -1043,7 +959,6 @@ resources: AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} neutron_password: {get_param: NeutronPassword} neutron_tenant_mtu: {get_param: NeutronTenantMtu} - neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions} neutron_dsn: list_join: - '' @@ -1062,6 +977,9 @@ resources: ceilometer_password: {get_param: CeilometerPassword} ceilometer_store_events: {get_param: CeilometerStoreEvents} aodh_password: {get_param: AodhPassword} + aodh_internal_url: { get_param: [ EndpointMap, AodhInternal, uri ] } + aodh_public_url: { get_param: [ EndpointMap, AodhPublic, uri ] } + aodh_admin_url: { get_param: [ EndpointMap, AodhAdmin, uri ] } ceilometer_meter_dispatcher: {get_param: CeilometerMeterDispatcher} gnocchi_password: {get_param: GnocchiPassword} gnocchi_backend: {get_param: GnocchiBackend} @@ -1091,6 +1009,11 @@ resources: - {get_param: MysqlVirtualIPUri} - '/gnocchi' gnocchi_internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]} + gnocchi_public_url: { get_param: [ EndpointMap, GnocchiPublic, uri ] } + gnocchi_admin_url: { get_param: [ EndpointMap, GnocchiAdmin, uri ] } + 
ceilometer_public_url: {get_param: [EndpointMap, CeilometerPublic, uri]} + ceilometer_internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]} + ceilometer_admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]} snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} nova_enable_db_purge: {get_param: NovaEnableDBPurge} @@ -1116,6 +1039,9 @@ resources: - '/nova_api' upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute} instance_name_template: {get_param: InstanceNameTemplate} + nova_public_url: {get_param: [EndpointMap, NovaPublic, uri]} + nova_internal_url: {get_param: [EndpointMap, NovaInternal, uri]} + nova_admin_url: {get_param: [EndpointMap, NovaAdmin, uri]} fencing_config: {get_param: FencingConfig} pcsd_password: {get_param: PcsdPassword} rabbit_username: {get_param: RabbitUserName} @@ -1123,8 +1049,6 @@ resources: rabbit_cookie: {get_param: RabbitCookie} rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} rabbit_client_port: {get_param: RabbitClientPort} - rabbit_ipv6: {get_param: RabbitIPv6} - rabbit_fd_limit: {get_param: RabbitFDLimit} mongodb_no_journal: {get_param: MongoDbNoJournal} mongodb_ipv6: {get_param: MongoDbIPv6} ntp_servers: {get_param: NtpServer} @@ -1138,9 +1062,18 @@ resources: swift_replicas: {get_param: SwiftReplicas} swift_min_part_hours: {get_param: SwiftMinPartHours} swift_mount_check: {get_param: SwiftMountCheck} + swift_public_url: {get_param: [EndpointMap, SwiftPublic, uri]} + swift_internal_url: {get_param: [EndpointMap, SwiftInternal, uri]} + swift_admin_url: {get_param: [EndpointMap, SwiftAdmin, uri]} + swift_public_url_s3: {get_param: [EndpointMap, SwiftS3Public, uri]} + swift_internal_url_s3: {get_param: [EndpointMap, SwiftS3Internal, uri]} + swift_admin_url_s3: {get_param: [EndpointMap, SwiftS3Admin, uri]} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} sahara_password: {get_param: SaharaPassword} + sahara_public_url: {get_param: [EndpointMap, SaharaPublic, uri]} + sahara_internal_url: {get_param: [EndpointMap, SaharaInternal, uri]} + sahara_admin_url: {get_param: [EndpointMap, SaharaAdmin, uri]} sahara_dsn: list_join: - '' @@ -1177,7 +1110,7 @@ resources: str_replace: template: "['SUBNET']" params: - SUBNET: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, HorizonNetwork]}]} + SUBNET: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, HorizonNetwork]}]} rabbitmq_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]} redis_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RedisNetwork]}]} redis_password: {get_param: RedisPassword} @@ -1186,8 +1119,8 @@ resources: memcached_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]} mysql_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} mysql_virtual_ip: {get_param: MysqlVirtualIP} - ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} - ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} + ceph_cluster_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} + ceph_public_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} 
ceph_public_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} # Map heat metadata into hiera datafiles @@ -1214,6 +1147,7 @@ resources: - vip_data # provided by vip-config - '"%{::osfamily}"' - common + - network - cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre - cinder_eqlx_data # Optionally provided by ControllerExtraConfigPre @@ -1237,6 +1171,11 @@ resources: mapped_data: {get_param: ExtraConfig} common: raw_data: {get_file: hieradata/common.yaml} + network: + mapped_data: + net_ip_map: {get_attr: [NetIpMap, net_ip_map]} + net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]} + net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]} ceph: raw_data: {get_file: hieradata/ceph.yaml} mapped_data: @@ -1272,6 +1211,14 @@ resources: tripleo::ringbuilder::replicas: {get_input: swift_replicas} tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours} swift_mount_check: {get_input: swift_mount_check} + swift::keystone::auth::public_url: {get_input: swift_public_url } + swift::keystone::auth::internal_url: {get_input: swift_internal_url } + swift::keystone::auth::admin_url: {get_input: swift_admin_url } + swift::keystone::auth::public_url_s3: {get_input: swift_public_url_v3 } + swift::keystone::auth::internal_url_s3: {get_input: swift_internal_url_v3 } + swift::keystone::auth::admin_url_s3: {get_input: swift_admin_url_v3 } + swift::keystone::auth::password: {get_input: swift_password } + swift::keystone::auth::region: {get_input: keystone_region} # Cinder cinder_enable_db_purge: {get_input: cinder_enable_db_purge} @@ -1296,36 +1243,26 @@ resources: cinder::glance::glance_api_servers: {get_input: glance_api_servers} cinder_backend_config: {get_input: CinderBackendConfig} cinder::db::mysql::password: {get_input: cinder_password} + cinder::keystone::auth::public_url: {get_input: cinder_public_url } + cinder::keystone::auth::internal_url: {get_input: cinder_internal_url } + cinder::keystone::auth::admin_url: {get_input: cinder_admin_url } + cinder::keystone::auth::public_url_v2: {get_input: cinder_public_url_v2 } + cinder::keystone::auth::internal_url_v2: {get_input: cinder_internal_url_v2 } + cinder::keystone::auth::admin_url_v2: {get_input: cinder_admin_url_v2 } + cinder::keystone::auth::password: {get_input: cinder_password } + cinder::keystone::auth::region: {get_input: keystone_region} # Glance glance::api::bind_host: {get_input: glance_api_network} glance::registry::bind_host: {get_input: glance_registry_network} + glance::keystone::auth::region: {get_input: keystone_region} + # Heat - heat_stack_domain_admin_password: {get_input: heat_stack_domain_admin_password} - heat::engine::heat_watch_server_url: {get_input: heat.watch_server_url} - heat::engine::heat_metadata_server_url: {get_input: heat.metadata_server_url} - heat::engine::heat_waitcondition_server_url: {get_input: heat.waitcondition_server_url} - heat::engine::auth_encryption_key: {get_input: heat_auth_encryption_key} - heat::rabbit_userid: {get_input: rabbit_username} - heat::rabbit_password: {get_input: rabbit_password} - heat::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - heat::rabbit_port: {get_input: rabbit_client_port} - heat::auth_uri: {get_input: keystone_auth_uri} - heat::keystone_ec2_uri: {get_input: keystone_ec2_uri} - heat::identity_uri: {get_input: keystone_identity_uri} - heat::keystone_password: {get_input: heat_password} heat::api::bind_host: {get_input: 
heat_api_network} - heat::api::workers: {get_input: heat_workers} heat::api_cloudwatch::bind_host: {get_input: heat_api_network} - heat::api_cloudwatch::workers: {get_input: heat_workers} heat::api_cfn::bind_host: {get_input: heat_api_network} - heat::api_cfn::workers: {get_input: heat_workers} - heat::engine::num_engine_workers: {get_input: heat_workers} - heat::database_connection: {get_input: heat_dsn} - heat::debug: {get_input: debug} - heat::db::mysql::password: {get_input: heat_password} - heat_enable_db_purge: {get_input: heat_enable_db_purge} - heat::keystone::domain::domain_password: {get_input: heat_stack_domain_admin_password} + heat::engine::auth_encryption_key: {get_input: heat_auth_encryption_key} + # Keystone keystone::admin_bind_host: {get_input: keystone_admin_api_network} keystone::public_bind_host: {get_input: keystone_public_api_network} @@ -1343,45 +1280,33 @@ resources: mysql_innodb_buffer_pool_size: {get_input: mysql_innodb_buffer_pool_size} mysql_max_connections: {get_input: mysql_max_connections} mysql::server::root_password: {get_input: mysql_root_password} + mysql_clustercheck_password: {get_input: mysql_clustercheck_password} mysql_cluster_name: {get_input: mysql_cluster_name} mysql_bind_host: {get_input: mysql_network} mysql_virtual_ip: {get_input: mysql_virtual_ip} # Neutron neutron::bind_host: {get_input: neutron_api_network} - neutron::rabbit_password: {get_input: rabbit_password} - neutron::rabbit_user: {get_input: rabbit_username} - neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - neutron::rabbit_port: {get_input: rabbit_client_port} - neutron::debug: {get_input: debug} neutron::server::auth_uri: {get_input: keystone_auth_uri} neutron::server::identity_uri: {get_input: keystone_identity_uri} neutron::server::database_connection: {get_input: neutron_dsn} neutron::server::api_workers: {get_input: neutron_workers} - neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge} neutron::network_device_mtu: {get_input: neutron_tenant_mtu} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} - neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks} - neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret} neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network} - neutron::agents::metadata::metadata_workers: {get_input: neutron_workers} neutron_agent_mode: {get_input: neutron_agent_mode} neutron_router_distributed: {get_input: neutron_router_distributed} neutron::core_plugin: {get_input: neutron_core_plugin} neutron::service_plugins: {get_input: neutron_service_plugins} - neutron::enable_dhcp_agent: {get_input: neutron_enable_dhcp_agent} - neutron::enable_l3_agent: {get_input: neutron_enable_l3_agent} - neutron::enable_metadata_agent: {get_input: neutron_enable_metadata_agent} neutron::enable_ovs_agent: {get_input: neutron_enable_ovs_agent} neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers} neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers} neutron::plugins::ml2::extension_drivers: {get_input: neutron_plugin_extensions} neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover} neutron::server::l3_ha: 
{get_input: neutron_l3_ha} - neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network} neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges} @@ -1394,10 +1319,7 @@ resources: neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types} neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions} neutron::server::auth_password: {get_input: neutron_password} - neutron::agents::metadata::auth_password: {get_input: neutron_password} - neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options} neutron_dsn: {get_input: neutron_dsn} - neutron::agents::metadata::auth_url: {get_input: keystone_identity_uri} neutron::db::mysql::password: {get_input: neutron_password} neutron::keystone::auth::public_url: {get_input: neutron_public_url } neutron::keystone::auth::internal_url: {get_input: neutron_internal_url } @@ -1433,6 +1355,11 @@ resources: ceilometer::dispatcher::gnocchi::filter_project: 'service' ceilometer::dispatcher::gnocchi::archive_policy: 'low' ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml' + ceilometer::keystone::auth::public_url: {get_input: ceilometer_public_url } + ceilometer::keystone::auth::internal_url: {get_input: ceilometer_internal_url } + ceilometer::keystone::auth::admin_url: {get_input: ceilometer_admin_url } + ceilometer::keystone::auth::password: {get_input: ceilometer_password } + ceilometer::keystone::auth::region: {get_input: keystone_region} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} @@ -1453,6 +1380,11 @@ resources: aodh::db::mysql::password: {get_input: aodh_password} # for a migration path from ceilometer-alarm to aodh, we use the same database & coordination aodh::evaluator::coordination_url: {get_input: ceilometer_coordination_url} + aodh::keystone::auth::public_url: {get_input: aodh_public_url } + aodh::keystone::auth::internal_url: {get_input: aodh_internal_url } + aodh::keystone::auth::admin_url: {get_input: aodh_admin_url } + aodh::keystone::auth::password: {get_input: aodh_password } + aodh::keystone::auth::region: {get_input: keystone_region} # Gnocchi gnocchi_backend: {get_input: gnocchi_backend} @@ -1469,6 +1401,11 @@ resources: gnocchi::db::mysql::password: {get_input: gnocchi_password} gnocchi::storage::swift::swift_authurl: {get_input: keystone_auth_uri} gnocchi::storage::swift::swift_key: {get_input: gnocchi_password} + gnocchi::keystone::auth::public_url: {get_input: gnocchi_public_url } + gnocchi::keystone::auth::internal_url: {get_input: gnocchi_internal_url } + gnocchi::keystone::auth::admin_url: {get_input: gnocchi_admin_url } + gnocchi::keystone::auth::password: {get_input: gnocchi_password } + gnocchi::keystone::auth::region: {get_input: keystone_region} # Nova nova::rabbit_userid: {get_input: rabbit_username} @@ -1484,7 +1421,6 @@ resources: nova::api::metadata_listen: {get_input: nova_metadata_network} nova::api::admin_password: {get_input: nova_password} nova::api::osapi_compute_workers: {get_input: nova_workers} - nova::api::ec2_workers: {get_input: nova_workers} nova::api::metadata_workers: {get_input: nova_workers} nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} nova::database_connection: {get_input: nova_dsn} @@ -1499,6 +1435,11 @@ resources: 
nova::db::mysql::password: {get_input: nova_password} nova::db::mysql_api::password: {get_input: nova_password} nova_enable_db_purge: {get_input: nova_enable_db_purge} + nova::keystone::auth::public_url: {get_input: nova_public_url} + nova::keystone::auth::internal_url: {get_input: nova_internal_url} + nova::keystone::auth::admin_url: {get_input: nova_admin_url} + nova::keystone::auth::password: {get_input: nova_password } + nova::keystone::auth::region: {get_input: keystone_region} # Horizon apache::mod::remoteip::proxy_ips: {get_input: horizon_subnet} @@ -1531,14 +1472,14 @@ resources: sahara::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} sahara::rabbit_port: {get_input: rabbit_client_port} sahara::db::mysql::password: {get_input: sahara_password} - - # Rabbit + sahara::keystone::auth::public_url: {get_input: sahara_public_url } + sahara::keystone::auth::internal_url: {get_input: sahara_internal_url } + sahara::keystone::auth::admin_url: {get_input: sahara_admin_url } + sahara::keystone::auth::password: {get_input: sahara_password } + sahara::keystone::auth::region: {get_input: keystone_region} + # RabbitMQ rabbitmq::node_ip_address: {get_input: rabbitmq_network} rabbitmq::erlang_cookie: {get_input: rabbit_cookie} - rabbitmq::file_limit: {get_input: rabbit_fd_limit} - rabbitmq::default_user: {get_input: rabbit_username} - rabbitmq::default_pass: {get_input: rabbit_password} - rabbit_ipv6: {get_input: rabbit_ipv6} # Redis redis::bind: {get_input: redis_network} redis::requirepass: {get_input: redis_password} @@ -1619,13 +1560,6 @@ outputs: hostname: description: Hostname of the server value: {get_attr: [Controller, name]} - corosync_node: - description: > - Node object in the format {ip: ..., name: ...} format that the corosync - element expects - value: - ip: {get_attr: [Controller, networks, ctlplane, 0]} - name: {get_attr: [Controller, name]} hosts_entry: description: > Server's IP address and hostname in the /etc/hosts format diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml index 865210c9..1e888f39 100644 --- a/puppet/hieradata/compute.yaml +++ b/puppet/hieradata/compute.yaml @@ -11,6 +11,8 @@ nova::compute::libvirt::migration_support: true nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}" +nova::network::neutron::neutron_auth_type: 'v3password' + # Changing the default from 512MB. The current templates can not deploy # overclouds with swap. On an idle compute node, we see ~1024MB of RAM # used. 
2048 is suggested to account for other possible operations for diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index 9316cf17..7a446b50 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -50,13 +50,22 @@ glance::registry::keystone_tenant: 'service' neutron::server::auth_tenant: 'service' neutron::agents::metadata::auth_tenant: 'service' neutron::agents::l3::router_delete_namespaces: True -neutron::agents::dhcp::dhcp_delete_namespaces: True cinder::api::keystone_tenant: 'service' swift::proxy::authtoken::admin_tenant_name: 'service' ceilometer::api::keystone_tenant: 'service' gnocchi::api::keystone_tenant: 'service' heat::keystone_tenant: 'service' sahara::admin_tenant_name: 'service' +aodh::keystone::auth::tenant: 'service' +ceilometer::keystone::auth::tenant: 'service' +cinder::keystone::auth::tenant: 'service' +glance::keystone::auth::tenant: 'service' +gnocchi::keystone::auth::tenant: 'service' +heat::keystone::auth::tenant: 'service' +neutron::keystone::auth::tenant: 'service' +nova::keystone::auth::tenant: 'service' +sahara::keystone::auth::tenant: 'service' +swift::keystone::auth::tenant: 'service' # keystone keystone::cron::token_flush::maxdelay: 3600 @@ -86,6 +95,10 @@ swift::proxy::pipeline: - 'proxy-server' swift::proxy::account_autocreate: true +swift::keystone::auth::configure_s3_endpoint: false +swift::keystone::auth::operator_roles: + - admin + - swiftoperator # glance glance::api::pipeline: 'keystone' @@ -96,7 +109,6 @@ glance_file_pcmk_directory: '/var/lib/glance/images' # neutron neutron::server::sync_db: true -neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf # nova nova::notify_on_state_change: 'vm_and_task_state' @@ -153,7 +165,6 @@ tripleo::loadbalancer::neutron: true tripleo::loadbalancer::cinder: true tripleo::loadbalancer::glance_api: true tripleo::loadbalancer::glance_registry: true -tripleo::loadbalancer::nova_ec2: true tripleo::loadbalancer::nova_osapi: true tripleo::loadbalancer::nova_metadata: true tripleo::loadbalancer::nova_novncproxy: true diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index fd7faff1..4add2f02 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -16,41 +16,46 @@ include ::tripleo::packages include ::tripleo::firewall -create_resources(kmod::load, hiera('kernel_modules'), {}) -create_resources(sysctl::value, hiera('sysctl_settings'), {}) -Exec <| tag == 'kmod::load' |> -> Sysctl <| |> +if hiera('step') >= 1 { -if count(hiera('ntp::servers')) > 0 { - include ::ntp -} + create_resources(kmod::load, hiera('kernel_modules'), {}) + create_resources(sysctl::value, hiera('sysctl_settings'), {}) + Exec <| tag == 'kmod::load' |> -> Sysctl <| |> -include ::timezone + include ::timezone -if str2bool(hiera('ceph_osd_selinux_permissive', true)) { - exec { 'set selinux to permissive on boot': - command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", - onlyif => "test -f /etc/selinux/config && ! 
grep '^SELINUX=permissive' /etc/selinux/config", - path => ['/usr/bin', '/usr/sbin'], + if count(hiera('ntp::servers')) > 0 { + include ::ntp } - - exec { 'set selinux to permissive': - command => 'setenforce 0', - onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ['/usr/bin', '/usr/sbin'], - } -> Class['ceph::profile::osd'] } -if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') -} else { - $mon_host = hiera('ceph_mon_host') -} -class { '::ceph::profile::params': - mon_host => $mon_host, -} -include ::ceph::conf -include ::ceph::profile::client -include ::ceph::profile::osd +if hiera('step') >= 3 { + if str2bool(hiera('ceph_osd_selinux_permissive', true)) { + exec { 'set selinux to permissive on boot': + command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", + onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", + path => ['/usr/bin', '/usr/sbin'], + } + + exec { 'set selinux to permissive': + command => 'setenforce 0', + onlyif => "which setenforce && getenforce | grep -i 'enforcing'", + path => ['/usr/bin', '/usr/sbin'], + } -> Class['ceph::profile::osd'] + } -hiera_include('ceph_classes') -package_manifest{'/var/lib/tripleo/installed-packages/overcloud_ceph': ensure => present} + if str2bool(hiera('ceph_ipv6', false)) { + $mon_host = hiera('ceph_mon_host_v6') + } else { + $mon_host = hiera('ceph_mon_host') + } + class { '::ceph::profile::params': + mon_host => $mon_host, + } + include ::ceph::conf + include ::ceph::profile::client + include ::ceph::profile::osd + + hiera_include('ceph_classes') + package_manifest{'/var/lib/tripleo/installed-packages/overcloud_ceph': ensure => present} +} diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index cc58cb14..43e87789 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -90,8 +90,17 @@ if str2bool(hiera('nova::use_ipv6', false)) { } else { $vncserver_listen = '0.0.0.0' } -class { '::nova::compute::libvirt' : - vncserver_listen => $vncserver_listen, + +if $rbd_ephemeral_storage { + class { '::nova::compute::libvirt': + libvirt_disk_cachemodes => ['network=writeback'], + libvirt_hw_disk_discard => 'unmap', + vncserver_listen => $vncserver_listen, + } +} else { + class { '::nova::compute::libvirt' : + vncserver_listen => $vncserver_listen, + } } nova_config { diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index ef330e29..536c680f 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -24,15 +24,6 @@ if hiera('step') >= 1 { create_resources(sysctl::value, hiera('sysctl_settings'), {}) Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - $controller_node_ips = split(hiera('controller_node_ips'), ',') - - if $enable_load_balancer { - class { '::tripleo::loadbalancer' : - controller_hosts => $controller_node_ips, - manage_vip => true, - } - } - } if hiera('step') >= 2 { @@ -117,7 +108,6 @@ if hiera('step') >= 2 { include ::nova::db::mysql_api include ::neutron::db::mysql include ::cinder::db::mysql - include ::heat::db::mysql include ::sahara::db::mysql if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' { include ::gnocchi::db::mysql @@ -127,36 +117,6 @@ if hiera('step') >= 2 { include ::aodh::db::mysql } - $rabbit_nodes = hiera('rabbit_node_ips') - if count($rabbit_nodes) > 1 { - - $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false)) - if 
$rabbit_ipv6 { - $rabbit_env = merge(hiera('rabbitmq_environment'), { - 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"' - }) - } else { - $rabbit_env = hiera('rabbitmq_environment') - } - - class { '::rabbitmq': - config_cluster => true, - cluster_nodes => $rabbit_nodes, - tcp_keepalive => false, - config_kernel_variables => hiera('rabbitmq_kernel_variables'), - config_variables => hiera('rabbitmq_config_variables'), - environment_variables => $rabbit_env, - } - rabbitmq_policy { 'ha-all@/': - pattern => '^(?!amq\.).*', - definition => { - 'ha-mode' => 'all', - }, - } - } else { - include ::rabbitmq - } - # pre-install swift here so we can build rings include ::swift @@ -305,17 +265,6 @@ if hiera('step') >= 4 { metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'), } } else { - include ::neutron::agents::l3 - include ::neutron::agents::dhcp - include ::neutron::agents::metadata - - file { '/etc/neutron/dnsmasq-neutron.conf': - content => hiera('neutron_dnsmasq_options'), - owner => 'neutron', - group => 'neutron', - notify => Service['neutron-dhcp-service'], - require => Package['neutron'], - } # If the value of core plugin is set to 'midonet', # skip all the ML2 configuration @@ -358,23 +307,14 @@ if hiera('step') >= 4 { include ::neutron::plugins::ml2::bigswitch::restproxy include ::neutron::agents::bigswitch } - neutron_l3_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); - } - neutron_dhcp_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); - } Service['neutron-server'] -> Service['neutron-ovs-agent-service'] } - Service['neutron-server'] -> Service['neutron-dhcp-service'] - Service['neutron-server'] -> Service['neutron-l3'] Service['neutron-server'] -> Service['neutron-metadata'] } include ::cinder include ::cinder::config - include ::tripleo::ssl::cinder_config include ::cinder::api include ::cinder::glance include ::cinder::scheduler @@ -510,7 +450,6 @@ if hiera('step') >= 4 { } # swift proxy - include ::memcached include ::swift::proxy include ::swift::proxy::proxy_logging include ::swift::proxy::healthcheck @@ -581,16 +520,6 @@ if hiera('step') >= 4 { include ::aodh::listener include ::aodh::client - # Heat - class { '::heat' : - notification_driver => 'messaging', - } - include ::heat::config - include ::heat::api - include ::heat::api_cfn - include ::heat::api_cloudwatch - include ::heat::engine - # Sahara include ::sahara include ::sahara::service::api @@ -654,7 +583,6 @@ if hiera('step') >= 4 { if hiera('step') >= 5 { $nova_enable_db_purge = hiera('nova_enable_db_purge', true) $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true) - $heat_enable_db_purge = hiera('heat_enable_db_purge', true) if $nova_enable_db_purge { include ::nova::cron::archive_deleted_rows @@ -662,25 +590,6 @@ if hiera('step') >= 5 { if $cinder_enable_db_purge { include ::cinder::cron::db_purge } - if $heat_enable_db_purge { - include ::heat::cron::purge_deleted - } - - if downcase(hiera('bootstrap_nodeid')) == $::hostname { - # Class ::heat::keystone::domain has to run on bootstrap node - # because it creates DB entities via API calls. 
- include ::heat::keystone::domain - - Class['::keystone::roles::admin'] -> Class['::heat::keystone::domain'] - } else { - # On non-bootstrap node we don't need to create Keystone resources again - class { '::heat::keystone::domain': - manage_domain => false, - manage_user => false, - manage_role => false, - } - } - } #END STEP 5 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')]) diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 0652a1c6..30345694 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -24,7 +24,6 @@ Service <| tag == 'cinder-service' or tag == 'ceilometer-service' or tag == 'gnocchi-service' or - tag == 'heat-service' or tag == 'neutron-service' or tag == 'nova-service' or tag == 'sahara-service' @@ -46,7 +45,7 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) { $sync_db = false } -$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 6 +$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5 $enable_load_balancer = hiera('enable_load_balancer', true) # When to start and enable services which haven't been Pacemakerized @@ -66,18 +65,6 @@ if hiera('step') >= 1 { include ::ntp } - $controller_node_ips = split(hiera('controller_node_ips'), ',') - $controller_node_names = split(downcase(hiera('controller_node_names')), ',') - if $enable_load_balancer { - class { '::tripleo::loadbalancer' : - controller_hosts => $controller_node_ips, - controller_hosts_names => $controller_node_names, - manage_vip => false, - mysql_clustercheck => true, - haproxy_service_manage => false, - } - } - $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G')) $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false)) if $corosync_ipv6 { @@ -99,6 +86,10 @@ if hiera('step') >= 1 { if $enable_fencing { include ::tripleo::fencing + # enable stonith after all Pacemaker resources have been created + Pcmk_resource<||> -> Class['tripleo::fencing'] + Pcmk_constraint<||> -> Class['tripleo::fencing'] + Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing'] # enable stonith after all fencing devices have been created Class['tripleo::fencing'] -> Class['pacemaker::stonith'] } @@ -110,35 +101,6 @@ if hiera('step') >= 1 { op_params => 'start timeout=200s stop timeout=200s', } - # Only configure RabbitMQ in this step, don't start it yet to - # avoid races where non-master nodes attempt to start without - # config (eg. 
binding on 0.0.0.0) - # The module ignores erlang_cookie if cluster_config is false - $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false)) - if $rabbit_ipv6 { - $rabbit_env = merge(hiera('rabbitmq_environment'), { - 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"' - }) - } else { - $rabbit_env = hiera('rabbitmq_environment') - } - - class { '::rabbitmq': - service_manage => false, - tcp_keepalive => false, - config_kernel_variables => hiera('rabbitmq_kernel_variables'), - config_variables => hiera('rabbitmq_config_variables'), - environment_variables => $rabbit_env, - } -> - file { '/var/lib/rabbitmq/.erlang.cookie': - ensure => file, - owner => 'rabbitmq', - group => 'rabbitmq', - mode => '0400', - content => hiera('rabbitmq::erlang_cookie'), - replace => true, - } - if downcase(hiera('ceilometer_backend')) == 'mongodb' { include ::mongodb::globals include ::mongodb::client @@ -147,11 +109,6 @@ if hiera('step') >= 1 { } } - # Memcached - class {'::memcached' : - service_manage => false, - } - # Redis class { '::redis' : service_manage => false, @@ -235,77 +192,12 @@ if hiera('step') >= 2 { if $pacemaker_master { - if $enable_load_balancer { - - include ::pacemaker::resource_defaults - - # Create an openstack-core dummy resource. See RHBZ 1290121 - pacemaker::resource::ocf { 'openstack-core': - ocf_agent_name => 'heartbeat:Dummy', - clone_params => true, - } - # FIXME: we should not have to access tripleo::loadbalancer class - # parameters here to configure pacemaker VIPs. The configuration - # of pacemaker VIPs could move into puppet-tripleo or we should - # make use of less specific hiera parameters here for the settings. - pacemaker::resource::service { 'haproxy': - clone_params => true, - } - - $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip') - tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_control_vip': - vip_name => 'control', - ip_address => $control_vip, - } - - $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip') - tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_public_vip': - ensure => $public_vip and $public_vip != $control_vip, - vip_name => 'public', - ip_address => $public_vip, - } - - $redis_vip = hiera('redis_vip') - tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_redis_vip': - ensure => $redis_vip and $redis_vip != $control_vip, - vip_name => 'redis', - ip_address => $redis_vip, - } + include ::pacemaker::resource_defaults - - $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip') - tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_internal_api_vip': - ensure => $internal_api_vip and $internal_api_vip != $control_vip, - vip_name => 'internal_api', - ip_address => $internal_api_vip, - } - - $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip') - tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_vip': - ensure => $storage_vip and $storage_vip != $control_vip, - vip_name => 'storage', - ip_address => $storage_vip, - } - - $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip') - tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_mgmt_vip': - ensure => $storage_mgmt_vip and $storage_mgmt_vip != $control_vip, - vip_name => 'storage_mgmt', - ip_address => $storage_mgmt_vip, - } - } - - pacemaker::resource::service { $::memcached::params::service_name : - clone_params => 'interleave=true', - require => Class['::memcached'], - } - - pacemaker::resource::ocf { 'rabbitmq': - ocf_agent_name => 'heartbeat:rabbitmq-cluster', - resource_params => 
'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'', - clone_params => 'ordered=true interleave=true', - meta_params => 'notify=true', - require => Class['::rabbitmq'], + # Create an openstack-core dummy resource. See RHBZ 1290121 + pacemaker::resource::ocf { 'openstack-core': + ocf_agent_name => 'heartbeat:Dummy', + clone_params => true, } if downcase(hiera('ceilometer_backend')) == 'mongodb' { @@ -345,6 +237,16 @@ if hiera('step') >= 2 { } } + $mysql_root_password = hiera('mysql::server::root_password') + $mysql_clustercheck_password = hiera('mysql_clustercheck_password') + # This step is to create a sysconfig clustercheck file with the root user and empty password + # on the first install only (because later on the clustercheck db user will be used) + # We are using exec and not file in order to not have duplicate definition errors in puppet + # when we later set the the file to contain the clustercheck data + exec { 'create-root-sysconfig-clustercheck': + command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck", + unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck', + } exec { 'galera-ready' : command => '/usr/bin/clustercheck >/dev/null', @@ -352,14 +254,7 @@ if hiera('step') >= 2 { tries => 180, try_sleep => 10, environment => ['AVAILABLE_WHEN_READONLY=0'], - require => File['/etc/sysconfig/clustercheck'], - } - - file { '/etc/sysconfig/clustercheck' : - ensure => file, - content => "MYSQL_USERNAME=root\n -MYSQL_PASSWORD=''\n -MYSQL_HOST=localhost\n", + require => Exec['create-root-sysconfig-clustercheck'], } xinetd::service { 'galera-monitor' : @@ -372,7 +267,24 @@ MYSQL_HOST=localhost\n", service_type => 'UNLISTED', user => 'root', group => 'root', - require => File['/etc/sysconfig/clustercheck'], + require => Exec['create-root-sysconfig-clustercheck'], + } + # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck + # to it in a later step. We do this only on one node as it will replicate on + # the other members. We also make sure that the permissions are the minimum necessary + if $pacemaker_master { + mysql_user { 'clustercheck@localhost': + ensure => 'present', + password_hash => mysql_password($mysql_clustercheck_password), + require => Exec['galera-ready'], + } + mysql_grant { 'clustercheck@localhost/*.*': + ensure => 'present', + options => ['GRANT'], + privileges => ['PROCESS'], + table => '*.*', + user => 'clustercheck@localhost', + } } # Create all the database schemas @@ -389,9 +301,6 @@ MYSQL_HOST=localhost\n", class { '::cinder::db::mysql': require => Exec['galera-ready'], } - class { '::heat::db::mysql': - require => Exec['galera-ready'], - } if downcase(hiera('ceilometer_backend')) == 'mysql' { class { '::ceilometer::db::mysql': @@ -466,6 +375,17 @@ MYSQL_HOST=localhost\n", } #END STEP 2 if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { + # At this stage we are guaranteed that the clustercheck db user exists + # so we switch the resource agent to use it. 
+ file { '/etc/sysconfig/clustercheck' : + ensure => file, + mode => '0600', + owner => 'root', + group => 'root', + content => "MYSQL_USERNAME=clustercheck\n +MYSQL_PASSWORD='${mysql_clustercheck_password}'\n +MYSQL_HOST=localhost\n", + } $nova_ipv6 = hiera('nova::use_ipv6', false) if $nova_ipv6 { @@ -588,31 +508,6 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'), } } - if hiera('neutron::enable_dhcp_agent',true) { - class { '::neutron::agents::dhcp' : - manage_service => false, - enabled => false, - } - file { '/etc/neutron/dnsmasq-neutron.conf': - content => hiera('neutron_dnsmasq_options'), - owner => 'neutron', - group => 'neutron', - notify => Service['neutron-dhcp-service'], - require => Package['neutron'], - } - } - if hiera('neutron::enable_l3_agent',true) { - class { '::neutron::agents::l3' : - manage_service => false, - enabled => false, - } - } - if hiera('neutron::enable_metadata_agent',true) { - class { '::neutron::agents::metadata': - manage_service => false, - enabled => false, - } - } include ::neutron::plugins::ml2 class { '::neutron::agents::ml2::ovs': manage_service => false, @@ -644,19 +539,9 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { include ::neutron::plugins::ml2::bigswitch::restproxy include ::neutron::agents::bigswitch } - neutron_l3_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); - } - neutron_dhcp_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); - } - neutron_config { - 'DEFAULT/notification_driver': value => 'messaging'; - } include ::cinder include ::cinder::config - include ::tripleo::ssl::cinder_config class { '::cinder::api': sync_db => $sync_db, manage_service => false, @@ -897,29 +782,6 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } - # Heat - include ::heat::config - class { '::heat' : - sync_db => $sync_db, - notification_driver => 'messaging', - } - class { '::heat::api' : - manage_service => false, - enabled => false, - } - class { '::heat::api_cfn' : - manage_service => false, - enabled => false, - } - class { '::heat::api_cloudwatch' : - manage_service => false, - enabled => false, - } - class { '::heat::engine' : - manage_service => false, - enabled => false, - } - # httpd/apache and horizon # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent class { '::apache' : @@ -1022,9 +884,31 @@ if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { } #END STEP 4 if hiera('step') >= 5 { + # We now make sure that the root db password is set to a random one + # At first installation /root/.my.cnf will be empty and we connect without a root + # password. On second runs or updates /root/.my.cnf will already be populated + # with proper credentials. This step happens on every node because this sql + # statement does not automatically replicate across nodes. 
+ exec { 'galera-set-root-password': + command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root", + } + file { '/root/.my.cnf' : + ensure => file, + mode => '0600', + owner => 'root', + group => 'root', + content => "[client] +user=root +password=\"${mysql_root_password}\" + +[mysql] +user=root +password=\"${mysql_root_password}\"", + require => Exec['galera-set-root-password'], + } + $nova_enable_db_purge = hiera('nova_enable_db_purge', true) $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true) - $heat_enable_db_purge = hiera('heat_enable_db_purge', true) if $nova_enable_db_purge { include ::nova::cron::archive_deleted_rows @@ -1032,9 +916,6 @@ if hiera('step') >= 5 { if $cinder_enable_db_purge { include ::cinder::cron::db_purge } - if $heat_enable_db_purge { - include ::heat::cron::purge_deleted - } if $pacemaker_master { @@ -1047,15 +928,6 @@ if hiera('step') >= 5 { require => [Pacemaker::Resource::Service[$::apache::params::service_name], Pacemaker::Resource::Ocf['openstack-core']], } - pacemaker::constraint::base { 'memcached-then-openstack-core-constraint': - constraint_type => 'order', - first_resource => 'memcached-clone', - second_resource => 'openstack-core-clone', - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service['memcached'], - Pacemaker::Resource::Ocf['openstack-core']], - } pacemaker::constraint::base { 'galera-then-openstack-core-constraint': constraint_type => 'order', first_resource => 'galera-master', @@ -1145,43 +1017,6 @@ if hiera('step') >= 5 { Pacemaker::Resource::Service[$::sahara::params::engine_service_name]], } - if hiera('step') == 5 { - # Neutron - # NOTE(gfidente): Neutron will try to populate the database with some data - # as soon as neutron-server is started; to avoid races we want to make this - # happen only on one node, before normal Pacemaker initialization - # https://bugzilla.redhat.com/show_bug.cgi?id=1233061 - # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec - # will try to start the service while it's already started by Pacemaker - # It would result to a deployment failure since systemd would return 1 to Puppet - # and the overcloud would fail to deploy (6 would be returned). - # This conditional prevents from a race condition during the deployment. 
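The comment block being removed here still describes a useful pattern for handing a systemd service over to Pacemaker: start it once outside the cluster so it can perform its one-time initialization, but only if Pacemaker does not already own the resource, then declare the Pacemaker clone. A cleaned-up sketch of the removed block with a placeholder service name (the real code used $::neutron::params::server_service):

    # Start the service once via systemd, guarded so it is skipped when
    # Pacemaker already manages it, then hand ownership to Pacemaker.
    exec { 'example-server-systemd-start-sleep':
      command => 'systemctl start example-server && /usr/bin/sleep 5',
      path    => '/usr/bin',
      unless  => '/sbin/pcs resource show example-server',
    } ->
    pacemaker::resource::service { 'example-server':
      clone_params => 'interleave=true',
    }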
- # https://bugzilla.redhat.com/show_bug.cgi?id=1290582 - exec { 'neutron-server-systemd-start-sleep' : - command => 'systemctl start neutron-server && /usr/bin/sleep 5', - path => '/usr/bin', - unless => '/sbin/pcs resource show neutron-server', - } -> - pacemaker::resource::service { $::neutron::params::server_service: - clone_params => 'interleave=true', - require => Pacemaker::Resource::Ocf['openstack-core'] - } - } else { - pacemaker::resource::service { $::neutron::params::server_service: - clone_params => 'interleave=true', - require => Pacemaker::Resource::Ocf['openstack-core'] - } - } - if hiera('neutron::enable_l3_agent', true) { - pacemaker::resource::service { $::neutron::params::l3_agent_service: - clone_params => 'interleave=true', - } - } - if hiera('neutron::enable_dhcp_agent', true) { - pacemaker::resource::service { $::neutron::params::dhcp_agent_service: - clone_params => 'interleave=true', - } - } if hiera('neutron::enable_ovs_agent', true) { pacemaker::resource::service { $::neutron::params::ovs_agent_service: clone_params => 'interleave=true', @@ -1192,11 +1027,6 @@ if hiera('step') >= 5 { clone_params => 'interleave=true', } } - if hiera('neutron::enable_metadata_agent', true) { - pacemaker::resource::service { $::neutron::params::metadata_agent_service: - clone_params => 'interleave=true', - } - } if hiera('neutron::enable_ovs_agent', true) { pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: ocf_agent_name => 'neutron:OVSCleanup', @@ -1241,81 +1071,6 @@ if hiera('step') >= 5 { Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], } } - pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::neutron::params::server_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Ocf['openstack-core'], - Pacemaker::Resource::Service[$::neutron::params::server_service]], - } - if hiera('neutron::enable_ovs_agent',true) { - pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::ovs_agent_service}-clone", - second_resource => "${::neutron::params::dhcp_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], - Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], - } - } - if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) { - pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::server_service}-clone", - second_resource => "${::neutron::params::ovs_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::server_service], - Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], - } - - pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': - source => "${::neutron::params::dhcp_agent_service}-clone", - target => "${::neutron::params::ovs_agent_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], - Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], - } - } - if hiera('neutron::enable_dhcp_agent',true) and 
hiera('neutron::enable_l3_agent',true) { - pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::dhcp_agent_service}-clone", - second_resource => "${::neutron::params::l3_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]] - } - pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation': - source => "${::neutron::params::l3_agent_service}-clone", - target => "${::neutron::params::dhcp_agent_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]] - } - } - if hiera('neutron::enable_l3_agent',true) and hiera('neutron::enable_metadata_agent',true) { - pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::l3_agent_service}-clone", - second_resource => "${::neutron::params::metadata_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]] - } - pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation': - source => "${::neutron::params::metadata_agent_service}-clone", - target => "${::neutron::params::l3_agent_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]] - } - } if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint': @@ -1471,11 +1226,6 @@ if hiera('step') >= 5 { pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name : clone_params => 'interleave=true', } - pacemaker::resource::ocf { 'delay' : - ocf_agent_name => 'heartbeat:Delay', - clone_params => 'interleave=true', - resource_params => 'startdelay=10', - } # Fedora doesn't know `require-all` parameter for constraints yet if $::operatingsystem == 'Fedora' { $redis_ceilometer_constraint_params = undef @@ -1547,22 +1297,6 @@ if hiera('step') >= 5 { require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], } - pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::api_service_name}-clone", - second_resource => 'delay-clone', - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation': - source => 'delay-clone', - target => "${::ceilometer::params::api_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Ocf['delay']], - } # Aodh pacemaker::resource::service { 
$::aodh::params::evaluator_service_name : clone_params => 'interleave=true', @@ -1573,22 +1307,6 @@ if hiera('step') >= 5 { pacemaker::resource::service { $::aodh::params::listener_service_name : clone_params => 'interleave=true', } - pacemaker::constraint::base { 'aodh-delay-then-aodh-evaluator-constraint': - constraint_type => 'order', - first_resource => 'delay-clone', - second_resource => "${::aodh::params::evaluator_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::colocation { 'aodh-evaluator-with-aodh-delay-colocation': - source => "${::aodh::params::evaluator_service_name}-clone", - target => 'delay-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], - Pacemaker::Resource::Ocf['delay']], - } pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint': constraint_type => 'order', first_resource => "${::aodh::params::evaluator_service_name}-clone", @@ -1657,77 +1375,6 @@ if hiera('step') >= 5 { Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]], } - # Heat - pacemaker::resource::service { $::heat::params::api_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::heat::params::api_cfn_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::heat::params::engine_service_name : - clone_params => 'interleave=true', - } - pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint': - constraint_type => 'order', - first_resource => "${::heat::params::api_service_name}-clone", - second_resource => "${::heat::params::api_cfn_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], - } - pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation': - source => "${::heat::params::api_cfn_service_name}-clone", - target => "${::heat::params::api_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name], - Pacemaker::Resource::Service[$::heat::params::api_service_name]], - } - pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint': - constraint_type => 'order', - first_resource => "${::heat::params::api_cfn_service_name}-clone", - second_resource => "${::heat::params::api_cloudwatch_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], - } - pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation': - source => "${::heat::params::api_cloudwatch_service_name}-clone", - target => "${::heat::params::api_cfn_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]], - } - pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint': - constraint_type => 'order', - 
first_resource => "${::heat::params::api_cloudwatch_service_name}-clone", - second_resource => "${::heat::params::engine_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::engine_service_name]], - } - pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation': - source => "${::heat::params::engine_service_name}-clone", - target => "${::heat::params::api_cloudwatch_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::engine_service_name]], - } - pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::agent_notification_service_name}-clone", - second_resource => "${::heat::params::api_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]], - } - # Horizon and Keystone pacemaker::resource::service { $::apache::params::service_name: clone_params => 'interleave=true', diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml index 3e8784b7..ca50d91d 100644 --- a/puppet/services/glance-api.yaml +++ b/puppet/services/glance-api.yaml @@ -94,5 +94,9 @@ outputs: glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort} glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword} glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]} + glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]} + glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]} + glance::keystone::auth::password: {get_param: GlancePassword } step_config: | include ::tripleo::profile::base::glance::api diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml new file mode 100644 index 00000000..99eb1074 --- /dev/null +++ b/puppet/services/heat-api-cfn.yaml @@ -0,0 +1,46 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat CloudFormation API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + HeatWorkers: + default: 0 + description: Number of workers for Heat service. + type: number + HeatPassword: + description: The password for the Heat service and db account, used by the Heat services. + type: string + hidden: true + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + +resources: + HeatBase: + type: ./heat-base.yaml + +outputs: + role_data: + description: Role data for the Heat CloudFormation API role. 
+ value: + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - heat::api_cfn::workers: {get_param: HeatWorkers} + heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]} + heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]} + heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]} + heat::keystone::auth_cfn::password: {get_param: HeatPassword} + heat::keystone::auth::region: {get_param: KeystoneRegion} + step_config: | + include ::tripleo::profile::base::heat::api_cfn diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml new file mode 100644 index 00000000..f3d68042 --- /dev/null +++ b/puppet/services/heat-api-cloudwatch.yaml @@ -0,0 +1,33 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat CloudWatch API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + HeatWorkers: + default: 0 + description: Number of workers for Heat service. + type: number + +resources: + HeatBase: + type: ./heat-base.yaml + +outputs: + role_data: + description: Role data for the Heat Cloudwatch API role. + value: + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - heat::api_cloudwatch::workers: {get_param: HeatWorkers} + step_config: | + include ::tripleo::profile::base::heat::api_cloudwatch diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml new file mode 100644 index 00000000..4fc259ac --- /dev/null +++ b/puppet/services/heat-api.yaml @@ -0,0 +1,46 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + HeatWorkers: + default: 0 + description: Number of workers for Heat service. + type: number + HeatPassword: + description: The password for the Heat service and db account, used by the Heat services. + type: string + hidden: true + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + +resources: + HeatBase: + type: ./heat-base.yaml + +outputs: + role_data: + description: Role data for the Heat API role. + value: + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - heat::api::workers: {get_param: HeatWorkers} + heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]} + heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]} + heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]} + heat::keystone::auth::password: {get_param: HeatPassword} + heat::keystone::auth::region: {get_param: KeystoneRegion} + step_config: | + include ::tripleo::profile::base::heat::api diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml new file mode 100644 index 00000000..8617df27 --- /dev/null +++ b/puppet/services/heat-base.yaml @@ -0,0 +1,40 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat base service. Shared for all Heat services. 
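The new heat-api*.yaml templates above all follow the same composable-service shape: config_settings becomes hiera data on the node, and step_config contributes one include line (for example 'include ::tripleo::profile::base::heat::api_cfn') to the step-driven manifest. The included class is expected to gate its own work on the current step. A hypothetical sketch of that shape; the real classes live in the puppet-tripleo module and may differ in detail:

    class profile::example::api_cfn (
      $step = hiera('step'),
    ) {
      if $step >= 4 {
        # ::heat::api_cfn picks up heat::api_cfn::workers from the hiera
        # key published by the service template.
        include ::heat::api_cfn
      }
    }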
+ +parameters: + Debug: + default: '' + description: Set to True to enable debugging on all services. + type: string + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + +outputs: + role_data: + description: Shared role data for the Heat services. + value: + config_settings: + heat::rabbit_userid: {get_param: RabbitUserName} + heat::rabbit_password: {get_param: RabbitPassword} + heat::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + heat::rabbit_port: {get_param: RabbitClientPort} + heat::debug: {get_param: Debug} + heat::enable_proxy_headers_parsing: true diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml new file mode 100644 index 00000000..143d24bb --- /dev/null +++ b/puppet/services/heat-engine.yaml @@ -0,0 +1,62 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat Engine service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + HeatEnableDBPurge: + type: boolean + default: true + description: | + Whether to create cron job for purging soft deleted rows in the Heat database. + HeatWorkers: + default: 0 + description: Number of workers for Heat service. + type: number + HeatPassword: + description: The password for the Heat service and db account, used by the Heat services. + type: string + hidden: true + HeatStackDomainAdminPassword: + description: Password for heat_stack_domain_admin user. + type: string + hidden: true + +resources: + HeatBase: + type: ./heat-base.yaml + +outputs: + role_data: + description: Role data for the Heat Engine role. + value: + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - heat::engine::num_engine_workers: {get_param: HeatWorkers} + tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge} + heat_dsn: &heat_dsn + list_join: + - '' + - - 'mysql+pymysql://heat:' + - {get_param: HeatPassword} + - '@' + - {get_param: MysqlVirtualIPUri} + - '/heat' + heat::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]} + heat::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + heat::keystone_password: {get_param: HeatPassword} + heat::database_connection: *heat_dsn + heat::db::mysql::password: {get_param: HeatPassword} + heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword} + step_config: | + include ::tripleo::profile::base::heat::engine diff --git a/puppet/services/loadbalancer.yaml b/puppet/services/loadbalancer.yaml new file mode 100644 index 00000000..0c1757bf --- /dev/null +++ b/puppet/services/loadbalancer.yaml @@ -0,0 +1,21 @@ +heat_template_version: 2016-04-08 + +description: > + Loadbalancer service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
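A detail that makes these config_settings maps work: keys of the form '<class>::<parameter>' (heat::rabbit_userid, heat::engine::num_engine_workers, and so on, above) are resolved by Puppet's automatic parameter lookup, so simply including the class is enough and no explicit wiring appears in any manifest. A self-contained illustration with a made-up class name:

    class example_api (
      $rabbit_userid   = 'guest',
      $rabbit_password = undef,
    ) {
      notice("would connect to RabbitMQ as ${rabbit_userid}")
    }
    # With hiera data such as:
    #   example_api::rabbit_userid: 'heat'
    #   example_api::rabbit_password: 'secret'
    # 'include example_api' resolves both parameters from hiera without an
    # explicit class {} declaration.
    include example_api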
+ type: json + MysqlVirtualIPUri: + type: string + default: '' + +outputs: + role_data: + description: Role data for the Loadbalancer role. + value: + step_config: | + include ::tripleo::profile::base::loadbalancer diff --git a/puppet/services/memcached.yaml b/puppet/services/memcached.yaml new file mode 100644 index 00000000..1833fbff --- /dev/null +++ b/puppet/services/memcached.yaml @@ -0,0 +1,23 @@ +heat_template_version: 2016-04-08 + +description: > + Memcached service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + +outputs: + role_data: + description: Role data for the Memcached role. + value: + config_settings: + step_config: | + include ::tripleo::profile::base::memcached + diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml new file mode 100644 index 00000000..b34bdd22 --- /dev/null +++ b/puppet/services/neutron-base.yaml @@ -0,0 +1,44 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron base service. Shared for all Neutron agents. + +parameters: + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + NeutronDhcpAgentsPerNetwork: + type: number + default: 3 + description: The number of neutron dhcp agents to schedule per network + Debug: + type: string + default: '' + description: Set to True to enable debugging on all services. + +outputs: + role_data: + description: Role data for the Neutron base service. + value: + config_settings: + neutron::rabbit_password: {get_param: RabbitPassword} + neutron::rabbit_user: {get_param: RabbitUserName} + neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + neutron::rabbit_port: {get_param: RabbitClientPort} + neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} + neutron::debug: {get_param: Debug} diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml new file mode 100644 index 00000000..548b4ba0 --- /dev/null +++ b/puppet/services/neutron-dhcp.yaml @@ -0,0 +1,56 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron DHCP agent configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + NeutronEnableIsolatedMetadata: + default: 'False' + description: If True, DHCP provide metadata route to VM. + type: string + NeutronDnsmasqOptions: + default: 'dhcp-option-force=26,%MTU%' + description: > + Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU + to be set to the value of NeutronTenantMtu, which should be set to account + for tunnel overhead. + type: string + NeutronTenantMtu: + description: > + The default MTU for tenant networks. For VXLAN/GRE tunneling, this should + be at least 50 bytes smaller than the MTU on the physical network. 
This + value will be used to set the MTU on the virtual Ethernet device. + This value will be used to construct the NeutronDnsmasqOptions, since that + will determine the MTU that is assigned to the VM host through DHCP. + default: "1400" + type: string + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron DHCP agent service. + value: + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + - neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf + tripleo::profile::base::neutron::dhcp: + str_replace: + template: {get_param: NeutronDnsmasqOptions} + params: + '%MTU%': {get_param: NeutronTenantMtu} + neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata} + step_config: | + include tripleo::profile::base::neutron::dhcp diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml new file mode 100644 index 00000000..2ea1b19d --- /dev/null +++ b/puppet/services/neutron-l3.yaml @@ -0,0 +1,37 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron L3 agent configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + Debug: + type: string + default: '' + NeutronExternalNetworkBridge: + description: Name of bridge used for external network traffic. + type: string + default: 'br-ex' + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron L3 agent service. + value: + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge} + step_config: | + include tripleo::profile::base::neutron::l3 diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml new file mode 100644 index 00000000..1fe139f3 --- /dev/null +++ b/puppet/services/neutron-metadata.yaml @@ -0,0 +1,45 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Metadata agent configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + NeutronMetadataProxySharedSecret: + description: Shared secret to prevent spoofing + type: string + hidden: true + NeutronWorkers: + default: 0 + description: Number of workers for Neutron service. + type: number + NeutronPassword: + description: The password for the neutron service and db account, used by neutron agents. + type: string + hidden: true + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron Metadata agent service. 
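In neutron-dhcp.yaml above, the dnsmasq options reach the node through hiera with %MTU% already substituted by Heat, replacing the /etc/neutron/dnsmasq-neutron.conf file resource that this same diff removes from the monolithic controller manifest. A hypothetical sketch of the consuming profile; names and parameters are illustrative, the actual class ships in puppet-tripleo:

    class profile::example::neutron_dhcp (
      $dnsmasq_options = undef,
      $step            = hiera('step'),
    ) {
      if $step >= 4 {
        include ::neutron::agents::dhcp
        # Counterpart of the removed controller-manifest resource: write out
        # the already-substituted options for dnsmasq to pick up.
        file { '/etc/neutron/dnsmasq-neutron.conf':
          content => $dnsmasq_options,
          owner   => 'neutron',
          group   => 'neutron',
        }
      }
    }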
+ value: + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + - neutron::agents::metadata::shared_secret: {get_param: NeutronMetadataProxySharedSecret} + neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers} + neutron::agents::metadata::auth_password: {get_param: NeutronPassword} + neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + step_config: | + include tripleo::profile::base::neutron::metadata diff --git a/puppet/services/pacemaker/glance-api.yaml b/puppet/services/pacemaker/glance-api.yaml index 815eb5bf..ad964216 100644 --- a/puppet/services/pacemaker/glance-api.yaml +++ b/puppet/services/pacemaker/glance-api.yaml @@ -56,5 +56,7 @@ outputs: glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype} glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage} glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions} + glance::api::manage_service: false + glance::api::enabled: false step_config: | include ::tripleo::profile::pacemaker::glance diff --git a/puppet/services/pacemaker/glance-registry.yaml b/puppet/services/pacemaker/glance-registry.yaml index 56353459..393fbaaf 100644 --- a/puppet/services/pacemaker/glance-registry.yaml +++ b/puppet/services/pacemaker/glance-registry.yaml @@ -26,7 +26,10 @@ outputs: description: Role data for the Glance role. value: config_settings: - get_attr: [GlanceRegistryBase, role_data, config_settings] + map_merge: + - get_attr: [GlanceRegistryBase, role_data, config_settings] + - glance::registry::manage_service: false + glance::registry::enabled: false # No puppet manifests since glance-registry is included in # ::tripleo::profile::pacemaker::glance which is maintained alongside of # pacemaker/glance-api.yaml. diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml new file mode 100644 index 00000000..ba620f89 --- /dev/null +++ b/puppet/services/pacemaker/heat-api-cfn.yaml @@ -0,0 +1,35 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat CloudFormation API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + +resources: + HeatApiCfnBase: + type: ../heat-api-cfn.yaml + properties: + EndpointMap: {get_param: EndpointMap} + MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} + +outputs: + role_data: + description: Role data for the Heat CloudFormation API role. + value: + config_settings: + map_merge: + - get_attr: [HeatApiCfnBase, role_data, config_settings] + - heat::api_cfn::manage_service: false + heat::api_cfn::enabled: false + step_config: + # No puppet manifests since heat-api-cfn is included in + # ::tripleo::profile::pacemaker::heat which is maintained alongside of + # pacemaker/heat-api.yaml. diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml new file mode 100644 index 00000000..db71891c --- /dev/null +++ b/puppet/services/pacemaker/heat-api-cloudwatch.yaml @@ -0,0 +1,35 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat CloudWatch API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + MysqlVirtualIPUri: + type: string + default: '' + +resources: + HeatApiCloudwatchBase: + type: ../heat-api-cloudwatch.yaml + properties: + EndpointMap: {get_param: EndpointMap} + MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} + +outputs: + role_data: + description: Role data for the Heat Cloudwatch API role. + value: + config_settings: + map_merge: + - get_attr: [HeatApiCloudwatchBase, role_data, config_settings] + - heat::api_cloudwatch::manage_service: false + heat::api_cloudwatch::enabled: false + step_config: + # No puppet manifests since heat-api-cloudwatch is included in + # ::tripleo::profile::pacemaker::heat which is maintained alongside of + # pacemaker/heat-api.yaml. diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml new file mode 100644 index 00000000..b1c37d41 --- /dev/null +++ b/puppet/services/pacemaker/heat-api.yaml @@ -0,0 +1,33 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + +resources: + HeatApiBase: + type: ../heat-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} + +outputs: + role_data: + description: Role data for the Heat API role. + value: + config_settings: + map_merge: + - get_attr: [HeatApiBase, role_data, config_settings] + - heat::api::manage_service: false + heat::api::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::heat diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml new file mode 100644 index 00000000..1e39b363 --- /dev/null +++ b/puppet/services/pacemaker/heat-engine.yaml @@ -0,0 +1,36 @@ +heat_template_version: 2016-04-08 + +description: > + Openstack Heat Engine service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + +resources: + HeatEngineBase: + type: ../heat-engine.yaml + properties: + EndpointMap: {get_param: EndpointMap} + MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} + + +outputs: + role_data: + description: Role data for the Heat engine role. + value: + config_settings: + map_merge: + - get_attr: [HeatEngineBase, role_data, config_settings] + - heat::engine::manage_service: false + heat::engine::enabled: false + step_config: + # No puppet manifests since heat-engine is included in + # ::tripleo::profile::pacemaker::heat which is maintained alongside of + # pacemaker/heat-api.yaml. diff --git a/puppet/services/pacemaker/keystone.yaml b/puppet/services/pacemaker/keystone.yaml index 8fcab15f..db52cae7 100644 --- a/puppet/services/pacemaker/keystone.yaml +++ b/puppet/services/pacemaker/keystone.yaml @@ -28,7 +28,7 @@ outputs: config_settings: map_merge: - get_attr: [KeystoneServiceBase, role_data, config_settings] - #- - # custom keystone hiera goes here if we need it!? 
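The pacemaker variants above repeat one override: '<service>::manage_service: false' plus '<service>::enabled: false' (keystone receives the same treatment just below). The service class still writes its configuration, while starting and cloning the service is left to a Pacemaker profile, which is the split the monolithic manifest used to express inline. A sketch of that split, using heat-api as the example and an illustrative resource name in place of $::heat::params::api_service_name:

    # Configure the service but do not let Puppet start it.
    class { '::heat::api':
      manage_service => false,
      enabled        => false,
    }
    # Let Pacemaker own start/stop, typically from one designated node.
    if hiera('step') >= 5 and $pacemaker_master {
      pacemaker::resource::service { 'openstack-heat-api':
        clone_params => 'interleave=true',
      }
    }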
+ - keystone::manage_service: false + keystone::enabled: false step_config: | include ::tripleo::profile::pacemaker::keystone diff --git a/puppet/services/pacemaker/loadbalancer.yaml b/puppet/services/pacemaker/loadbalancer.yaml new file mode 100644 index 00000000..771b3d9b --- /dev/null +++ b/puppet/services/pacemaker/loadbalancer.yaml @@ -0,0 +1,34 @@ +heat_template_version: 2016-04-08 + +description: > + Loadbalancer service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + +resources: + LoadbalancerServiceBase: + type: ../loadbalancer.yaml + properties: + EndpointMap: {get_param: EndpointMap} + MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} + +outputs: + role_data: + description: Role data for the Loadbalancer pacemaker role. + value: + config_settings: + map_merge: + - get_attr: [LoadbalancerServiceBase, role_data, config_settings] + - tripleo::loadbalancer::haproxy_service_manage: false + tripleo::loadbalancer::mysql_clustercheck: true + tripleo::loadbalancer::manage_vip: false + step_config: | + include ::tripleo::profile::pacemaker::loadbalancer diff --git a/puppet/services/pacemaker/memcached.yaml b/puppet/services/pacemaker/memcached.yaml new file mode 100644 index 00000000..306f805e --- /dev/null +++ b/puppet/services/pacemaker/memcached.yaml @@ -0,0 +1,31 @@ +heat_template_version: 2016-04-08 + +description: > + Mecached service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + +resources: + + MemcachedServiceBase: + type: ../memcached.yaml + +outputs: + role_data: + description: Role data for the Memcached pacemaker role. + value: + config_settings: + map_merge: + - get_attr: [MemcachedServiceBase, role_data, config_settings] + - memcached::service_manage: false + step_config: | + include ::tripleo::profile::pacemaker::memcached + diff --git a/puppet/services/pacemaker/neutron-dhcp.yaml b/puppet/services/pacemaker/neutron-dhcp.yaml new file mode 100644 index 00000000..0e972b28 --- /dev/null +++ b/puppet/services/pacemaker/neutron-dhcp.yaml @@ -0,0 +1,35 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron DHCP service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + +resources: + + NeutronDhcpBase: + type: ../neutron-dhcp.yaml + properties: + EndpointMap: {get_param: EndpointMap} + MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} + +outputs: + role_data: + description: Role data for the Neutron DHCP role. 
+ value: + config_settings: + map_merge: + - get_attr: [NeutronDhcpBase, role_data, config_settings] + - tripleo::profile::pacemaker::neutron::enable_dhcp: True + neutron::agents::dhcp::enabled: false + neutron::agents::dhcp::manage_service: false + step_config: | + include ::tripleo::profile::pacemaker::neutron::dhcp diff --git a/puppet/services/pacemaker/neutron-l3.yaml b/puppet/services/pacemaker/neutron-l3.yaml new file mode 100644 index 00000000..84bff808 --- /dev/null +++ b/puppet/services/pacemaker/neutron-l3.yaml @@ -0,0 +1,33 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron L3 service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + +resources: + + NeutronL3Base: + type: ../neutron-l3.yaml + properties: + EndpointMap: {get_param: EndpointMap} + MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} + +outputs: + role_data: + description: Role data for the Neutron L3 role. + value: + config_settings: + map_merge: + - get_attr: [NeutronL3Base, role_data, config_settings] + - tripleo::profile::pacemaker::neutron::enable_l3: True + step_config: | + include ::tripleo::profile::pacemaker::neutron::l3 diff --git a/puppet/services/pacemaker/neutron-metadata.yaml b/puppet/services/pacemaker/neutron-metadata.yaml new file mode 100644 index 00000000..79baf1ea --- /dev/null +++ b/puppet/services/pacemaker/neutron-metadata.yaml @@ -0,0 +1,33 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Metadata service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + +resources: + + NeutronMetadataBase: + type: ../neutron-metadata.yaml + properties: + EndpointMap: {get_param: EndpointMap} + MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} + +outputs: + role_data: + description: Role data for the Neutron Metadata role. + value: + config_settings: + map_merge: + - get_attr: [NeutronMetadataBase, role_data, config_settings] + - tripleo::profile::pacemaker::neutron::enable_metadata: True + step_config: | + include ::tripleo::profile::pacemaker::neutron::metadata diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml new file mode 100644 index 00000000..613db449 --- /dev/null +++ b/puppet/services/pacemaker/rabbitmq.yaml @@ -0,0 +1,32 @@ +heat_template_version: 2016-04-08 + +description: > + RabbitMQ service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + +resources: + RabbitMQServiceBase: + type: ../rabbitmq.yaml + properties: + EndpointMap: {get_param: EndpointMap} + MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri} + +outputs: + role_data: + description: Role data for the RabbitMQ pacemaker role. 
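pacemaker/rabbitmq.yaml (just below) applies the same idea with rabbitmq::service_manage: false; under Pacemaker, RabbitMQ is typically driven by an OCF agent carrying the HA mirroring policy whose removal from the monolithic manifest opens this section of the diff. A hedged sketch, reusing the removed resource parameters and assuming the heartbeat:rabbitmq-cluster agent name:

    if hiera('step') >= 2 and $pacemaker_master {
      pacemaker::resource::ocf { 'rabbitmq':
        ocf_agent_name  => 'heartbeat:rabbitmq-cluster',
        # Mirror all non-amq.* queues across the cluster.
        resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
        clone_params    => 'ordered=true interleave=true',
        meta_params     => 'notify=true',
        require         => Class['::rabbitmq'],
      }
    }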
+ value: + config_settings: + map_merge: + - get_attr: [RabbitMQServiceBase, role_data, config_settings] + - rabbitmq::service_manage: false + step_config: | + include ::tripleo::profile::pacemaker::rabbitmq diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml new file mode 100644 index 00000000..ae5678a3 --- /dev/null +++ b/puppet/services/rabbitmq.yaml @@ -0,0 +1,42 @@ +heat_template_version: 2016-04-08 + +description: > + RabbitMQ service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MysqlVirtualIPUri: + type: string + default: '' + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitFDLimit: + default: 16384 + description: Configures RabbitMQ FD limit + type: string + RabbitIPv6: + default: false + description: Enable IPv6 in RabbitMQ + type: boolean + +outputs: + role_data: + description: Role data for the RabbitMQ role. + value: + config_settings: + rabbitmq::file_limit: {get_param: RabbitFDLimit} + rabbitmq::default_user: {get_param: RabbitUserName} + rabbitmq::default_pass: {get_param: RabbitPassword} + rabbit_ipv6: {get_param: RabbitIPv6} + step_config: | + include ::tripleo::profile::base::rabbitmq diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml index eb06b241..b262f947 100644 --- a/puppet/swift-storage-post.yaml +++ b/puppet/swift-storage-post.yaml @@ -52,6 +52,10 @@ resources: group: puppet options: enable_debug: {get_param: ConfigDebug} + enable_hiera: True + enable_facter: False + inputs: + - name: step outputs: - name: result config: @@ -65,6 +69,7 @@ resources: servers: {get_param: servers} config: {get_resource: StorageRingbuilderPuppetConfig} input_values: + step: 3 # Note ringbuilder.pp expects >=3 update_identifier: {get_param: NodeConfigIdentifiers} # Note, this should come last, so use depends_on to ensure diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml index 296428db..3f6f4733 100644 --- a/puppet/swift-storage.yaml +++ b/puppet/swift-storage.yaml @@ -220,16 +220,22 @@ resources: properties: ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkDeployment: @@ -256,10 +262,16 @@ resources: - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common + - network 
merge_behavior: deeper datafiles: common: raw_data: {get_file: hieradata/common.yaml} + network: + mapped_data: + net_ip_map: {get_attr: [NetIpMap, net_ip_map]} + net_ip_subnet_map: {get_attr: [NetIpMap, net_ip_subnet_map]} + net_ip_uri_map: {get_attr: [NetIpMap, net_ip_uri_map]} object_extraconfig: mapped_data: {get_param: ObjectStorageExtraConfig} extraconfig: |