Diffstat (limited to 'puppet')
-rw-r--r--  puppet/all-nodes-config.yaml                        | 171
-rw-r--r--  puppet/bootstrap-config.yaml                        |   1
-rw-r--r--  puppet/ceph-storage-puppet.yaml                     |  42
-rw-r--r--  puppet/cinder-storage-puppet.yaml                   |  70
-rw-r--r--  puppet/compute-puppet.yaml                          |  48
-rw-r--r--  puppet/controller-puppet.yaml                       | 269
-rw-r--r--  puppet/hieradata/common.yaml                        |  10
-rw-r--r--  puppet/hieradata/compute.yaml                       |   2
-rw-r--r--  puppet/hieradata/controller.yaml                    |   5
-rw-r--r--  puppet/manifests/overcloud_compute.pp               |   1
-rw-r--r--  puppet/manifests/overcloud_controller.pp            |  57
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp  | 760
-rw-r--r--  puppet/manifests/overcloud_volume.pp                |   1
-rw-r--r--  puppet/swift-storage-puppet.yaml                    |  49
-rw-r--r--  puppet/vip-config.yaml                              |  41
15 files changed, 1271 insertions(+), 256 deletions(-)
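A recurring pattern in the all-nodes-config.yaml hunk below is rendering a comma_delimited_list parameter into a quoted, bracketed string that hiera/Puppet can consume as an array (e.g. ['10.0.0.1','10.0.0.2']). The following is a minimal sketch of that str_replace/list_join combination only; the parameter, resource, and key names here are illustrative and not part of this patch (the patch uses rabbit_node_ips, mongo_node_ips, and so on):

  heat_template_version: 2013-05-23

  parameters:
    example_node_ips:
      type: comma_delimited_list

  resources:
    ExampleConfig:
      type: OS::Heat::StructuredConfig
      properties:
        group: os-apply-config
        config:
          hiera:
            datafiles:
              example:
                mapped_data:
                  # For the input 10.0.0.1,10.0.0.2 this renders as the
                  # literal string ['10.0.0.1','10.0.0.2']
                  example_node_ips:
                    str_replace:
                      template: "['SERVERS_LIST']"
                      params:
                        SERVERS_LIST:
                          list_join:
                            - "','"
                            - {get_param: example_node_ips}
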
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index 963835e9..c50d6820 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -16,6 +16,40 @@ parameters: type: comma_delimited_list controller_names: type: comma_delimited_list + rabbit_node_ips: + type: comma_delimited_list + mongo_node_ips: + type: comma_delimited_list + redis_node_ips: + type: comma_delimited_list + memcache_node_ips: + type: comma_delimited_list + mysql_node_ips: + type: comma_delimited_list + horizon_node_ips: + type: comma_delimited_list + heat_api_node_ips: + type: comma_delimited_list + swift_proxy_node_ips: + type: comma_delimited_list + ceilometer_api_node_ips: + type: comma_delimited_list + nova_api_node_ips: + type: comma_delimited_list + nova_metadata_node_ips: + type: comma_delimited_list + glance_api_node_ips: + type: comma_delimited_list + glance_registry_node_ips: + type: comma_delimited_list + cinder_api_node_ips: + type: comma_delimited_list + neutron_api_node_ips: + type: comma_delimited_list + keystone_public_api_node_ips: + type: comma_delimited_list + keystone_admin_api_node_ips: + type: comma_delimited_list resources: @@ -65,19 +99,136 @@ resources: SERVERS_LIST: list_join: - "','" - - {get_param: controller_ips} + - {get_param: rabbit_node_ips} mongo_node_ips: - list_join: - - ',' - - {get_param: controller_ips} + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: mongo_node_ips} redis_node_ips: - list_join: - - ',' - - {get_param: controller_ips} + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: redis_node_ips} memcache_node_ips: - list_join: - - ',' - - {get_param: controller_ips} + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: memcache_node_ips} + mysql_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: mysql_node_ips} + horizon_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: horizon_node_ips} + heat_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: heat_api_node_ips} + swift_proxy_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: swift_proxy_node_ips} + ceilometer_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: ceilometer_api_node_ips} + nova_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: nova_api_node_ips} + nova_metadata_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: nova_metadata_node_ips} + glance_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: glance_api_node_ips} + glance_registry_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: glance_registry_node_ips} + cinder_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: cinder_api_node_ips} + neutron_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + 
list_join: + - "','" + - {get_param: neutron_api_node_ips} + keystone_public_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: keystone_public_api_node_ips} + keystone_admin_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: keystone_admin_api_node_ips} + # NOTE(gfidente): interpolation with %{} in the # hieradata file can't be used as it returns string ceilometer::rabbit_hosts: *rabbit_nodes_array diff --git a/puppet/bootstrap-config.yaml b/puppet/bootstrap-config.yaml index c88ed408..d88eebdf 100644 --- a/puppet/bootstrap-config.yaml +++ b/puppet/bootstrap-config.yaml @@ -12,6 +12,7 @@ resources: BootstrapNodeConfigImpl: type: OS::Heat::StructuredConfig properties: + group: os-apply-config config: hiera: datafiles: diff --git a/puppet/ceph-storage-puppet.yaml b/puppet/ceph-storage-puppet.yaml index 00dbca01..2250f429 100644 --- a/puppet/ceph-storage-puppet.yaml +++ b/puppet/ceph-storage-puppet.yaml @@ -28,6 +28,20 @@ parameters: default: 'false' description: Set to true to enable package installation via Puppet type: boolean + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. + type: json + UpdateIdentifier: + default: '' + type: string + description: > + Setting to a previously unused value during stack-update will trigger + package update on all nodes + Hostname: + type: string + default: '' # Defaults to Heat created hostname resources: CephStorage: @@ -41,6 +55,7 @@ resources: - network: ctlplane user_data_format: SOFTWARE_CONFIG user_data: {get_resource: NodeUserData} + name: {get_param: Hostname} NodeUserData: type: OS::TripleO::NodeUserData @@ -61,6 +76,12 @@ resources: StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + NetIpSubnetMap: + type: OS::TripleO::Network::Ports::NetIpMap + properties: + StorageIp: {get_attr: [StoragePort, ip_subnet]} + StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_subnet]} + NetworkDeployment: type: OS::TripleO::SoftwareDeployment properties: @@ -69,6 +90,7 @@ resources: CephStorageDeployment: type: OS::Heat::StructuredDeployment + depends_on: NetworkDeployment properties: config: {get_resource: CephStorageConfig} server: {get_resource: CephStorage} @@ -79,6 +101,8 @@ resources: params: server: {get_param: NtpServer} enable_package_install: {get_param: EnablePackageInstall} + ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} + ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} CephStorageConfig: type: OS::Heat::StructuredConfig @@ -88,7 +112,6 @@ resources: hiera: hierarchy: - heat_config_%{::deploy_config_name} - - cephstorage - ceph_cluster # provided by CephClusterConfig - ceph - '"%{::osfamily}"' @@ -98,16 +121,29 @@ resources: raw_data: {get_file: hieradata/common.yaml} ceph: raw_data: {get_file: hieradata/ceph.yaml} - cephstorage: mapped_data: ntp::servers: {get_input: ntp_servers} enable_package_install: {get_input: enable_package_install} + ceph::profile::params::cluster_network: {get_input: ceph_cluster_network} + ceph::profile::params::public_network: {get_input: ceph_public_network} + + UpdateConfig: + type: OS::TripleO::Tasks::PackageUpdate + + UpdateDeployment: + type: OS::Heat::SoftwareDeployment + 
properties: + config: {get_resource: UpdateConfig} + server: {get_resource: CephStorage} + input_values: + update_identifier: + get_param: UpdateIdentifier outputs: hosts_entry: value: str_replace: - template: "IP HOST" + template: "IP HOST.localdomain HOST" params: IP: {get_attr: [CephStorage, networks, ctlplane, 0]} HOST: {get_attr: [CephStorage, name]} diff --git a/puppet/cinder-storage-puppet.yaml b/puppet/cinder-storage-puppet.yaml index c69a0f3c..a368ffd1 100644 --- a/puppet/cinder-storage-puppet.yaml +++ b/puppet/cinder-storage-puppet.yaml @@ -16,11 +16,16 @@ parameters: default: 5000 description: The size of the loopback file used by the cinder LVM driver. type: number + CinderPassword: + default: unset + description: The password for the cinder service and db account, used by cinder-api. + type: string + hidden: true Debug: default: '' description: Set to True to enable debugging on all services. type: string - VirtualIP: + VirtualIP: # deprecated. Use per service VIPs instead. default: '' type: string ExtraConfig: @@ -70,6 +75,10 @@ parameters: default: "9292" description: Glance port. type: string + GlanceProtocol: + default: http + description: Protocol to use when connecting to glance, set to https for SSL. + type: string KeyName: default: default description: Name of an existing EC2 KeyPair to enable SSH access to the instances @@ -106,6 +115,26 @@ parameters: default: 'false' description: Set to true to enable package installation via Puppet type: boolean + UpdateIdentifier: + default: '' + type: string + description: > + Setting to a previously unused value during stack-update will trigger + package update on all nodes + Hostname: + type: string + default: '' # Defaults to Heat created hostname + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. 
+ type: json + GlanceApiVirtualIP: + type: string + default: '' + MysqlVirtualIP: + type: string + default: '' resources: BlockStorage: @@ -119,6 +148,7 @@ resources: - network: ctlplane user_data_format: SOFTWARE_CONFIG user_data: {get_resource: NodeUserData} + name: {get_param: Hostname} NodeUserData: type: OS::TripleO::NodeUserData @@ -145,6 +175,13 @@ resources: StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + NetIpMap: + type: OS::TripleO::Network::Ports::NetIpMap + properties: + InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + StorageIp: {get_attr: [StoragePort, ip_address]} + StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + NetworkDeployment: type: OS::TripleO::SoftwareDeployment properties: @@ -153,12 +190,13 @@ resources: BlockStorageDeployment: type: OS::Heat::StructuredDeployment + depends_on: NetworkDeployment properties: server: {get_resource: BlockStorage} config: {get_resource: BlockStorageConfig} input_values: debug: {get_param: Debug} - cinder_dsn: {list_join: ['', ['mysql://cinder:unset@', {get_param: VirtualIP} , '/cinder']]} + cinder_dsn: {list_join: ['', ['mysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIP} , '/cinder']]} snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} cinder_lvm_loop_device_size: @@ -168,6 +206,15 @@ resources: size: {get_param: CinderLVMLoopDeviceSize} cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend} cinder_iscsi_helper: {get_param: CinderISCSIHelper} + cinder_iscsi_ip_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]} + glance_api_servers: + list_join: + - '' + - - {get_param: GlanceProtocol} + - '://' + - {get_param: GlanceApiVirtualIP} + - ':' + - {get_param: GlancePort} rabbit_username: {get_param: RabbitUserName} rabbit_password: {get_param: RabbitPassword} rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} @@ -178,7 +225,6 @@ resources: params: server: {get_param: NtpServer} enable_package_install: {get_param: EnablePackageInstall} - signal_transport: NO_SIGNAL # Map heat metadata into hiera datafiles BlockStorageConfig: @@ -198,8 +244,6 @@ resources: raw_data: {get_file: hieradata/common.yaml} volume: raw_data: {get_file: hieradata/volume.yaml} - oac_data: - cinder_iscsi_ip_address: local-ipv4 mapped_data: # Cinder cinder::debug: {get_input: debug} @@ -211,16 +255,30 @@ resources: cinder::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} cinder::rabbit_port: {get_input: rabbit_client_port} cinder_enable_iscsi_backend: {get_input: cinder_enable_iscsi_backend} + cinder_iscsi_ip_address: {get_input: cinder_iscsi_ip_address} + cinder::glance::glance_api_servers: {get_input: glance_api_servers} ntp::servers: {get_input: ntp_servers} enable_package_install: {get_input: enable_package_install} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} + UpdateConfig: + type: OS::TripleO::Tasks::PackageUpdate + + UpdateDeployment: + type: OS::Heat::SoftwareDeployment + properties: + config: {get_resource: UpdateConfig} + server: {get_resource: BlockStorage} + input_values: + update_identifier: + get_param: UpdateIdentifier + outputs: hosts_entry: value: str_replace: - template: "IP HOST" + template: "IP HOST.localdomain HOST" params: IP: {get_attr: [BlockStorage, networks, ctlplane, 0]} HOST: {get_attr: 
[BlockStorage, name]} diff --git a/puppet/compute-puppet.yaml b/puppet/compute-puppet.yaml index 28a4e045..b34e7a6f 100644 --- a/puppet/compute-puppet.yaml +++ b/puppet/compute-puppet.yaml @@ -132,7 +132,7 @@ parameters: The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the Neutron documentation for permitted values. Defaults to permitting any VLAN on the 'datacentre' physical network (See NeutronBridgeMappings). - type: string + type: comma_delimited_list NeutronPassword: default: unset description: The password for the neutron service account, used by neutron agents. @@ -252,6 +252,15 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. type: json + UpdateIdentifier: + default: '' + type: string + description: > + Setting to a previously unused value during stack-update will trigger + package update on all nodes + Hostname: + type: string + default: '' # Defaults to Heat created hostname resources: @@ -268,6 +277,7 @@ resources: - network: ctlplane user_data_format: SOFTWARE_CONFIG user_data: {get_resource: NodeUserData} + name: {get_param: Hostname} NodeUserData: type: OS::TripleO::NodeUserData @@ -304,7 +314,6 @@ resources: NetworkDeployment: type: OS::TripleO::SoftwareDeployment properties: - signal_transport: NO_SIGNAL config: {get_resource: NetworkConfig} server: {get_resource: NovaCompute} input_values: @@ -332,8 +341,6 @@ resources: raw_data: {get_file: hieradata/ceph.yaml} compute: raw_data: {get_file: hieradata/compute.yaml} - oac_data: - nova::compute::vncserver_proxyclient_address: local-ipv4 mapped_data: nova::debug: {get_input: debug} nova::rabbit_userid: {get_input: rabbit_username} @@ -346,6 +353,7 @@ resources: nova::compute::vncproxy_host: {get_input: nova_public_ip} nova_enable_rbd_backend: {get_input: nova_enable_rbd_backend} nova_password: {get_input: nova_password} + nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address} ceilometer::debug: {get_input: debug} ceilometer::rabbit_userid: {get_input: rabbit_username} ceilometer::rabbit_password: {get_input: rabbit_password} @@ -365,11 +373,11 @@ resources: neutron::rabbit_port: {get_input: rabbit_client_port} neutron_flat_networks: {get_input: neutron_flat_networks} neutron_host: {get_input: neutron_host} - neutron::agents::ml2::ovs::local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronLocalIp]}]} + neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} neutron_tenant_network_type: {get_input: neutron_tenant_network_type} neutron_tunnel_types: {get_input: neutron_tunnel_types} - neutron::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} + neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} neutron_bridge_mappings: {get_input: neutron_bridge_mappings} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} neutron_physical_bridge: {get_input: neutron_physical_bridge} @@ -388,8 +396,8 @@ resources: NovaComputeDeployment: type: OS::TripleO::SoftwareDeployment + depends_on: NetworkDeployment properties: - signal_transport: NO_SIGNAL config: {get_resource: NovaComputeConfig} server: {get_resource: NovaCompute} input_values: @@ -400,6 +408,7 @@ resources: nova_api_host: {get_param: NovaApiHost} nova_password: {get_param: NovaPassword} nova_enable_rbd_backend: {get_param: NovaEnableRbdBackend} + nova_vnc_proxyclient_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, 
NovaVncProxyNetwork]}]} ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} ceilometer_password: {get_param: CeilometerPassword} ceilometer_compute_agent: {get_param: CeilometerComputeAgent} @@ -421,10 +430,17 @@ resources: - {get_param: GlancePort} neutron_flat_networks: {get_param: NeutronFlatNetworks} neutron_host: {get_param: NeutronHost} - neutron_local_ip: {get_attr: [NovaCompute, networks, ctlplane, 0]} + neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]} neutron_tenant_network_type: {get_param: NeutronNetworkType} neutron_tunnel_types: {get_param: NeutronTunnelTypes} - neutron_network_vlan_ranges: {get_param: NeutronNetworkVLANRanges} + neutron_network_vlan_ranges: + str_replace: + template: "['RANGES']" + params: + RANGES: + list_join: + - "','" + - {get_param: NeutronNetworkVLANRanges} neutron_bridge_mappings: {get_param: NeutronBridgeMappings} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} neutron_physical_bridge: {get_param: NeutronPhysicalBridge} @@ -459,6 +475,18 @@ resources: server: {get_param: NtpServer} enable_package_install: {get_param: EnablePackageInstall} + UpdateConfig: + type: OS::TripleO::Tasks::PackageUpdate + + UpdateDeployment: + type: OS::Heat::SoftwareDeployment + properties: + config: {get_resource: UpdateConfig} + server: {get_resource: NovaCompute} + input_values: + update_identifier: + get_param: UpdateIdentifier + outputs: ip_address: description: IP address of the server in the ctlplane network @@ -480,7 +508,7 @@ outputs: Server's IP address and hostname in the /etc/hosts format value: str_replace: - template: "IP HOST" + template: "IP HOST.localdomain HOST" params: IP: {get_attr: [NovaCompute, networks, ctlplane, 0]} HOST: {get_attr: [NovaCompute, name]} diff --git a/puppet/controller-puppet.yaml b/puppet/controller-puppet.yaml index b012b4f4..3d7ecd58 100644 --- a/puppet/controller-puppet.yaml +++ b/puppet/controller-puppet.yaml @@ -11,7 +11,7 @@ parameters: hidden: true AdminToken: default: unset - description: The keystone auth secret. + description: The keystone auth secret and db password. type: string hidden: true CeilometerBackend: @@ -25,7 +25,7 @@ parameters: hidden: true CeilometerPassword: default: unset - description: The password for the ceilometer service account. + description: The password for the ceilometer service and db account. type: string hidden: true CinderEnableIscsiBackend: @@ -46,9 +46,14 @@ parameters: type: number CinderPassword: default: unset - description: The password for the cinder service account, used by cinder-api. + description: The password for the cinder service and db account, used by cinder-api. type: string hidden: true + CinderBackendConfig: + default: {} + description: Contains parameters to configure Cinder backends. Typically + set via parameter_defaults in the resource registry. + type: json CloudName: default: '' description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org @@ -71,11 +76,6 @@ parameters: default: true description: Whether to use Galera instead of regular MariaDB. type: boolean - EnablePacemaker: - default: false - description: If enabled services will be monitored by Pacemaker; it - will manage VIPs as well, in place of Keepalived. 
- type: boolean EnableCephStorage: default: false description: Whether to deploy Ceph Storage (OSD) on the Controller @@ -137,7 +137,7 @@ parameters: default: '' GlancePassword: default: unset - description: The password for the glance service account, used by the glance services. + description: The password for the glance service and db account, used by the glance services. type: string hidden: true GlancePort: @@ -157,7 +157,7 @@ parameters: - allowed_values: ['swift', 'file', 'rbd'] HeatPassword: default: unset - description: The password for the Heat service account, used by the Heat services. + description: The password for the Heat service and db account, used by the Heat services. type: string hidden: true HeatStackDomainAdminPassword: @@ -244,7 +244,7 @@ parameters: default: 'dvr_snat' description: Agent mode for the neutron-l3-agent on the controller hosts type: string - NeutronL3HA: #FIXME this isn't wired in + NeutronL3HA: default: 'False' description: Whether to enable l3-agent HA type: string @@ -287,10 +287,10 @@ parameters: The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the Neutron documentation for permitted values. Defaults to permitting any VLAN on the 'datacentre' physical network (See NeutronBridgeMappings). - type: string + type: comma_delimited_list NeutronPassword: default: unset - description: The password for the neutron service account, used by neutron agents. + description: The password for the neutron service and db account, used by neutron agents. type: string hidden: true NeutronPublicInterface: @@ -327,7 +327,7 @@ parameters: type: string NovaPassword: default: unset - description: The password for the nova service account, used by nova-api. + description: The password for the nova service and db account, used by nova-api. type: string hidden: true NtpServer: @@ -342,7 +342,7 @@ parameters: Specifies the interface where the public-facing virtual ip will be assigned. This should be int_public when a VLAN is being used. type: string - PublicVirtualIP: + PublicVirtualIP: # DEPRECATED: use per service settings instead type: string default: '' # Has to be here because of the ignored empty value bug RabbitCookie: @@ -368,6 +368,9 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number + RedisVirtualIP: + type: string + default: '' # Has to be here because of the ignored empty value bug SnmpdReadonlyUserName: default: ro_snmp_user description: The user name for SNMPd with readonly rights running on all Overcloud nodes @@ -419,9 +422,24 @@ parameters: type: number default: 3 description: How many replicas to use in the swift rings. - VirtualIP: + VirtualIP: # DEPRECATED: use per service settings instead type: string default: '' # Has to be here because of the ignored empty value bug + HeatApiVirtualIP: + type: string + default: '' + GlanceApiVirtualIP: + type: string + default: '' + MysqlVirtualIP: + type: string + default: '' + KeystonePublicApiVirtualIP: + type: string + default: '' + NeutronApiVirtualIP: + type: string + default: '' EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -431,6 +449,15 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. 
type: json + UpdateIdentifier: + default: '' + type: string + description: > + Setting to a previously unused value during stack-update will trigger + package update on all nodes + Hostname: + type: string + default: '' # Defaults to Heat created hostname resources: @@ -445,6 +472,7 @@ resources: - network: ctlplane user_data_format: SOFTWARE_CONFIG user_data: {get_resource: NodeUserData} + name: {get_param: Hostname} NodeUserData: type: OS::TripleO::NodeUserData @@ -483,6 +511,15 @@ resources: StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} TenantIp: {get_attr: [TenantPort, ip_address]} + NetIpSubnetMap: + type: OS::TripleO::Network::Ports::NetIpMap + properties: + ExternalIp: {get_attr: [ExternalPort, ip_subnet]} + InternalApiIp: {get_attr: [InternalApiPort, ip_subnet]} + StorageIp: {get_attr: [StoragePort, ip_subnet]} + StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_subnet]} + TenantIp: {get_attr: [TenantPort, ip_subnet]} + NetworkConfig: type: OS::TripleO::Controller::Net::SoftwareConfig properties: @@ -495,7 +532,6 @@ resources: NetworkDeployment: type: OS::TripleO::SoftwareDeployment properties: - signal_transport: NO_SIGNAL config: {get_resource: NetworkConfig} server: {get_resource: Controller} input_values: @@ -504,32 +540,30 @@ resources: ControllerDeployment: type: OS::TripleO::SoftwareDeployment + depends_on: NetworkDeployment properties: - signal_transport: NO_SIGNAL config: {get_resource: ControllerConfig} server: {get_resource: Controller} input_values: bootstack_nodeid: {get_attr: [Controller, name]} - controller_host: {get_attr: [Controller, networks, ctlplane, 0]} - controller_virtual_ip: {get_param: VirtualIP} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} heat.watch_server_url: list_join: - '' - - 'http://' - - {get_param: VirtualIP} + - {get_param: HeatApiVirtualIP} - ':8003' heat.metadata_server_url: list_join: - '' - - 'http://' - - {get_param: VirtualIP} + - {get_param: HeatApiVirtualIP} - ':8000' heat.waitcondition_server_url: list_join: - '' - - 'http://' - - {get_param: VirtualIP} + - {get_param: HeatApiVirtualIP} - ':8000/v1/waitcondition' heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey} horizon_secret: {get_param: HorizonSecret} @@ -542,33 +576,39 @@ resources: cinder_password: {get_param: CinderPassword} cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend} cinder_iscsi_helper: {get_param: CinderISCSIHelper} + cinder_backend_config: {get_param: CinderBackendConfig} cinder_dsn: list_join: - '' - - - 'mysql://cinder:unset@' - - {get_param: VirtualIP} + - - 'mysql://cinder:' + - {get_param: CinderPassword} + - '@' + - {get_param: MysqlVirtualIP} - '/cinder' glance_port: {get_param: GlancePort} - glance_protocol: {get_param: GlanceProtocol} glance_password: {get_param: GlancePassword} glance_backend: {get_param: GlanceBackend} - glance_swift_store_auth_address: {list_join: ['', ['http://', {get_param: VirtualIP} , ':5000/v2.0']]} glance_notifier_strategy: {get_param: GlanceNotifierStrategy} glance_log_file: {get_param: GlanceLogFile} glance_dsn: list_join: - '' - - - 'mysql://glance:unset@' - - {get_param: VirtualIP} + - - 'mysql://glance:' + - {get_param: GlancePassword} + - '@' + - {get_param: MysqlVirtualIP} - '/glance' heat_password: {get_param: HeatPassword} heat_stack_domain_admin_password: {get_param: HeatStackDomainAdminPassword} heat_dsn: list_join: - '' - - - 'mysql://heat:unset@' - - {get_param: VirtualIP} + - - 'mysql://heat:' + - {get_param: HeatPassword} + - '@' + - {get_param: MysqlVirtualIP} - 
'/heat' + keystone_auth_address: {list_join: ['', ['http://', {get_param: KeystonePublicApiVirtualIP} , ':5000/v2.0']]} keystone_ca_certificate: {get_param: KeystoneCACertificate} keystone_signing_key: {get_param: KeystoneSigningKey} keystone_signing_certificate: {get_param: KeystoneSigningCertificate} @@ -577,20 +617,22 @@ resources: keystone_dsn: list_join: - '' - - - 'mysql://keystone:unset@' - - {get_param: VirtualIP} + - - 'mysql://keystone:' + - {get_param: AdminToken} + - '@' + - {get_param: MysqlVirtualIP} - '/keystone' keystone_identity_uri: list_join: - '' - - 'http://' - - {get_param: VirtualIP} + - {get_param: KeystonePublicApiVirtualIP} - ':35357/' keystone_auth_uri: list_join: - '' - - 'http://' - - {get_param: VirtualIP} + - {get_param: KeystonePublicApiVirtualIP} - ':5000/v2.0/' enable_galera: {get_param: EnableGalera} enable_ceph_storage: {get_param: EnableCephStorage} @@ -609,7 +651,14 @@ resources: neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers} neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} neutron_l3_ha: {get_param: NeutronL3HA} - neutron_network_vlan_ranges: {get_param: NeutronNetworkVLANRanges} + neutron_network_vlan_ranges: + str_replace: + template: "['RANGES']" + params: + RANGES: + list_join: + - "','" + - {get_param: NeutronNetworkVLANRanges} neutron_bridge_mappings: {get_param: NeutronBridgeMappings} neutron_public_interface: {get_param: NeutronPublicInterface} neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} @@ -622,29 +671,37 @@ resources: neutron_dsn: list_join: - '' - - - 'mysql://neutron:unset@' - - {get_param: VirtualIP} + - - 'mysql://neutron:' + - {get_param: NeutronPassword} + - '@' + - {get_param: MysqlVirtualIP} - '/ovs_neutron?charset=utf8' neutron_url: list_join: - '' - - 'http://' - - {get_param: VirtualIP} + - {get_param: NeutronApiVirtualIP} - ':9696' neutron_admin_auth_url: list_join: - '' - - 'http://' - - {get_param: VirtualIP} + - {get_param: KeystonePublicApiVirtualIP} - ':35357/v2.0' ceilometer_backend: {get_param: CeilometerBackend} ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} ceilometer_password: {get_param: CeilometerPassword} + ceilometer_coordination_url: + list_join: + - '' + - - 'redis://' + - {get_param: RedisVirtualIP} + - ':6379' ceilometer_dsn: list_join: - '' - - 'mysql://ceilometer:unset@' - - {get_param: VirtualIP} + - {get_param: MysqlVirtualIP} - '/ceilometer' snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} @@ -652,8 +709,10 @@ resources: nova_dsn: list_join: - '' - - - 'mysql://nova:unset@' - - {get_param: VirtualIP} + - - 'mysql://nova:' + - {get_param: NovaPassword} + - '@' + - {get_param: MysqlVirtualIP} - '/nova' pcsd_password: {get_param: PcsdPassword} rabbit_username: {get_param: RabbitUserName} @@ -668,7 +727,6 @@ resources: server: {get_param: NtpServer} control_virtual_interface: {get_param: ControlVirtualInterface} public_virtual_interface: {get_param: PublicVirtualInterface} - public_virtual_ip: {get_param: PublicVirtualIP} swift_hash_suffix: {get_param: SwiftHashSuffix} swift_password: {get_param: SwiftPassword} swift_part_power: {get_param: SwiftPartPower} @@ -676,6 +734,38 @@ resources: swift_min_part_hours: {get_param: SwiftMinPartHours} swift_mount_check: {get_param: SwiftMountCheck} enable_package_install: {get_param: EnablePackageInstall} + swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, 
SwiftProxyNetwork]}]} + swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} + cinder_iscsi_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]} + cinder_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]} + glance_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} + glance_registry_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]} + glance_api_servers: + list_join: + - '' + - - {get_param: GlanceProtocol} + - '://' + - {get_param: GlanceApiVirtualIP} + - ':' + - {get_param: GlancePort} + heat_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} + keystone_public_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} + keystone_admin_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} + mongo_db_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MongoDbNetwork]}]} + neutron_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} + neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]} + ceilometer_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} + nova_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} + nova_metadata_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]} + horizon_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]} + rabbitmq_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]} + redis_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RedisNetwork]}]} + redis_vip: {get_param: RedisVirtualIP} + memcached_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]} + mysql_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} + ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} + ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} + ceph_public_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} # Map heat metadata into hiera datafiles ControllerConfig: @@ -693,6 +783,7 @@ resources: - ceph - bootstrap_node # provided by BootstrapNodeConfig - all_nodes # provided by allNodesConfig + - vip_data # provided by vip-config - '"%{::osfamily}"' - common datafiles: @@ -700,22 +791,25 @@ resources: raw_data: {get_file: hieradata/common.yaml} ceph: raw_data: {get_file: hieradata/ceph.yaml} + mapped_data: + ceph::profile::params::cluster_network: {get_input: ceph_cluster_network} + ceph::profile::params::public_network: {get_input: ceph_public_network} + ceph::mon::public_addr: {get_input: ceph_public_ip} object: raw_data: {get_file: hieradata/object.yaml} controller: raw_data: {get_file: hieradata/controller.yaml} mapped_data: # data supplied directly to this deployment configuration, etc bootstack_nodeid: {get_input: bootstack_nodeid} - controller_host: {get_input: controller_host} #local-ipv4 # Pacemaker hacluster_pwd: {get_input: pcsd_password} # Swift - 
swift::proxy::proxy_local_net_ip: {get_input: controller_host} + swift::proxy::proxy_local_net_ip: {get_input: swift_proxy_network} swift::proxy::authtoken::auth_uri: {get_input: keystone_auth_uri} swift::proxy::authtoken::identity_uri: {get_input: keystone_identity_uri} - swift::storage::all::storage_local_net_ip: {get_input: controller_host} + swift::storage::all::storage_local_net_ip: {get_input: swift_management_network} swift::swift_hash_suffix: {get_input: swift_hash_suffix} swift::proxy::authtoken::admin_password: {get_input: swift_password} tripleo::ringbuilder::part_power: {get_input: swift_part_power} @@ -731,41 +825,40 @@ resources: cinder_enable_rbd_backend: {get_input: cinder_enable_rbd_backend} cinder_lvm_loop_device_size: {get_input: cinder_lvm_loop_device_size} cinder_iscsi_helper: {get_input: cinder_iscsi_helper} - cinder_iscsi_ip_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]} + cinder_iscsi_ip_address: {get_input: cinder_iscsi_network} cinder::database_connection: {get_input: cinder_dsn} cinder::api::keystone_password: {get_input: cinder_password} cinder::api::auth_uri: {get_input: keystone_auth_uri} cinder::api::identity_uri: {get_input: keystone_identity_uri} - cinder::api::bind_host: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]} + cinder::api::bind_host: {get_input: cinder_api_network} cinder::rabbit_userid: {get_input: rabbit_username} cinder::rabbit_password: {get_input: rabbit_password} cinder::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} cinder::rabbit_port: {get_input: rabbit_client_port} cinder::debug: {get_input: debug} cinder_enable_iscsi_backend: {get_input: cinder_enable_iscsi_backend} + cinder::glance::glance_api_servers: {get_input: glance_api_servers} + cinder_backend_config: {get_input: CinderBackendConfig} # Glance glance::api::bind_port: {get_input: glance_port} - glance::api::bind_host: {get_input: controller_host} + glance::api::bind_host: {get_input: glance_api_network} glance::api::auth_uri: {get_input: keystone_auth_uri} glance::api::identity_uri: {get_input: keystone_identity_uri} - glance::api::registry_host: {get_input: controller_host} + glance::api::registry_host: {get_input: glance_registry_network} glance::api::keystone_password: {get_input: glance_password} glance::api::debug: {get_input: debug} - # used to construct glance_api_servers - glance_port: {get_input: glance_port} - glance_protocol: {get_input: glance_protocol} glance_notifier_strategy: {get_input: glance_notifier_strategy} glance_log_file: {get_input: glance_log_file} glance_log_file: {get_input: glance_log_file} glance::api::database_connection: {get_input: glance_dsn} glance::registry::keystone_password: {get_input: glance_password} glance::registry::database_connection: {get_input: glance_dsn} - glance::registry::bind_host: {get_input: controller_host} + glance::registry::bind_host: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]} glance::registry::auth_uri: {get_input: keystone_auth_uri} glance::registry::identity_uri: {get_input: keystone_identity_uri} glance::registry::debug: {get_input: debug} - glance::backend::swift::swift_store_auth_address: {get_input: glance_swift_store_auth_address} + glance::backend::swift::swift_store_auth_address: {get_input: keystone_auth_address} glance::backend::swift::swift_store_user: service:glance glance::backend::swift::swift_store_key: {get_input: glance_password} glance_backend: {get_input: glance_backend} @@ 
-783,9 +876,9 @@ resources: heat::auth_uri: {get_input: keystone_auth_uri} heat::identity_uri: {get_input: keystone_identity_uri} heat::keystone_password: {get_input: heat_password} - heat::api::bind_host: {get_input: controller_host} - heat::api_cloudwatch::bind_host: {get_input: controller_host} - heat::api_cfn::bind_host: {get_input: controller_host} + heat::api::bind_host: {get_input: heat_api_network} + heat::api_cloudwatch::bind_host: {get_input: heat_api_network} + heat::api_cfn::bind_host: {get_input: heat_api_network} heat::database_connection: {get_input: heat_dsn} heat::instance_user: heat-admin heat::debug: {get_input: debug} @@ -798,11 +891,11 @@ resources: keystone_ssl_certificate: {get_input: keystone_ssl_certificate} keystone_ssl_certificate_key: {get_input: keystone_ssl_certificate_key} keystone::database_connection: {get_input: keystone_dsn} - keystone::public_bind_host: {get_input: controller_host} - keystone::admin_bind_host: {get_input: controller_host} + keystone::public_bind_host: {get_input: keystone_public_api_network} + keystone::admin_bind_host: {get_input: keystone_admin_api_network} keystone::debug: {get_input: debug} # MongoDB - mongodb::server::bind_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MongoDbNetwork]}]} + mongodb::server::bind_ip: {get_input: mongo_db_network} # MySQL admin_password: {get_input: admin_password} enable_galera: {get_input: enable_galera} @@ -811,9 +904,10 @@ resources: mysql_innodb_buffer_pool_size: {get_input: mysql_innodb_buffer_pool_size} mysql::server::root_password: {get_input: mysql_root_password} mysql_cluster_name: {get_input: mysql_cluster_name} + mysql_bind_host: {get_input: mysql_network} # Neutron - neutron::bind_host: {get_input: controller_host} + neutron::bind_host: {get_input: neutron_api_network} neutron::rabbit_password: {get_input: rabbit_password} neutron::rabbit_user: {get_input: rabbit_user} neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} @@ -823,14 +917,14 @@ resources: neutron::server::identity_uri: {get_input: keystone_identity_uri} neutron::server::database_connection: {get_input: neutron_dsn} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} - neutron::agents::ml2::ovs::local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronLocalIp]}]} + neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} neutron_flat_networks: {get_input: neutron_flat_networks} neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret} - neutron::agents::metadata::metadata_ip: {get_input: controller_virtual_ip} + neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network} neutron_agent_mode: {get_input: neutron_agent_mode} neutron_router_distributed: {get_input: neutron_router_distributed} neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers} - neutron_allow_l3agent_failover: {get_input: neutron_allow_l3agent_failover} + neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover} neutron::server::l3_ha: {get_input: neutron_l3_ha} neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} neutron_bridge_mappings: {get_input: neutron_bridge_mappings} @@ -844,6 +938,7 @@ resources: neutron::agents::metadata::auth_password: {get_input: neutron_password} neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options} neutron_dsn: {get_input: neutron_dsn} + neutron::agents::metadata::auth_url: {get_input: keystone_identity_uri} # 
Ceilometer ceilometer_backend: {get_input: ceilometer_backend} @@ -854,11 +949,13 @@ resources: ceilometer::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} ceilometer::rabbit_port: {get_input: rabbit_client_port} ceilometer::debug: {get_input: debug} - ceilometer::api::host: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} + ceilometer::api::host: {get_input: ceilometer_api_network} ceilometer::api::keystone_password: {get_input: ceilometer_password} ceilometer::api::keystone_auth_uri: {get_input: keystone_auth_uri} ceilometer::api::keystone_identity_uri: {get_input: keystone_identity_uri} ceilometer::agent::auth::auth_password: {get_input: ceilometer_password} + ceilometer::agent::auth::auth_url: {get_input: keystone_auth_address} + ceilometer::agent::central::coordination_url: {get_input: ceilometer_coordination_url} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} @@ -870,40 +967,52 @@ resources: nova::debug: {get_input: debug} nova::api::auth_uri: {get_input: keystone_auth_uri} nova::api::identity_uri: {get_input: keystone_identity_uri} - nova::api::api_bind_address: {get_input: controller_host} - nova::api::metadata_listen: {get_input: controller_host} + nova::api::api_bind_address: {get_input: nova_api_network} + nova::api::metadata_listen: {get_input: nova_metadata_network} nova::api::admin_password: {get_input: nova_password} nova::database_connection: {get_input: nova_dsn} + nova::glance_api_servers: {get_input: glance_api_servers} nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} nova::network::neutron::neutron_admin_password: {get_input: neutron_password} nova::network::neutron::neutron_url: {get_input: neutron_url} nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} + nova::vncproxy::host: {get_input: nova_api_network} # Horizon + apache::ip: {get_input: horizon_network} horizon::django_debug: {get_input: debug} horizon::secret_key: {get_input: horizon_secret} - horizon::bind_address: {get_input: controller_host} + horizon::bind_address: {get_input: horizon_network} horizon::keystone_url: {get_input: keystone_auth_uri} # Rabbit - rabbitmq::node_ip_address: {get_input: controller_host} + rabbitmq::node_ip_address: {get_input: rabbitmq_network} rabbitmq::erlang_cookie: {get_input: rabbit_cookie} # Redis - redis::bind: {get_input: controller_host} + redis::bind: {get_input: redis_network} + redis_vip: {get_input: redis_vip} # Misc - memcached::listen_ip: {get_input: controller_host} + memcached::listen_ip: {get_input: memcached_network} neutron_public_interface_ip: {get_input: neutron_public_interface_ip} ntp::servers: {get_input: ntp_servers} control_virtual_interface: {get_input: control_virtual_interface} - controller_virtual_ip: {get_input: controller_virtual_ip} public_virtual_interface: {get_input: public_virtual_interface} - public_virtual_ip: {get_input: public_virtual_ip} tripleo::loadbalancer::control_virtual_interface: {get_input: control_virtual_interface} - tripleo::loadbalancer::controller_virtual_ip: {get_input: controller_virtual_ip} tripleo::loadbalancer::public_virtual_interface: {get_input: public_virtual_interface} - tripleo::loadbalancer::public_virtual_ip: {get_input: public_virtual_ip} enable_package_install: {get_input: enable_package_install} + UpdateConfig: + type: OS::TripleO::Tasks::PackageUpdate + + UpdateDeployment: + type: 
OS::Heat::SoftwareDeployment + properties: + config: {get_resource: UpdateConfig} + server: {get_resource: Controller} + input_values: + update_identifier: + get_param: UpdateIdentifier + outputs: ip_address: description: IP address of the server in the ctlplane network @@ -938,7 +1047,7 @@ outputs: Server's IP address and hostname in the /etc/hosts format value: str_replace: - template: IP HOST CLOUDNAME + template: IP HOST.localdomain HOST CLOUDNAME params: IP: {get_attr: [Controller, networks, ctlplane, 0]} HOST: {get_attr: [Controller, name]} @@ -953,11 +1062,11 @@ outputs: str_replace: template: 'r1z1-IP:%PORT%/d1' params: - IP: {get_attr: [Controller, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} swift_proxy_memcache: description: Swift proxy-memcache value value: str_replace: template: "IP:11211" params: - IP: {get_attr: [Controller, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]} diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index c15d43ea..40c44aef 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -13,6 +13,16 @@ nova::network::neutron::vif_plugging_is_fatal: false nova::network::neutron::vif_plugging_timeout: 30 nova::network::neutron::dhcp_domain: '' +neutron::plugins::ml2::tunnel_id_ranges: + - '1:1000' +neutron::plugins::ml2::vni_ranges: + - '1:1000' +neutron::plugins::ml2::type_drivers: + - flat + - gre + - vxlan + - vlan + sysctl_settings: net.ipv4.tcp_keepalive_intvl: value: 1 diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml index a72c4850..4915d3c8 100644 --- a/puppet/hieradata/compute.yaml +++ b/puppet/hieradata/compute.yaml @@ -15,6 +15,4 @@ nova::compute::rbd::rbd_keyring: 'client.openstack' nova::compute::rbd::libvirt_images_rbd_pool: 'vms' nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}" -neutron::plugins::ml2::tunnel_id_ranges: ['1:1000'] - ceilometer::agent::auth::auth_tenant_name: 'service' diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index 50cbb3f9..d0cbe890 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -67,8 +67,6 @@ neutron::core_plugin: 'ml2' neutron::service_plugins: - 'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin' neutron::dhcp_agents_per_network: 2 -neutron::plugins::ml2::tunnel_id_ranges: - - '1:1000' neutron::server::sync_db: true neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf @@ -90,6 +88,8 @@ pacemaker::corosync::manage_fw: false # horizon horizon::allowed_hosts: '*' +horizon::django_session_engine: 'django.contrib.sessions.backends.cache' + mysql::server::manage_config_file: true mysql::server::package_name: mariadb-galera-server @@ -115,3 +115,4 @@ tripleo::loadbalancer::ceilometer: true tripleo::loadbalancer::heat_api: true tripleo::loadbalancer::heat_cloudwatch: true tripleo::loadbalancer::heat_cfn: true +tripleo::loadbalancer::horizon: true diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index caca89a8..00bab7f6 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -68,7 +68,6 @@ include ::neutron class { 'neutron::plugins::ml2': flat_networks => split(hiera('neutron_flat_networks'), ','), tenant_network_types => [hiera('neutron_tenant_network_type')], - type_drivers => 
[hiera('neutron_tenant_network_type')], } class { 'neutron::agents::ml2::ovs': diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index 27272643..19ed97fb 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -48,8 +48,7 @@ if hiera('step') >= 2 { include ::mongodb::globals include ::mongodb::server - $mongo_node_ips = split(hiera('mongo_node_ips'), ',') - $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017') + $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') $mongo_node_string = join($mongo_node_ips_with_port, ',') $mongodb_replset = hiera('mongodb::server::replset') @@ -62,7 +61,7 @@ if hiera('step') >= 2 { } # Redis - $redis_node_ips = split(hiera('redis_node_ips'), ',') + $redis_node_ips = hiera('redis_node_ips') $redis_master_hostname = downcase(hiera('bootstrap_nodeid')) if $redis_master_hostname == $::hostname { @@ -77,9 +76,7 @@ if hiera('step') >= 2 { if count($redis_node_ips) > 1 { Class['::tripleo::redis_notification'] -> Service['redis-sentinel'] include ::redis::sentinel - class {'::tripleo::redis_notification' : - haproxy_monitor_ip => hiera('tripleo::loadbalancer::controller_virtual_ip'), - } + include ::tripleo::redis_notification } if str2bool(hiera('enable_galera', 'true')) { @@ -92,7 +89,7 @@ if hiera('step') >= 2 { config_file => $mysql_config_file, override_options => { 'mysqld' => { - 'bind-address' => hiera('controller_host'), + 'bind-address' => hiera('mysql_bind_host'), 'max_connections' => '1024', 'open_files_limit' => '-1', }, @@ -102,7 +99,7 @@ if hiera('step') >= 2 { # FIXME: this should only occur on the bootstrap host (ditto for db syncs) # Create all the database schemas # Example DSN format: mysql://user:password@host/dbname - $allowed_hosts = ['%',hiera('controller_host')] + $allowed_hosts = ['%',hiera('mysql_bind_host')] $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]') class { 'keystone::db::mysql': user => $keystone_dsn[3], @@ -254,10 +251,7 @@ if hiera('step') >= 3 { include ::glance::registry include join(['::glance::backend::', $glance_backend]) - class { 'nova': - glance_api_servers => join([hiera('glance_protocol'), '://', hiera('controller_virtual_ip'), ':', hiera('glance_port')]), - } - + include ::nova include ::nova::api include ::nova::cert include ::nova::conductor @@ -268,8 +262,9 @@ if hiera('step') >= 3 { include ::neutron include ::neutron::server - include ::neutron::agents::dhcp include ::neutron::agents::l3 + include ::neutron::agents::dhcp + include ::neutron::agents::metadata file { '/etc/neutron/dnsmasq-neutron.conf': content => hiera('neutron_dnsmasq_options'), @@ -280,18 +275,12 @@ if hiera('step') >= 3 { } class { 'neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), + flat_networks => split(hiera('neutron_flat_networks'), ','), tenant_network_types => [hiera('neutron_tenant_network_type')], - type_drivers => [hiera('neutron_tenant_network_type')], } - class { 'neutron::agents::ml2::ovs': - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), - } - - class { 'neutron::agents::metadata': - auth_url => join(['http://', hiera('controller_virtual_ip'), ':35357/v2.0']), + bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), + tunnel_types => split(hiera('neutron_tunnel_types'), ','), } Service['neutron-server'] -> Service['neutron-dhcp-service'] @@ -345,7 +334,23 @@ if 
hiera('step') >= 3 { } } - $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend]) + if hiera('cinder_enable_netapp_backend', false) { + $cinder_netapp_backend = hiera('cinder::backend::netapp::title') + + cinder_config { + "${cinder_netapp_backend}/host": value => 'hostgroup'; + } + + if hiera('cinder_netapp_nfs_shares', undef) { + $cinder_netapp_nfs_shares = split(hiera('cinder_netapp_nfs_shares', undef), ',') + } + + cinder::backend::netapp { $cinder_netapp_backend : + nfs_shares => $cinder_netapp_nfs_shares, + } + } + + $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend]) class { '::cinder::backends' : enabled_backends => $cinder_enabled_backends, } @@ -401,12 +406,10 @@ if hiera('step') >= 3 { include ::ceilometer::alarm::evaluator include ::ceilometer::expirer include ::ceilometer::collector + include ceilometer::agent::auth class { '::ceilometer::db' : database_connection => $ceilometer_database_connection, } - class { 'ceilometer::agent::auth': - auth_url => join(['http://', hiera('controller_virtual_ip'), ':5000/v2.0']), - } Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } @@ -420,7 +423,7 @@ if hiera('step') >= 3 { # Horizon $vhost_params = { add_listen => false } class { 'horizon': - cache_server_ip => split(hiera('memcache_node_ips', '127.0.0.1'), ','), + cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), vhost_extra_params => $vhost_params, } diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index a7aa40cb..ed4f3512 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -96,14 +96,22 @@ if hiera('step') >= 1 { if downcase(hiera('ceilometer_backend')) == 'mongodb' { include ::mongodb::globals - # FIXME: replace with service_manage => false on ::mongodb::server - # when this is merged: https://github.com/puppetlabs/pupp etlabs-mongodb/pull/198 class { '::mongodb::server' : - service_ensure => undef, - service_enable => false, + service_manage => false, } } + # Memcached + class {'::memcached' : + service_manage => false, + } + + # Redis + class { '::redis' : + service_manage => false, + notify_service => false, + } + # Galera if str2bool(hiera('enable_galera', 'true')) { $mysql_config_file = '/etc/my.cnf.d/galera.cnf' @@ -122,7 +130,7 @@ if hiera('step') >= 1 { 'innodb_locks_unsafe_for_binlog'=> '1', 'query_cache_size' => '0', 'query_cache_type' => '0', - 'bind-address' => hiera('controller_host'), + 'bind-address' => hiera('mysql_bind_host'), 'max_connections' => '1024', 'open_files_limit' => '-1', 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so', @@ -154,7 +162,17 @@ if hiera('step') >= 1 { if hiera('step') >= 2 { + # NOTE(gfidente): the following vars are needed on all nodes so they + # need to stay out of pacemaker_master conditional + $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') + $mongodb_replset = hiera('mongodb::server::replset') + if $pacemaker_master { + + # FIXME: we should not have to access tripleo::loadbalancer class + # parameters here to configure pacemaker VIPs. The configuration + # of pacemaker VIPs could move into puppet-tripleo or we should + # make use of less specific hiera parameters here for the settings. 
$control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip') pacemaker::resource::ip { 'control_vip': ip_address => $control_vip, @@ -163,9 +181,35 @@ if hiera('step') >= 2 { pacemaker::resource::ip { 'public_vip': ip_address => $public_vip, } + + $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip') + if $internal_api_vip and $internal_api_vip != $control_vip { + pacemaker::resource::ip { 'internal_api_vip': + ip_address => $internal_api_vip, + } + } + + $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip') + if $storage_vip and $storage_vip != $control_vip { + pacemaker::resource::ip { 'storage_vip': + ip_address => $storage_vip, + } + } + + $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip') + if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip { + pacemaker::resource::ip { 'storage_mgmt_vip': + ip_address => $storage_mgmt_vip, + } + } + pacemaker::resource::service { 'haproxy': clone_params => true, } + pacemaker::resource::service { $::memcached::params::service_name : + clone_params => true, + require => Class['::memcached'], + } pacemaker::resource::ocf { 'rabbitmq': ocf_agent_name => 'heartbeat:rabbitmq-cluster', @@ -179,56 +223,58 @@ if hiera('step') >= 2 { op_params => 'start timeout=120s', clone_params => true, require => Class['::mongodb::server'], - before => Exec['mongodb-ready'], } # NOTE (spredzy) : The replset can only be run # once all the nodes have joined the cluster. - $mongo_node_ips = split(hiera('mongo_node_ips'), ',') - $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017') - $mongo_node_string = join($mongo_node_ips_with_port, ',') - $mongodb_replset = hiera('mongodb::server::replset') - $mongodb_cluster_ready_command = join(suffix(prefix($mongo_node_ips, '/bin/nc -w1 '), ' 27017 < /dev/null'), ' && ') - exec { 'mongodb-ready' : - command => $mongodb_cluster_ready_command, - timeout => 30, - tries => 180, - try_sleep => 10, + mongodb_conn_validator { $mongo_node_ips_with_port : + require => Pacemaker::Resource::Service[$::mongodb::params::service_name], + before => Mongodb_replset[$mongodb_replset], } mongodb_replset { $mongodb_replset : members => $mongo_node_ips_with_port, - require => Exec['mongodb-ready'], } } pacemaker::resource::ocf { 'galera' : ocf_agent_name => 'heartbeat:galera', - op_params => 'promote timeout=300s on-fail=block --master', + op_params => 'promote timeout=300s on-fail=block', + master_params => '', meta_params => "master-max=${galera_nodes_count} ordered=true", resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'", require => Class['::mysql::server'], before => Exec['galera-ready'], } - } - - # Redis - $redis_node_ips = split(hiera('redis_node_ips'), ',') - $redis_master_hostname = downcase(hiera('bootstrap_nodeid')) - - if $redis_master_hostname == $::hostname { - $slaveof = undef - } else { - $slaveof = "${redis_master_hostname} 6379" - } - class {'::redis' : - slaveof => $slaveof, - } - if count($redis_node_ips) > 1 { - Class['::tripleo::redis_notification'] -> Service['redis-sentinel'] - include ::redis::sentinel - class {'::tripleo::redis_notification' : - haproxy_monitor_ip => hiera('tripleo::loadbalancer::controller_virtual_ip'), + pacemaker::resource::ocf { 'redis': + ocf_agent_name => 'heartbeat:redis', + master_params => '', + meta_params => 'notify=true ordered=true interleave=true', + resource_params => 'wait_last_known_master=true', + require => Class['::redis'], 
+ } + $redis_vip = hiera('redis_vip') + if $redis_vip and $redis_vip != $control_vip { + pacemaker::resource::ip { 'vip-redis': + ip_address => $redis_vip, + } } + pacemaker::constraint::base { 'redis-master-then-vip-redis': + constraint_type => 'order', + first_resource => 'redis-master', + second_resource => "ip-${redis_vip}", + first_action => 'promote', + second_action => 'start', + require => [Pacemaker::Resource::Ocf['redis'], + Pacemaker::Resource::Ip['vip-redis']], + } + pacemaker::constraint::colocation { 'vip-redis-with-redis-master': + source => "ip-${redis_vip}", + target => 'redis-master', + score => 'INFINITY', + require => [Pacemaker::Resource::Ocf['redis'], + Pacemaker::Resource::Ip['vip-redis']], + } + } exec { 'galera-ready' : @@ -263,7 +309,7 @@ MYSQL_HOST=localhost\n", # Create all the database schemas # Example DSN format: mysql://user:password@host/dbname if $sync_db { - $allowed_hosts = ['%',hiera('controller_host')] + $allowed_hosts = ['%',hiera('mysql_bind_host')] $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]') class { 'keystone::db::mysql': user => $keystone_dsn[3], @@ -350,8 +396,6 @@ MYSQL_HOST=localhost\n", include ::ceph::profile::osd } - # Memcached - include ::memcached } #END STEP 2 @@ -417,52 +461,54 @@ if hiera('step') >= 3 { } include join(['::glance::backend::', $glance_backend]) - class { 'nova': - glance_api_servers => join([hiera('glance_protocol'), '://', hiera('controller_virtual_ip'), ':', hiera('glance_port')]), - } + include ::nova class { '::nova::api' : sync_db => $sync_db, - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::nova::cert' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::nova::conductor' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::nova::consoleauth' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::nova::vncproxy' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::nova::scheduler' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } include ::nova::network::neutron + # Neutron class definitions include ::neutron class { '::neutron::server' : sync_db => $sync_db, - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::neutron::agents::dhcp' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::neutron::agents::l3' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, + } + class { 'neutron::agents::metadata': + manage_service => false, + enabled => false, } - file { '/etc/neutron/dnsmasq-neutron.conf': content => hiera('neutron_dnsmasq_options'), owner => 'neutron', @@ -470,31 +516,17 @@ if hiera('step') >= 3 { notify => Service['neutron-dhcp-service'], require => Package['neutron'], } - class { 'neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), + flat_networks => split(hiera('neutron_flat_networks'), ','), tenant_network_types => [hiera('neutron_tenant_network_type')], - type_drivers => 
[hiera('neutron_tenant_network_type')], } - class { 'neutron::agents::ml2::ovs': - # manage_service => $non_pcmk_start, -- not implemented - enabled => $non_pcmk_start, + # manage_service => false # not implemented + enabled => false, bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), tunnel_types => split(hiera('neutron_tunnel_types'), ','), } - class { 'neutron::agents::metadata': - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, - auth_url => join(['http://', hiera('controller_virtual_ip'), ':35357/v2.0']), - } - - Service['neutron-server'] -> Service['neutron-dhcp-service'] - Service['neutron-server'] -> Service['neutron-l3'] - Service['neutron-server'] -> Service['neutron-ovs-agent-service'] - Service['neutron-server'] -> Service['neutron-metadata'] - include ::cinder class { '::cinder::api': sync_db => $sync_db, @@ -551,7 +583,23 @@ if hiera('step') >= 3 { } } - $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend]) + if hiera('cinder_enable_netapp_backend', false) { + $cinder_netapp_backend = hiera('cinder::backend::netapp::title') + + cinder_config { + "${cinder_netapp_backend}/host": value => 'hostgroup'; + } + + if hiera('cinder_netapp_nfs_shares', undef) { + $cinder_netapp_nfs_shares = split(hiera('cinder_netapp_nfs_shares', undef), ',') + } + + cinder::backend::netapp { $cinder_netapp_backend : + nfs_shares => $cinder_netapp_nfs_shares, + } + } + + $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend]) class { '::cinder::backends' : enabled_backends => $cinder_enabled_backends, } @@ -610,42 +658,41 @@ if hiera('step') >= 3 { $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') } default : { + $mongo_node_string = join($mongo_node_ips_with_port, ',') $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" } } include ::ceilometer class { '::ceilometer::api' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::ceilometer::agent::notification' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::ceilometer::agent::central' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::ceilometer::alarm::notifier' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::ceilometer::alarm::evaluator' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::ceilometer::collector' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } include ::ceilometer::expirer class { '::ceilometer::db' : database_connection => $ceilometer_database_connection, sync_db => $sync_db, } - class { 'ceilometer::agent::auth': - auth_url => join(['http://', hiera('controller_virtual_ip'), ':5000/v2.0']), - } + include ceilometer::agent::auth Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } @@ -654,27 +701,34 @@ if hiera('step') >= 3 { sync_db => $sync_db, } class { '::heat::api' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => 
false, } class { '::heat::api_cfn' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::heat::api_cloudwatch' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } class { '::heat::engine' : - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + manage_service => false, + enabled => false, } - # Horizon - $vhost_params = { add_listen => false } + # httpd/apache and horizon + # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent + include ::apache + include ::apache::mod::status + $vhost_params = { + add_listen => false, + priority => 10, + } class { 'horizon': - cache_server_ip => split(hiera('memcache_node_ips', '127.0.0.1'), ','), + cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), vhost_extra_params => $vhost_params, + server_aliases => $::hostname, } $snmpd_user = hiera('snmpd_readonly_user_name') @@ -700,12 +754,22 @@ if hiera('step') >= 4 { # Cinder pacemaker::resource::service { $::cinder::params::api_service : clone_params => "interleave=true", + require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::cinder::params::scheduler_service : clone_params => "interleave=true", } pacemaker::resource::service { $::cinder::params::volume_service : } + pacemaker::constraint::base { 'keystone-then-cinder-api-constraint': + constraint_type => 'order', + first_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::cinder::params::api_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::cinder::params::api_service], + Pacemaker::Resource::Service[$::keystone::params::service_name]], + } pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint': constraint_type => "order", first_resource => "${::cinder::params::api_service}-clone", @@ -742,11 +806,21 @@ if hiera('step') >= 4 { # Glance pacemaker::resource::service { $::glance::params::registry_service_name : clone_params => "interleave=true", + require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::glance::params::api_service_name : clone_params => "interleave=true", } + pacemaker::constraint::base { 'keystone-then-glance-registry-constraint': + constraint_type => 'order', + first_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::glance::params::registry_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], + Pacemaker::Resource::Service[$::keystone::params::service_name]], + } pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint': constraint_type => "order", first_resource => "${::glance::params::registry_service_name}-clone", @@ -756,13 +830,481 @@ if hiera('step') >= 4 { require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], Pacemaker::Resource::Service[$::glance::params::api_service_name]], } - pacemaker::constraint::colocation { 'glance-registry-with-glance-api-colocation': - source => "${::glance::params::registry_service_name}-clone", - target => "${::glance::params::api_service_name}-clone", + pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation': + source => "${::glance::params::api_service_name}-clone", + target => 
"${::glance::params::registry_service_name}-clone", score => "INFINITY", require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], Pacemaker::Resource::Service[$::glance::params::api_service_name]], } + + # Neutron + pacemaker::resource::service { $::neutron::params::server_service: + op_params => "start timeout=90", + clone_params => "interleave=true", + require => Pacemaker::Resource::Service[$::keystone::params::service_name] + } + pacemaker::resource::service { $::neutron::params::l3_agent_service: + clone_params => "interleave=true", + } + pacemaker::resource::service { $::neutron::params::dhcp_agent_service: + clone_params => "interleave=true", + } + pacemaker::resource::service { $::neutron::params::ovs_agent_service: + clone_params => "interleave=true", + } + pacemaker::resource::service { $::neutron::params::metadata_agent_service: + clone_params => "interleave=true", + } + pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: + ocf_agent_name => "neutron:OVSCleanup", + clone_params => "interleave=true", + } + pacemaker::resource::ocf { 'neutron-netns-cleanup': + ocf_agent_name => "neutron:NetnsCleanup", + clone_params => "interleave=true", + } + pacemaker::resource::ocf { 'neutron-scale': + ocf_agent_name => "neutron:NeutronScale", + clone_params => "globally-unique=true clone-max=3 interleave=true", + } + pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': + constraint_type => "order", + first_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::neutron::params::server_service}-clone", + first_action => "start", + second_action => "start", + require => [Pacemaker::Resource::Service[$::keystone::params::service_name], + Pacemaker::Resource::Service[$::neutron::params::server_service]], + } + pacemaker::constraint::base { 'neutron-server-to-neutron-scale-constraint': + constraint_type => "order", + first_resource => "${::neutron::params::server_service}-clone", + second_resource => "neutron-scale-clone", + first_action => "start", + second_action => "start", + require => [Pacemaker::Resource::Service[$::neutron::params::server_service], + Pacemaker::Resource::Ocf['neutron-scale']], + } + pacemaker::constraint::base { 'neutron-scale-to-ovs-cleanup-constraint': + constraint_type => "order", + first_resource => "neutron-scale-clone", + second_resource => "${::neutron::params::ovs_cleanup_service}-clone", + first_action => "start", + second_action => "start", + require => [Pacemaker::Resource::Ocf['neutron-scale'], + Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"]], + } + pacemaker::constraint::colocation { 'neutron-scale-to-ovs-cleanup-colocation': + source => "${::neutron::params::ovs_cleanup_service}-clone", + target => "neutron-scale-clone", + score => "INFINITY", + require => [Pacemaker::Resource::Ocf['neutron-scale'], + Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"]], + } + pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint': + constraint_type => "order", + first_resource => "${::neutron::params::ovs_cleanup_service}-clone", + second_resource => "neutron-netns-cleanup-clone", + first_action => "start", + second_action => "start", + require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], + Pacemaker::Resource::Ocf['neutron-netns-cleanup']], + } + pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': + source => "neutron-netns-cleanup-clone", + target => 
"${::neutron::params::ovs_cleanup_service}-clone", + score => "INFINITY", + require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], + Pacemaker::Resource::Ocf['neutron-netns-cleanup']], + } + pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': + constraint_type => "order", + first_resource => "neutron-netns-cleanup-clone", + second_resource => "${::neutron::params::ovs_agent_service}-clone", + first_action => "start", + second_action => "start", + require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], + Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], + } + pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': + source => "${::neutron::params::ovs_agent_service}-clone", + target => "neutron-netns-cleanup-clone", + score => "INFINITY", + require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], + Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], + } + pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': + constraint_type => "order", + first_resource => "${::neutron::params::ovs_agent_service}-clone", + second_resource => "${::neutron::params::dhcp_agent_service}-clone", + first_action => "start", + second_action => "start", + require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], + Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], + + } + pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': + source => "${::neutron::params::dhcp_agent_service}-clone", + target => "${::neutron::params::ovs_agent_service}-clone", + score => "INFINITY", + require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], + Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], + } + pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint': + constraint_type => "order", + first_resource => "${::neutron::params::dhcp_agent_service}-clone", + second_resource => "${::neutron::params::l3_agent_service}-clone", + first_action => "start", + second_action => "start", + require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"], + Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]] + } + pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation': + source => "${::neutron::params::l3_agent_service}-clone", + target => "${::neutron::params::dhcp_agent_service}-clone", + score => "INFINITY", + require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"], + Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]] + } + pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint': + constraint_type => "order", + first_resource => "${::neutron::params::l3_agent_service}-clone", + second_resource => "${::neutron::params::metadata_agent_service}-clone", + first_action => "start", + second_action => "start", + require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"], + Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]] + } + pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation': + source => "${::neutron::params::metadata_agent_service}-clone", + target => "${::neutron::params::l3_agent_service}-clone", + score => "INFINITY", + require => 
[Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"], + Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]] + } + + # Nova + pacemaker::resource::service { $::nova::params::api_service_name : + clone_params => "interleave=true", + op_params => "monitor start-delay=10s", + } + pacemaker::resource::service { $::nova::params::conductor_service_name : + clone_params => "interleave=true", + op_params => "monitor start-delay=10s", + } + pacemaker::resource::service { $::nova::params::consoleauth_service_name : + clone_params => "interleave=true", + op_params => "monitor start-delay=10s", + require => Pacemaker::Resource::Service[$::keystone::params::service_name], + } + pacemaker::resource::service { $::nova::params::vncproxy_service_name : + clone_params => "interleave=true", + op_params => "monitor start-delay=10s", + } + pacemaker::resource::service { $::nova::params::scheduler_service_name : + clone_params => "interleave=true", + op_params => "monitor start-delay=10s", + } + + pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint': + constraint_type => 'order', + first_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::nova::params::consoleauth_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], + Pacemaker::Resource::Service[$::keystone::params::service_name]], + } + pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint': + constraint_type => "order", + first_resource => "${::nova::params::consoleauth_service_name}-clone", + second_resource => "${::nova::params::vncproxy_service_name}-clone", + first_action => "start", + second_action => "start", + require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], + Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], + } + pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation': + source => "${::nova::params::vncproxy_service_name}-clone", + target => "${::nova::params::consoleauth_service_name}-clone", + score => "INFINITY", + require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], + Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], + } + # FIXME(gfidente): novncproxy will not start unless websockify is updated to 0.6 + # which is not the case for f20 nor f21; ucomment when it becomes available + #pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint': + # constraint_type => "order", + # first_resource => "${::nova::params::vncproxy_service_name}-clone", + # second_resource => "${::nova::params::api_service_name}-clone", + # first_action => "start", + # second_action => "start", + # require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], + # Pacemaker::Resource::Service[$::nova::params::api_service_name]], + #} + #pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation': + # source => "${::nova::params::api_service_name}-clone", + # target => "${::nova::params::vncproxy_service_name}-clone", + # score => "INFINITY", + # require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], + # Pacemaker::Resource::Service[$::nova::params::api_service_name]], + #} + pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint': + constraint_type => "order", + first_resource => 
"${::nova::params::api_service_name}-clone", + second_resource => "${::nova::params::scheduler_service_name}-clone", + first_action => "start", + second_action => "start", + require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], + Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], + } + pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation': + source => "${::nova::params::scheduler_service_name}-clone", + target => "${::nova::params::api_service_name}-clone", + score => "INFINITY", + require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], + Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], + } + pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint': + constraint_type => "order", + first_resource => "${::nova::params::scheduler_service_name}-clone", + second_resource => "${::nova::params::conductor_service_name}-clone", + first_action => "start", + second_action => "start", + require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], + Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], + } + pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation': + source => "${::nova::params::conductor_service_name}-clone", + target => "${::nova::params::scheduler_service_name}-clone", + score => "INFINITY", + require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], + Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], + } + + # Ceilometer + pacemaker::resource::service { $::ceilometer::params::agent_central_service_name : + clone_params => 'interleave=true', + require => [Pacemaker::Resource::Service[$::keystone::params::service_name], + Pacemaker::Resource::Service[$::mongodb::params::service_name]], + } + pacemaker::resource::service { $::ceilometer::params::collector_service_name : + clone_params => 'interleave=true', + } + pacemaker::resource::service { $::ceilometer::params::api_service_name : + clone_params => 'interleave=true', + } + pacemaker::resource::service { $::ceilometer::params::alarm_evaluator_service_name : + clone_params => 'interleave=true', + } + pacemaker::resource::service { $::ceilometer::params::alarm_notifier_service_name : + clone_params => 'interleave=true', + } + pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name : + clone_params => 'interleave=true', + } + pacemaker::resource::ocf { 'delay' : + ocf_agent_name => 'heartbeat:Delay', + clone_params => 'interleave=true', + resource_params => 'startdelay=10', + } + pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint': + constraint_type => 'order', + first_resource => "${::ceilometer::params::agent_central_service_name}-clone", + second_resource => "${::ceilometer::params::collector_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], + Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], + } + pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint': + constraint_type => 'order', + first_resource => "${::ceilometer::params::collector_service_name}-clone", + second_resource => "${::ceilometer::params::api_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => 
[Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name], + Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]], + } + pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation': + source => "${::ceilometer::params::api_service_name}-clone", + target => "${::ceilometer::params::collector_service_name}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], + Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], + } + pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint': + constraint_type => 'order', + first_resource => "${::ceilometer::params::api_service_name}-clone", + second_resource => 'delay-clone', + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], + Pacemaker::Resource::Ocf['delay']], + } + pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation': + source => 'delay-clone', + target => "${::ceilometer::params::api_service_name}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], + Pacemaker::Resource::Ocf['delay']], + } + pacemaker::constraint::base { 'ceilometer-delay-then-ceilometer-alarm-evaluator-constraint': + constraint_type => 'order', + first_resource => 'delay-clone', + second_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], + Pacemaker::Resource::Ocf['delay']], + } + pacemaker::constraint::colocation { 'ceilometer-alarm-evaluator-with-ceilometer-delay-colocation': + source => "${::ceilometer::params::alarm_evaluator_service_name}-clone", + target => 'delay-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], + Pacemaker::Resource::Ocf['delay']], + } + pacemaker::constraint::base { 'ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint': + constraint_type => 'order', + first_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone", + second_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], + Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], + } + pacemaker::constraint::colocation { 'ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation': + source => "${::ceilometer::params::alarm_notifier_service_name}-clone", + target => "${::ceilometer::params::alarm_evaluator_service_name}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], + Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], + } + pacemaker::constraint::base { 'ceilometer-alarm-notifier-then-ceilometer-notification-constraint': + constraint_type => 'order', + first_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone", + second_resource => "${::ceilometer::params::agent_notification_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => 
[Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name], + Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], + } + pacemaker::constraint::colocation { 'ceilometer-notification-with-ceilometer-alarm-notifier-colocation': + source => "${::ceilometer::params::agent_notification_service_name}-clone", + target => "${::ceilometer::params::alarm_notifier_service_name}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name], + Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], + } + if downcase(hiera('ceilometer_backend')) == 'mongodb' { + pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint': + constraint_type => 'order', + first_resource => "${::mongodb::params::service_name}-clone", + second_resource => "${::ceilometer::params::agent_central_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], + Pacemaker::Resource::Service[$::mongodb::params::service_name]], + } + } + pacemaker::constraint::base { 'vip-redis-then-ceilometer-central': + constraint_type => 'order', + first_resource => "ip-${redis_vip}", + second_resource => "${::ceilometer::params::agent_central_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], + Pacemaker::Resource::Ip['vip-redis']], + } + pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint': + constraint_type => 'order', + first_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::ceilometer::params::agent_central_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], + Pacemaker::Resource::Service[$::keystone::params::service_name]], + } + + # Heat + pacemaker::resource::service { $::heat::params::api_service_name : + clone_params => 'interleave=true', + } + pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name : + clone_params => 'interleave=true', + } + pacemaker::resource::service { $::heat::params::api_cfn_service_name : + clone_params => 'interleave=true', + } + pacemaker::resource::service { $::heat::params::engine_service_name : + clone_params => 'interleave=true', + } + pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint': + constraint_type => 'order', + first_resource => "${::heat::params::api_service_name}-clone", + second_resource => "${::heat::params::api_cfn_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], + Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], + } + pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation': + source => "${::heat::params::api_cfn_service_name}-clone", + target => "${::heat::params::api_service_name}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name], + Pacemaker::Resource::Service[$::heat::params::api_service_name]], + } + pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint': + constraint_type => 'order', + first_resource => "${::heat::params::api_cfn_service_name}-clone", + 
second_resource => "${::heat::params::api_cloudwatch_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], + Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], + } + pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation': + source => "${::heat::params::api_cloudwatch_service_name}-clone", + target => "${::heat::params::api_cfn_service_name}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name], + Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]], + } + pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint': + constraint_type => 'order', + first_resource => "${::heat::params::api_cloudwatch_service_name}-clone", + second_resource => "${::heat::params::engine_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], + Pacemaker::Resource::Service[$::heat::params::engine_service_name]], + } + pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation': + source => "${::heat::params::engine_service_name}-clone", + target => "${::heat::params::api_cloudwatch_service_name}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], + Pacemaker::Resource::Service[$::heat::params::engine_service_name]], + } + pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint': + constraint_type => 'order', + first_resource => "${::ceilometer::params::agent_notification_service_name}-clone", + second_resource => "${::heat::params::api_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], + Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]], + } + + # Horizon + pacemaker::resource::service { $::horizon::params::http_service: + clone_params => "interleave=true", + } + + } } #END STEP 4 diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp index 80cf6a21..edfeaeca 100644 --- a/puppet/manifests/overcloud_volume.pp +++ b/puppet/manifests/overcloud_volume.pp @@ -31,6 +31,7 @@ if count(hiera('ntp::servers')) > 0 { } include ::cinder +include ::cinder::glance include ::cinder::volume include ::cinder::setup_test_volume diff --git a/puppet/swift-storage-puppet.yaml b/puppet/swift-storage-puppet.yaml index 12292dec..15481032 100644 --- a/puppet/swift-storage-puppet.yaml +++ b/puppet/swift-storage-puppet.yaml @@ -51,6 +51,20 @@ parameters: default: 'false' description: Set to true to enable package installation via Puppet type: boolean + UpdateIdentifier: + default: '' + type: string + description: > + Setting to a previously unused value during stack-update will trigger + package update on all nodes + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. 
+ type: json + Hostname: + type: string + default: '' # Defaults to Heat created hostname resources: @@ -64,6 +78,7 @@ resources: - network: ctlplane user_data_format: SOFTWARE_CONFIG user_data: {get_resource: NodeUserData} + name: {get_param: Hostname} NodeUserData: type: OS::TripleO::NodeUserData @@ -90,6 +105,13 @@ resources: StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + NetIpMap: + type: OS::TripleO::Network::Ports::NetIpMap + properties: + InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + StorageIp: {get_attr: [StoragePort, ip_address]} + StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + NetworkDeployment: type: OS::TripleO::SoftwareDeployment properties: @@ -105,6 +127,8 @@ resources: hierarchy: - heat_config_%{::deploy_config_name} - object + - swift_devices_and_proxy # provided by SwiftDevicesAndProxyConfig + - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common datafiles: @@ -112,29 +136,29 @@ resources: raw_data: {get_file: hieradata/common.yaml} object: raw_data: {get_file: hieradata/object.yaml} - oac_data: # data we map in from other OAC configurations - tripleo::ringbuilder::devices: swift.devices mapped_data: # data supplied directly to this deployment configuration, etc swift::swift_hash_suffix: { get_input: swift_hash_suffix } tripleo::ringbuilder::part_power: { get_input: swift_part_power } tripleo::ringbuilder::replicas: {get_input: swift_replicas } # Swift - swift::storage::all::storage_local_net_ip: {get_input: local_ip} + swift::storage::all::storage_local_net_ip: {get_input: swift_management_network} swift_mount_check: {get_input: swift_mount_check } tripleo::ringbuilder::min_part_hours: { get_input: swift_min_part_hours } ntp::servers: {get_input: ntp_servers} # NOTE(dprince): build_ring support is currently not wired in. 
# See: https://review.openstack.org/#/c/109225/ tripleo::ringbuilder::build_ring: True + snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} + snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} enable_package_install: {get_input: enable_package_install} SwiftStorageHieraDeploy: type: OS::Heat::StructuredDeployment + depends_on: NetworkDeployment properties: server: {get_resource: SwiftStorage} config: {get_resource: SwiftStorageHieraConfig} - signal_transport: NO_SIGNAL input_values: local_ip: {get_attr: [SwiftStorage, networks, ctlplane, 0]} snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} @@ -150,12 +174,25 @@ resources: params: server: {get_param: NtpServer} enable_package_install: {get_param: EnablePackageInstall} + swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} + + UpdateConfig: + type: OS::TripleO::Tasks::PackageUpdate + + UpdateDeployment: + type: OS::Heat::SoftwareDeployment + properties: + config: {get_resource: UpdateConfig} + server: {get_resource: SwiftStorage} + input_values: + update_identifier: + get_param: UpdateIdentifier outputs: hosts_entry: value: str_replace: - template: "IP HOST" + template: "IP HOST.localdomain HOST" params: IP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} HOST: {get_attr: [SwiftStorage, name]} @@ -169,7 +206,7 @@ outputs: str_replace: template: 'r1z1-IP:%PORT%/d1' params: - IP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} internal_api_ip_address: description: IP address of the server in the internal_api network value: {get_attr: [InternalApiPort, ip_address]} diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml new file mode 100644 index 00000000..1dec489c --- /dev/null +++ b/puppet/vip-config.yaml @@ -0,0 +1,41 @@ +heat_template_version: 2015-04-30 + +description: > + Configure hieradata for service -> virtual IP mappings. + +resources: + VipConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + vip_data: + mapped_data: + keystone_admin_api_vip: {get_input: keystone_admin_api_vip} + keystone_public_api_vip: {get_input: keystone_public_api_vip} + neutron_api_vip: {get_input: neutron_api_vip} + cinder_api_vip: {get_input: cinder_api_vip} + glance_api_vip: {get_input: glance_api_vip} + glance_registry_vip: {get_input: glance_registry_vip} + swift_proxy_vip: {get_input: swift_proxy_vip} + nova_api_vip: {get_input: nova_api_vip} + nova_metadata_vip: {get_input: nova_metadata_vip} + ceilometer_api_vip: {get_input: ceilometer_api_vip} + heat_api_vip: {get_input: heat_api_vip} + horizon_vip: {get_input: horizon_vip} + redis_vip: {get_input: redis_vip} + mysql_vip: {get_input: mysql_vip} + tripleo::loadbalancer::public_virtual_ip: {get_input: public_virtual_ip} + tripleo::loadbalancer::controller_virtual_ip: {get_input: control_virtual_ip} + tripleo::loadbalancer::internal_api_virtual_ip: {get_input: internal_api_virtual_ip} + tripleo::loadbalancer::storage_virtual_ip: {get_input: storage_virtual_ip} + tripleo::loadbalancer::storage_mgmt_virtual_ip: {get_input: storage_mgmt_virtual_ip} + tripleo::redis_notification::haproxy_monitor_ip: {get_input: control_virtual_ip} + + +outputs: + OS::stack_id: + description: The VipConfigImpl resource. + value: {get_resource: VipConfigImpl} |
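The step 4 hunks in overcloud_controller_pacemaker.pp above apply one recurring pattern to every OpenStack service: the systemd unit is wrapped in a cloned pacemaker resource, and each dependency is declared twice, as a start-order constraint plus a colocation constraint between the clones. Below is a minimal sketch of that pattern; 'foo' and 'bar' are hypothetical service names, and the sketch assumes the same pacemaker::resource::service, pacemaker::constraint::base and pacemaker::constraint::colocation defined types used in the diff.

  # Clone both services on all controllers; interleave lets each node's copy
  # proceed once its local peer is up instead of waiting for the whole cluster.
  pacemaker::resource::service { 'foo':
    clone_params => 'interleave=true',
  }
  pacemaker::resource::service { 'bar':
    clone_params => 'interleave=true',
  }
  # Start the bar clone only after the foo clone has started...
  pacemaker::constraint::base { 'foo-then-bar-constraint':
    constraint_type => 'order',
    first_resource  => 'foo-clone',
    second_resource => 'bar-clone',
    first_action    => 'start',
    second_action   => 'start',
    require         => [Pacemaker::Resource::Service['foo'],
                        Pacemaker::Resource::Service['bar']],
  }
  # ...and keep the two clones placed on the same nodes.
  pacemaker::constraint::colocation { 'bar-with-foo-colocation':
    source  => 'bar-clone',
    target  => 'foo-clone',
    score   => 'INFINITY',
    require => [Pacemaker::Resource::Service['foo'],
                Pacemaker::Resource::Service['bar']],
  }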