Diffstat (limited to 'puppet')
26 files changed, 655 insertions, 55 deletions
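Two of the new tunables in the diffs below, NeutronTenantMtu and UpgradeLevelNovaCompute, are plain Heat parameters added to the compute and controller templates. As a purely illustrative sketch (the parameter names come from this change; the values are hypothetical), they would typically be supplied through a Heat environment file:

parameter_defaults:
  # Tenant-network MTU; keep it consistent with the MTU that dnsmasq
  # advertises to instances via DHCP option 26 (see NeutronDnsmasqOptions).
  NeutronTenantMtu: 1400
  NeutronDnsmasqOptions: "dhcp-option-force=26,1400"
  # RPC version pin for nova-compute during a rolling upgrade (example value).
  UpgradeLevelNovaCompute: "liberty"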
diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml index f9c53465..e90710c7 100644 --- a/puppet/ceph-storage-post.yaml +++ b/puppet/ceph-storage-post.yaml @@ -14,8 +14,19 @@ parameters: type: json description: Value which changes if the node configuration may need to be re-applied - resources: + + CephStorageArtifactsConfig: + type: deploy-artifacts.yaml + + CephStorageArtifactsDeploy: + type: OS::Heat::StructuredDeployments + properties: + servers: {get_param: servers} + config: {get_resource: CephStorageArtifactsConfig} + input_values: + update_identifier: {get_param: NodeConfigIdentifiers} + CephStoragePuppetConfig: type: OS::Heat::SoftwareConfig properties: @@ -29,6 +40,7 @@ resources: CephStorageDeployment_Step1: type: OS::Heat::StructuredDeployments + depends_on: CephStorageArtifactsDeploy properties: name: CephStorageDeployment_Step1 servers: {get_param: servers} diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml index e310e1f5..d737bcc5 100644 --- a/puppet/ceph-storage.yaml +++ b/puppet/ceph-storage.yaml @@ -280,11 +280,54 @@ outputs: hosts_entry: value: str_replace: - template: "IP HOST.DOMAIN HOST" + template: | + PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST + EXTERNALIP EXTERNALHOST + INTERNAL_APIIP INTERNAL_APIHOST + STORAGEIP STORAGEHOST + STORAGE_MGMTIP STORAGE_MGMTHOST + TENANTIP TENANTHOST + MANAGEMENTIP MANAGEMENTHOST params: - IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]} + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]} DOMAIN: {get_param: CloudDomain} - HOST: {get_attr: [CephStorage, name]} + PRIMARYHOST: {get_attr: [CephStorage, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: + list_join: + - '-' + - - {get_attr: [CephStorage, name]} + - external + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: + list_join: + - '-' + - - {get_attr: [CephStorage, name]} + - internalapi + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: + list_join: + - '-' + - - {get_attr: [CephStorage, name]} + - storage + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: + list_join: + - '-' + - - {get_attr: [CephStorage, name]} + - storagemgmt + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: + list_join: + - '-' + - - {get_attr: [CephStorage, name]} + - tenant + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: + list_join: + - '-' + - - {get_attr: [CephStorage, name]} + - management nova_server_resource: description: Heat resource handle for the ceph storage server value: diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml index 9b7c752a..f470203f 100644 --- a/puppet/cinder-storage-post.yaml +++ b/puppet/cinder-storage-post.yaml @@ -14,8 +14,20 @@ parameters: resources: + VolumeArtifactsConfig: + type: deploy-artifacts.yaml + + VolumeArtifactsDeploy: + type: OS::Heat::StructuredDeployments + properties: + servers: {get_param: servers} + config: {get_resource: VolumeArtifactsConfig} + input_values: + update_identifier: {get_param: NodeConfigIdentifiers} + VolumePuppetConfig: type: OS::Heat::SoftwareConfig + depends_on: VolumeArtifactsDeploy properties: group: puppet options: diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml index 0bec3e93..dedd5142 100644 --- a/puppet/cinder-storage.yaml +++ b/puppet/cinder-storage.yaml @@ -343,11 +343,54 @@ outputs: hosts_entry: 
value: str_replace: - template: "IP HOST.DOMAIN HOST" + template: | + PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST + EXTERNALIP EXTERNALHOST + INTERNAL_APIIP INTERNAL_APIHOST + STORAGEIP STORAGEHOST + STORAGE_MGMTIP STORAGE_MGMTHOST + TENANTIP TENANTHOST + MANAGEMENTIP MANAGEMENTHOST params: - IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]} + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]} DOMAIN: {get_param: CloudDomain} - HOST: {get_attr: [BlockStorage, name]} + PRIMARYHOST: {get_attr: [BlockStorage, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: + list_join: + - '-' + - - {get_attr: [BlockStorage, name]} + - external + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: + list_join: + - '-' + - - {get_attr: [BlockStorage, name]} + - internalapi + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: + list_join: + - '-' + - - {get_attr: [BlockStorage, name]} + - storage + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: + list_join: + - '-' + - - {get_attr: [BlockStorage, name]} + - storagemgmt + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: + list_join: + - '-' + - - {get_attr: [BlockStorage, name]} + - tenant + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: + list_join: + - '-' + - - {get_attr: [BlockStorage, name]} + - management nova_server_resource: description: Heat resource handle for the block storage server value: diff --git a/puppet/compute-post.yaml b/puppet/compute-post.yaml index 3861e50c..a122df0e 100644 --- a/puppet/compute-post.yaml +++ b/puppet/compute-post.yaml @@ -17,6 +17,17 @@ parameters: resources: + ComputeArtifactsConfig: + type: deploy-artifacts.yaml + + ComputeArtifactsDeploy: + type: OS::Heat::StructuredDeployments + properties: + servers: {get_param: servers} + config: {get_resource: ComputeArtifactsConfig} + input_values: + update_identifier: {get_param: NodeConfigIdentifiers} + ComputePuppetConfig: type: OS::Heat::SoftwareConfig properties: @@ -30,6 +41,7 @@ resources: ComputePuppetDeployment: type: OS::Heat::StructuredDeployments + depends_on: ComputeArtifactsDeploy properties: name: ComputePuppetDeployment servers: {get_param: servers} diff --git a/puppet/compute.yaml b/puppet/compute.yaml index 8b2bcd33..375d5032 100644 --- a/puppet/compute.yaml +++ b/puppet/compute.yaml @@ -118,6 +118,15 @@ parameters: default: nic1 description: A port to add to the NeutronPhysicalBridge. type: string + NeutronTenantMtu: + description: > + The default MTU for tenant networks. For VXLAN/GRE tunneling, this should + be at least 50 bytes smaller than the MTU on the physical network. This + value will be used to set the MTU on the virtual Ethernet device. + This number is related to the value of NeutronDnsmasqOptions, since that + will determine the MTU that is assigned to the VM host through DHCP. 
+ default: 1400 + type: number NeutronTunnelTypes: type: comma_delimited_list description: | @@ -258,6 +267,10 @@ parameters: description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true + UpgradeLevelNovaCompute: + type: string + description: Nova Compute upgrade level + default: '' EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -434,9 +447,11 @@ resources: - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common + - neutron_bigswitch_data # Optionally provided by ComputeExtraConfigPre - cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre - nova_nuage_data # Optionally provided by ComputeExtraConfigPre - midonet_data # Optionally provided by AllNodesExtraConfig + - neutron_opencontrail_data # Optionally provided by ComputeExtraConfigPre datafiles: compute_extraconfig: mapped_data: {get_param: NovaComputeExtraConfig} @@ -455,6 +470,7 @@ resources: nova::rabbit_password: {get_input: rabbit_password} nova::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} nova::rabbit_port: {get_input: rabbit_client_port} + nova::upgrade_level_compute: {get_input: upgrade_level_nova_compute} nova_compute_driver: {get_input: nova_compute_driver} nova::compute::libvirt::libvirt_virt_type: {get_input: nova_compute_libvirt_type} nova::compute::neutron::libvirt_vif_driver: {get_input: nova_compute_libvirt_vif_driver} @@ -463,7 +479,11 @@ resources: nova::compute::rbd::ephemeral_storage: {get_input: nova_enable_rbd_backend} rbd_persistent_storage: {get_input: cinder_enable_rbd_backend} nova_password: {get_input: nova_password} + nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address} + nova::vncproxy::common::vncproxy_protocol: {get_input: nova_vncproxy_protocol} + nova::vncproxy::common::vncproxy_host: {get_input: nova_vncproxy_host} + nova::vncproxy::common::vncproxy_port: {get_input: nova_vncproxy_port} nova::network::neutron::neutron_ovs_bridge: {get_input: nova_ovs_bridge} nova::network::neutron::security_group_api: {get_input: nova_security_group_api} ceilometer::debug: {get_input: debug} @@ -487,6 +507,7 @@ resources: neutron_host: {get_input: neutron_host} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} + neutron::network_device_mtu: {get_input: neutron_tenant_mtu} neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types} neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types} neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions} @@ -498,9 +519,9 @@ resources: neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} neutron_physical_bridge: {get_input: neutron_physical_bridge} neutron_public_interface: {get_input: neutron_public_interface} - nova::network::neutron::neutron_admin_password: {get_input: neutron_password} + nova::network::neutron::neutron_password: {get_input: neutron_password} nova::network::neutron::neutron_url: {get_input: neutron_internal_url} - nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} + nova::network::neutron::neutron_auth_url: {get_input: neutron_auth_url} neutron_router_distributed: {get_input: neutron_router_distributed} neutron_agent_mode: {get_input: neutron_agent_mode} neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} @@ -535,8 +556,12 @@ resources: 
nova_enable_rbd_backend: {get_param: NovaEnableRbdBackend} cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend} nova_vnc_proxyclient_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaVncProxyNetwork]}]} + nova_vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]} + nova_vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host]} + nova_vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]} nova_ovs_bridge: {get_param: NovaOVSBridge} nova_security_group_api: {get_param: NovaSecurityGroupAPI} + upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute} ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} ceilometer_password: {get_param: CeilometerPassword} ceilometer_compute_agent: {get_param: CeilometerComputeAgent} @@ -581,6 +606,7 @@ resources: template: MAPPINGS params: MAPPINGS: {get_param: NeutronBridgeMappings} + neutron_tenant_mtu: {get_param: NeutronTenantMtu} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} neutron_physical_bridge: {get_param: NeutronPhysicalBridge} @@ -612,7 +638,7 @@ resources: AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]} - neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri]} + neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]} keystone_vip: {get_param: KeystonePublicApiVirtualIP} admin_password: {get_param: AdminPassword} rabbit_username: {get_param: RabbitUserName} @@ -689,11 +715,54 @@ outputs: Server's IP address and hostname in the /etc/hosts format value: str_replace: - template: "IP HOST.DOMAIN HOST" + template: | + PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST + EXTERNALIP EXTERNALHOST + INTERNAL_APIIP INTERNAL_APIHOST + STORAGEIP STORAGEHOST + STORAGE_MGMTIP STORAGE_MGMTHOST + TENANTIP TENANTHOST + MANAGEMENTIP MANAGEMENTHOST params: - IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]} + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]} DOMAIN: {get_param: CloudDomain} - HOST: {get_attr: [NovaCompute, name]} + PRIMARYHOST: {get_attr: [NovaCompute, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: + list_join: + - '-' + - - {get_attr: [NovaCompute, name]} + - external + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: + list_join: + - '-' + - - {get_attr: [NovaCompute, name]} + - internalapi + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: + list_join: + - '-' + - - {get_attr: [NovaCompute, name]} + - storage + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: + list_join: + - '-' + - - {get_attr: [NovaCompute, name]} + - storagemgmt + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: + list_join: + - '-' + - - {get_attr: [NovaCompute, name]} + - tenant + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: + list_join: + - '-' + - - {get_attr: [NovaCompute, name]} + - management nova_server_resource: description: Heat resource handle for the Nova compute server value: diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml index d250dd70..713ad706 100644 --- a/puppet/controller-post.yaml +++ b/puppet/controller-post.yaml @@ -17,6 +17,15 @@ parameters: 
resources: + ControllerArtifactsConfig: + type: deploy-artifacts.yaml + + ControllerArtifactsDeploy: + type: OS::Heat::StructuredDeployments + properties: + servers: {get_param: servers} + config: {get_resource: ControllerArtifactsConfig} + ControllerPrePuppet: type: OS::TripleO::Tasks::ControllerPrePuppet properties: @@ -33,7 +42,7 @@ resources: # e.g all Deployment resources should have a *Deployment_StepN suffix ControllerLoadBalancerDeployment_Step1: type: OS::Heat::StructuredDeployments - depends_on: ControllerPrePuppet + depends_on: [ControllerPrePuppet, ControllerArtifactsDeploy] properties: name: ControllerLoadBalancerDeployment_Step1 servers: {get_param: servers} diff --git a/puppet/controller.yaml b/puppet/controller.yaml index bad99378..a28ae562 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -492,6 +492,15 @@ parameters: default: '' description: If set, the public interface is a vlan with this device as the raw device. type: string + NeutronTenantMtu: + description: > + The default MTU for tenant networks. For VXLAN/GRE tunneling, this should + be at least 50 bytes smaller than the MTU on the physical network. This + value will be used to set the MTU on the virtual Ethernet device. + This number is related to the value of NeutronDnsmasqOptions, since that + will determine the MTU that is assigned to the VM host through DHCP. + default: 1400 + type: number NeutronTunnelTypes: default: 'vxlan' description: | @@ -510,7 +519,7 @@ parameters: default: ["1:4094", ] type: comma_delimited_list NeutronPluginExtensions: - default: "qos" + default: "qos,port_security" description: | Comma-separated list of extensions enabled for the Neutron plugin. type: comma_delimited_list @@ -635,6 +644,10 @@ parameters: default: 'UTC' description: The timezone to be set on controller nodes. 
type: string + UpgradeLevelNovaCompute: + type: string + description: Nova Compute upgrade level + default: '' VirtualIP: # DEPRECATED: use per service settings instead type: string default: '' # Has to be here because of the ignored empty value bug @@ -1061,6 +1074,7 @@ resources: params: AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} neutron_password: {get_param: NeutronPassword} + neutron_tenant_mtu: {get_param: NeutronTenantMtu} neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions} neutron_dsn: list_join: @@ -1073,7 +1087,7 @@ resources: neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] } neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] } neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] } - neutron_admin_auth_url: { get_param: [ EndpointMap, KeystoneAdmin, uri_no_suffix ] } + neutron_auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] } nova_internal_url: { get_param: [ EndpointMap, NovaInternal, uri ] } ceilometer_backend: {get_param: CeilometerBackend} ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} @@ -1112,6 +1126,7 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/nova_api' + upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute} instance_name_template: {get_param: InstanceNameTemplate} fencing_config: {get_param: FencingConfig} pcsd_password: {get_param: PcsdPassword} @@ -1211,6 +1226,7 @@ resources: - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre - neutron_nuage_data # Optionally provided by ControllerExtraConfigPre - midonet_data #Optionally provided by AllNodesExtraConfig + - neutron_opencontrail_data # Optionally provided by ControllerExtraConfigPre datafiles: controller_extraconfig: mapped_data: {get_param: ControllerExtraConfig} @@ -1309,6 +1325,9 @@ resources: glance_file_pcmk_fstype: {get_input: glance_file_pcmk_fstype} glance_file_pcmk_manage: {get_input: glance_file_pcmk_manage} glance_file_pcmk_options: {get_input: glance_file_pcmk_options} + glance::notify::rabbitmq::rabbit_userid: {get_input: rabbit_username} + glance::notify::rabbitmq::rabbit_password: {get_input: rabbit_password} + glance::notify::rabbitmq::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} # Heat heat_stack_domain_admin_password: {get_input: heat_stack_domain_admin_password} @@ -1363,7 +1382,7 @@ resources: keystone::admin_workers: {get_input: keystone_workers} keystone::public_workers: {get_input: keystone_workers} keystone_enable_db_purge: {get_input: keystone_enable_db_purge} - + keystone::public_endpoint: {get_input: keystone_public_url} # MongoDB mongodb::server::bind_ip: {get_input: mongo_db_network} mongodb::server::nojournal: {get_input: mongodb_no_journal} @@ -1391,6 +1410,7 @@ resources: neutron::server::database_connection: {get_input: neutron_dsn} neutron::server::api_workers: {get_input: neutron_workers} neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge} + neutron::network_device_mtu: {get_input: neutron_tenant_mtu} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata} @@ -1416,7 +1436,7 @@ resources: neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} neutron::plugins::ml2::vni_ranges: {get_input: 
neutron_vni_ranges} - neutron::agents::ml2::ovs:bridge_mappings: {get_input: neutron_bridge_mappings} + neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings} neutron_public_interface: {get_input: neutron_public_interface} neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device} neutron_public_interface_default_route: {get_input: neutron_public_interface_default_route} @@ -1436,7 +1456,7 @@ resources: neutron::keystone::auth::password: {get_input: neutron_password } neutron::keystone::auth::region: {get_input: keystone_region} neutron::server::notifications::nova_url: {get_input: nova_internal_url} - neutron::server::notifications::auth_url: {get_input: neutron_admin_auth_url} + neutron::server::notifications::auth_url: {get_input: neutron_auth_url} neutron::server::notifications::tenant_name: 'service' neutron::server::notifications::project_name: 'service' neutron::server::notifications::password: {get_input: nova_password} @@ -1466,6 +1486,7 @@ resources: nova::rabbit_password: {get_input: rabbit_password} nova::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} nova::rabbit_port: {get_input: rabbit_client_port} + nova::upgrade_level_compute: {get_input: upgrade_level_nova_compute} nova::debug: {get_input: debug} nova::api::auth_uri: {get_input: keystone_auth_uri} nova::api::identity_uri: {get_input: keystone_identity_uri} @@ -1475,14 +1496,15 @@ resources: nova::api::osapi_compute_workers: {get_input: nova_workers} nova::api::ec2_workers: {get_input: nova_workers} nova::api::metadata_workers: {get_input: nova_workers} + nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} nova::database_connection: {get_input: nova_dsn} nova::api_database_connection: {get_input: nova_api_dsn} nova::glance_api_servers: {get_input: glance_api_servers} nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} nova::api::instance_name_template: {get_input: instance_name_template} - nova::network::neutron::neutron_admin_password: {get_input: neutron_password} + nova::network::neutron::neutron_password: {get_input: neutron_password} nova::network::neutron::neutron_url: {get_input: neutron_internal_url} - nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} + nova::network::neutron::neutron_auth_url: {get_input: neutron_auth_url} nova::vncproxy::host: {get_input: nova_api_network} nova::db::mysql::password: {get_input: nova_password} nova::db::mysql_api::password: {get_input: nova_password} @@ -1610,11 +1632,54 @@ outputs: Server's IP address and hostname in the /etc/hosts format value: str_replace: - template: IP HOST.DOMAIN HOST + template: | + PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST + EXTERNALIP EXTERNALHOST + INTERNAL_APIIP INTERNAL_APIHOST + STORAGEIP STORAGEHOST + STORAGE_MGMTIP STORAGE_MGMTHOST + TENANTIP TENANTHOST + MANAGEMENTIP MANAGEMENTHOST params: - IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]} + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]} DOMAIN: {get_param: CloudDomain} - HOST: {get_attr: [Controller, name]} + PRIMARYHOST: {get_attr: [Controller, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: + list_join: + - '-' + - - {get_attr: [Controller, name]} + - external + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: + list_join: + - '-' + - - {get_attr: [Controller, name]} + - 
internalapi + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: + list_join: + - '-' + - - {get_attr: [Controller, name]} + - storage + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: + list_join: + - '-' + - - {get_attr: [Controller, name]} + - storagemgmt + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: + list_join: + - '-' + - - {get_attr: [Controller, name]} + - tenant + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: + list_join: + - '-' + - - {get_attr: [Controller, name]} + - management nova_server_resource: description: Heat resource handle for the Nova compute server value: diff --git a/puppet/deploy-artifacts.sh b/puppet/deploy-artifacts.sh new file mode 100644 index 00000000..22fde9a7 --- /dev/null +++ b/puppet/deploy-artifacts.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +TMP_DATA=$(mktemp -d) +function cleanup { + rm -Rf "$TMP_DATA" +} +trap cleanup EXIT + +if [ -n "$artifact_urls" ]; then + for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do + curl -o $TMP_DATA/file_data "$URL" + if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then + yum install -y $TMP_DATA/file_data + elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then + pushd / + tar xvzf $TMP_DATA/file_data + popd + else + echo "ERROR: Unsupported file format." + exit 1 + fi + rm $TMP_DATA/file_data + done +else + echo "No artifact_urls was set. Skipping..." +fi diff --git a/puppet/deploy-artifacts.yaml b/puppet/deploy-artifacts.yaml new file mode 100644 index 00000000..17f84163 --- /dev/null +++ b/puppet/deploy-artifacts.yaml @@ -0,0 +1,32 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to install deployment artifacts (tarballs and/or + distribution packages) via HTTP URLs. The contents of the URLs can + be tarballs or distribution packages (RPMs). If a tarball URL is supplied + it is extracted onto the target node during deployment. If a package is + deployed it is installed from the supplied URL. Note, you need the + heat-config-script element built into your images, due to the script group + below. + +parameters: + DeployArtifactURLs: + default: [] + description: A list of HTTP URLs containing deployment artifacts. + Currently supports tarballs and RPM packages. + type: comma_delimited_list + +resources: + DeployArtifacts: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: artifact_urls + default: {list_join: [' ', {get_param: DeployArtifactURLs}]} + config: {get_file: ./deploy-artifacts.sh} + +outputs: + OS::stack_id: + description: The ID of the DeployArtifacts resource. + value: {get_resource: DeployArtifacts} diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml index 7cefc24b..ebd6c251 100644 --- a/puppet/extraconfig/ceph/ceph-external-config.yaml +++ b/puppet/extraconfig/ceph/ceph-external-config.yaml @@ -76,7 +76,7 @@ resources: cinder_rbd_pool_name: {get_param: CinderRbdPoolName} glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName} - glance::backend::rbd::rbd_store_pool: {get_param: CephClientUserName} + glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} nova::compute::rbd::rbd_keyring: list_join: - '.'
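As an illustration of how the new artifacts hook would be exercised (the parameter name comes from deploy-artifacts.yaml above; the URL is hypothetical), DeployArtifactURLs can be set in an environment file and is picked up by the per-role *ArtifactsDeploy resources added in the -post templates:

parameter_defaults:
  # Hypothetical artifact location; tarballs and RPMs are supported.
  DeployArtifactURLs:
    - "http://192.0.2.1/artifacts/puppet-modules.tar.gz"

Because deploy-artifacts.sh extracts tarballs after changing into /, any tarball listed here needs its paths laid out relative to the root filesystem (for example etc/puppet/modules/...).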
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml new file mode 100644 index 00000000..49c77190 --- /dev/null +++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml @@ -0,0 +1,45 @@ +heat_template_version: 2015-04-30 + +description: Configure hieradata for Big Switch agents on compute node + +parameters: + server: + description: ID of the controller node to apply this config to + type: string + NeutronBigswitchAgentEnabled: + description: The state of the neutron-bsn-agent service. + type: boolean + default: false + NeutronBigswitchLLDPEnabled: + description: The state of the neutron-bsn-lldp service. + type: boolean + default: true + + +resources: + NeutronBigswitchConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + neutron_bigswitch_data: + mapped_data: + neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent} + neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp} + + NeutronBigswitchDeployment: + type: OS::Heat::StructuredDeployment + properties: + name: NeutronBigswitchDeployment + config: {get_resource: NeutronBigswitchConfig} + server: {get_param: server} + input_values: + neutron_enable_bigswitch_agent: {get_param: NeutronBigswitchAgentEnabled} + neutron_enable_bigswitch_lldp: {get_param: NeutronBigswitchLLDPEnabled} + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [NeutronBigswitchDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml new file mode 100644 index 00000000..e496553a --- /dev/null +++ b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml @@ -0,0 +1,47 @@ +heat_template_version: 2015-04-30 + +description: Compute node hieradata for Neutron OpenContrail configuration + +parameters: + server: + description: ID of the compute node to apply this config to + type: string + ContrailApiServerIp: + description: IP address of the OpenContrail API server + type: string + ContrailApiServerPort: + description: Port of the OpenContrail API + type: string + default: 8082 + +resources: + ComputeContrailConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + neutron_opencontrail_data: + mapped_data: + nova::network::neutron::network_api_class: nova.network.neutronv2.api.API + + contrail::vrouter::provision_vrouter::api_address: {get_input: contrail_api_server_ip} + contrail::vrouter::provision_vrouter::api_port: {get_input: contrail_api_server_port} + contrail::vrouter::provision_vrouter::keystone_admin_user: admin + contrail::vrouter::provision_vrouter::keystone_admin_tenant_name: admin + contrail::vrouter::provision_vrouter::keystone_admin_password: '"%{::admin_password}"' + + ComputeContrailDeployment: + type: OS::Heat::StructuredDeployment + properties: + config: {get_resource: ComputeContrailConfig} + server: {get_param: server} + input_values: + contrail_api_server_ip: {get_param: ContrailApiServerIp} + contrail_api_server_port: {get_param: ContrailApiServerPort} + +outputs: + deploy_stdout: + description: Output of the extra hiera data deployment + value: {get_attr: [ComputeContrailDeployment, deploy_stdout]} diff --git 
a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml index 1e652960..467f57cc 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml @@ -44,7 +44,6 @@ resources: datafiles: neutron_bigswitch_data: mapped_data: - neutron_enable_bigswitch_ml2: true neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers} neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth} neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml new file mode 100644 index 00000000..5c686fe7 --- /dev/null +++ b/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml @@ -0,0 +1,62 @@ +heat_template_version: 2015-04-30 + +description: Controller hieradata for Neutron OpenContrail configuration + +parameters: + server: + description: ID of the controller node to apply this config to + type: string + ContrailApiServerIp: + description: IP address of the OpenContrail API server + type: string + ContrailApiServerPort: + description: Port of the OpenContrail API + type: string + default: 8082 + ContrailMultiTenancy: + description: Whether to enable multi tenancy + type: boolean + default: false + ContrailExtensions: + description: List of OpenContrail extensions to be enabled + type: comma_delimited_list + default: '' + +resources: + ControllerContrailConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + neutron_opencontrail_data: + mapped_data: + neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions + + neutron::plugins::opencontrail::api_server_ip: {get_input: contrail_api_server_ip} + neutron::plugins::opencontrail::api_server_port: {get_input: contrail_api_server_port} + neutron::plugins::opencontrail::multi_tenancy: {get_input: contrail_multi_tenancy} + neutron::plugins::opencontrail::contrail_extensions: {get_input: contrail_extensions} + neutron::plugins::opencontrail::keystone_auth_url: '"%{hiera(''keystone_auth_uri'')}"' + neutron::plugins::opencontrail::keystone_admin_user: admin + neutron::plugins::opencontrail::keystone_admin_tenant_name: admin + neutron::plugins::opencontrail::keystone_admin_password: '"%{hiera(''admin_password'')}"' + neutron::plugins::opencontrail::keystone_admin_token: '"%{hiera(''keystone::admin_token'')}"' + + ControllerContrailDeployment: + type: OS::Heat::StructuredDeployment + properties: + config: {get_resource: ControllerContrailConfig} + server: {get_param: server} + input_values: + contrail_api_server_ip: {get_param: ContrailApiServerIp} + contrail_api_server_port: {get_param: ContrailApiServerPort} + contrail_multi_tenancy: {get_param: ContrailMultiTenancy} + contrail_extensions: {get_param: ContrailExtensions} + + +outputs: + deploy_stdout: + description: Output of the extra hiera data deployment + value: {get_attr: [ControllerContrailDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/tls/ca-inject.yaml b/puppet/extraconfig/tls/ca-inject.yaml index 5a36e951..aab42849 100644 --- a/puppet/extraconfig/tls/ca-inject.yaml +++ b/puppet/extraconfig/tls/ca-inject.yaml @@ -45,7 +45,7 @@ resources: cat > ${cacert_path} << EOF 
${cacert_content} EOF - chmod 0440 ${cacert_path} + chmod 0444 ${cacert_path} chown root:root ${cacert_path} ${update_anchor_command} md5sum ${cacert_path} > ${heat_outputs_path}.root_cert_md5sum diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index b4b51abf..03366c7e 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -8,12 +8,15 @@ ceilometer::agent::auth::auth_region: 'regionOne' ceilometer::agent::auth::auth_tenant_name: 'admin' nova::api::admin_tenant_name: 'service' -nova::network::neutron::neutron_admin_tenant_name: 'service' -nova::network::neutron::neutron_admin_username: 'neutron' +nova::network::neutron::neutron_project_name: 'service' +nova::network::neutron::neutron_username: 'neutron' nova::network::neutron::dhcp_domain: '' neutron::allow_overlapping_ips: true +kernel_modules: + nf_conntrack: {} + sysctl_settings: net.ipv4.tcp_keepalive_intvl: value: 1 @@ -21,6 +24,15 @@ sysctl_settings: value: 5 net.ipv4.tcp_keepalive_time: value: 5 + net.nf_conntrack_max: + value: 500000 + net.netfilter.nf_conntrack_max: + value: 500000 + # prevent neutron bridges from autoconfiguring ipv6 addresses + net.ipv6.conf.default.accept_ra: + value: 0 + net.ipv6.conf.default.autoconf: + value: 0 nova::rabbit_heartbeat_timeout_threshold: 60 neutron::rabbit_heartbeat_timeout_threshold: 60 diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index a4dda4b4..e00fffaf 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -91,6 +91,7 @@ nova::api::sync_db_api: true nova::scheduler::filter::ram_allocation_ratio: '1.0' nova::cron::archive_deleted_rows::hour: '*/12' nova::cron::archive_deleted_rows::destination: '/dev/null' +nova::notification_driver: messaging # ceilometer ceilometer::agent::auth::auth_endpoint_type: 'internalURL' @@ -98,9 +99,7 @@ ceilometer::agent::auth::auth_endpoint_type: 'internalURL' # cinder cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler cinder::cron::db_purge::destination: '/dev/null' -cinder::config::cinder_config: - DEFAULT/host: - value: hostgroup +cinder::host: hostgroup # heat heat::engine::configure_delegated_roles: false diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index 1d801adc..0db5b45a 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -16,7 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall +create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) +Exec <| tag == 'kmod::load' |> -> Sysctl <| |> if count(hiera('ntp::servers')) > 0 { include ::ntp diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index 4b0f98e4..7925f50a 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -16,7 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall +create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) +Exec <| tag == 'kmod::load' |> -> Sysctl <| |> if count(hiera('ntp::servers')) > 0 { include ::ntp @@ -118,6 +120,15 @@ elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV cassandra_seeds => $cassandra_node_ips } } +elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { + + include 
::contrail::vrouter + # NOTE: it's not possible to use this class without a functional + # contrail controller up and running + #class {'::contrail::vrouter::provision_vrouter': + # require => Class['contrail::vrouter'], + #} +} else { include ::neutron::plugins::ml2 @@ -129,6 +140,10 @@ else { n1kv_version => hiera('n1kv_vem_version', undef), } } + + if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::agents::bigswitch + } } diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index 276093ba..c304e94e 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -20,7 +20,9 @@ $enable_load_balancer = hiera('enable_load_balancer', true) if hiera('step') >= 1 { + create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) + Exec <| tag == 'kmod::load' |> -> Sysctl <| |> $controller_node_ips = split(hiera('controller_node_ips'), ',') @@ -83,11 +85,15 @@ if hiera('step') >= 2 { $mysql_config_file = '/etc/my.cnf.d/server.cnf' } # TODO Galara + # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we + # set bind-address to a hostname instead of an ip address; to move Mysql + # from internal_api on another network we'll have to customize both + # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap class { '::mysql::server': config_file => $mysql_config_file, override_options => { 'mysqld' => { - 'bind-address' => hiera('mysql_bind_host'), + 'bind-address' => $::hostname, 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', }, @@ -216,7 +222,7 @@ if hiera('step') >= 3 { $http_store = ['glance.store.http.Store'] $glance_store = concat($http_store, $backend_store) - # TODO: notifications, scrubber, etc. 
+ # TODO: scrubber and other additional optional features include ::glance include ::glance::config class { '::glance::api': @@ -224,6 +230,10 @@ if hiera('step') >= 3 { } include ::glance::registry include join(['::glance::backend::', $glance_backend]) + $rabbit_port = hiera('rabbitmq::port') + class { '::glance::notify::rabbitmq': + rabbit_hosts => suffix(hiera('rabbit_node_ips'), ":${rabbit_port}"), + } class { '::nova' : memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'), @@ -294,11 +304,13 @@ if hiera('step') >= 3 { include ::neutron::server include ::neutron::server::notifications - # If the value of core plugin is set to 'nuage', - # include nuage core plugin, and it does not + # If the value of core plugin is set to 'nuage' or 'opencontrail', + # include nuage or opencontrail core plugins, and it does not # need the l3, dhcp and metadata agents if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { include ::neutron::plugins::nuage + } elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { + include ::neutron::plugins::opencontrail } else { include ::neutron::agents::l3 include ::neutron::agents::dhcp @@ -349,8 +361,9 @@ if hiera('step') >= 3 { include ::neutron::plugins::ml2::cisco::type_nexus_vxlan } - if hiera('neutron_enable_bigswitch_ml2', false) { + if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::bigswitch::restproxy + include ::neutron::agents::bigswitch } neutron_l3_agent_config { 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); @@ -368,10 +381,12 @@ if hiera('step') >= 3 { include ::cinder include ::cinder::config + include ::tripleo::ssl::cinder_config include ::cinder::api include ::cinder::glance include ::cinder::scheduler include ::cinder::volume + include ::cinder::ceilometer class { '::cinder::setup_test_volume': size => join([hiera('cinder_lvm_loop_device_size'), 'M']), } @@ -440,7 +455,7 @@ if hiera('step') >= 3 { dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef), iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef), iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef), - dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef), + dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef), dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef), dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef), } @@ -557,7 +572,9 @@ if hiera('step') >= 3 { Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } # Heat - include ::heat + class { '::heat' : + notification_driver => 'messaging', + } include ::heat::config include ::heat::api include ::heat::api_cfn diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 8fbb25ab..d44dc69c 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -39,7 +39,9 @@ $non_pcmk_start = hiera('step') >= 4 if hiera('step') >= 1 { + create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) + Exec <| tag == 'kmod::load' |> -> Sysctl <| |> include ::timezone @@ -134,6 
+136,11 @@ if hiera('step') >= 1 { $galera_nodes = downcase(hiera('galera_node_names', $::hostname)) $galera_nodes_count = count(split($galera_nodes, ',')) + # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we + # set bind-address to a hostname instead of an ip address; to move Mysql + # from internal_api on another network we'll have to customize both + # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap + $mysql_bind_host = hiera('mysql_bind_host') $mysqld_options = { 'mysqld' => { 'skip-name-resolve' => '1', @@ -143,7 +150,7 @@ if hiera('step') >= 1 { 'innodb_locks_unsafe_for_binlog'=> '1', 'query_cache_size' => '0', 'query_cache_type' => '0', - 'bind-address' => hiera('mysql_bind_host'), + 'bind-address' => $::hostname, 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so', @@ -159,6 +166,7 @@ if hiera('step') >= 1 { 'wsrep_drupal_282555_workaround'=> '0', 'wsrep_causal_reads' => '0', 'wsrep_sst_method' => 'rsync', + 'wsrep_provider_options' => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;", }, } @@ -348,6 +356,7 @@ if hiera('step') >= 2 { ocf_agent_name => 'heartbeat:rabbitmq-cluster', resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'', clone_params => 'ordered=true interleave=true', + meta_params => 'notify=true', require => Class['::rabbitmq'], } @@ -497,9 +506,12 @@ MYSQL_HOST=localhost\n", if hiera('step') >= 3 { class { '::keystone': - sync_db => $sync_db, - manage_service => false, - enabled => false, + sync_db => $sync_db, + manage_service => false, + enabled => false, + # TODO: when keystone resources will be managed by puppet-keystone + # for the overcloud, set enable_bootstrap to the default value (True). 
+ enable_bootstrap => false, } include ::keystone::config @@ -568,6 +580,10 @@ if hiera('step') >= 3 { enabled => false, } include join(['::glance::backend::', $glance_backend]) + $rabbit_port = hiera('rabbitmq::port') + class { '::glance::notify::rabbitmq': + rabbit_hosts => suffix(hiera('rabbit_node_ips'), ":${rabbit_port}"), + } class { '::nova' : memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'), @@ -665,6 +681,9 @@ if hiera('step') >= 3 { if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { include ::neutron::plugins::nuage } + if hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { + include ::neutron::plugins::opencontrail + } if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { class {'::neutron::plugins::midonet': midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), @@ -724,8 +743,9 @@ if hiera('step') >= 3 { } } - if hiera('neutron_enable_bigswitch_ml2', false) { + if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::bigswitch::restproxy + include ::neutron::agents::bigswitch } neutron_l3_agent_config { 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); @@ -733,9 +753,13 @@ if hiera('step') >= 3 { neutron_dhcp_agent_config { 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); } + neutron_config { + 'DEFAULT/notification_driver': value => 'messaging'; + } include ::cinder include ::cinder::config + include ::tripleo::ssl::cinder_config class { '::cinder::api': sync_db => $sync_db, manage_service => false, @@ -750,6 +774,7 @@ if hiera('step') >= 3 { enabled => false, } include ::cinder::glance + include ::cinder::ceilometer class { '::cinder::setup_test_volume': size => join([hiera('cinder_lvm_loop_device_size'), 'M']), } @@ -818,7 +843,7 @@ if hiera('step') >= 3 { dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef), iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef), iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef), - dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef), + dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef), dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef), dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef), } @@ -976,7 +1001,8 @@ if hiera('step') >= 3 { # Heat include ::heat::config class { '::heat' : - sync_db => $sync_db, + sync_db => $sync_db, + notification_driver => 'messaging', } class { '::heat::api' : manage_service => false, @@ -1401,24 +1427,19 @@ if hiera('step') >= 4 { # Nova pacemaker::resource::service { $::nova::params::api_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::conductor_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::consoleauth_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', require => Pacemaker::Resource::Service[$::apache::params::service_name], } pacemaker::resource::service { $::nova::params::vncproxy_service_name : clone_params => 
'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::scheduler_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint': diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp index 63ac396e..1ac66904 100644 --- a/puppet/manifests/overcloud_object.pp +++ b/puppet/manifests/overcloud_object.pp @@ -16,7 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall +create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) +Exec <| tag == 'kmod::load' |> -> Sysctl <| |> if count(hiera('ntp::servers')) > 0 { include ::ntp diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp index 5a69725a..72cd36c3 100644 --- a/puppet/manifests/overcloud_volume.pp +++ b/puppet/manifests/overcloud_volume.pp @@ -16,7 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall +create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) +Exec <| tag == 'kmod::load' |> -> Sysctl <| |> if count(hiera('ntp::servers')) > 0 { include ::ntp diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml index a55b3959..eb06b241 100644 --- a/puppet/swift-storage-post.yaml +++ b/puppet/swift-storage-post.yaml @@ -12,9 +12,19 @@ parameters: type: json description: Value which changes if the node configuration may need to be re-applied - resources: + StorageArtifactsConfig: + type: deploy-artifacts.yaml + + StorageArtifactsDeploy: + type: OS::Heat::StructuredDeployments + properties: + servers: {get_param: servers} + config: {get_resource: StorageArtifactsConfig} + input_values: + update_identifier: {get_param: NodeConfigIdentifiers} + StoragePuppetConfig: type: OS::Heat::SoftwareConfig properties: @@ -28,6 +38,7 @@ resources: StorageDeployment_Step1: type: OS::Heat::StructuredDeployments + depends_on: StorageArtifactsDeploy properties: name: StorageDeployment_Step1 servers: {get_param: servers} diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml index 142e47cc..d36a9c17 100644 --- a/puppet/swift-storage.yaml +++ b/puppet/swift-storage.yaml @@ -302,11 +302,54 @@ outputs: hosts_entry: value: str_replace: - template: "IP HOST.DOMAIN HOST" + template: | + PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST + EXTERNALIP EXTERNALHOST + INTERNAL_APIIP INTERNAL_APIHOST + STORAGEIP STORAGEHOST + STORAGE_MGMTIP STORAGE_MGMTHOST + TENANTIP TENANTHOST + MANAGEMENTIP MANAGEMENTHOST params: - IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]} + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]} DOMAIN: {get_param: CloudDomain} - HOST: {get_attr: [SwiftStorage, name]} + PRIMARYHOST: {get_attr: [SwiftStorage, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: + list_join: + - '-' + - - {get_attr: [SwiftStorage, name]} + - external + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: + list_join: + - '-' + - - {get_attr: [SwiftStorage, name]} + - internalapi + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: + list_join: + - '-' + - - {get_attr: [SwiftStorage, name]} + - storage + STORAGE_MGMTIP: {get_attr: 
[StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: + list_join: + - '-' + - - {get_attr: [SwiftStorage, name]} + - storagemgmt + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: + list_join: + - '-' + - - {get_attr: [SwiftStorage, name]} + - tenant + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: + list_join: + - '-' + - - {get_attr: [SwiftStorage, name]} + - management nova_server_resource: description: Heat resource handle for the swift storage server value: |
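For reference, the widened hosts_entry template used across these role templates renders one line per network. With a hypothetical node name and illustrative addresses (none of these values appear in the change), the output for a single node would look like:

192.0.2.10 overcloud-controller-0.example.com overcloud-controller-0
10.0.0.10 overcloud-controller-0-external
172.17.0.10 overcloud-controller-0-internalapi
172.18.0.10 overcloud-controller-0-storage
172.19.0.10 overcloud-controller-0-storagemgmt
172.16.0.10 overcloud-controller-0-tenant
192.168.24.10 overcloud-controller-0-management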