Diffstat (limited to 'puppet')
32 files changed, 332 insertions, 142 deletions
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index 3dd3d5c9..e85975d4 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -69,8 +69,8 @@ resources: allNodesConfigImpl: type: OS::Heat::StructuredConfig properties: + group: os-apply-config config: - completion-signal: {get_input: deploy_signal_id} hosts: list_join: - "\n" @@ -227,6 +227,15 @@ resources: list_join: - "','" - {get_param: neutron_api_node_ips} + # TODO: pass a `midonet_api_node_ips` var + midonet_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: neutron_api_node_ips} keystone_public_api_node_ips: str_replace: template: "['SERVERS_LIST']" diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml index 0f7dd36f..f9c53465 100644 --- a/puppet/ceph-storage-post.yaml +++ b/puppet/ceph-storage-post.yaml @@ -30,6 +30,7 @@ resources: CephStorageDeployment_Step1: type: OS::Heat::StructuredDeployments properties: + name: CephStorageDeployment_Step1 servers: {get_param: servers} config: {get_resource: CephStoragePuppetConfig} input_values: diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml index 3044d975..e310e1f5 100644 --- a/puppet/ceph-storage.yaml +++ b/puppet/ceph-storage.yaml @@ -190,6 +190,7 @@ resources: NetworkDeployment: type: OS::TripleO::SoftwareDeployment properties: + name: NetworkDeployment config: {get_resource: NetworkConfig} server: {get_resource: CephStorage} actions: {get_param: NetworkDeploymentActions} @@ -198,6 +199,7 @@ resources: type: OS::Heat::StructuredDeployment depends_on: NetworkDeployment properties: + name: CephStorageDeployment config: {get_resource: CephStorageConfig} server: {get_resource: CephStorage} input_values: diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml index bb7f94de..9b7c752a 100644 --- a/puppet/cinder-storage-post.yaml +++ b/puppet/cinder-storage-post.yaml @@ -28,6 +28,7 @@ resources: VolumeDeployment_Step1: type: OS::Heat::StructuredDeployments properties: + name: VolumeDeployment_Step1 servers: {get_param: servers} config: {get_resource: VolumePuppetConfig} diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml index fd9365bd..0bec3e93 100644 --- a/puppet/cinder-storage.yaml +++ b/puppet/cinder-storage.yaml @@ -13,7 +13,7 @@ parameters: description: The iSCSI helper to use with cinder. type: string CinderLVMLoopDeviceSize: - default: 5000 + default: 10280 description: The size of the loopback file used by the cinder LVM driver. 
type: number CinderPassword: @@ -231,6 +231,7 @@ resources: NetworkDeployment: type: OS::TripleO::SoftwareDeployment properties: + name: NetworkDeployment config: {get_resource: NetworkConfig} server: {get_resource: BlockStorage} actions: {get_param: NetworkDeploymentActions} @@ -239,6 +240,7 @@ resources: type: OS::Heat::StructuredDeployment depends_on: NetworkDeployment properties: + name: BlockStorageDeployment server: {get_resource: BlockStorage} config: {get_resource: BlockStorageConfig} input_values: @@ -330,6 +332,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment properties: + name: UpdateDeployment config: {get_resource: UpdateConfig} server: {get_resource: BlockStorage} input_values: diff --git a/puppet/compute-post.yaml b/puppet/compute-post.yaml index b63b06b4..3861e50c 100644 --- a/puppet/compute-post.yaml +++ b/puppet/compute-post.yaml @@ -31,6 +31,7 @@ resources: ComputePuppetDeployment: type: OS::Heat::StructuredDeployments properties: + name: ComputePuppetDeployment servers: {get_param: servers} config: {get_resource: ComputePuppetConfig} input_values: diff --git a/puppet/compute.yaml b/puppet/compute.yaml index 1f7f0c23..31d76263 100644 --- a/puppet/compute.yaml +++ b/puppet/compute.yaml @@ -118,6 +118,15 @@ parameters: default: nic1 description: A port to add to the NeutronPhysicalBridge. type: string + NeutronTenantMtu: + description: > + The default MTU for tenant networks. For VXLAN/GRE tunneling, this should + be at least 50 bytes smaller than the MTU on the physical network. This + value will be used to set the MTU on the virtual Ethernet device. + This number is related to the value of NeutronDnsmasqOptions, since that + will determine the MTU that is assigned to the VM host through DHCP. + default: 1400 + type: number NeutronTunnelTypes: type: comma_delimited_list description: | @@ -127,13 +136,13 @@ parameters: description: | Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation - default: ["1:1000", ] + default: ["1:4094", ] type: comma_delimited_list NeutronVniRanges: description: | Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of VXLAN VNI IDs that are available for tenant network allocation - default: ["1:1000", ] + default: ["1:4094", ] type: comma_delimited_list NeutronPublicInterfaceRawDevice: default: '' @@ -409,6 +418,7 @@ resources: NetworkDeployment: type: OS::TripleO::SoftwareDeployment properties: + name: NetworkDeployment config: {get_resource: NetworkConfig} server: {get_resource: NovaCompute} actions: {get_param: NetworkDeploymentActions} @@ -433,6 +443,7 @@ resources: - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common + - neutron_bigswitch_data # Optionally provided by ComputeExtraConfigPre - cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre - nova_nuage_data # Optionally provided by ComputeExtraConfigPre - midonet_data # Optionally provided by AllNodesExtraConfig @@ -462,6 +473,7 @@ resources: nova::compute::rbd::ephemeral_storage: {get_input: nova_enable_rbd_backend} rbd_persistent_storage: {get_input: cinder_enable_rbd_backend} nova_password: {get_input: nova_password} + nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address} nova::vncproxy::common::vncproxy_protocol: {get_input: nova_vncproxy_protocol} nova::vncproxy::common::vncproxy_host: {get_input: nova_vncproxy_host} @@ -489,8 +501,9 @@ 
resources: neutron_host: {get_input: neutron_host} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} + neutron::network_device_mtu: {get_input: neutron_tenant_mtu} neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types} - neutron::agents::ml2::ovs:tunnel_types: {get_input: neutron_tunnel_types} + neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types} neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions} neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} @@ -522,6 +535,7 @@ resources: type: OS::TripleO::SoftwareDeployment depends_on: NetworkDeployment properties: + name: NovaComputeDeployment config: {get_resource: NovaComputeConfig} server: {get_resource: NovaCompute} input_values: @@ -585,6 +599,7 @@ resources: template: MAPPINGS params: MAPPINGS: {get_param: NeutronBridgeMappings} + neutron_tenant_mtu: {get_param: NeutronTenantMtu} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} neutron_physical_bridge: {get_param: NeutronPhysicalBridge} @@ -656,6 +671,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment properties: + name: UpdateDeployment config: {get_resource: UpdateConfig} server: {get_resource: NovaCompute} input_values: diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml index ed8129e7..d250dd70 100644 --- a/puppet/controller-post.yaml +++ b/puppet/controller-post.yaml @@ -35,6 +35,7 @@ resources: type: OS::Heat::StructuredDeployments depends_on: ControllerPrePuppet properties: + name: ControllerLoadBalancerDeployment_Step1 servers: {get_param: servers} config: {get_resource: ControllerPuppetConfig} input_values: @@ -46,6 +47,7 @@ resources: type: OS::Heat::StructuredDeployments depends_on: ControllerLoadBalancerDeployment_Step1 properties: + name: ControllerServicesBaseDeployment_Step2 servers: {get_param: servers} config: {get_resource: ControllerPuppetConfig} input_values: @@ -71,6 +73,7 @@ resources: type: OS::Heat::StructuredDeployments depends_on: ControllerServicesBaseDeployment_Step2 properties: + name: ControllerRingbuilderDeployment_Step3 servers: {get_param: servers} config: {get_resource: ControllerRingbuilderPuppetConfig} input_values: @@ -80,6 +83,7 @@ resources: type: OS::Heat::StructuredDeployments depends_on: ControllerRingbuilderDeployment_Step3 properties: + name: ControllerOvercloudServicesDeployment_Step4 servers: {get_param: servers} config: {get_resource: ControllerPuppetConfig} input_values: @@ -90,6 +94,7 @@ resources: type: OS::Heat::StructuredDeployments depends_on: ControllerOvercloudServicesDeployment_Step4 properties: + name: ControllerOvercloudServicesDeployment_Step5 servers: {get_param: servers} config: {get_resource: ControllerPuppetConfig} input_values: @@ -100,6 +105,7 @@ resources: type: OS::Heat::StructuredDeployments depends_on: ControllerOvercloudServicesDeployment_Step5 properties: + name: ControllerOvercloudServicesDeployment_Step6 servers: {get_param: servers} config: {get_resource: ControllerPuppetConfig} input_values: diff --git a/puppet/controller.yaml b/puppet/controller.yaml index f5378221..36003104 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -61,7 +61,7 @@ parameters: description: The iSCSI helper to use with cinder. 
type: string CinderLVMLoopDeviceSize: - default: 5000 + default: 10280 description: The size of the loopback file used by the cinder LVM driver. type: number CinderNfsMountOptions: @@ -89,10 +89,6 @@ parameters: default: 0 description: Number of workers for Cinder service. type: number - CloudName: - default: overcloud - description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org - type: string ControllerExtraConfig: default: {} description: | @@ -496,6 +492,15 @@ parameters: default: '' description: If set, the public interface is a vlan with this device as the raw device. type: string + NeutronTenantMtu: + description: > + The default MTU for tenant networks. For VXLAN/GRE tunneling, this should + be at least 50 bytes smaller than the MTU on the physical network. This + value will be used to set the MTU on the virtual Ethernet device. + This number is related to the value of NeutronDnsmasqOptions, since that + will determine the MTU that is assigned to the VM host through DHCP. + default: 1400 + type: number NeutronTunnelTypes: default: 'vxlan' description: | @@ -505,16 +510,16 @@ parameters: description: | Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation - default: ["1:1000", ] + default: ["1:4094", ] type: comma_delimited_list NeutronVniRanges: description: | Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of VXLAN VNI IDs that are available for tenant network allocation - default: ["1:1000", ] + default: ["1:4094", ] type: comma_delimited_list NeutronPluginExtensions: - default: "qos" + default: "qos,port_security" description: | Comma-separated list of extensions enabled for the Neutron plugin. type: comma_delimited_list @@ -832,6 +837,7 @@ resources: NetworkDeployment: type: OS::TripleO::SoftwareDeployment properties: + name: NetworkDeployment config: {get_resource: NetworkConfig} server: {get_resource: Controller} actions: {get_param: NetworkDeploymentActions} @@ -859,6 +865,7 @@ resources: type: OS::TripleO::SoftwareDeployment depends_on: NetworkDeployment properties: + name: ControllerDeployment config: {get_resource: ControllerConfig} server: {get_resource: Controller} input_values: @@ -1063,6 +1070,7 @@ resources: params: AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} neutron_password: {get_param: NeutronPassword} + neutron_tenant_mtu: {get_param: NeutronTenantMtu} neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions} neutron_dsn: list_join: @@ -1075,7 +1083,7 @@ resources: neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] } neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] } neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] } - neutron_admin_auth_url: { get_param: [ EndpointMap, KeystoneAdmin, uri ] } + neutron_admin_auth_url: { get_param: [ EndpointMap, KeystoneAdmin, uri_no_suffix ] } nova_internal_url: { get_param: [ EndpointMap, NovaInternal, uri ] } ceilometer_backend: {get_param: CeilometerBackend} ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} @@ -1106,6 +1114,14 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/nova' + nova_api_dsn: + list_join: + - '' + - - 'mysql+pymysql://nova_api:' + - {get_param: NovaPassword} + - '@' + - {get_param: MysqlVirtualIP} + - '/nova_api' instance_name_template: {get_param: InstanceNameTemplate} fencing_config: {get_param: FencingConfig} pcsd_password: {get_param: PcsdPassword} @@ -1336,8 +1352,10 @@ resources: 
keystone_ssl_certificate: {get_input: keystone_ssl_certificate} keystone_ssl_certificate_key: {get_input: keystone_ssl_certificate_key} keystone::database_connection: {get_input: keystone_dsn} - keystone::public_bind_host: {get_input: keystone_public_api_network} keystone::admin_bind_host: {get_input: keystone_admin_api_network} + keystone::public_bind_host: {get_input: keystone_public_api_network} + keystone::wsgi::apache::bind_host: {get_input: keystone_public_api_network} + keystone::wsgi::apache::admin_bind_host: {get_input: keystone_admin_api_network} keystone::debug: {get_input: debug} keystone::db::mysql::password: {get_input: admin_token} keystone::rabbit_userid: {get_input: rabbit_username} @@ -1355,7 +1373,7 @@ resources: keystone::admin_workers: {get_input: keystone_workers} keystone::public_workers: {get_input: keystone_workers} keystone_enable_db_purge: {get_input: keystone_enable_db_purge} - + keystone::public_endpoint: {get_input: keystone_public_url} # MongoDB mongodb::server::bind_ip: {get_input: mongo_db_network} mongodb::server::nojournal: {get_input: mongodb_no_journal} @@ -1383,6 +1401,7 @@ resources: neutron::server::database_connection: {get_input: neutron_dsn} neutron::server::api_workers: {get_input: neutron_workers} neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge} + neutron::network_device_mtu: {get_input: neutron_tenant_mtu} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata} @@ -1408,7 +1427,7 @@ resources: neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges} - neutron::agents::ml2::ovs:bridge_mappings: {get_input: neutron_bridge_mappings} + neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings} neutron_public_interface: {get_input: neutron_public_interface} neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device} neutron_public_interface_default_route: {get_input: neutron_public_interface_default_route} @@ -1430,6 +1449,7 @@ resources: neutron::server::notifications::nova_url: {get_input: nova_internal_url} neutron::server::notifications::auth_url: {get_input: neutron_admin_auth_url} neutron::server::notifications::tenant_name: 'service' + neutron::server::notifications::project_name: 'service' neutron::server::notifications::password: {get_input: nova_password} # Ceilometer @@ -1466,7 +1486,9 @@ resources: nova::api::osapi_compute_workers: {get_input: nova_workers} nova::api::ec2_workers: {get_input: nova_workers} nova::api::metadata_workers: {get_input: nova_workers} + nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} nova::database_connection: {get_input: nova_dsn} + nova::api_database_connection: {get_input: nova_api_dsn} nova::glance_api_servers: {get_input: glance_api_servers} nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} nova::api::instance_name_template: {get_input: instance_name_template} @@ -1475,6 +1497,7 @@ resources: nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} nova::vncproxy::host: {get_input: nova_api_network} nova::db::mysql::password: {get_input: nova_password} + 
nova::db::mysql_api::password: {get_input: nova_password} nova_enable_db_purge: {get_input: nova_enable_db_purge} # Horizon @@ -1555,6 +1578,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment properties: + name: UpdateDeployment config: {get_resource: UpdateConfig} server: {get_resource: Controller} input_values: @@ -1598,12 +1622,11 @@ outputs: Server's IP address and hostname in the /etc/hosts format value: str_replace: - template: IP HOST.DOMAIN HOST CLOUDNAME + template: IP HOST.DOMAIN HOST params: IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]} DOMAIN: {get_param: CloudDomain} HOST: {get_attr: [Controller, name]} - CLOUDNAME: {get_param: CloudName} nova_server_resource: description: Heat resource handle for the Nova compute server value: diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml index 2413f5a4..655fd0f2 100644 --- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml +++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml @@ -131,6 +131,7 @@ resources: NetworkCiscoDeployment: type: OS::Heat::StructuredDeployments properties: + name: NetworkCiscoDeployment config: {get_resource: NetworkCiscoConfig} servers: {get_param: controller_servers} input_values: @@ -178,6 +179,7 @@ resources: CollectMacDeploymentsController: type: OS::Heat::SoftwareDeployments properties: + name: CollectMacDeploymentsController servers: {get_param: controller_servers} config: {get_resource: CollectMacConfig} actions: ['CREATE'] # Only do this on CREATE @@ -185,6 +187,7 @@ resources: CollectMacDeploymentsCompute: type: OS::Heat::SoftwareDeployments properties: + name: CollectMacDeploymentsCompute servers: {get_param: compute_servers} config: {get_resource: CollectMacConfig} actions: ['CREATE'] # Only do this on CREATE @@ -192,6 +195,7 @@ resources: CollectMacDeploymentsBlockStorage: type: OS::Heat::SoftwareDeployments properties: + name: CollectMacDeploymentsBlockStorage servers: {get_param: blockstorage_servers} config: {get_resource: CollectMacConfig} actions: ['CREATE'] # Only do this on CREATE @@ -199,6 +203,7 @@ resources: CollectMacDeploymentsObjectStorage: type: OS::Heat::SoftwareDeployments properties: + name: CollectMacDeploymentsObjectStorage servers: {get_param: objectstorage_servers} config: {get_resource: CollectMacConfig} actions: ['CREATE'] # Only do this on CREATE @@ -206,6 +211,7 @@ resources: CollectMacDeploymentsCephStorage: type: OS::Heat::SoftwareDeployments properties: + name: CollectMacDeploymentsCephStorage servers: {get_param: cephstorage_servers} config: {get_resource: CollectMacConfig} actions: ['CREATE'] # Only do this on CREATE @@ -280,6 +286,7 @@ resources: MappingToNexusDeploymentsController: type: OS::Heat::SoftwareDeployment properties: + name: MappingToNexusDeploymentsController server: {get_param: [controller_servers, '0']} config: {get_resource: MappingToNexusConfig} input_values: @@ -323,6 +330,7 @@ resources: type: OS::Heat::SoftwareDeployment depends_on: MappingToNexusDeploymentsController properties: + name: MappingToUCSMDeploymentsController server: {get_param: [controller_servers, '0']} config: {get_resource: MappingToUCSMConfig} input_values: diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml index 7cefc24b..ebd6c251 100644 --- a/puppet/extraconfig/ceph/ceph-external-config.yaml +++ 
b/puppet/extraconfig/ceph/ceph-external-config.yaml @@ -76,7 +76,7 @@ resources: cinder_rbd_pool_name: {get_param: CinderRbdPoolName} glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName} - glance::backend::rbd::rbd_store_pool: {get_param: CephClientUserName} + glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} nova::compute::rbd::rbd_keyring: list_join: - '.' diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml new file mode 100644 index 00000000..49c77190 --- /dev/null +++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml @@ -0,0 +1,45 @@ +heat_template_version: 2015-04-30 + +description: Configure hieradata for Big Switch agents on compute node + +parameters: + server: + description: ID of the controller node to apply this config to + type: string + NeutronBigswitchAgentEnabled: + description: The state of the neutron-bsn-agent service. + type: boolean + default: false + NeutronBigswitchLLDPEnabled: + description: The state of the neutron-bsn-lldp service. + type: boolean + default: true + + +resources: + NeutronBigswitchConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + neutron_bigswitch_data: + mapped_data: + neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent} + neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp} + + NeutronBigswitchDeployment: + type: OS::Heat::StructuredDeployment + properties: + name: NeutronBigswitchDeployment + config: {get_resource: NeutronBigswitchConfig} + server: {get_param: server} + input_values: + neutron_enable_bigswitch_agent: {get_param: NeutronBigswitchAgentEnabled} + neutron_enable_bigswitch_lldp: {get_param: NeutronBigswitchLLDPEnabled} + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [NeutronBigswitchDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml index 96368e37..5561c74a 100644 --- a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml +++ b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml @@ -70,6 +70,7 @@ resources: NovaNuageDeployment: type: OS::Heat::StructuredDeployment properties: + name: NovaNuageDeployment config: {get_resource: NovaNuageConfig} server: {get_param: server} input_values: diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml index 7ec2190f..ab442f2b 100644 --- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml +++ b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml @@ -114,6 +114,7 @@ resources: CinderNetappDeployment: type: OS::Heat::StructuredDeployment properties: + name: CinderNetappDeployment config: {get_resource: CinderNetappConfig} server: {get_param: server} input_values: diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml index bf06d25d..467f57cc 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml @@ -44,7 +44,6 @@ resources: datafiles: neutron_bigswitch_data: mapped_data: - 
neutron_enable_bigswitch_ml2: true neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers} neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth} neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure} @@ -56,6 +55,7 @@ resources: NeutronBigswitchDeployment: type: OS::Heat::StructuredDeployment properties: + name: NeutronBigswitchDeployment config: {get_resource: NeutronBigswitchConfig} server: {get_param: server} input_values: diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml index 6730ddf1..cec885cd 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml @@ -142,6 +142,7 @@ resources: CiscoN1kvDeployment: type: OS::Heat::StructuredDeployment properties: + name: CiscoN1kvDeployment config: {get_resource: CiscoN1kvConfig} server: {get_param: server} input_values: diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml index 8378d2fc..a4cfea07 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml @@ -71,6 +71,7 @@ resources: NeutronNuageDeployment: type: OS::Heat::StructuredDeployment properties: + name: NeutronNuageDeployment config: {get_resource: NeutronNuageConfig} server: {get_param: server} input_values: diff --git a/puppet/extraconfig/pre_deploy/per_node.yaml b/puppet/extraconfig/pre_deploy/per_node.yaml index 80c8ad6e..e236e336 100644 --- a/puppet/extraconfig/pre_deploy/per_node.yaml +++ b/puppet/extraconfig/pre_deploy/per_node.yaml @@ -45,6 +45,7 @@ resources: NodeSpecificDeployment: type: OS::Heat::SoftwareDeployment properties: + name: NodeSpecificDeployment config: {get_resource: NodeSpecificConfig} server: {get_param: server} input_values: diff --git a/puppet/extraconfig/tls/ca-inject.yaml b/puppet/extraconfig/tls/ca-inject.yaml index 7e34f071..aab42849 100644 --- a/puppet/extraconfig/tls/ca-inject.yaml +++ b/puppet/extraconfig/tls/ca-inject.yaml @@ -45,7 +45,7 @@ resources: cat > ${cacert_path} << EOF ${cacert_content} EOF - chmod 0440 ${cacert_path} + chmod 0444 ${cacert_path} chown root:root ${cacert_path} ${update_anchor_command} md5sum ${cacert_path} > ${heat_outputs_path}.root_cert_md5sum @@ -53,6 +53,7 @@ resources: CADeployment: type: OS::Heat::SoftwareDeployment properties: + name: CADeployment config: {get_resource: CAConfig} server: {get_param: server} input_values: diff --git a/puppet/extraconfig/tls/tls-cert-inject.yaml b/puppet/extraconfig/tls/tls-cert-inject.yaml index ce524ba9..20bb3737 100644 --- a/puppet/extraconfig/tls/tls-cert-inject.yaml +++ b/puppet/extraconfig/tls/tls-cert-inject.yaml @@ -67,6 +67,7 @@ resources: ControllerTLSDeployment: type: OS::Heat::SoftwareDeployment properties: + name: ControllerTLSDeployment config: {get_resource: ControllerTLSConfig} server: {get_param: server} input_values: diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index b4b51abf..30645687 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -14,6 +14,9 @@ nova::network::neutron::dhcp_domain: '' neutron::allow_overlapping_ips: true +kernel_modules: + nf_conntrack: {} + sysctl_settings: net.ipv4.tcp_keepalive_intvl: value: 1 @@ -21,6 +24,15 @@ 
sysctl_settings: value: 5 net.ipv4.tcp_keepalive_time: value: 5 + net.nf_conntrack_max: + value: 500000 + net.netfilter.nf_conntrack_max: + value: 500000 + # prevent neutron bridges from autoconfiguring ipv6 addresses + net.ipv6.conf.default.accept_ra: + value: 0 + net.ipv6.conf.default.autoconf: + value: 0 nova::rabbit_heartbeat_timeout_threshold: 60 neutron::rabbit_heartbeat_timeout_threshold: 60 diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index 7f30fe7a..e0e0ffbc 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -34,6 +34,8 @@ glance::api::keystone_tenant: 'service' glance::registry::keystone_tenant: 'service' neutron::server::auth_tenant: 'service' neutron::agents::metadata::auth_tenant: 'service' +neutron::agents::l3::router_delete_namespaces: True +neutron::agents::dhcp::dhcp_delete_namespaces: True cinder::api::keystone_tenant: 'service' swift::proxy::authtoken::admin_tenant_name: 'service' ceilometer::api::keystone_tenant: 'service' @@ -45,6 +47,13 @@ keystone::cron::token_flush::maxdelay: 3600 keystone::roles::admin::service_tenant: 'service' keystone::roles::admin::admin_tenant: 'admin' keystone::cron::token_flush::destination: '/dev/null' +keystone::config::keystone_config: + DEFAULT/secure_proxy_ssl_header: + value: 'HTTP_X_FORWARDED_PROTO' + ec2/driver: + value: 'keystone.contrib.ec2.backends.sql.Ec2' +keystone::service_name: 'httpd' +keystone::wsgi::apache::ssl: false #swift swift::proxy::pipeline: @@ -78,6 +87,7 @@ neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf nova::notify_on_state_change: 'vm_and_task_state' nova::api::default_floating_pool: 'public' nova::api::osapi_v3: true +nova::api::sync_db_api: true nova::scheduler::filter::ram_allocation_ratio: '1.0' nova::cron::archive_deleted_rows::hour: '*/12' nova::cron::archive_deleted_rows::destination: '/dev/null' @@ -88,6 +98,7 @@ ceilometer::agent::auth::auth_endpoint_type: 'internalURL' # cinder cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler cinder::cron::db_purge::destination: '/dev/null' +cinder::host: hostgroup # heat heat::engine::configure_delegated_roles: false diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml index 89577505..61714691 100644 --- a/puppet/hieradata/database.yaml +++ b/puppet/hieradata/database.yaml @@ -6,6 +6,13 @@ nova::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" +nova::db::mysql_api::user: nova_api +nova::db::mysql_api::host: "%{hiera('mysql_virtual_ip')}" +nova::db::mysql_api::dbname: nova_api +nova::db::mysql_api::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + # Glance glance::db::mysql::user: glance glance::db::mysql::host: "%{hiera('mysql_virtual_ip')}" diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index 7444155c..0db5b45a 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -16,7 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall +create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) +Exec <| tag == 'kmod::load' |> -> Sysctl <| |> if count(hiera('ntp::servers')) > 0 { include ::ntp @@ -38,6 +40,7 @@ if str2bool(hiera('ceph_osd_selinux_permissive', true)) { } -> Class['ceph::profile::osd'] } +include ::ceph::conf include ::ceph::profile::client include ::ceph::profile::osd diff --git 
a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index bb3575cf..0f1318c3 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -16,7 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall +create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) +Exec <| tag == 'kmod::load' |> -> Sysctl <| |> if count(hiera('ntp::servers')) > 0 { include ::ntp @@ -37,6 +39,16 @@ exec { 'libvirt-default-net-destroy': before => Service['libvirt'], } +# When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique +exec { 'reset-iscsi-initiator-name': + command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi', + onlyif => '/usr/bin/test ! -f /etc/iscsi/.initiator_reset', +}-> + +file { '/etc/iscsi/.initiator_reset': + ensure => present, +} + include ::nova include ::nova::config include ::nova::compute @@ -49,6 +61,7 @@ nova_config { $rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false) $rbd_persistent_storage = hiera('rbd_persistent_storage', false) if $rbd_ephemeral_storage or $rbd_persistent_storage { + include ::ceph::conf include ::ceph::profile::client $client_keys = hiera('ceph::profile::params::client_keys') @@ -78,6 +91,7 @@ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' } include ::nova::network::neutron include ::neutron +include ::neutron::config # If the value of core plugin is set to 'nuage', # include nuage agent, @@ -117,6 +131,10 @@ else { n1kv_version => hiera('n1kv_vem_version', undef), } } + + if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::agents::bigswitch + } } diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index 29af6ca2..14dde157 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -20,7 +20,9 @@ $enable_load_balancer = hiera('enable_load_balancer', true) if hiera('step') >= 1 { + create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) + Exec <| tag == 'kmod::load' |> -> Sysctl <| |> $controller_node_ips = split(hiera('controller_node_ips'), ',') @@ -83,11 +85,15 @@ if hiera('step') >= 2 { $mysql_config_file = '/etc/my.cnf.d/server.cnf' } # TODO Galara + # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we + # set bind-address to a hostname instead of an ip address; to move Mysql + # from internal_api on another network we'll have to customize both + # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap class { '::mysql::server': config_file => $mysql_config_file, override_options => { 'mysqld' => { - 'bind-address' => hiera('mysql_bind_host'), + 'bind-address' => $::hostname, 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', }, @@ -100,6 +106,7 @@ if hiera('step') >= 2 { include ::keystone::db::mysql include ::glance::db::mysql include ::nova::db::mysql + include ::nova::db::mysql_api include ::neutron::db::mysql include ::cinder::db::mysql include ::heat::db::mysql @@ -137,6 +144,7 @@ if hiera('step') >= 2 { class { '::ceph::profile::params': mon_initial_members => downcase(hiera('ceph_mon_initial_members')), } + include ::ceph::conf include ::ceph::profile::mon } @@ -155,10 +163,12 @@ if hiera('step') >= 2 { } -> Class['ceph::profile::osd'] 
} + include ::ceph::conf include ::ceph::profile::osd } if str2bool(hiera('enable_external_ceph', false)) { + include ::ceph::conf include ::ceph::profile::client } @@ -167,13 +177,13 @@ if hiera('step') >= 2 { if hiera('step') >= 3 { include ::keystone + include ::keystone::config include ::keystone::roles::admin include ::keystone::endpoint + include ::keystone::wsgi::apache #TODO: need a cleanup-keystone-tokens.sh solution here - keystone_config { - 'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2'; - } + file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]: ensure => 'directory', owner => 'keystone', @@ -214,6 +224,7 @@ if hiera('step') >= 3 { # TODO: notifications, scrubber, etc. include ::glance + include ::glance::config class { '::glance::api': known_stores => $glance_store, } @@ -243,7 +254,8 @@ if hiera('step') >= 3 { if hiera('enable_zookeeper_on_controller') { class {'::tripleo::cluster::zookeeper': zookeeper_server_ips => $zookeeper_node_ips, - zookeeper_client_ip => $ipaddress, + # TODO: create a 'bind' hiera key for zookeeper + zookeeper_client_ip => hiera('neutron::bind_host'), zookeeper_hostnames => hiera('controller_node_names') } } @@ -252,7 +264,8 @@ if hiera('step') >= 3 { if hiera('enable_cassandra_on_controller') { class {'::tripleo::cluster::cassandra': cassandra_servers => $cassandra_node_ips, - cassandra_ip => $ipaddress + # TODO: create a 'bind' hiera key for cassandra + cassandra_ip => hiera('neutron::bind_host'), } } @@ -263,10 +276,11 @@ if hiera('step') >= 3 { class {'::tripleo::network::midonet::api': zookeeper_servers => $zookeeper_node_ips, - vip => $ipaddress, - keystone_ip => $ipaddress, + vip => hiera('tripleo::loadbalancer::public_virtual_ip'), + keystone_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), keystone_admin_token => hiera('keystone::admin_token'), - bind_address => $ipaddress, + # TODO: create a 'bind' hiera key for api + bind_address => hiera('neutron::bind_host'), admin_password => hiera('admin_password') } @@ -282,6 +296,7 @@ if hiera('step') >= 3 { include ::neutron } + include ::neutron::config include ::neutron::server include ::neutron::server::notifications @@ -308,7 +323,7 @@ if hiera('step') >= 3 { if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { class {'::neutron::plugins::midonet': - midonet_api_ip => $ipaddress, + midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), keystone_tenant => hiera('neutron::server::auth_tenant'), keystone_password => hiera('neutron::server::auth_password') } @@ -340,8 +355,9 @@ if hiera('step') >= 3 { include ::neutron::plugins::ml2::cisco::type_nexus_vxlan } - if hiera('neutron_enable_bigswitch_ml2', false) { + if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::bigswitch::restproxy + include ::neutron::agents::bigswitch } neutron_l3_agent_config { 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); @@ -358,6 +374,8 @@ if hiera('step') >= 3 { } include ::cinder + include ::cinder::config + include ::tripleo::ssl::cinder_config include ::cinder::api include ::cinder::glance include ::cinder::scheduler @@ -405,10 +423,6 @@ if hiera('step') >= 3 { if hiera('cinder_enable_eqlx_backend', false) { $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name') - cinder_config { - "${cinder_eqlx_backend}/host": value => 'hostgroup'; - } - cinder::backend::eqlx { $cinder_eqlx_backend : volume_backend_name => 
hiera('cinder::backend::eqlx::volume_backend_name', undef), san_ip => hiera('cinder::backend::eqlx::san_ip', undef), @@ -416,7 +430,7 @@ if hiera('step') >= 3 { san_password => hiera('cinder::backend::eqlx::san_password', undef), san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef), eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef), - eqlx_pool => hiera('cinder::backend::eqlx::eqlx_lpool', undef), + eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef), eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef), eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef), eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_san_password', undef), @@ -426,10 +440,6 @@ if hiera('step') >= 3 { if hiera('cinder_enable_dellsc_backend', false) { $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name') - cinder_config { - "${cinder_dellsc_backend}/host": value => 'hostgroup'; - } - cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend : volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef), san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef), @@ -438,7 +448,7 @@ if hiera('step') >= 3 { dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef), iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef), iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef), - dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef), + dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef), dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef), dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef), } @@ -447,10 +457,6 @@ if hiera('step') >= 3 { if hiera('cinder_enable_netapp_backend', false) { $cinder_netapp_backend = hiera('cinder::backend::netapp::title') - cinder_config { - "${cinder_netapp_backend}/host": value => 'hostgroup'; - } - if hiera('cinder::backend::netapp::nfs_shares', undef) { $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',') } @@ -560,6 +566,7 @@ if hiera('step') >= 3 { # Heat include ::heat + include ::heat::config include ::heat::api include ::heat::api_cfn include ::heat::api_cloudwatch diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 583a4fd4..c527c26e 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -39,7 +39,9 @@ $non_pcmk_start = hiera('step') >= 4 if hiera('step') >= 1 { + create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) + Exec <| tag == 'kmod::load' |> -> Sysctl <| |> include ::timezone @@ -134,6 +136,11 @@ if hiera('step') >= 1 { $galera_nodes = downcase(hiera('galera_node_names', $::hostname)) $galera_nodes_count = count(split($galera_nodes, ',')) + # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we + # set bind-address to a hostname instead of an ip address; to move Mysql + # from internal_api on another network we'll have to customize both + # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap + $mysql_bind_host = hiera('mysql_bind_host') $mysqld_options = { 'mysqld' => { 'skip-name-resolve' => '1', @@ -143,7 +150,7 @@ if hiera('step') >= 1 { 
'innodb_locks_unsafe_for_binlog'=> '1', 'query_cache_size' => '0', 'query_cache_type' => '0', - 'bind-address' => hiera('mysql_bind_host'), + 'bind-address' => $::hostname, 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so', @@ -158,8 +165,8 @@ if hiera('step') >= 1 { 'wsrep_auto_increment_control' => '1', 'wsrep_drupal_282555_workaround'=> '0', 'wsrep_causal_reads' => '0', - 'wsrep_notify_cmd' => '', 'wsrep_sst_method' => 'rsync', + 'wsrep_provider_options' => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;", }, } @@ -430,6 +437,9 @@ MYSQL_HOST=localhost\n", class { '::nova::db::mysql': require => Exec['galera-ready'], } + class { '::nova::db::mysql_api': + require => Exec['galera-ready'], + } class { '::neutron::db::mysql': require => Exec['galera-ready'], } @@ -461,6 +471,7 @@ MYSQL_HOST=localhost\n", class { '::ceph::profile::params': mon_initial_members => downcase(hiera('ceph_mon_initial_members')), } + include ::ceph::conf include ::ceph::profile::mon } @@ -479,10 +490,12 @@ MYSQL_HOST=localhost\n", } -> Class['ceph::profile::osd'] } + include ::ceph::conf include ::ceph::profile::osd } if str2bool(hiera('enable_external_ceph', false)) { + include ::ceph::conf include ::ceph::profile::client } @@ -496,11 +509,10 @@ if hiera('step') >= 3 { manage_service => false, enabled => false, } + include ::keystone::config #TODO: need a cleanup-keystone-tokens.sh solution here - keystone_config { - 'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2'; - } + file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]: ensure => 'directory', owner => 'keystone', @@ -552,6 +564,7 @@ if hiera('step') >= 3 { # TODO: notifications, scrubber, etc. 
include ::glance + include ::glance::config class { '::glance::api': known_stores => $glance_store, manage_service => false, @@ -572,6 +585,7 @@ if hiera('step') >= 3 { class { '::nova::api' : sync_db => $sync_db, + sync_db_api => $sync_db, manage_service => false, enabled => false, } @@ -608,8 +622,9 @@ if hiera('step') >= 3 { if hiera('enable_zookeeper_on_controller') { class {'::tripleo::cluster::zookeeper': zookeeper_server_ips => $zookeeper_node_ips, - zookeeper_client_ip => $ipaddress, - zookeeper_hostnames => hiera('controller_node_names') + # TODO: create a 'bind' hiera key for zookeeper + zookeeper_client_ip => hiera('neutron::bind_host'), + zookeeper_hostnames => split(hiera('controller_node_names'), ',') } } @@ -617,7 +632,8 @@ if hiera('step') >= 3 { if hiera('enable_cassandra_on_controller') { class {'::tripleo::cluster::cassandra': cassandra_servers => $cassandra_node_ips, - cassandra_ip => $ipaddress + # TODO: create a 'bind' hiera key for cassandra + cassandra_ip => hiera('neutron::bind_host'), } } @@ -627,11 +643,12 @@ if hiera('step') >= 3 { } class {'::tripleo::network::midonet::api': - zookeeper_servers => hiera('neutron_api_node_ips'), - vip => $public_vip, - keystone_ip => $public_vip, + zookeeper_servers => $zookeeper_node_ips, + vip => hiera('tripleo::loadbalancer::public_virtual_ip'), + keystone_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), keystone_admin_token => hiera('keystone::admin_token'), - bind_address => $ipaddress, + # TODO: create a 'bind' hiera key for api + bind_address => hiera('neutron::bind_host'), admin_password => hiera('admin_password') } @@ -646,6 +663,7 @@ if hiera('step') >= 3 { include ::neutron } + include ::neutron::config class { '::neutron::server' : sync_db => $sync_db, manage_service => false, @@ -657,7 +675,7 @@ if hiera('step') >= 3 { } if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { class {'::neutron::plugins::midonet': - midonet_api_ip => $public_vip, + midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), keystone_tenant => hiera('neutron::server::auth_tenant'), keystone_password => hiera('neutron::server::auth_password') } @@ -714,8 +732,9 @@ if hiera('step') >= 3 { } } - if hiera('neutron_enable_bigswitch_ml2', false) { + if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::bigswitch::restproxy + include ::neutron::agents::bigswitch } neutron_l3_agent_config { 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); @@ -725,6 +744,8 @@ if hiera('step') >= 3 { } include ::cinder + include ::cinder::config + include ::tripleo::ssl::cinder_config class { '::cinder::api': sync_db => $sync_db, manage_service => false, @@ -782,10 +803,6 @@ if hiera('step') >= 3 { if hiera('cinder_enable_eqlx_backend', false) { $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name') - cinder_config { - "${cinder_eqlx_backend}/host": value => 'hostgroup'; - } - cinder::backend::eqlx { $cinder_eqlx_backend : volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef), san_ip => hiera('cinder::backend::eqlx::san_ip', undef), @@ -793,7 +810,7 @@ if hiera('step') >= 3 { san_password => hiera('cinder::backend::eqlx::san_password', undef), san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef), eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef), - eqlx_pool => hiera('cinder::backend::eqlx::eqlx_lpool', undef), + eqlx_pool => 
hiera('cinder::backend::eqlx::eqlx_pool', undef), eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef), eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef), eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_san_password', undef), @@ -803,10 +820,6 @@ if hiera('step') >= 3 { if hiera('cinder_enable_dellsc_backend', false) { $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name') - cinder_config { - "${cinder_dellsc_backend}/host": value => 'hostgroup'; - } - cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend : volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef), san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef), @@ -815,7 +828,7 @@ if hiera('step') >= 3 { dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef), iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef), iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef), - dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef), + dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef), dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef), dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef), } @@ -824,10 +837,6 @@ if hiera('step') >= 3 { if hiera('cinder_enable_netapp_backend', false) { $cinder_netapp_backend = hiera('cinder::backend::netapp::title') - cinder_config { - "${cinder_netapp_backend}/host": value => 'hostgroup'; - } - if hiera('cinder::backend::netapp::nfs_shares', undef) { $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',') } @@ -975,6 +984,7 @@ if hiera('step') >= 3 { Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } # Heat + include ::heat::config class { '::heat' : sync_db => $sync_db, } @@ -1001,6 +1011,7 @@ if hiera('step') >= 3 { service_enable => false, # service_manage => false, # <-- not supported with horizon&apache mod_wsgi? 
} + include ::keystone::wsgi::apache include ::apache::mod::status if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { $_profile_support = 'cisco' @@ -1044,57 +1055,49 @@ if hiera('step') >= 4 { if $pacemaker_master { - # Keystone - pacemaker::resource::service { $::keystone::params::service_name : - clone_params => 'interleave=true', - verify_on_create => true, - require => [File['/etc/keystone/ssl/certs/ca.pem'], - File['/etc/keystone/ssl/private/signing_key.pem'], - File['/etc/keystone/ssl/certs/signing_cert.pem']], - } if $enable_load_balancer { pacemaker::constraint::base { 'haproxy-then-keystone-constraint': constraint_type => 'order', first_resource => 'haproxy-clone', - second_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::apache::params::service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } } pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint': constraint_type => 'order', first_resource => 'rabbitmq-clone', - second_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::apache::params::service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Ocf['rabbitmq'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } pacemaker::constraint::base { 'memcached-then-keystone-constraint': constraint_type => 'order', first_resource => 'memcached-clone', - second_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::apache::params::service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service['memcached'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } pacemaker::constraint::base { 'galera-then-keystone-constraint': constraint_type => 'order', first_resource => 'galera-master', - second_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::apache::params::service_name}-clone", first_action => 'promote', second_action => 'start', require => [Pacemaker::Resource::Ocf['galera'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } # Cinder pacemaker::resource::service { $::cinder::params::api_service : clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + require => Pacemaker::Resource::Service[$::apache::params::service_name], } pacemaker::resource::service { $::cinder::params::scheduler_service : clone_params => 'interleave=true', @@ -1103,12 +1106,12 @@ if hiera('step') >= 4 { pacemaker::constraint::base { 'keystone-then-cinder-api-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", + first_resource => "${::apache::params::service_name}-clone", second_resource => "${::cinder::params::api_service}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service[$::cinder::params::api_service], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } 
pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint': constraint_type => 'order', @@ -1146,25 +1149,25 @@ if hiera('step') >= 4 { # Sahara pacemaker::resource::service { $::sahara::params::api_service_name : clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + require => Pacemaker::Resource::Service[$::apache::params::service_name], } pacemaker::resource::service { $::sahara::params::engine_service_name : clone_params => 'interleave=true', } pacemaker::constraint::base { 'keystone-then-sahara-api-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", + first_resource => "${::apache::params::service_name}-clone", second_resource => "${::sahara::params::api_service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } # Glance pacemaker::resource::service { $::glance::params::registry_service_name : clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + require => Pacemaker::Resource::Service[$::apache::params::service_name], } pacemaker::resource::service { $::glance::params::api_service_name : clone_params => 'interleave=true', @@ -1172,12 +1175,12 @@ if hiera('step') >= 4 { pacemaker::constraint::base { 'keystone-then-glance-registry-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", + first_resource => "${::apache::params::service_name}-clone", second_resource => "${::glance::params::registry_service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint': constraint_type => 'order', @@ -1215,12 +1218,12 @@ if hiera('step') >= 4 { } -> pacemaker::resource::service { $::neutron::params::server_service: clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name] + require => Pacemaker::Resource::Service[$::apache::params::service_name] } } else { pacemaker::resource::service { $::neutron::params::server_service: clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name] + require => Pacemaker::Resource::Service[$::apache::params::service_name] } } if hiera('neutron::enable_l3_agent', true) { @@ -1292,28 +1295,16 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], } } - pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': - constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", - second_resource => "${::neutron::params::server_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::keystone::params::service_name], - Pacemaker::Resource::Service[$::neutron::params::server_service]], - } - if hiera('neutron::enable_ovs_agent',true) { - pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': - constraint_type => 'order', - first_resource => 
"${::neutron::params::server_service}-clone", - second_resource => "${::neutron::params::ovs_agent_service}-clone", + constraint_type => 'order', + first_resource => "${::apache::params::service_name}-clone", + second_resource => "${::neutron::params::server_service}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::server_service], - Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], + require => [Pacemaker::Resource::Service[$::apache::params::service_name], + Pacemaker::Resource::Service[$::neutron::params::server_service]], } - } - if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) { + if hiera('neutron::enable_ovs_agent',true) { pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': constraint_type => 'order', first_resource => "${::neutron::params::ovs_agent_service}-clone", @@ -1322,8 +1313,19 @@ if hiera('step') >= 4 { second_action => 'start', require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], - } + } + if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) { + pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::server_service}-clone", + second_resource => "${::neutron::params::ovs_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::server_service], + Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], + } + pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': source => "${::neutron::params::dhcp_agent_service}-clone", target => "${::neutron::params::ovs_agent_service}-clone", @@ -1409,34 +1411,29 @@ if hiera('step') >= 4 { # Nova pacemaker::resource::service { $::nova::params::api_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::conductor_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::consoleauth_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + require => Pacemaker::Resource::Service[$::apache::params::service_name], } pacemaker::resource::service { $::nova::params::vncproxy_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::scheduler_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", + first_resource => "${::apache::params::service_name}-clone", second_resource => "${::nova::params::consoleauth_service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - 
-                  Pacemaker::Resource::Service[$::keystone::params::service_name]],
+                  Pacemaker::Resource::Service[$::apache::params::service_name]],
    }
    pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
      constraint_type => 'order',
@@ -1508,14 +1505,14 @@ if hiera('step') >= 4 {
      /mysql/: {
        pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
          clone_params => 'interleave=true',
-          require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+          require => Pacemaker::Resource::Service[$::apache::params::service_name],
        }
      }
      default: {
        pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
          clone_params => 'interleave=true',
-          require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
-                      Pacemaker::Resource::Service[$::mongodb::params::service_name]],
+          require => [Pacemaker::Resource::Service[$::apache::params::service_name],
+                      Pacemaker::Resource::Service[$::mongodb::params::service_name]],
        }
      }
    }
@@ -1551,12 +1548,12 @@ if hiera('step') >= 4 {
    }
    pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
      constraint_type => 'order',
-      first_resource => "${::keystone::params::service_name}-clone",
+      first_resource => "${::apache::params::service_name}-clone",
      second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action => 'start',
      second_action => 'start',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
-                  Pacemaker::Resource::Service[$::keystone::params::service_name]],
+                  Pacemaker::Resource::Service[$::apache::params::service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
      constraint_type => 'order',
@@ -1626,12 +1623,12 @@ if hiera('step') >= 4 {
    }
    pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
      constraint_type => 'order',
-      first_resource => "${::keystone::params::service_name}-clone",
+      first_resource => "${::apache::params::service_name}-clone",
      second_resource => "${::heat::params::api_service_name}-clone",
      first_action => 'start',
      second_action => 'start',
      require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
-                  Pacemaker::Resource::Service[$::keystone::params::service_name]],
+                  Pacemaker::Resource::Service[$::apache::params::service_name]],
    }
    pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
      constraint_type => 'order',
@@ -1691,9 +1688,13 @@ if hiera('step') >= 4 {
                  Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
    }

-    # Horizon
-    pacemaker::resource::service { $::horizon::params::http_service:
-      clone_params => 'interleave=true',
+    # Horizon and Keystone
+    pacemaker::resource::service { $::apache::params::service_name:
+      clone_params => 'interleave=true',
+      verify_on_create => true,
+      require => [File['/etc/keystone/ssl/certs/ca.pem'],
+                  File['/etc/keystone/ssl/private/signing_key.pem'],
+                  File['/etc/keystone/ssl/certs/signing_cert.pem']],
    }

    #VSM
@@ -1730,12 +1731,11 @@ if hiera('step') >= 5 {
  if $pacemaker_master {

    class {'::keystone::roles::admin' :
-      require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+      require => Pacemaker::Resource::Service[$::apache::params::service_name],
    } ->
    class {'::keystone::endpoint' :
-      require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+      require => Pacemaker::Resource::Service[$::apache::params::service_name],
    }
-
  }

} #END STEP 5
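Every hunk in puppet/manifests/overcloud_controller_pacemaker.pp above swaps the dedicated Keystone Pacemaker resource for the Apache (httpd) clone: Keystone now runs as a WSGI application under httpd, so services that previously waited on keystone must wait on apache instead. A minimal sketch of that ordering pattern, assuming the puppet-pacemaker defined types used in the manifest; 'example-api' is a placeholder service name, not a service from these templates:

    # Sketch only: 'example-api' is hypothetical, used to illustrate the pattern.
    pacemaker::resource::service { 'httpd':
      clone_params => 'interleave=true',
    }

    pacemaker::resource::service { 'example-api':
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Service['httpd'],
    }

    # Start the API clone only after the Apache clone (and therefore Keystone) is up.
    pacemaker::constraint::base { 'httpd-then-example-api-constraint':
      constraint_type => 'order',
      first_resource  => 'httpd-clone',
      second_resource => 'example-api-clone',
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service['httpd'],
                          Pacemaker::Resource::Service['example-api']],
    }

In the diff this pattern is applied to Sahara, Glance, Neutron, Nova console-auth, Ceilometer and Heat, so none of them start before Keystone is reachable under httpd.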
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
index 63ac396e..1ac66904 100644
--- a/puppet/manifests/overcloud_object.pp
+++ b/puppet/manifests/overcloud_object.pp
@@ -16,7 +16,9 @@ include ::tripleo::packages

include ::tripleo::firewall

+create_resources(kmod::load, hiera('kernel_modules'), {})
create_resources(sysctl::value, hiera('sysctl_settings'), {})
+Exec <| tag == 'kmod::load' |>  -> Sysctl <| |>

if count(hiera('ntp::servers')) > 0 {
  include ::ntp
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index 5a69725a..72cd36c3 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -16,7 +16,9 @@ include ::tripleo::packages

include ::tripleo::firewall

+create_resources(kmod::load, hiera('kernel_modules'), {})
create_resources(sysctl::value, hiera('sysctl_settings'), {})
+Exec <| tag == 'kmod::load' |>  -> Sysctl <| |>

if count(hiera('ntp::servers')) > 0 {
  include ::ntp
diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml
index d22f5386..a55b3959 100644
--- a/puppet/swift-storage-post.yaml
+++ b/puppet/swift-storage-post.yaml
@@ -29,6 +29,7 @@ resources:
  StorageDeployment_Step1:
    type: OS::Heat::StructuredDeployments
    properties:
+      name: StorageDeployment_Step1
      servers: {get_param: servers}
      config: {get_resource: StoragePuppetConfig}
      input_values:
@@ -49,6 +50,7 @@ resources:
    type: OS::Heat::StructuredDeployments
    depends_on: StorageDeployment_Step1
    properties:
+      name: StorageRingbuilderDeployment_Step2
      servers: {get_param: servers}
      config: {get_resource: StorageRingbuilderPuppetConfig}
      input_values:
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 10c87493..142e47cc 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -200,6 +200,7 @@ resources:
  NetworkDeployment:
    type: OS::TripleO::SoftwareDeployment
    properties:
+      name: NetworkDeployment
      config: {get_resource: NetworkConfig}
      server: {get_resource: SwiftStorage}
      actions: {get_param: NetworkDeploymentActions}
@@ -252,6 +253,7 @@ resources:
    type: OS::Heat::StructuredDeployment
    depends_on: NetworkDeployment
    properties:
+      name: SwiftStorageHieraDeploy
      server: {get_resource: SwiftStorage}
      config: {get_resource: SwiftStorageHieraConfig}
      input_values:
diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml
index c49a1047..5e2f698f 100644
--- a/puppet/vip-config.yaml
+++ b/puppet/vip-config.yaml
@@ -16,6 +16,8 @@ resources:
        keystone_admin_api_vip: {get_input: keystone_admin_api_vip}
        keystone_public_api_vip: {get_input: keystone_public_api_vip}
        neutron_api_vip: {get_input: neutron_api_vip}
+        # TODO: pass a `midonet_api_vip` var
+        midonet_api_vip: {get_input: neutron_api_vip}
        cinder_api_vip: {get_input: cinder_api_vip}
        glance_api_vip: {get_input: glance_api_vip}
        glance_registry_vip: {get_input: glance_registry_vip}
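The overcloud_object.pp and overcloud_volume.pp hunks load the kernel modules listed in the kernel_modules hiera key before any sysctl value is applied, by chaining the Execs declared inside kmod::load ahead of every Sysctl resource. A minimal sketch of the same ordering, assuming the puppet-kmod and sysctl modules these manifests rely on; the nf_conntrack module and the net.netfilter.nf_conntrack_max key are illustrative stand-ins, not values from the templates:

    # Sketch: load a module before setting a sysctl key that depends on it.
    # The resource chaining mirrors the manifests above; the data here is made up.
    $kernel_modules  = { 'nf_conntrack' => {} }
    $sysctl_settings = { 'net.netfilter.nf_conntrack_max' => { 'value' => '131072' } }

    create_resources(kmod::load, $kernel_modules, {})
    create_resources(sysctl::value, $sysctl_settings, {})

    # kmod::load wraps an Exec that Puppet auto-tags with the defined type's name,
    # so collecting on that tag orders every module load before every sysctl set.
    Exec <| tag == 'kmod::load' |> -> Sysctl <| |>

Without the chaining arrow, Puppet could apply a sysctl setting before the module that provides it is loaded, which is the race the added line closes.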