Diffstat (limited to 'puppet'): 21 files changed, 1328 insertions, 313 deletions
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index 2bc519bb..3dd3d5c9 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -50,6 +50,19 @@ parameters: type: comma_delimited_list keystone_admin_api_node_ips: type: comma_delimited_list + sahara_api_node_ips: + type: comma_delimited_list + + DeployIdentifier: + type: string + description: > + Setting this to a unique value will re-run any deployment tasks which + perform configuration on a Heat stack-update. + UpdateIdentifier: + type: string + description: > + Setting to a previously unused value during stack-update will trigger + package update on all nodes resources: @@ -230,6 +243,14 @@ resources: list_join: - "','" - {get_param: keystone_admin_api_node_ips} + sahara_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: sahara_api_node_ips} # NOTE(gfidente): interpolation with %{} in the # hieradata file can't be used as it returns string @@ -239,9 +260,19 @@ resources: neutron::rabbit_hosts: *rabbit_nodes_array nova::rabbit_hosts: *rabbit_nodes_array keystone::rabbit_hosts: *rabbit_nodes_array + sahara::rabbit_hosts: *rabbit_nodes_array + + deploy_identifier: {get_param: DeployIdentifier} + update_identifier: {get_param: UpdateIdentifier} outputs: config_id: description: The ID of the allNodesConfigImpl resource. value: {get_resource: allNodesConfigImpl} + hosts_entries: + description: | + The content that should be appended to your /etc/hosts if you want to get + hostname-based access to the deployed nodes (useful for testing without + setting up a DNS). + value: {get_attr: [allNodesConfigImpl, config, hosts]} diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml index e918538a..e310e1f5 100644 --- a/puppet/ceph-storage.yaml +++ b/puppet/ceph-storage.yaml @@ -16,7 +16,7 @@ parameters: description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. type: string KeyName: - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string default: default constraints: @@ -34,6 +34,10 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. type: json + TimeZone: + default: 'UTC' + description: The timezone to be set on Ceph nodes. + type: string UpdateIdentifier: default: '' type: string @@ -59,6 +63,13 @@ parameters: description: > Heat action when to apply network configuration changes default: ['CREATE'] + SoftwareConfigTransport: + default: POLL_SERVER_CFN + description: | + How the server should receive the metadata required for software configuration. + type: string + constraints: + - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE] CloudDomain: default: '' type: string @@ -71,7 +82,10 @@ parameters: Extra properties or metadata passed to Nova for the created nodes in the overcloud. It's accessible via the Nova metadata API. 
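Editor's note: the DeployIdentifier/UpdateIdentifier pair added to all-nodes-config above are plain strings whose only job is to change between stack-updates; because they are wired into the node hieradata as deploy_identifier/update_identifier, any new value invalidates the dependent deployments and Heat re-applies them. A minimal sketch of an environment file that bumps both (the timestamp is illustrative; any previously unused string works):

parameter_defaults:
  DeployIdentifier: '2015-11-05T10:00:00'  # re-runs deployment/config tasks
  UpdateIdentifier: '2015-11-05T10:00:00'  # triggers a package update on all nodes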
type: json - + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: CephStorage: @@ -86,7 +100,9 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + software_config_transport: {get_param: SoftwareConfigTransport} metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -108,6 +124,16 @@ resources: NodeUserData: type: OS::TripleO::NodeUserData + ExternalPort: + type: OS::TripleO::CephStorage::Ports::ExternalPort + properties: + ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + + InternalApiPort: + type: OS::TripleO::CephStorage::Ports::InternalApiPort + properties: + ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + StoragePort: type: OS::TripleO::CephStorage::Ports::StoragePort properties: @@ -118,26 +144,48 @@ resources: properties: ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + TenantPort: + type: OS::TripleO::CephStorage::Ports::TenantPort + properties: + ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + + ManagementPort: + type: OS::TripleO::CephStorage::Ports::ManagementPort + properties: + ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + NetworkConfig: type: OS::TripleO::CephStorage::Net::SoftwareConfig properties: ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} + ExternalIp: {get_attr: [ExternalPort, ip_address]} + InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + TenantIp: {get_attr: [TenantPort, ip_address]} + ManagementIp: {get_attr: [ManagementPort, ip_address]} NetIpSubnetMap: type: OS::TripleO::Network::Ports::NetIpSubnetMap properties: ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -156,6 +204,7 @@ resources: server: {get_resource: CephStorage} input_values: ntp_servers: {get_param: NtpServer} + timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} @@ -187,6 +236,7 @@ resources: raw_data: {get_file: hieradata/ceph.yaml} mapped_data: ntp::servers: {get_input: ntp_servers} + timezone::timezone: {get_input: timezone} tripleo::packages::enable_install: {get_input: enable_package_install} 
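Editor's note: the new SchedulerHints parameter is forwarded verbatim to the server's scheduler_hints property, and a parameter_defaults entry propagates to every role template that defines it. A sketch, assuming an undercloud scheduler filter (e.g. ComputeCapabilitiesFilter) that understands the hint key, which is hypothetical here:

parameter_defaults:
  SchedulerHints:
    'capabilities:node': 'ceph-0'  # hypothetical hint key/value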
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} ceph::profile::params::cluster_network: {get_input: ceph_cluster_network} @@ -239,12 +289,24 @@ outputs: description: Heat resource handle for the ceph storage server value: {get_resource: CephStorage} + external_ip_address: + description: IP address of the server in the external network + value: {get_attr: [ExternalPort, ip_address]} + internal_api_ip_address: + description: IP address of the server in the internal_api network + value: {get_attr: [InternalApiPort, ip_address]} storage_ip_address: description: IP address of the server in the storage network value: {get_attr: [StoragePort, ip_address]} storage_mgmt_ip_address: description: IP address of the server in the storage_mgmt network value: {get_attr: [StorageMgmtPort, ip_address]} + tenant_ip_address: + description: IP address of the server in the tenant network + value: {get_attr: [TenantPort, ip_address]} + management_ip_address: + description: IP address of the server in the management network + value: {get_attr: [ManagementPort, ip_address]} config_identifier: description: identifier which changes if the node configuration may need re-applying value: diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml index 731ffe1c..9b7c752a 100644 --- a/puppet/cinder-storage-post.yaml +++ b/puppet/cinder-storage-post.yaml @@ -31,3 +31,11 @@ resources: name: VolumeDeployment_Step1 servers: {get_param: servers} config: {get_resource: VolumePuppetConfig} + + # Note, this should come last, so use depends_on to ensure + # this is created after any other resources. + ExtraConfig: + depends_on: VolumeDeployment_Step1 + type: OS::TripleO::NodeExtraConfigPost + properties: + servers: {get_param: servers} diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml index 17bcb097..f7e8f907 100644 --- a/puppet/cinder-storage.yaml +++ b/puppet/cinder-storage.yaml @@ -17,7 +17,6 @@ parameters: description: The size of the loopback file used by the cinder LVM driver. type: number CinderPassword: - default: unset description: The password for the cinder service and db account, used by cinder-api. type: string hidden: true @@ -46,7 +45,7 @@ parameters: - custom_constraint: nova.flavor KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string RabbitPassword: default: 'guest' @@ -70,7 +69,6 @@ parameters: description: The user name for SNMPd with readonly rights running on all Overcloud nodes type: string SnmpdReadonlyUserPassword: - default: unset description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true @@ -101,6 +99,10 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + TimeZone: + default: 'UTC' + description: The timezone to be set on Cinder nodes. + type: string GlanceApiVirtualIP: type: string default: '' @@ -112,6 +114,13 @@ parameters: description: > Heat action when to apply network configuration changes default: ['CREATE'] + SoftwareConfigTransport: + default: POLL_SERVER_CFN + description: | + How the server should receive the metadata required for software configuration. 
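Editor's note: TimeZone and SoftwareConfigTransport are simple per-role knobs added across the node templates; overriding both is a one-stanza environment change (values are illustrative):

parameter_defaults:
  TimeZone: 'Europe/Prague'               # any tz database name
  SoftwareConfigTransport: POLL_TEMP_URL  # poll an object-store temp URL instead of the Heat CFN API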
+ type: string + constraints: + - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE] CloudDomain: default: '' type: string @@ -124,6 +133,10 @@ parameters: Extra properties or metadata passed to Nova for the created nodes in the overcloud. It's accessible via the Nova metadata API. type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -139,7 +152,9 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + software_config_transport: {get_param: SoftwareConfigTransport} metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -161,6 +176,11 @@ resources: NodeUserData: type: OS::TripleO::NodeUserData + ExternalPort: + type: OS::TripleO::BlockStorage::Ports::ExternalPort + properties: + ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + InternalApiPort: type: OS::TripleO::BlockStorage::Ports::InternalApiPort properties: @@ -176,21 +196,37 @@ resources: properties: ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + TenantPort: + type: OS::TripleO::BlockStorage::Ports::TenantPort + properties: + ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + + ManagementPort: + type: OS::TripleO::BlockStorage::Ports::ManagementPort + properties: + ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + NetworkConfig: type: OS::TripleO::BlockStorage::Net::SoftwareConfig properties: ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]} + ExternalIp: {get_attr: [ExternalPort, ip_address]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + TenantIp: {get_attr: [TenantPort, ip_address]} + ManagementIp: {get_attr: [ManagementPort, ip_address]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -209,7 +245,7 @@ resources: config: {get_resource: BlockStorageConfig} input_values: debug: {get_param: Debug} - cinder_dsn: {list_join: ['', ['mysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIP} , '/cinder']]} + cinder_dsn: {list_join: ['', ['mysql+pymysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIP} , '/cinder']]} snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} cinder_lvm_loop_device_size: @@ -226,6 +262,7 @@ resources: rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} rabbit_client_port: {get_param: RabbitClientPort} ntp_servers: {get_param: NtpServer} + timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} @@ -268,6 +305,7 @@ resources: cinder_iscsi_ip_address: {get_input: cinder_iscsi_ip_address} cinder::glance::glance_api_servers: 
{get_input: glance_api_servers} ntp::servers: {get_input: ntp_servers} + timezone::timezone: {get_input: timezone} tripleo::packages::enable_install: {get_input: enable_package_install} tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} @@ -314,6 +352,9 @@ outputs: description: Heat resource handle for the block storage server value: {get_resource: BlockStorage} + external_ip_address: + description: IP address of the server in the external network + value: {get_attr: [ExternalPort, ip_address]} internal_api_ip_address: description: IP address of the server in the internal_api network value: {get_attr: [InternalApiPort, ip_address]} @@ -323,6 +364,12 @@ outputs: storage_mgmt_ip_address: description: IP address of the server in the storage_mgmt network value: {get_attr: [StorageMgmtPort, ip_address]} + tenant_ip_address: + description: IP address of the server in the tenant network + value: {get_attr: [TenantPort, ip_address]} + management_ip_address: + description: IP address of the server in the management network + value: {get_attr: [ManagementPort, ip_address]} config_identifier: description: identifier which changes if the node configuration may need re-applying value: diff --git a/puppet/compute.yaml b/puppet/compute.yaml index 4f430f65..a48073ee 100644 --- a/puppet/compute.yaml +++ b/puppet/compute.yaml @@ -1,11 +1,10 @@ -heat_template_version: 2015-04-30 +heat_template_version: 2015-10-15 description: > OpenStack hypervisor node configured via Puppet. parameters: AdminPassword: - default: unset description: The password for the keystone admin account, used for monitoring, querying neutron etc. type: string hidden: true @@ -16,12 +15,10 @@ parameters: constraints: - allowed_values: ['', Present] CeilometerMeteringSecret: - default: unset description: Secret shared by the ceilometer services. type: string hidden: true CeilometerPassword: - default: unset description: The password for the ceilometer service account. type: string hidden: true @@ -61,7 +58,7 @@ parameters: description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. type: string KeyName: - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string default: default constraints: @@ -80,7 +77,7 @@ parameters: to create provider networks (and we use this for the default floating network) - if changing this either use different post-install network scripts or be sure to keep 'datacentre' as a mapping network name. - type: string + type: comma_delimited_list default: "datacentre:br-ex" NeutronEnableTunnelling: type: string @@ -91,7 +88,7 @@ parameters: Enable/disable the L2 population feature in the Neutron agents. default: "False" NeutronFlatNetworks: - type: string + type: comma_delimited_list default: 'datacentre' description: > If set, flat networks to configure in neutron plugins. @@ -99,18 +96,17 @@ parameters: type: string default: '' # Has to be here because of the ignored empty value bug NeutronNetworkType: - type: string - description: The tenant network type for Neutron, either gre or vxlan. + type: comma_delimited_list + description: The tenant network type for Neutron. default: 'vxlan' NeutronNetworkVLANRanges: - default: 'datacentre' + default: 'datacentre:1:1000' description: > The Neutron ML2 and OpenVSwitch vlan mapping range to support. 
See the Neutron documentation for permitted values. Defaults to permitting any VLAN on the 'datacentre' physical network (See NeutronBridgeMappings). type: comma_delimited_list NeutronPassword: - default: unset description: The password for the neutron service account, used by neutron agents. type: string hidden: true @@ -123,10 +119,9 @@ parameters: description: A port to add to the NeutronPhysicalBridge. type: string NeutronTunnelTypes: - type: string + type: comma_delimited_list description: | - The tunnel types for the Neutron tenant network. To specify multiple - values, use a comma separated string, like so: 'gre,vxlan' + The tunnel types for the Neutron tenant network. default: 'vxlan' NeutronTunnelIdRanges: description: | @@ -147,7 +142,6 @@ parameters: default: 'False' type: string NeutronMetadataProxySharedSecret: - default: 'unset' description: Shared secret to prevent spoofing type: string hidden: true @@ -158,7 +152,7 @@ parameters: from neutron.core_plugins namespace. type: string NeutronServicePlugins: - default: "router" + default: "router,qos" description: | Comma-separated list of service plugin entrypoints to be loaded from the neutron.service_plugins namespace. @@ -171,9 +165,13 @@ parameters: NeutronMechanismDrivers: default: 'openvswitch' description: | - The mechanism drivers for the Neutron tenant network. To specify multiple - values, use a comma separated string, like so: 'openvswitch,l2_population' - type: string + The mechanism drivers for the Neutron tenant network. + type: comma_delimited_list + NeutronAgentExtensions: + default: "qos" + description: | + Comma-separated list of extensions enabled for the Neutron agents. + type: comma_delimited_list # Not relevant for Computes, should be removed NeutronAllowL3AgentFailover: default: 'True' @@ -202,7 +200,7 @@ parameters: type: json NovaComputeLibvirtType: type: string - default: '' + default: kvm NovaComputeLibvirtVifDriver: default: '' description: Libvirt VIF driver configuration for the network @@ -212,7 +210,6 @@ parameters: description: Whether to enable or not the Rbd backend for Nova type: boolean NovaPassword: - default: unset description: The password for the nova service account, used by nova-api. type: string hidden: true @@ -258,7 +255,6 @@ parameters: description: The user name for SNMPd with readonly rights running on all Overcloud nodes type: string SnmpdReadonlyUserPassword: - default: unset description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true @@ -276,6 +272,10 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + TimeZone: + default: 'UTC' + description: The timezone to be set on compute nodes. + type: string UpdateIdentifier: default: '' type: string @@ -290,19 +290,29 @@ parameters: description: > Heat action when to apply network configuration changes default: ['CREATE'] + SoftwareConfigTransport: + default: POLL_SERVER_CFN + description: | + How the server should receive the metadata required for software configuration. + type: string + constraints: + - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE] CloudDomain: default: '' type: string description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. 
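Editor's note: several Neutron parameters in this hunk (NeutronNetworkType, NeutronTunnelTypes, NeutronMechanismDrivers, NeutronFlatNetworks, NeutronBridgeMappings) switch from string to comma_delimited_list, so Heat should accept either spelling when they are overridden; a sketch:

parameter_defaults:
  NeutronNetworkType: 'vxlan,gre'   # comma-delimited string form
  NeutronMechanismDrivers:          # native YAML list form
    - openvswitch
    - l2population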
- ServerMetadata: default: {} description: > Extra properties or metadata passed to Nova for the created nodes in the overcloud. It's accessible via the Nova metadata API. type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -320,7 +330,9 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + software_config_transport: {get_param: SoftwareConfigTransport} metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -342,6 +354,11 @@ resources: NodeUserData: type: OS::TripleO::NodeUserData + ExternalPort: + type: OS::TripleO::Compute::Ports::ExternalPort + properties: + ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + InternalApiPort: type: OS::TripleO::Compute::Ports::InternalApiPort properties: @@ -352,26 +369,42 @@ resources: properties: ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + StorageMgmtPort: + type: OS::TripleO::Compute::Ports::StorageMgmtPort + properties: + ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + TenantPort: type: OS::TripleO::Compute::Ports::TenantPort properties: ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + ManagementPort: + type: OS::TripleO::Compute::Ports::ManagementPort + properties: + ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]} + ExternalIp: {get_attr: [ExternalPort, ip_address]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} TenantIp: {get_attr: [TenantPort, ip_address]} + ManagementIp: {get_attr: [ManagementPort, ip_address]} NetworkConfig: type: OS::TripleO::Compute::Net::SoftwareConfig properties: ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -403,6 +436,7 @@ resources: - common - cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre - nova_nuage_data # Optionally provided by ComputeExtraConfigPre + - midonet_data # Optionally provided by AllNodesExtraConfig datafiles: compute_extraconfig: mapped_data: {get_param: NovaComputeExtraConfig} @@ -449,16 +483,17 @@ resources: neutron::rabbit_user: {get_input: rabbit_username} neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} neutron::rabbit_port: {get_input: rabbit_client_port} - neutron_flat_networks: {get_input: neutron_flat_networks} + neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks} neutron_host: {get_input: neutron_host} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} - neutron_tenant_network_type: {get_input: neutron_tenant_network_type} - neutron_tunnel_types: {get_input: neutron_tunnel_types} + neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types} + neutron::agents::ml2::ovs::tunnel_types: {get_input:
neutron_tunnel_types} + neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions} neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges} - neutron_bridge_mappings: {get_input: neutron_bridge_mappings} + neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} neutron_physical_bridge: {get_input: neutron_physical_bridge} @@ -472,11 +507,12 @@ resources: neutron::core_plugin: {get_input: neutron_core_plugin} neutron::service_plugins: {get_input: neutron_service_plugins} neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers} - neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers} + neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers} neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device} keystone_public_api_virtual_ip: {get_input: keystone_vip} admin_password: {get_input: admin_password} ntp::servers: {get_input: ntp_servers} + timezone::timezone: {get_input: timezone} tripleo::packages::enable_install: {get_input: enable_package_install} tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} @@ -508,36 +544,43 @@ resources: snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} - neutron_flat_networks: {get_param: NeutronFlatNetworks} + neutron_flat_networks: + str_replace: + template: NETWORKS + params: + NETWORKS: {get_param: NeutronFlatNetworks} neutron_host: {get_param: NeutronHost} neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]} - neutron_tenant_network_type: {get_param: NeutronNetworkType} - neutron_tunnel_types: {get_param: NeutronTunnelTypes} neutron_tunnel_id_ranges: str_replace: - template: "['RANGES']" + template: RANGES params: - RANGES: - list_join: - - "','" - - {get_param: NeutronTunnelIdRanges} + RANGES: {get_param: NeutronTunnelIdRanges} neutron_vni_ranges: str_replace: - template: "['RANGES']" + template: RANGES params: - RANGES: - list_join: - - "','" - - {get_param: NeutronVniRanges} + RANGES: {get_param: NeutronVniRanges} + neutron_tenant_network_types: + str_replace: + template: TYPES + params: + TYPES: {get_param: NeutronNetworkType} + neutron_tunnel_types: + str_replace: + template: TYPES + params: + TYPES: {get_param: NeutronTunnelTypes} neutron_network_vlan_ranges: str_replace: - template: "['RANGES']" + template: RANGES + params: + RANGES: {get_param: NeutronNetworkVLANRanges} + neutron_bridge_mappings: + str_replace: + template: MAPPINGS params: - RANGES: - list_join: - - "','" - - {get_param: NeutronNetworkVLANRanges} - neutron_bridge_mappings: {get_param: NeutronBridgeMappings} + MAPPINGS: {get_param: NeutronBridgeMappings} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} neutron_physical_bridge: {get_param: NeutronPhysicalBridge} @@ -549,21 +592,24 @@ resources: neutron_core_plugin: {get_param: NeutronCorePlugin} neutron_service_plugins: str_replace: - template: "['PLUGINS']" + template: PLUGINS params: - PLUGINS: - list_join: 
- - "','" - - {get_param: NeutronServicePlugins} + PLUGINS: {get_param: NeutronServicePlugins} neutron_type_drivers: str_replace: - template: "['DRIVERS']" + template: DRIVERS + params: + DRIVERS: {get_param: NeutronTypeDrivers} + neutron_mechanism_drivers: + str_replace: + template: MECHANISMS + params: + MECHANISMS: {get_param: NeutronMechanismDrivers} + neutron_agent_extensions: + str_replace: + template: AGENT_EXTENSIONS params: - DRIVERS: - list_join: - - "','" - - {get_param: NeutronTypeDrivers} - neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers} + AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]} neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri]} @@ -574,6 +620,7 @@ resources: rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} rabbit_client_port: {get_param: RabbitClientPort} ntp_servers: {get_param: NtpServer} + timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} @@ -616,15 +663,24 @@ outputs: ip_address: description: IP address of the server in the ctlplane network value: {get_attr: [NovaCompute, networks, ctlplane, 0]} + external_ip_address: + description: IP address of the server in the external network + value: {get_attr: [ExternalPort, ip_address]} internal_api_ip_address: description: IP address of the server in the internal_api network value: {get_attr: [InternalApiPort, ip_address]} storage_ip_address: description: IP address of the server in the storage network value: {get_attr: [StoragePort, ip_address]} + storage_mgmt_ip_address: + description: IP address of the server in the storage_mgmt network + value: {get_attr: [StorageMgmtPort, ip_address]} tenant_ip_address: description: IP address of the server in the tenant network value: {get_attr: [TenantPort, ip_address]} + management_ip_address: + description: IP address of the server in the management network + value: {get_attr: [ManagementPort, ip_address]} hostname: description: Hostname of the server value: {get_attr: [NovaCompute, name]} diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml index bfb69a15..d250dd70 100644 --- a/puppet/controller-post.yaml +++ b/puppet/controller-post.yaml @@ -17,6 +17,13 @@ parameters: resources: + ControllerPrePuppet: + type: OS::TripleO::Tasks::ControllerPrePuppet + properties: + servers: {get_param: servers} + input_values: + update_identifier: {get_param: NodeConfigIdentifiers} + ControllerPuppetConfig: type: OS::TripleO::ControllerConfig @@ -26,6 +33,7 @@ resources: # e.g all Deployment resources should have a *Deployment_StepN suffix ControllerLoadBalancerDeployment_Step1: type: OS::Heat::StructuredDeployments + depends_on: ControllerPrePuppet properties: name: ControllerLoadBalancerDeployment_Step1 servers: {get_param: servers} @@ -104,10 +112,18 @@ resources: step: 5 update_identifier: {get_param: NodeConfigIdentifiers} + ControllerPostPuppet: + type: OS::TripleO::Tasks::ControllerPostPuppet + depends_on: ControllerOvercloudServicesDeployment_Step6 + properties: + servers: {get_param: servers} + input_values: + update_identifier: {get_param: NodeConfigIdentifiers} + # Note, this should come last, so use depends_on to ensure # this is created after any other resources. 
ExtraConfig: - depends_on: ControllerOvercloudServicesDeployment_Step5 + depends_on: ControllerPostPuppet type: OS::TripleO::NodeExtraConfigPost properties: servers: {get_param: servers} diff --git a/puppet/controller.yaml b/puppet/controller.yaml index 622a444f..55c864f2 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -1,4 +1,4 @@ -heat_template_version: 2015-04-30 +heat_template_version: 2015-10-15 description: > OpenStack controller node configured by Puppet. @@ -10,12 +10,10 @@ parameters: type: string hidden: true AdminPassword: - default: unset description: The password for the keystone admin account, used for monitoring, querying neutron etc. type: string hidden: true AdminToken: - default: unset description: The keystone auth secret and db password. type: string hidden: true @@ -27,18 +25,25 @@ parameters: description: The ceilometer backend type. type: string CeilometerMeteringSecret: - default: unset description: Secret shared by the ceilometer services. type: string hidden: true CeilometerPassword: - default: unset description: The password for the ceilometer service and db account. type: string hidden: true CinderApiVirtualIP: type: string default: '' + CeilometerWorkers: + default: 0 + description: Number of workers for Ceilometer service. + type: number + CinderEnableDBPurge: + default: true + description: | + Whether to create cron job for purging soft deleted rows in Cinder database. + type: boolean CinderEnableNfsBackend: default: false description: Whether to enable or not the NFS backend for Cinder @@ -72,7 +77,6 @@ parameters: CinderEnableNfsBackend is true. type: comma_delimited_list CinderPassword: - default: unset description: The password for the cinder service and db account, used by cinder-api. type: string hidden: true @@ -81,8 +85,12 @@ parameters: description: Contains parameters to configure Cinder backends. Typically set via parameter_defaults in the resource registry. type: json + CinderWorkers: + default: 0 + description: Number of workers for Cinder service. + type: number CloudName: - default: '' + default: overcloud description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org type: string ControllerExtraConfig: @@ -90,6 +98,15 @@ parameters: description: | Controller specific hiera configuration data to inject into the cluster. type: json + ControllerIPs: + default: {} + description: > + A network mapped list of IPs to assign to Controllers in the following form: + { + "internal_api": ["a.b.c.d", "e.f.g.h"], + ... + } + type: json ControlVirtualInterface: default: 'br-ex' description: Interface where virtual ip will be assigned. @@ -170,7 +187,6 @@ parameters: type: string default: '' GlancePassword: - default: unset description: The password for the glance service and db account, used by the glance services. type: string hidden: true @@ -209,15 +225,17 @@ parameters: default: /dev/log description: Syslog address where HAproxy will send its log type: string + GlanceWorkers: + default: 0 + description: Number of workers for Glance service. + type: number HeatPassword: - default: unset description: The password for the Heat service and db account, used by the Heat services. type: string hidden: true HeatStackDomainAdminPassword: description: Password for heat_domain_admin user. 
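Editor's note: the ControllerPrePuppet/ControllerPostPuppet tasks above are resolved through the resource registry, giving operators a hook before and after the whole puppet run (with ExtraConfig now gated on the post-puppet hook). A sketch of mapping them to custom templates; the file paths are hypothetical:

resource_registry:
  OS::TripleO::Tasks::ControllerPrePuppet: /home/stack/templates/pre-puppet.yaml
  OS::TripleO::Tasks::ControllerPostPuppet: /home/stack/templates/post-puppet.yaml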
type: string - default: '' hidden: true HeatAuthEncryptionKey: description: Auth encryption key for heat-engine @@ -227,6 +245,10 @@ parameters: default: '*' description: A list of IP/Hostname allowed to connect to horizon type: comma_delimited_list + HeatWorkers: + default: 0 + description: Number of workers for Heat service. + type: number HorizonSecret: description: Secret key for Django type: string @@ -246,7 +268,7 @@ parameters: type: string KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string constraints: - custom_constraint: nova.keypair @@ -254,6 +276,11 @@ parameters: default: '' description: Keystone self-signed certificate authority certificate. type: string + KeystoneEnableDBPurge: + default: true + description: | + Whether to create cron job for purging soft deleted rows in Keystone database. + type: boolean KeystoneSigningCertificate: default: '' description: Keystone certificate for verifying token validity. @@ -294,6 +321,18 @@ parameters: default: false description: Whether IPtables rules should be purged before setting up the new ones. type: boolean + KeystoneWorkers: + default: 0 + description: Number of workers for Keystone service. + type: number + SaharaApiVirtualIP: + type: string + default: '' + SaharaPassword: + default: unset + description: The password for the sahara service account, used by sahara-api. + type: string + hidden: true MysqlClusterUniquePart: description: A unique identifier of the MySQL cluster the controller is in. type: string @@ -328,7 +367,7 @@ parameters: to create provider networks (and we use this for the default floating network) - if changing this either use different post-install network scripts or be sure to keep 'datacentre' as a mapping network name. - type: string + type: comma_delimited_list default: "datacentre:br-ex" NeutronDnsmasqOptions: default: 'dhcp-option-force=26,1400' @@ -367,7 +406,6 @@ parameters: description: Whether to configure Neutron Distributed Virtual Routers type: string NeutronMetadataProxySharedSecret: - default: 'unset' description: Shared secret to prevent spoofing type: string hidden: true @@ -378,7 +416,7 @@ parameters: from neutron.core_plugins namespace. type: string NeutronServicePlugins: - default: "router" + default: "router,qos" description: | Comma-separated list of service plugin entrypoints to be loaded from the neutron.service_plugins namespace. @@ -391,9 +429,8 @@ parameters: NeutronMechanismDrivers: default: 'openvswitch' description: | - The mechanism drivers for the Neutron tenant network. To specify multiple - values, use a comma separated string, like so: 'openvswitch,l2_population' - type: string + The mechanism drivers for the Neutron tenant network. + type: comma_delimited_list NeutronAllowL3AgentFailover: default: 'True' description: Allow automatic l3-agent failover @@ -411,7 +448,7 @@ parameters: Enable/disable the L2 population feature in the Neutron agents. default: "False" NeutronFlatNetworks: - type: string + type: comma_delimited_list default: 'datacentre' description: If set, flat networks to configure in neutron plugins. NeutronL3HA: @@ -420,17 +457,16 @@ parameters: type: string NeutronNetworkType: default: 'vxlan' - description: The tenant network type for Neutron, either gre or vxlan. - type: string + description: The tenant network type for Neutron. 
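Editor's note: the ControllerIPs map introduced above, together with the IPPool and NodeIndex properties now passed to each controller port, allows predictable per-node addressing, assuming the registered port templates honor those properties. The expected shape, following the parameter's own description (addresses are placeholders):

parameter_defaults:
  ControllerIPs:
    internal_api:
      - 172.16.2.251
      - 172.16.2.252
    storage:
      - 172.16.1.251
      - 172.16.1.252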
+ type: comma_delimited_list NeutronNetworkVLANRanges: - default: 'datacentre' + default: 'datacentre:1:1000' description: > The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the Neutron documentation for permitted values. Defaults to permitting any VLAN on the 'datacentre' physical network (See NeutronBridgeMappings). type: comma_delimited_list NeutronPassword: - default: unset description: The password for the neutron service and db account, used by neutron agents. type: string hidden: true @@ -463,9 +499,8 @@ parameters: NeutronTunnelTypes: default: 'vxlan' description: | - The tunnel types for the Neutron tenant network. To specify multiple - values, use a comma separated string, like so: 'gre,vxlan' - type: string + The tunnel types for the Neutron tenant network. + type: comma_delimited_list NeutronTunnelIdRanges: description: | Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges @@ -478,14 +513,36 @@ parameters: of VXLAN VNI IDs that are available for tenant network allocation default: ["1:1000", ] type: comma_delimited_list + NeutronPluginExtensions: + default: "qos" + description: | + Comma-separated list of extensions enabled for the Neutron plugin. + type: comma_delimited_list + NeutronAgentExtensions: + default: "qos" + description: | + Comma-separated list of extensions enabled for the Neutron agents. + type: comma_delimited_list NovaApiVirtualIP: type: string default: '' + NeutronWorkers: + default: 0 + description: Number of workers for Neutron service. + type: number + NovaEnableDBPurge: + default: true + description: | + Whether to create cron job for purging soft deleted rows in Nova database. + type: boolean NovaPassword: - default: unset description: The password for the nova service and db account, used by nova-api. type: string hidden: true + NovaWorkers: + default: 0 + description: Number of workers for Nova service. + type: number MongoDbNoJournal: default: false description: Should MongoDb journaling be disabled @@ -542,12 +599,10 @@ parameters: description: The user name for SNMPd with readonly rights running on all Overcloud nodes type: string SnmpdReadonlyUserPassword: - default: unset description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true SwiftHashSuffix: - default: unset description: A random string to be used as a salt when hashing to determine mappings in the ring. hidden: true @@ -565,7 +620,6 @@ parameters: description: Partition Power to use when building Swift rings type: number SwiftPassword: - default: unset description: The password for the swift service account, used by the swift proxy services. hidden: true @@ -577,6 +631,14 @@ parameters: type: number default: 3 description: How many replicas to use in the swift rings. + SwiftWorkers: + default: 0 + description: Number of workers for Swift service. + type: number + TimeZone: + default: 'UTC' + description: The timezone to be set on controller nodes. + type: string VirtualIP: # DEPRECATED: use per service settings instead type: string default: '' # Has to be here because of the ignored empty value bug @@ -632,6 +694,13 @@ parameters: NodeIndex: type: number default: 0 + SoftwareConfigTransport: + default: POLL_SERVER_CFN + description: | + How the server should receive the metadata required for software configuration. 
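Editor's note: the *Workers counts added for the controller services default to 0 in the template, and the *EnableDBPurge booleans control whether the soft-delete purge cron jobs are created; both are ordinary environment overrides (illustrative values):

parameter_defaults:
  KeystoneWorkers: 8
  NovaWorkers: 8
  NeutronWorkers: 4
  NovaEnableDBPurge: false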
+ type: string + constraints: + - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE] CloudDomain: default: '' type: string @@ -644,6 +713,10 @@ parameters: Extra properties or metadata passed to Nova for the created nodes in the overcloud. It's accessible via the Nova metadata API. type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -659,7 +732,9 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + software_config_transport: {get_param: SoftwareConfigTransport} metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -684,26 +759,41 @@ resources: ExternalPort: type: OS::TripleO::Controller::Ports::ExternalPort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} InternalApiPort: type: OS::TripleO::Controller::Ports::InternalApiPort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} StoragePort: type: OS::TripleO::Controller::Ports::StoragePort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} StorageMgmtPort: type: OS::TripleO::Controller::Ports::StorageMgmtPort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} TenantPort: type: OS::TripleO::Controller::Ports::TenantPort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} + ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} + + ManagementPort: + type: OS::TripleO::Controller::Ports::ManagementPort + properties: ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} NetIpMap: @@ -715,6 +805,7 @@ resources: StorageIp: {get_attr: [StoragePort, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} TenantIp: {get_attr: [TenantPort, ip_address]} + ManagementIp: {get_attr: [ManagementPort, ip_address]} NetIpSubnetMap: type: OS::TripleO::Network::Ports::NetIpSubnetMap @@ -725,6 +816,7 @@ resources: StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetworkConfig: type: OS::TripleO::Controller::Net::SoftwareConfig @@ -735,6 +827,7 @@ resources: StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -762,6 +855,7 @@ resources: server: {get_resource: Controller} NodeIndex: {get_param: NodeIndex} + ControllerDeployment: type: OS::TripleO::SoftwareDeployment depends_on: NetworkDeployment @@ -771,6 +865,14 @@ resources: server: {get_resource: Controller} input_values: bootstack_nodeid: {get_attr: [Controller, name]} + ceilometer_workers: {get_param: CeilometerWorkers} + cinder_workers: {get_param: CinderWorkers} + glance_workers: {get_param: GlanceWorkers} + heat_workers: {get_param: HeatWorkers} + keystone_workers: {get_param: 
KeystoneWorkers} + nova_workers: {get_param: NovaWorkers} + neutron_workers: {get_param: NeutronWorkers} + swift_workers: {get_param: SwiftWorkers} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} neutron_enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata} @@ -801,17 +903,15 @@ resources: admin_token: {get_param: AdminToken} neutron_public_interface_ip: {get_param: NeutronPublicInterfaceIP} debug: {get_param: Debug} + cinder_enable_db_purge: {get_param: CinderEnableDBPurge} cinder_enable_nfs_backend: {get_param: CinderEnableNfsBackend} cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend} cinder_nfs_mount_options: {get_param: CinderNfsMountOptions} cinder_nfs_servers: str_replace: - template: "['SERVERS']" + template: SERVERS params: - SERVERS: - list_join: - - "','" - - {get_param: CinderNfsServers} + SERVERS: {get_param: CinderNfsServers} cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize} cinder_password: {get_param: CinderPassword} cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend} @@ -820,7 +920,7 @@ resources: cinder_dsn: list_join: - '' - - - 'mysql://cinder:' + - - 'mysql+pymysql://cinder:' - {get_param: CinderPassword} - '@' - {get_param: MysqlVirtualIP} @@ -837,7 +937,7 @@ resources: glance_dsn: list_join: - '' - - - 'mysql://glance:' + - - 'mysql+pymysql://glance:' - {get_param: GlancePassword} - '@' - {get_param: MysqlVirtualIP} @@ -847,7 +947,7 @@ resources: heat_dsn: list_join: - '' - - - 'mysql://heat:' + - - 'mysql+pymysql://heat:' - {get_param: HeatPassword} - '@' - {get_param: MysqlVirtualIP} @@ -859,10 +959,11 @@ resources: keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey} keystone_notification_driver: {get_param: KeystoneNotificationDriver} keystone_notification_format: {get_param: KeystoneNotificationFormat} + keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge} keystone_dsn: list_join: - '' - - - 'mysql://keystone:' + - - 'mysql+pymysql://keystone:' - {get_param: AdminToken} - '@' - {get_param: MysqlVirtualIP} @@ -887,73 +988,88 @@ resources: template: tripleo-CLUSTER params: CLUSTER: {get_param: MysqlClusterUniquePart} - neutron_flat_networks: {get_param: NeutronFlatNetworks} + neutron_flat_networks: + str_replace: + template: NETWORKS + params: + NETWORKS: {get_param: NeutronFlatNetworks} neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} neutron_agent_mode: {get_param: NeutronAgentMode} neutron_router_distributed: {get_param: NeutronDVR} neutron_core_plugin: {get_param: NeutronCorePlugin} neutron_service_plugins: str_replace: - template: "['PLUGINS']" + template: PLUGINS params: - PLUGINS: - list_join: - - "','" - - {get_param: NeutronServicePlugins} + PLUGINS: {get_param: NeutronServicePlugins} neutron_type_drivers: str_replace: - template: "['DRIVERS']" + template: DRIVERS params: - DRIVERS: - list_join: - - "','" - - {get_param: NeutronTypeDrivers} + DRIVERS: {get_param: NeutronTypeDrivers} neutron_enable_dhcp_agent: {get_param: NeutronEnableDHCPAgent} neutron_enable_l3_agent: {get_param: NeutronEnableL3Agent} neutron_enable_metadata_agent: {get_param: NeutronEnableMetadataAgent} neutron_enable_ovs_agent: {get_param: NeutronEnableOVSAgent} - neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers} + neutron_mechanism_drivers: + str_replace: + template: MECHANISMS + params: + MECHANISMS: {get_param: NeutronMechanismDrivers} neutron_allow_l3agent_failover: {get_param: 
NeutronAllowL3AgentFailover} neutron_l3_ha: {get_param: NeutronL3HA} neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} neutron_network_vlan_ranges: str_replace: - template: "['RANGES']" + template: RANGES + params: + RANGES: {get_param: NeutronNetworkVLANRanges} + neutron_bridge_mappings: + str_replace: + template: MAPPINGS params: - RANGES: - list_join: - - "','" - - {get_param: NeutronNetworkVLANRanges} - neutron_bridge_mappings: {get_param: NeutronBridgeMappings} + MAPPINGS: {get_param: NeutronBridgeMappings} neutron_external_network_bridge: {get_param: NeutronExternalNetworkBridge} neutron_public_interface: {get_param: NeutronPublicInterface} neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} neutron_public_interface_default_route: {get_param: NeutronPublicInterfaceDefaultRoute} neutron_public_interface_tag: {get_param: NeutronPublicInterfaceTag} - neutron_tenant_network_type: {get_param: NeutronNetworkType} - neutron_tunnel_types: {get_param: NeutronTunnelTypes} neutron_tunnel_id_ranges: str_replace: - template: "['RANGES']" + template: RANGES params: - RANGES: - list_join: - - "','" - - {get_param: NeutronTunnelIdRanges} + RANGES: {get_param: NeutronTunnelIdRanges} neutron_vni_ranges: str_replace: - template: "['RANGES']" + template: RANGES + params: + RANGES: {get_param: NeutronVniRanges} + neutron_tenant_network_types: + str_replace: + template: TYPES params: - RANGES: - list_join: - - "','" - - {get_param: NeutronVniRanges} + TYPES: {get_param: NeutronNetworkType} + neutron_tunnel_types: + str_replace: + template: TYPES + params: + TYPES: {get_param: NeutronTunnelTypes} + neutron_plugin_extensions: + str_replace: + template: PLUGIN_EXTENSIONS + params: + PLUGIN_EXTENSIONS: {get_param: NeutronPluginExtensions} + neutron_agent_extensions: + str_replace: + template: AGENT_EXTENSIONS + params: + AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} neutron_password: {get_param: NeutronPassword} neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions} neutron_dsn: list_join: - '' - - - 'mysql://neutron:' + - - 'mysql+pymysql://neutron:' - {get_param: NeutronPassword} - '@' - {get_param: MysqlVirtualIP} @@ -975,18 +1091,19 @@ resources: ceilometer_dsn: list_join: - '' - - - 'mysql://ceilometer:' + - - 'mysql+pymysql://ceilometer:' - {get_param: CeilometerPassword} - '@' - {get_param: MysqlVirtualIP} - '/ceilometer' snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} + nova_enable_db_purge: {get_param: NovaEnableDBPurge} nova_password: {get_param: NovaPassword} nova_dsn: list_join: - '' - - - 'mysql://nova:' + - - 'mysql+pymysql://nova:' - {get_param: NovaPassword} - '@' - {get_param: MysqlVirtualIP} @@ -1009,6 +1126,7 @@ resources: params: LIMIT: {get_param: RabbitFDLimit} ntp_servers: {get_param: NtpServer} + timezone: {get_param: TimeZone} control_virtual_interface: {get_param: ControlVirtualInterface} public_virtual_interface: {get_param: PublicVirtualInterface} swift_hash_suffix: {get_param: SwiftHashSuffix} @@ -1019,6 +1137,15 @@ resources: swift_mount_check: {get_param: SwiftMountCheck} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} + sahara_password: {get_param: SaharaPassword} + sahara_dsn: + list_join: + - '' + - - 'mysql://sahara:' + - {get_param: SaharaPassword} + - '@' + - {get_param: MysqlVirtualIP} + - '/sahara' swift_proxy_network: {get_attr: 
[NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} cinder_iscsi_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]} @@ -1041,6 +1168,7 @@ resources: rabbitmq_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]} redis_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RedisNetwork]}]} redis_vip: {get_param: RedisVirtualIP} + sahara_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]} memcached_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]} mysql_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} mysql_virtual_ip: {get_param: MysqlVirtualIP} @@ -1071,11 +1199,14 @@ resources: - vip_data # provided by vip-config - '"%{::osfamily}"' - common + - cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre + - cinder_eqlx_data # Optionally provided by ControllerExtraConfigPre - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre - neutron_nuage_data # Optionally provided by ControllerExtraConfigPre + - midonet_data #Optionally provided by AllNodesExtraConfig datafiles: controller_extraconfig: mapped_data: {get_param: ControllerExtraConfig} @@ -1111,6 +1242,7 @@ resources: swift::storage::all::storage_local_net_ip: {get_input: swift_management_network} swift::swift_hash_suffix: {get_input: swift_hash_suffix} swift::proxy::authtoken::admin_password: {get_input: swift_password} + swift::proxy::workers: {get_input: swift_workers} tripleo::ringbuilder::part_power: {get_input: swift_part_power} tripleo::ringbuilder::replicas: {get_input: swift_replicas} tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours} @@ -1121,6 +1253,7 @@ resources: tripleo::ringbuilder::build_ring: True # Cinder + cinder_enable_db_purge: {get_input: cinder_enable_db_purge} cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend} cinder_enable_rbd_backend: {get_input: cinder_enable_rbd_backend} cinder_nfs_mount_options: {get_input: cinder_nfs_mount_options} @@ -1151,6 +1284,7 @@ resources: glance::api::registry_host: {get_input: glance_registry_host} glance::api::keystone_password: {get_input: glance_password} glance::api::debug: {get_input: debug} + glance::api::workers: {get_input: glance_workers} glance_notifier_strategy: {get_input: glance_notifier_strategy} glance_log_file: {get_input: glance_log_file} glance_log_file: {get_input: glance_log_file} @@ -1162,6 +1296,7 @@ resources: glance::registry::identity_uri: {get_input: keystone_identity_uri} glance::registry::debug: {get_input: debug} glance::backend::swift::swift_store_auth_address: {get_input: keystone_auth_uri} + glance::registry::workers: {get_input: glance_workers} glance::backend::swift::swift_store_user: service:glance glance::backend::swift::swift_store_key: {get_input: glance_password} glance_backend: {get_input: glance_backend} @@ -1186,8 +1321,11 @@ resources: heat::identity_uri: {get_input: keystone_identity_uri} heat::keystone_password: {get_input: heat_password} heat::api::bind_host: {get_input: heat_api_network} + heat::api::workers: 
{get_input: heat_workers} heat::api_cloudwatch::bind_host: {get_input: heat_api_network} + heat::api_cloudwatch::workers: {get_input: heat_workers} heat::api_cfn::bind_host: {get_input: heat_api_network} + heat::api_cfn::workers: {get_input: heat_workers} heat::database_connection: {get_input: heat_dsn} heat::debug: {get_input: debug} heat::db::mysql::password: {get_input: heat_password} @@ -1216,6 +1354,10 @@ resources: keystone::endpoint::internal_url: {get_input: keystone_internal_url} keystone::endpoint::admin_url: {get_input: keystone_identity_uri} keystone::endpoint::region: {get_input: keystone_region} + keystone::admin_workers: {get_input: keystone_workers} + keystone::public_workers: {get_input: keystone_workers} + keystone_enable_db_purge: {get_input: keystone_enable_db_purge} + # MongoDB mongodb::server::bind_ip: {get_input: mongo_db_network} mongodb::server::nojournal: {get_input: mongodb_no_journal} @@ -1241,14 +1383,16 @@ resources: neutron::server::auth_uri: {get_input: keystone_auth_uri} neutron::server::identity_uri: {get_input: keystone_identity_uri} neutron::server::database_connection: {get_input: neutron_dsn} + neutron::server::api_workers: {get_input: neutron_workers} neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} - neutron_flat_networks: {get_input: neutron_flat_networks} + neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks} neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret} neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network} + neutron::agents::metadata::metadata_workers: {get_input: neutron_workers} neutron_agent_mode: {get_input: neutron_agent_mode} neutron_router_distributed: {get_input: neutron_router_distributed} neutron::core_plugin: {get_input: neutron_core_plugin} @@ -1258,20 +1402,22 @@ resources: neutron::enable_metadata_agent: {get_input: neutron_enable_metadata_agent} neutron::enable_ovs_agent: {get_input: neutron_enable_ovs_agent} neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers} - neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers} + neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers} + neutron::plugins::ml2::extension_drivers: {get_input: neutron_plugin_extensions} neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover} neutron::server::l3_ha: {get_input: neutron_l3_ha} neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network} neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges} - neutron_bridge_mappings: {get_input: neutron_bridge_mappings} + neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings} neutron_public_interface: {get_input: neutron_public_interface} neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device} neutron_public_interface_default_route: {get_input: neutron_public_interface_default_route} neutron_public_interface_tag: {get_input:
neutron_public_interface_tag} - neutron_tenant_network_type: {get_input: neutron_tenant_network_type} - neutron_tunnel_types: {get_input: neutron_tunnel_types} + neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types} + neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types} + neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions} neutron::server::auth_password: {get_input: neutron_password} neutron::agents::metadata::auth_password: {get_input: neutron_password} neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options} @@ -1319,6 +1465,9 @@ resources: nova::api::api_bind_address: {get_input: nova_api_network} nova::api::metadata_listen: {get_input: nova_metadata_network} nova::api::admin_password: {get_input: nova_password} + nova::api::osapi_compute_workers: {get_input: nova_workers} + nova::api::ec2_workers: {get_input: nova_workers} + nova::api::metadata_workers: {get_input: nova_workers} nova::database_connection: {get_input: nova_dsn} nova::glance_api_servers: {get_input: glance_api_servers} nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} @@ -1328,6 +1477,7 @@ resources: nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} nova::vncproxy::host: {get_input: nova_api_network} nova::db::mysql::password: {get_input: nova_password} + nova_enable_db_purge: {get_input: nova_enable_db_purge} # Horizon apache::ip: {get_input: horizon_network} @@ -1337,6 +1487,29 @@ resources: horizon::bind_address: {get_input: horizon_network} horizon::keystone_url: {get_input: keystone_auth_uri} + # Sahara + sahara::host: {get_input: sahara_api_network} + sahara::plugins: + - cdh + - hdp + - mapr + - vanilla + - spark + - storm + sahara::admin_password: {get_input: sahara_password} + sahara::auth_uri: {get_input: keystone_auth_uri} + sahara::admin_user: sahara + sahara::identity_uri: {get_input: keystone_identity_uri} + sahara::use_neutron: true + sahara::database_connection: {get_input: sahara_dsn} + sahara::debug: {get_input: debug} + sahara::rpc_backend: rabbit + sahara::rabbit_userid: {get_input: rabbit_username} + sahara::rabbit_password: {get_input: rabbit_password} + sahara::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} + sahara::rabbit_port: {get_input: rabbit_client_port} + sahara::db::mysql::password: {get_input: sahara_password} + # Rabbit rabbitmq::node_ip_address: {get_input: rabbitmq_network} rabbitmq::erlang_cookie: {get_input: rabbit_cookie} @@ -1353,17 +1526,13 @@ resources: memcached::listen_ip: {get_input: memcached_network} neutron_public_interface_ip: {get_input: neutron_public_interface_ip} ntp::servers: {get_input: ntp_servers} + timezone::timezone: {get_input: timezone} control_virtual_interface: {get_input: control_virtual_interface} public_virtual_interface: {get_input: public_virtual_interface} tripleo::loadbalancer::control_virtual_interface: {get_input: control_virtual_interface} tripleo::loadbalancer::public_virtual_interface: {get_input: public_virtual_interface} tripleo::loadbalancer::haproxy_log_address: {get_input: haproxy_log_address} - # NOTE(jaosorior): The service certificate configuration for - # HAProxy was left commented because to properly use this, we - # need to be able to set up the keystone endpoints. And - # currently that is not possible, but is being addressed by - # other commits. A subsequent commit will uncomment this. 
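The hunk that follows is the follow-up the removed note above promises: the service certificate line is uncommented, so the path published by NodeTLSData now lands in hiera and HAProxy can terminate TLS with it. Once rendered, it is an ordinary key/value entry; a minimal sketch of the resulting hieradata, with a purely hypothetical certificate path, would be:

    # rendered hiera entry (hypothetical path, for illustration only)
    tripleo::loadbalancer::service_certificate: '/etc/pki/tls/private/overcloud_endpoint.pem'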
- #tripleo::loadbalancer::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]} + tripleo::loadbalancer::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]} tripleo::packages::enable_install: {get_input: enable_package_install} tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} @@ -1414,6 +1583,9 @@ outputs: tenant_ip_address: description: IP address of the server in the tenant network value: {get_attr: [TenantPort, ip_address]} + management_ip_address: + description: IP address of the server in the management network + value: {get_attr: [ManagementPort, ip_address]} hostname: description: Hostname of the server value: {get_attr: [Controller, name]} diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml new file mode 100644 index 00000000..26ce7138 --- /dev/null +++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml @@ -0,0 +1,119 @@ +heat_template_version: 2015-10-15 + +description: Configure hieradata for all MidoNet nodes + +parameters: + # Parameters passed from the parent template + controller_servers: + type: json + compute_servers: + type: json + blockstorage_servers: + type: json + objectstorage_servers: + type: json + cephstorage_servers: + type: json + + EnableZookeeperOnController: + label: Enable Zookeeper On Controller + description: 'Whether to enable a Zookeeper cluster on the Controller' + type: boolean + default: false + EnableCassandraOnController: + label: Enable Cassandra On Controller + description: 'Whether to enable a Cassandra cluster on the Controller' + type: boolean + default: false + CassandraStoragePort: + label: Cassandra Storage Port + description: 'The Cassandra port for inter-node communication' + type: string + default: '7000' + CassandraSslStoragePort: + label: Cassandra SSL Storage Port + description: 'The SSL port for encrypted communication. Unused unless enabled in encryption_options' + type: string + default: '7001' + CassandraClientPort: + label: Cassandra Client Port + description: 'Native Transport Port' + type: string + default: '9042' + CassandraClientPortThrift: + label: Cassandra Client Thrift Port + description: 'The port for the Thrift RPC service, which is used for client connections' + type: string + default: '9160' + TunnelZoneName: + label: Name of the Tunnel Zone + description: 'Name of the tunnel zone used to tunnel packets' + type: string + default: 'tunnelzone_tripleo' + TunnelZoneType: + label: Type of the Tunnel + description: 'Type of the tunnels on the overlay.
Choose between `gre` and `vxlan`' + type: string + default: 'vxlan' + +resources: + + NetworkMidoNetConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + midonet_data: + mapped_data: + enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController} + enable_cassandra_on_controller: {get_param: EnableCassandraOnController} + midonet_tunnelzone_name: {get_param: TunnelZoneName} + midonet_tunnelzone_type: {get_param: TunnelZoneType} + midonet_libvirt_qemu_data: | + user = "root" + group = "root" + cgroup_device_acl = [ + "/dev/null", "/dev/full", "/dev/zero", + "/dev/random", "/dev/urandom", + "/dev/ptmx", "/dev/kvm", "/dev/kqemu", + "/dev/rtc","/dev/hpet", "/dev/vfio/vfio", + "/dev/net/tun" + ] + tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort} + tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort} + tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort} + tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift} + tripleo::loadbalancer::midonet_api: true + # Missing Neutron Puppet data + neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver' + neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver' + neutron::plugins::midonet::midonet_api_port: 8081 + neutron::params::midonet_server_package: 'python-networking-midonet' + + # Make sure the l3 agent does not run + l3_agent_service: false + neutron::agents::l3::manage_service: false + neutron::agents::l3::enabled: false + + + NetworkMidonetDeploymentControllers: + type: OS::Heat::StructuredDeploymentGroup + properties: + config: {get_resource: NetworkMidoNetConfig} + servers: {get_param: controller_servers} + + NetworkMidonetDeploymentComputes: + type: OS::Heat::StructuredDeploymentGroup + properties: + config: {get_resource: NetworkMidoNetConfig} + servers: {get_param: compute_servers} + +outputs: + config_identifier: + value: + list_join: + - ' ' + - - {get_attr: [NetworkMidonetDeploymentControllers, deploy_stdouts]} + - {get_attr: [NetworkMidonetDeploymentComputes, deploy_stdouts]} diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml new file mode 100644 index 00000000..905f196d --- /dev/null +++ b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml @@ -0,0 +1,87 @@ +heat_template_version: 2015-10-15 + +description: Configure hieradata for Cinder Dell Storage Center configuration + +parameters: + server: + description: ID of the controller node to apply this config to + type: string + + # Config specific parameters, to be provided via parameter_defaults + CinderEnableDellScBackend: + type: boolean + default: true + CinderDellScBackendName: + type: string + default: 'tripleo_dellsc' + CinderDellScSanIp: + type: string + CinderDellScSanLogin: + type: string + default: 'Admin' + CinderDellScSanPassword: + type: string + hidden: true + CinderDellScSsn: + type: string + default: '64702' + CinderDellScIscsiIpAddress: + type: string + default: '' + CinderDellScIscsiPort: + type: string + default: '3260' + CinderDellScApiPort: + type: string + default: '3033' + CinderDellScServerFolder: + type: string + default: 'dellsc_server' + CinderDellScVolumeFolder: + type: string + default: 'dellsc_volume' + +resources: + CinderDellScConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config +
config: + hiera: + datafiles: + cinder_dellsc_data: + mapped_data: + cinder_enable_dellsc_backend: {get_input: EnableDellScBackend} + cinder::backend::dellsc_iscsi::volume_backend_name: {get_input: DellScBackendName} + cinder::backend::dellsc_iscsi::san_ip: {get_input: DellScSanIp} + cinder::backend::dellsc_iscsi::san_login: {get_input: DellScSanLogin} + cinder::backend::dellsc_iscsi::san_password: {get_input: DellScSanPassword} + cinder::backend::dellsc_iscsi::dell_sc_ssn: {get_input: DellScSsn} + cinder::backend::dellsc_iscsi::iscsi_ip_address: {get_input: DellScIscsiIpAddress} + cinder::backend::dellsc_iscsi::iscsi_port: {get_input: DellScIscsiPort} + cinder::backend::dellsc_iscsi::dell_sc_api_port: {get_input: DellScApiPort} + cinder::backend::dellsc_iscsi::dell_sc_server_folder: {get_input: DellScServerFolder} + cinder::backend::dellsc_iscsi::dell_sc_volume_folder: {get_input: DellScVolumeFolder} + + CinderDellScDeployment: + type: OS::Heat::StructuredDeployment + properties: + config: {get_resource: CinderDellScConfig} + server: {get_param: server} + input_values: + EnableDellScBackend: {get_param: CinderEnableDellScBackend} + DellScBackendName: {get_param: CinderDellScBackendName} + DellScSanIp: {get_param: CinderDellScSanIp} + DellScSanLogin: {get_param: CinderDellScSanLogin} + DellScSanPassword: {get_param: CinderDellScSanPassword} + DellScSsn: {get_param: CinderDellScSsn} + DellScIscsiIpAddress: {get_param: CinderDellScIscsiIpAddress} + DellScIscsiPort: {get_param: CinderDellScIscsiPort} + DellScApiPort: {get_param: CinderDellScApiPort} + DellScServerFolder: {get_param: CinderDellScServerFolder} + DellScVolumeFolder: {get_param: CinderDellScVolumeFolder} + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [CinderDellScDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml new file mode 100644 index 00000000..c73608f1 --- /dev/null +++ b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml @@ -0,0 +1,86 @@ +heat_template_version: 2015-10-15 + +description: Configure hieradata for Cinder Eqlx configuration + +parameters: + server: + description: ID of the controller node to apply this config to + type: string + + # Config specific parameters, to be provided via parameter_defaults + CinderEnableEqlxBackend: + type: boolean + default: true + CinderEqlxBackendName: + type: string + default: 'tripleo_eqlx' + CinderEqlxSanIp: + type: string + CinderEqlxSanLogin: + type: string + CinderEqlxSanPassword: + type: string + hidden: true + CinderEqlxSanThinProvision: + type: boolean + default: true + CinderEqlxGroupname: + type: string + default: 'group-0' + CinderEqlxPool: + type: string + default: 'default' + CinderEqlxChapLogin: + type: string + default: '' + CinderEqlxChapPassword: + type: string + default: '' + CinderEqlxUseChap: + type: boolean + default: false + +resources: + CinderEqlxConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + cinder_eqlx_data: + mapped_data: + cinder_enable_eqlx_backend: {get_input: EnableEqlxBackend} + cinder::backend::eqlx::volume_backend_name: {get_input: EqlxBackendName} + cinder::backend::eqlx::san_ip: {get_input: EqlxSanIp} + cinder::backend::eqlx::san_login: {get_input: EqlxSanLogin} + cinder::backend::eqlx::san_password: {get_input: EqlxSanPassword} + cinder::backend::eqlx::san_thin_provision: {get_input:
EqlxSanThinProvision} + cinder::backend::eqlx::eqlx_group_name: {get_input: EqlxGroupname} + cinder::backend::eqlx::eqlx_pool: {get_input: EqlxPool} + cinder::backend::eqlx::eqlx_use_chap: {get_input: EqlxUseChap} + cinder::backend::eqlx::eqlx_chap_login: {get_input: EqlxChapLogin} + cinder::backend::eqlx::eqlx_chap_password: {get_input: EqlxChapPassword} + + CinderEqlxDeployment: + type: OS::Heat::StructuredDeployment + properties: + config: {get_resource: CinderEqlxConfig} + server: {get_param: server} + input_values: + EnableEqlxBackend: {get_param: CinderEnableEqlxBackend} + EqlxBackendName: {get_param: CinderEqlxBackendName} + EqlxSanIp: {get_param: CinderEqlxSanIp} + EqlxSanLogin: {get_param: CinderEqlxSanLogin} + EqlxSanPassword: {get_param: CinderEqlxSanPassword} + EqlxSanThinProvision: {get_param: CinderEqlxSanThinProvision} + EqlxGroupname: {get_param: CinderEqlxGroupname} + EqlxPool: {get_param: CinderEqlxPool} + EqlxUseChap: {get_param: CinderEqlxUseChap} + EqlxChapLogin: {get_param: CinderEqlxChapLogin} + EqlxChapPassword: {get_param: CinderEqlxChapPassword} + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [CinderEqlxDeployment, deploy_stdout]} diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index b0e6ae96..f8ef6408 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -38,11 +38,18 @@ cinder::api::keystone_tenant: 'service' swift::proxy::authtoken::admin_tenant_name: 'service' ceilometer::api::keystone_tenant: 'service' heat::keystone_tenant: 'service' +sahara::admin_tenant_name: 'service' # keystone keystone::cron::token_flush::maxdelay: 3600 keystone::roles::admin::service_tenant: 'service' keystone::roles::admin::admin_tenant: 'admin' +keystone::cron::token_flush::destination: '/dev/null' +keystone::config::keystone_config: + DEFAULT/secure_proxy_ssl_header: + value: 'HTTP_X_FORWARDED_PROTO' + ec2/driver: + value: 'keystone.contrib.ec2.backends.sql.Ec2' #swift swift::proxy::pipeline: @@ -77,12 +84,15 @@ nova::notify_on_state_change: 'vm_and_task_state' nova::api::default_floating_pool: 'public' nova::api::osapi_v3: true nova::scheduler::filter::ram_allocation_ratio: '1.0' +nova::cron::archive_deleted_rows::hour: '*/12' +nova::cron::archive_deleted_rows::destination: '/dev/null' # ceilometer ceilometer::agent::auth::auth_endpoint_type: 'internalURL' # cinder cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler +cinder::cron::db_purge::destination: '/dev/null' # heat heat::engine::configure_delegated_roles: false @@ -118,6 +128,7 @@ tripleo::loadbalancer::nova_metadata: true tripleo::loadbalancer::nova_novncproxy: true tripleo::loadbalancer::mysql: true tripleo::loadbalancer::redis: true +tripleo::loadbalancer::sahara: true tripleo::loadbalancer::swift_proxy_server: true tripleo::loadbalancer::ceilometer: true tripleo::loadbalancer::heat_api: true diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml index 7e925d90..89577505 100644 --- a/puppet/hieradata/database.yaml +++ b/puppet/hieradata/database.yaml @@ -53,3 +53,10 @@ ceilometer::db::mysql::dbname: ceilometer ceilometer::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + +sahara::db::mysql::user: sahara +sahara::db::mysql::host: "%{hiera('mysql_virtual_ip')}" +sahara::db::mysql::dbname: sahara +sahara::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" diff --git 
a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index 7f8970cc..7444155c 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -22,6 +22,8 @@ if count(hiera('ntp::servers')) > 0 { include ::ntp } +include ::timezone + if str2bool(hiera('ceph_osd_selinux_permissive', true)) { exec { 'set selinux to permissive on boot': command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index f3a02eba..bb3575cf 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -22,6 +22,8 @@ if count(hiera('ntp::servers')) > 0 { include ::ntp } +include ::timezone + file { ['/etc/libvirt/qemu/networks/autostart/default.xml', '/etc/libvirt/qemu/networks/default.xml']: ensure => absent, @@ -68,11 +70,19 @@ if hiera('cinder_enable_nfs_backend', false) { } include ::nova::compute::libvirt +if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + file {'/etc/libvirt/qemu.conf': + ensure => present, + content => hiera('midonet_libvirt_qemu_data') + } +} include ::nova::network::neutron include ::neutron # If the value of core plugin is set to 'nuage', # include nuage agent, +# If the value of core plugin is set to 'midonet', +# include midonet agent, # else use the default value of 'ml2' if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { include ::nuage::vrs @@ -84,18 +94,24 @@ if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { nova_metadata_ip => hiera('nova_metadata_node_ips'), nova_auth_ip => hiera('keystone_public_api_virtual_ip'), } -} else { - class { '::neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), - tenant_network_types => [hiera('neutron_tenant_network_type')], - } +} +elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + + # TODO(devvesa) provide non-controller ips for these services + $zookeeper_node_ips = hiera('neutron_api_node_ips') + $cassandra_node_ips = hiera('neutron_api_node_ips') - class { '::neutron::agents::ml2::ovs': - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), + class {'::tripleo::network::midonet::agent': + zookeeper_servers => $zookeeper_node_ips, + cassandra_seeds => $cassandra_node_ips } +} +else { + + include ::neutron::plugins::ml2 + include ::neutron::agents::ml2::ovs - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { class { '::neutron::agents::n1kv_vem': n1kv_source => hiera('n1kv_vem_source', undef), n1kv_version => hiera('n1kv_vem_version', undef), diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index a8abbb77..ea63b1a8 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -39,6 +39,8 @@ if hiera('step') >= 2 { include ::ntp } + include ::timezone + # MongoDB if downcase(hiera('ceilometer_backend')) == 'mongodb' { include ::mongodb::globals @@ -101,6 +103,7 @@ if hiera('step') >= 2 { include ::neutron::db::mysql include ::cinder::db::mysql include ::heat::db::mysql + include ::sahara::db::mysql if downcase(hiera('ceilometer_backend')) == 'mysql' { include ::ceilometer::db::mysql } @@ -128,7 +131,7 @@ if hiera('step') >= 2 { # 
pre-install swift here so we can build rings include ::swift - $enable_ceph = hiera('ceph_storage_count', 0) > 0 + $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) if $enable_ceph { class { '::ceph::profile::params': @@ -164,13 +167,12 @@ if hiera('step') >= 2 { if hiera('step') >= 3 { include ::keystone + include ::keystone::config include ::keystone::roles::admin include ::keystone::endpoint #TODO: need a cleanup-keystone-tokens.sh solution here - keystone_config { - 'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2'; - } + file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]: ensure => 'directory', owner => 'keystone', @@ -230,13 +232,61 @@ if hiera('step') >= 3 { include ::nova::scheduler include ::nova::scheduler::filter - include ::neutron + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + + # TODO(devvesa) provide non-controller ips for these services + $zookeeper_node_ips = hiera('neutron_api_node_ips') + $cassandra_node_ips = hiera('neutron_api_node_ips') + + # Run zookeeper in the controller if configured + if hiera('enable_zookeeper_on_controller') { + class {'::tripleo::cluster::zookeeper': + zookeeper_server_ips => $zookeeper_node_ips, + zookeeper_client_ip => $ipaddress, + zookeeper_hostnames => hiera('controller_node_names') + } + } + + # Run cassandra in the controller if configured + if hiera('enable_cassandra_on_controller') { + class {'::tripleo::cluster::cassandra': + cassandra_servers => $cassandra_node_ips, + cassandra_ip => $ipaddress + } + } + + class {'::tripleo::network::midonet::agent': + zookeeper_servers => $zookeeper_node_ips, + cassandra_seeds => $cassandra_node_ips + } + + class {'::tripleo::network::midonet::api': + zookeeper_servers => $zookeeper_node_ips, + vip => $ipaddress, + keystone_ip => $ipaddress, + keystone_admin_token => hiera('keystone::admin_token'), + bind_address => $ipaddress, + admin_password => hiera('admin_password') + } + + # TODO: find a way to get an empty list from hiera + class {'::neutron': + service_plugins => [] + } + + } + else { + + # ML2 plugin + include ::neutron + } + include ::neutron::server include ::neutron::server::notifications # If the value of core plugin is set to 'nuage', - # include nuage core plugin, - # else use the default value of 'ml2' + # include nuage core plugin, and it does not + # need the l3, dhcp and metadata agents if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { include ::neutron::plugins::nuage } else { @@ -252,51 +302,57 @@ if hiera('step') >= 3 { require => Package['neutron'], } - class { '::neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), - tenant_network_types => [hiera('neutron_tenant_network_type')], - mechanism_drivers => [hiera('neutron_mechanism_drivers')], - } - class { '::neutron::agents::ml2::ovs': - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), - } - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::nexus1000v + # If the value of core plugin is set to 'midonet', + # skip all the ML2 configuration + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - class { '::neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), + class {'::neutron::plugins::midonet': + midonet_api_ip 
=> $ipaddress, + keystone_tenant => hiera('neutron::server::auth_tenant'), + keystone_password => hiera('neutron::server::auth_password') } + } else { + + include ::neutron::plugins::ml2 + include ::neutron::agents::ml2::ovs + + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus1000v - class { '::neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), + } + + class { '::n1k_vsm': + n1kv_source => hiera('n1kv_vsm_source', undef), + n1kv_version => hiera('n1kv_vsm_version', undef), + pacemaker_control => false, + } } - } - if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::ucsm - } - if 'cisco_nexus' in hiera('neutron_mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::nexus - include ::neutron::plugins::ml2::cisco::type_nexus_vxlan - } + if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::ucsm + } + if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus + include ::neutron::plugins::ml2::cisco::type_nexus_vxlan + } - if hiera('neutron_enable_bigswitch_ml2', false) { - include ::neutron::plugins::ml2::bigswitch::restproxy - } - neutron_l3_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); - } - neutron_dhcp_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); + if hiera('neutron_enable_bigswitch_ml2', false) { + include ::neutron::plugins::ml2::bigswitch::restproxy + } + neutron_l3_agent_config { + 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); + } + neutron_dhcp_agent_config { + 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); + } + Service['neutron-server'] -> Service['neutron-ovs-agent-service'] } Service['neutron-server'] -> Service['neutron-dhcp-service'] Service['neutron-server'] -> Service['neutron-l3'] - Service['neutron-server'] -> Service['neutron-ovs-agent-service'] Service['neutron-server'] -> Service['neutron-metadata'] } @@ -345,6 +401,48 @@ if hiera('step') >= 3 { } } + if hiera('cinder_enable_eqlx_backend', false) { + $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name') + + cinder_config { + "${cinder_eqlx_backend}/host": value => 'hostgroup'; + } + + cinder::backend::eqlx { $cinder_eqlx_backend : + volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef), + san_ip => hiera('cinder::backend::eqlx::san_ip', undef), + san_login => hiera('cinder::backend::eqlx::san_login', undef), + san_password => hiera('cinder::backend::eqlx::san_password', undef), + san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef), + eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef), + eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef), + eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef), + eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef), + eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_chap_password', undef), + } + } + + if hiera('cinder_enable_dellsc_backend', false) { + $cinder_dellsc_backend =
hiera('cinder::backend::dellsc_iscsi::volume_backend_name') + + cinder_config { + "${cinder_dellsc_backend}/host": value => 'hostgroup'; + } + + cinder::backend::dellsc_iscsi { $cinder_dellsc_backend : + volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef), + san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef), + san_login => hiera('cinder::backend::dellsc_iscsi::san_login', undef), + san_password => hiera('cinder::backend::dellsc_iscsi::san_password', undef), + dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef), + iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef), + iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef), + dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef), + dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef), + dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef), + } + } + if hiera('cinder_enable_netapp_backend', false) { $cinder_netapp_backend = hiera('cinder::backend::netapp::title') @@ -398,7 +496,7 @@ if hiera('step') >= 3 { } } - $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend, $cinder_nfs_backend]) + $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend]) class { '::cinder::backends' : enabled_backends => $cinder_enabled_backends, } @@ -450,8 +548,6 @@ if hiera('step') >= 3 { include ::ceilometer::api include ::ceilometer::agent::notification include ::ceilometer::agent::central - include ::ceilometer::alarm::notifier - include ::ceilometer::alarm::evaluator include ::ceilometer::expirer include ::ceilometer::collector include ::ceilometer::agent::auth @@ -468,8 +564,13 @@ if hiera('step') >= 3 { include ::heat::api_cloudwatch include ::heat::engine + # Sahara + include ::sahara + include ::sahara::service::api + include ::sahara::service::engine + # Horizon - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { $_profile_support = 'cisco' } else { $_profile_support = 'None' @@ -496,7 +597,19 @@ if hiera('step') >= 3 { } #END STEP 3 if hiera('step') >= 4 { - include ::keystone::cron::token_flush + $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true) + $nova_enable_db_purge = hiera('nova_enable_db_purge', true) + $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true) + + if $keystone_enable_db_purge { + include ::keystone::cron::token_flush + } + if $nova_enable_db_purge { + include ::nova::cron::archive_deleted_rows + } + if $cinder_enable_db_purge { + include ::cinder::cron::db_purge + } } #END STEP 4 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')]) diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 8459f1a3..f8d3fd76 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -41,6 +41,8 @@ if hiera('step') >= 1 { create_resources(sysctl::value, hiera('sysctl_settings'), {}) + include ::timezone + if count(hiera('ntp::servers')) > 0 { include ::ntp } @@ -78,11 +80,11 @@ if hiera('step') >= 1 { Class['tripleo::fencing'] -> Class['pacemaker::stonith'] } - #
FIXME(gfidente): sets 100secs as default start timeout op + # FIXME(gfidente): sets 200secs as default start timeout op # param; until we can use pcmk global defaults we'll still # need to add it to every resource which redefines op params Pacemaker::Resource::Service { - op_params => 'start timeout=100s stop timeout=100s', + op_params => 'start timeout=200s stop timeout=200s', } # Only configure RabbitMQ in this step, don't start it yet to @@ -352,7 +354,7 @@ if hiera('step') >= 2 { if downcase(hiera('ceilometer_backend')) == 'mongodb' { pacemaker::resource::service { $::mongodb::params::service_name : - op_params => 'start timeout=120s stop timeout=100s', + op_params => 'start timeout=370s stop timeout=200s', clone_params => true, require => Class['::mongodb::server'], } @@ -443,13 +445,17 @@ MYSQL_HOST=localhost\n", require => Exec['galera-ready'], } } + + class { '::sahara::db::mysql': + require => Exec['galera-ready'], + } } # pre-install swift here so we can build rings include ::swift # Ceph - $enable_ceph = hiera('ceph_storage_count', 0) > 0 + $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) if $enable_ceph { class { '::ceph::profile::params': @@ -490,11 +496,10 @@ if hiera('step') >= 3 { manage_service => false, enabled => false, } + include ::keystone::config #TODO: need a cleanup-keystone-tokens.sh solution here - keystone_config { - 'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2'; - } + file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]: ensure => 'directory', owner => 'keystone', @@ -592,8 +597,54 @@ if hiera('step') >= 3 { } include ::nova::network::neutron - # Neutron class definitions - include ::neutron + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + + # TODO(devvesa) provide non-controller ips for these services + $zookeeper_node_ips = hiera('neutron_api_node_ips') + $cassandra_node_ips = hiera('neutron_api_node_ips') + + # Run zookeeper in the controller if configured + if hiera('enable_zookeeper_on_controller') { + class {'::tripleo::cluster::zookeeper': + zookeeper_server_ips => $zookeeper_node_ips, + zookeeper_client_ip => $ipaddress, + zookeeper_hostnames => hiera('controller_node_names') + } + } + + # Run cassandra in the controller if configured + if hiera('enable_cassandra_on_controller') { + class {'::tripleo::cluster::cassandra': + cassandra_servers => $cassandra_node_ips, + cassandra_ip => $ipaddress + } + } + + class {'::tripleo::network::midonet::agent': + zookeeper_servers => $zookeeper_node_ips, + cassandra_seeds => $cassandra_node_ips + } + + class {'::tripleo::network::midonet::api': + zookeeper_servers => hiera('neutron_api_node_ips'), + vip => $public_vip, + keystone_ip => $public_vip, + keystone_admin_token => hiera('keystone::admin_token'), + bind_address => $ipaddress, + admin_password => hiera('admin_password') + } + + # Configure Neutron + class {'::neutron': + service_plugins => [] + } + + } + else { + # Neutron class definitions + include ::neutron + } + class { '::neutron::server' : sync_db => $sync_db, manage_service => false, @@ -603,6 +654,13 @@ if hiera('step') >= 3 { if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { include ::neutron::plugins::nuage } + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + class {'::neutron::plugins::midonet': + midonet_api_ip => $public_vip, + keystone_tenant => hiera('neutron::server::auth_tenant'), + keystone_password 
=> hiera('neutron::server::auth_password') + } + } if hiera('neutron::enable_dhcp_agent',true) { class { '::neutron::agents::dhcp' : manage_service => false, @@ -628,27 +686,20 @@ if hiera('step') >= 3 { enabled => false, } } - if hiera('neutron::core_plugin') == 'ml2' { - class { '::neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), - tenant_network_types => [hiera('neutron_tenant_network_type')], - mechanism_drivers => [hiera('neutron_mechanism_drivers')], - } - class { '::neutron::agents::ml2::ovs': - manage_service => false, - enabled => false, - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), - } + include ::neutron::plugins::ml2 + class { '::neutron::agents::ml2::ovs': + manage_service => false, + enabled => false, } - if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') { + + if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::cisco::ucsm } - if 'cisco_nexus' in hiera('neutron_mechanism_drivers') { + if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::cisco::nexus include ::neutron::plugins::ml2::cisco::type_nexus_vxlan } - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::cisco::nexus1000v class { '::neutron::agents::n1kv_vem': @@ -727,6 +778,48 @@ if hiera('step') >= 3 { } } + if hiera('cinder_enable_eqlx_backend', false) { + $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name') + + cinder_config { + "${cinder_eqlx_backend}/host": value => 'hostgroup'; + } + + cinder::backend::eqlx { $cinder_eqlx_backend : + volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef), + san_ip => hiera('cinder::backend::eqlx::san_ip', undef), + san_login => hiera('cinder::backend::eqlx::san_login', undef), + san_password => hiera('cinder::backend::eqlx::san_password', undef), + san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef), + eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef), + eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef), + eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef), + eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef), + eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_chap_password', undef), + } + } + + if hiera('cinder_enable_dellsc_backend', false) { + $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name') + + cinder_config { + "${cinder_dellsc_backend}/host": value => 'hostgroup'; + } + + cinder::backend::dellsc_iscsi { $cinder_dellsc_backend : + volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef), + san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef), + san_login => hiera('cinder::backend::dellsc_iscsi::san_login', undef), + san_password => hiera('cinder::backend::dellsc_iscsi::san_password', undef), + dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef), + iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef), + iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef), + dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef), + dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef), +
dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef), + } + } + if hiera('cinder_enable_netapp_backend', false) { $cinder_netapp_backend = hiera('cinder::backend::netapp::title') @@ -780,11 +873,23 @@ if hiera('step') >= 3 { } } - $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend, $cinder_nfs_backend]) + $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend]) class { '::cinder::backends' : enabled_backends => $cinder_enabled_backends, } + class { '::sahara': + sync_db => $sync_db, + } + class { '::sahara::service::api': + manage_service => false, + enabled => false, + } + class { '::sahara::service::engine': + manage_service => false, + enabled => false, + } + # swift proxy class { '::swift::proxy' : manage_service => $non_pcmk_start, @@ -855,14 +960,6 @@ if hiera('step') >= 3 { manage_service => false, enabled => false, } - class { '::ceilometer::alarm::notifier' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::alarm::evaluator' : - manage_service => false, - enabled => false, - } class { '::ceilometer::collector' : manage_service => false, enabled => false, @@ -904,7 +1001,7 @@ if hiera('step') >= 3 { # service_manage => false, # <-- not supported with horizon&apache mod_wsgi? } include ::apache::mod::status - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { $_profile_support = 'cisco' } else { $_profile_support = 'None' @@ -930,7 +1027,19 @@ if hiera('step') >= 3 { } #END STEP 3 if hiera('step') >= 4 { - include ::keystone::cron::token_flush + $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true) + $nova_enable_db_purge = hiera('nova_enable_db_purge', true) + $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true) + + if $keystone_enable_db_purge { + include ::keystone::cron::token_flush + } + if $nova_enable_db_purge { + include ::nova::cron::archive_deleted_rows + } + if $cinder_enable_db_purge { + include ::cinder::cron::db_purge + } if $pacemaker_master { @@ -1033,6 +1142,24 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::cinder::params::volume_service]], } + # Sahara + pacemaker::resource::service { $::sahara::params::api_service_name : + clone_params => 'interleave=true', + require => Pacemaker::Resource::Service[$::keystone::params::service_name], + } + pacemaker::resource::service { $::sahara::params::engine_service_name : + clone_params => 'interleave=true', + } + pacemaker::constraint::base { 'keystone-then-sahara-api-constraint': + constraint_type => 'order', + first_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::sahara::params::api_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name], + Pacemaker::Resource::Service[$::keystone::params::service_name]], + } + # Glance pacemaker::resource::service { $::glance::params::registry_service_name : clone_params => 'interleave=true', @@ -1068,15 +1195,32 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::glance::params::api_service_name]], } - # Neutron - # NOTE(gfidente): Neutron will try to populate the database with some data - # as soon as neutron-server is started; to avoid races we want to make this - # happen only 
on one node, before normal Pacemaker initialization - # https://bugzilla.redhat.com/show_bug.cgi?id=1233061 - exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } -> - pacemaker::resource::service { $::neutron::params::server_service: - clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + if hiera('step') == 4 { + # Neutron + # NOTE(gfidente): Neutron will try to populate the database with some data + # as soon as neutron-server is started; to avoid races we want to make this + # happen only on one node, before normal Pacemaker initialization + # https://bugzilla.redhat.com/show_bug.cgi?id=1233061 + # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec + # will try to start the service while it's already started by Pacemaker + # It would result to a deployment failure since systemd would return 1 to Puppet + # and the overcloud would fail to deploy (6 would be returned). + # This conditional prevents from a race condition during the deployment. + # https://bugzilla.redhat.com/show_bug.cgi?id=1290582 + exec { 'neutron-server-systemd-start-sleep' : + command => 'systemctl start neutron-server && /usr/bin/sleep 5', + path => '/usr/bin', + unless => '/sbin/pcs resource show neutron-server', + } -> + pacemaker::resource::service { $::neutron::params::server_service: + clone_params => 'interleave=true', + require => Pacemaker::Resource::Service[$::keystone::params::service_name] + } + } else { + pacemaker::resource::service { $::neutron::params::server_service: + clone_params => 'interleave=true', + require => Pacemaker::Resource::Service[$::keystone::params::service_name] + } } if hiera('neutron::enable_l3_agent', true) { pacemaker::resource::service { $::neutron::params::l3_agent_service: @@ -1093,6 +1237,11 @@ if hiera('step') >= 4 { clone_params => 'interleave=true', } } + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + pacemaker::resource::service {'tomcat': + clone_params => 'interleave=true', + } + } if hiera('neutron::enable_metadata_agent', true) { pacemaker::resource::service { $::neutron::params::metadata_agent_service: clone_params => 'interleave=true', @@ -1143,7 +1292,6 @@ if hiera('step') >= 4 { } } - #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3 pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': constraint_type => 'order', first_resource => "${::keystone::params::service_name}-clone", @@ -1219,28 +1367,65 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]] } } + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat + pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::server_service}-clone", + second_resource => "${::neutron::params::dhcp_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::server_service], + Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], + } + pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::dhcp_agent_service}-clone", + second_resource => "${::neutron::params::metadata_agent_service}-clone", + first_action => 'start', + 
second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], + Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], + } + pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::metadata_agent_service}-clone", + second_resource => 'tomcat-clone', + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service], + Pacemaker::Resource::Service['tomcat']], + } + pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation': + source => "${::neutron::params::metadata_agent_service}-clone", + target => "${::neutron::params::dhcp_agent_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], + Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], + } + } # Nova pacemaker::resource::service { $::nova::params::api_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s', + op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::conductor_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s', + op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::consoleauth_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s', + op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::nova::params::vncproxy_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s', + op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::scheduler_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s', + op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint': @@ -1339,12 +1524,6 @@ if hiera('step') >= 4 { pacemaker::resource::service { $::ceilometer::params::api_service_name : clone_params => 'interleave=true', } - pacemaker::resource::service { $::ceilometer::params::alarm_evaluator_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::ceilometer::params::alarm_notifier_service_name : - clone_params => 'interleave=true', - } pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name : clone_params => 'interleave=true', } @@ -1419,54 +1598,6 @@ if hiera('step') >= 4 { require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], Pacemaker::Resource::Ocf['delay']], } - pacemaker::constraint::base { 'ceilometer-delay-then-ceilometer-alarm-evaluator-constraint': - constraint_type => 'order', - first_resource => 'delay-clone', - second_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => 
[Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::colocation { 'ceilometer-alarm-evaluator-with-ceilometer-delay-colocation': - source => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - target => 'delay-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::base { 'ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - second_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } - pacemaker::constraint::colocation { 'ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation': - source => "${::ceilometer::params::alarm_notifier_service_name}-clone", - target => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } - pacemaker::constraint::base { 'ceilometer-alarm-notifier-then-ceilometer-notification-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone", - second_resource => "${::ceilometer::params::agent_notification_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } - pacemaker::constraint::colocation { 'ceilometer-notification-with-ceilometer-alarm-notifier-colocation': - source => "${::ceilometer::params::agent_notification_service_name}-clone", - target => "${::ceilometer::params::alarm_notifier_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } if downcase(hiera('ceilometer_backend')) == 'mongodb' { pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint': constraint_type => 'order', @@ -1565,7 +1696,7 @@ if hiera('step') >= 4 { } #VSM - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { pacemaker::resource::ocf { 'vsm-p' : ocf_agent_name => 'heartbeat:VirtualDomain', resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml', diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp index 1eabddf1..63ac396e 100644 --- a/puppet/manifests/overcloud_object.pp +++ b/puppet/manifests/overcloud_object.pp @@ -22,6 +22,8 @@ if count(hiera('ntp::servers')) > 0 { include ::ntp } +include ::timezone + include ::swift class { '::swift::storage::all': mount_check => str2bool(hiera('swift_mount_check')), diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp index 2bdd8a9c..5a69725a 100644 --- 
a/puppet/manifests/overcloud_volume.pp +++ b/puppet/manifests/overcloud_volume.pp @@ -22,6 +22,8 @@ if count(hiera('ntp::servers')) > 0 { include ::ntp } +include ::timezone + include ::cinder include ::cinder::config include ::cinder::glance diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp index 4296208b..2d880d33 100644 --- a/puppet/manifests/ringbuilder.pp +++ b/puppet/manifests/ringbuilder.pp @@ -70,7 +70,7 @@ class tripleo::ringbuilder ( # create local rings swift::ringbuilder::create{ ['object', 'account', 'container']: part_power => $part_power, - replicas => $replicas, + replicas => min(count($device_array), $replicas), min_part_hours => $min_part_hours, } -> diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml index 22fe4f20..142e47cc 100644 --- a/puppet/swift-storage.yaml +++ b/puppet/swift-storage.yaml @@ -7,7 +7,6 @@ parameters: constraints: - custom_constraint: nova.flavor HashSuffix: - default: unset description: A random string to be used as a salt when hashing to determine mappings in the ring. hidden: true @@ -17,7 +16,7 @@ parameters: type: string KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string MountCheck: default: 'false' @@ -40,7 +39,6 @@ parameters: description: The user name for SNMPd with readonly rights running on all Overcloud nodes type: string SnmpdReadonlyUserPassword: - default: unset description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true @@ -63,6 +61,10 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. type: json + TimeZone: + default: 'UTC' + description: The timezone to be set on Swift nodes. + type: string Hostname: type: string default: '' # Defaults to Heat created hostname @@ -82,6 +84,13 @@ parameters: description: > Heat action when to apply network configuration changes default: ['CREATE'] + SoftwareConfigTransport: + default: POLL_SERVER_CFN + description: | + How the server should receive the metadata required for software configuration. + type: string + constraints: + - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE] CloudDomain: default: '' type: string @@ -94,7 +103,10 @@ parameters: description: > Extra properties or metadata passed to Nova for the created nodes in the overcloud. It's accessible via the Nova metadata API.
type: json - + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -109,7 +121,9 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + software_config_transport: {get_param: SoftwareConfigTransport} metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -131,6 +145,11 @@ resources: NodeUserData: type: OS::TripleO::NodeUserData + ExternalPort: + type: OS::TripleO::SwiftStorage::Ports::ExternalPort + properties: + ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + InternalApiPort: type: OS::TripleO::SwiftStorage::Ports::InternalApiPort properties: @@ -146,21 +165,37 @@ resources: properties: ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + TenantPort: + type: OS::TripleO::SwiftStorage::Ports::TenantPort + properties: + ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + + ManagementPort: + type: OS::TripleO::SwiftStorage::Ports::ManagementPort + properties: + ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + NetworkConfig: type: OS::TripleO::ObjectStorage::Net::SoftwareConfig properties: ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + ExternalIp: {get_attr: [ExternalPort, ip_address]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + TenantIp: {get_attr: [TenantPort, ip_address]} + ManagementIp: {get_attr: [ManagementPort, ip_address]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -204,6 +239,7 @@ resources: swift_mount_check: {get_input: swift_mount_check } tripleo::ringbuilder::min_part_hours: { get_input: swift_min_part_hours } ntp::servers: {get_input: ntp_servers} + timezone::timezone: {get_input: timezone} # NOTE(dprince): build_ring support is currently not wired in. 
# See: https://review.openstack.org/#/c/109225/ tripleo::ringbuilder::build_ring: True @@ -230,6 +266,7 @@ resources: swift_part_power: {get_param: PartPower} swift_replicas: { get_param: Replicas} ntp_servers: {get_param: NtpServer} + timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} @@ -281,6 +318,9 @@ outputs: template: 'r1z1-IP:%PORT%/d1' params: IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} + external_ip_address: + description: IP address of the server in the external network + value: {get_attr: [ExternalPort, ip_address]} internal_api_ip_address: description: IP address of the server in the internal_api network value: {get_attr: [InternalApiPort, ip_address]} @@ -290,6 +330,12 @@ outputs: storage_mgmt_ip_address: description: IP address of the server in the storage_mgmt network value: {get_attr: [StorageMgmtPort, ip_address]} + tenant_ip_address: + description: IP address of the server in the tenant network + value: {get_attr: [TenantPort, ip_address]} + management_ip_address: + description: IP address of the server in the management network + value: {get_attr: [ManagementPort, ip_address]} config_identifier: description: identifier which changes if the node configuration may need re-applying value: diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml index 1dec489c..c49a1047 100644 --- a/puppet/vip-config.yaml +++ b/puppet/vip-config.yaml @@ -19,6 +19,7 @@ resources: cinder_api_vip: {get_input: cinder_api_vip} glance_api_vip: {get_input: glance_api_vip} glance_registry_vip: {get_input: glance_registry_vip} + sahara_api_vip: {get_input: sahara_api_vip} swift_proxy_vip: {get_input: swift_proxy_vip} nova_api_vip: {get_input: nova_api_vip} nova_metadata_vip: {get_input: nova_metadata_vip} |
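The vip-config change closes the loop for Sahara: the new sahara_api_vip input joins the other virtual IPs consumed by the load balancer hieradata. More generally, the knobs introduced across these templates (TimeZone, SoftwareConfigTransport, SchedulerHints) are ordinary Heat parameters, so they are driven from an environment file at deploy time. A minimal sketch follows; the exact top-level parameter names depend on how the parent overcloud stack forwards them to each role, so the names and values below are illustrative assumptions only, not part of this change:

    # sample environment file (illustrative parameter names and values)
    parameter_defaults:
      TimeZone: 'Europe/Prague'
      SoftwareConfigTransport: POLL_SERVER_HEAT
      SchedulerHints:
        'capabilities:node': 'swift-%index%'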