Diffstat (limited to 'puppet')
28 files changed, 2340 insertions, 1296 deletions
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 2bc519bb..9dd43680 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -51,6 +51,17 @@ parameters:
  keystone_admin_api_node_ips:
    type: comma_delimited_list

+  DeployIdentifier:
+    type: string
+    description: >
+      Setting this to a unique value will re-run any deployment tasks which
+      perform configuration on a Heat stack-update.
+  UpdateIdentifier:
+    type: string
+    description: >
+      Setting to a previously unused value during stack-update will trigger
+      package update on all nodes
+
resources:

  allNodesConfigImpl:
@@ -240,8 +251,17 @@ resources:
                nova::rabbit_hosts: *rabbit_nodes_array
                keystone::rabbit_hosts: *rabbit_nodes_array

+                deploy_identifier: {get_param: DeployIdentifier}
+                update_identifier: {get_param: UpdateIdentifier}
+
outputs:
  config_id:
    description: The ID of the allNodesConfigImpl resource.
    value: {get_resource: allNodesConfigImpl}
+  hosts_entries:
+    description: |
+      The content that should be appended to your /etc/hosts if you want to get
+      hostname-based access to the deployed nodes (useful for testing without
+      setting up a DNS).
+    value: {get_attr: [allNodesConfigImpl, config, hosts]}
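A note on the all-nodes-config.yaml change above: per the parameter descriptions, the two identifiers only take effect when they change, so a stack-update must pass a previously unused value to re-trigger the configuration or package-update tasks. A minimal environment-file sketch (the values are arbitrary placeholders; any unique string works), while the new hosts_entries output can simply be appended to a client machine's /etc/hosts for hostname-based access:

    parameter_defaults:
      DeployIdentifier: 1446025199          # re-run configuration deployments
      UpdateIdentifier: pkg-update-2015-10  # trigger package updates on all nodes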
diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml
index 99265493..96198c3f 100644
--- a/puppet/ceph-cluster-config.yaml
+++ b/puppet/ceph-cluster-config.yaml
@@ -13,7 +13,7 @@ parameters:
  ceph_client_key:
    default: ''
    type: string
-    description: Ceph key used to create the 'openstack' user keyring.
+    description: Ceph key used to create the client user keyring.
  ceph_fsid:
    default: ''
    type: string
@@ -27,6 +27,18 @@ parameters:
    type: comma_delimited_list
  ceph_mon_ips:
    type: comma_delimited_list
+  NovaRbdPoolName:
+    default: vms
+    type: string
+  CinderRbdPoolName:
+    default: volumes
+    type: string
+  GlanceRbdPoolName:
+    default: images
+    type: string
+  CephClientUserName:
+    default: openstack
+    type: string

resources:
  CephClusterConfigImpl:
@@ -65,15 +77,34 @@ resources:
                keyring_path: '/var/lib/ceph/bootstrap-osd/ceph.keyring',
                cap_mon: 'allow profile bootstrap-osd'
              },
-              client.openstack: {
+              client.CLIENT_USER: {
                secret: 'ADMIN_KEY',
                mode: '0644',
                cap_mon: 'allow r',
-                cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images'
+                cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL'
              }
            }"
          params:
+            CLIENT_USER: {get_param: CephClientUserName}
            ADMIN_KEY: {get_param: ceph_admin_key}
+            NOVA_POOL: {get_param: NovaRbdPoolName}
+            CINDER_POOL: {get_param: CinderRbdPoolName}
+            GLANCE_POOL: {get_param: GlanceRbdPoolName}
+        nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
+        cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
+        glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+        nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
+        glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
+        nova::compute::rbd::rbd_keyring:
+          list_join:
+          - '.'
+          - - 'client'
+            - {get_param: CephClientUserName}
+        ceph_client_user_name: {get_param: CephClientUserName}
+        ceph_pools:
+          - {get_param: CinderRbdPoolName}
+          - {get_param: NovaRbdPoolName}
+          - {get_param: GlanceRbdPoolName}

outputs:
  config_id:
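The ceph-cluster-config.yaml change is backwards-compatible: with the new defaults (CephClientUserName: openstack and the volumes/vms/images pools) the str_replace substitution renders the same keyring entry the template used to hard-code, e.g.:

    client.openstack: {
      secret: '<ceph_client_key>',
      mode: '0644',
      cap_mon: 'allow r',
      cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images'
    }

while non-default pool and user names become a matter of overriding four parameters instead of editing the template.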
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index 75294599..ede1263b 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -16,14 +16,15 @@ parameters:
    description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
    type: string
  KeyName:
-    description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+    description: Name of an existing Nova key pair to enable SSH access to the instances
    type: string
    default: default
    constraints:
      - custom_constraint: nova.keypair
  NtpServer:
-    type: string
    default: ''
+    description: Comma-separated list of ntp servers
+    type: comma_delimited_list
  EnablePackageInstall:
    default: 'false'
    description: Set to true to enable package installation via Puppet
@@ -53,7 +54,34 @@ parameters:
    description: |
      Role specific additional hiera configuration to inject into the cluster.
    type: json
-
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat action when to apply network configuration changes
+    default: ['CREATE']
+  SoftwareConfigTransport:
+    default: POLL_SERVER_CFN
+    description: |
+      How the server should receive the metadata required for software configuration.
+    type: string
+    constraints:
+    - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
+  CloudDomain:
+    default: ''
+    type: string
+    description: >
+      The DNS domain used for the hosts. This should match the dhcp_domain
+      configured in the Undercloud neutron. Defaults to localdomain.
+  ServerMetadata:
+    default: {}
+    description: >
+      Extra properties or metadata passed to Nova for the created nodes in
+      the overcloud. It's accessible via the Nova metadata API.
+    type: json
+  SchedulerHints:
+    type: json
+    description: Optional scheduler hints to pass to nova
+    default: {}

resources:
  CephStorage:
@@ -68,6 +96,9 @@ resources:
      user_data_format: SOFTWARE_CONFIG
      user_data: {get_resource: UserData}
      name: {get_param: Hostname}
+      software_config_transport: {get_param: SoftwareConfigTransport}
+      metadata: {get_param: ServerMetadata}
+      scheduler_hints: {get_param: SchedulerHints}

  # Combine the NodeAdminUserData and NodeUserData mime archives
  UserData:
@@ -89,6 +120,16 @@ resources:
  NodeUserData:
    type: OS::TripleO::NodeUserData

+  ExternalPort:
+    type: OS::TripleO::CephStorage::Ports::ExternalPort
+    properties:
+      ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
+  InternalApiPort:
+    type: OS::TripleO::CephStorage::Ports::InternalApiPort
+    properties:
+      ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
  StoragePort:
    type: OS::TripleO::CephStorage::Ports::StoragePort
    properties:
@@ -99,32 +140,55 @@ resources:
    properties:
      ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}

+  TenantPort:
+    type: OS::TripleO::CephStorage::Ports::TenantPort
+    properties:
+      ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
+  ManagementPort:
+    type: OS::TripleO::CephStorage::Ports::ManagementPort
+    properties:
+      ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
  NetworkConfig:
    type: OS::TripleO::CephStorage::Net::SoftwareConfig
    properties:
      ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
+      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}

  NetIpMap:
    type: OS::TripleO::Network::Ports::NetIpMap
    properties:
      ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
+      ExternalIp: {get_attr: [ExternalPort, ip_address]}
+      InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
      StorageIp: {get_attr: [StoragePort, ip_address]}
      StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+      TenantIp: {get_attr: [TenantPort, ip_address]}
+      ManagementIp: {get_attr: [ManagementPort, ip_address]}

  NetIpSubnetMap:
    type: OS::TripleO::Network::Ports::NetIpSubnetMap
    properties:
      ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
+      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}

  NetworkDeployment:
    type: OS::TripleO::SoftwareDeployment
    properties:
      config: {get_resource: NetworkConfig}
      server: {get_resource: CephStorage}
+      actions: {get_param: NetworkDeploymentActions}

  CephStorageDeployment:
    type: OS::Heat::StructuredDeployment
@@ -133,11 +197,7 @@ resources:
      config: {get_resource: CephStorageConfig}
      server: {get_resource: CephStorage}
      input_values:
-        ntp_servers:
-          str_replace:
-            template: '["server"]'
-            params:
-              server: {get_param: NtpServer}
+        ntp_servers: {get_param: NtpServer}
        enable_package_install: {get_param: EnablePackageInstall}
        enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
        ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
@@ -174,6 +234,13 @@ resources:
            ceph::profile::params::cluster_network: {get_input: ceph_cluster_network}
            ceph::profile::params::public_network: {get_input: ceph_public_network}

+  # Resource for site-specific injection of root certificate
+  NodeTLSCAData:
+    depends_on: CephStorageDeployment
+    type: OS::TripleO::NodeTLSCAData
+    properties:
+      server: {get_resource: CephStorage}
+
  # Hook for site-specific additional pre-deployment config, e.g extra hieradata
  CephStorageExtraConfigPre:
    depends_on: CephStorageDeployment
@@ -184,7 +251,7 @@ resources:
  # Hook for site-specific additional pre-deployment config,
  # applying to all nodes, e.g node registration/unregistration
  NodeExtraConfig:
-    depends_on: CephStorageExtraConfigPre
+    depends_on: [CephStorageExtraConfigPre, NodeTLSCAData]
    type: OS::TripleO::NodeExtraConfig
    properties:
      server: {get_resource: CephStorage}
@@ -205,25 +272,39 @@ outputs:
  hosts_entry:
    value:
      str_replace:
-        template: "IP HOST.localdomain HOST"
+        template: "IP HOST.DOMAIN HOST"
        params:
          IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
          HOST: {get_attr: [CephStorage, name]}
  nova_server_resource:
    description: Heat resource handle for the ceph storage server
    value: {get_resource: CephStorage}
+  external_ip_address:
+    description: IP address of the server in the external network
+    value: {get_attr: [ExternalPort, ip_address]}
+  internal_api_ip_address:
+    description: IP address of the server in the internal_api network
+    value: {get_attr: [InternalApiPort, ip_address]}
  storage_ip_address:
    description: IP address of the server in the storage network
    value: {get_attr: [StoragePort, ip_address]}
  storage_mgmt_ip_address:
    description: IP address of the server in the storage_mgmt network
    value: {get_attr: [StorageMgmtPort, ip_address]}
+  tenant_ip_address:
+    description: IP address of the server in the tenant network
+    value: {get_attr: [TenantPort, ip_address]}
+  management_ip_address:
+    description: IP address of the server in the management network
+    value: {get_attr: [ManagementPort, ip_address]}
  config_identifier:
    description: identifier which changes if the node configuration may need re-applying
    value:
      list_join:
      - ','
      - - {get_attr: [CephStorageDeployment, deploy_stdout]}
+        - {get_attr: [NodeTLSCAData, deploy_stdout]}
        - {get_attr: [CephStorageExtraConfigPre, deploy_stdout]}
        - {get_param: UpdateIdentifier}
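Since NtpServer is now a comma_delimited_list across these role templates, an environment file can supply it either as a comma-separated string or as a YAML list; the two forms below are equivalent (the hostnames are placeholders):

    parameter_defaults:
      NtpServer: 0.pool.ntp.org,1.pool.ntp.org
      # or, equivalently:
      NtpServer:
        - 0.pool.ntp.org
        - 1.pool.ntp.org

This is also why the old str_replace wrapper that hand-built a '["server"]' string is dropped: the list value can now be passed straight through as ntp_servers.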
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index 6a869219..9fdd0123 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -17,7 +17,6 @@ parameters:
    description: The size of the loopback file used by the cinder LVM driver.
    type: number
  CinderPassword:
-    default: unset
    description: The password for the cinder service and db account, used by cinder-api.
    type: string
    hidden: true
@@ -44,17 +43,9 @@ parameters:
    type: string
    constraints:
      - custom_constraint: nova.flavor
-  GlancePort:
-    default: "9292"
-    description: Glance port.
-    type: string
-  GlanceProtocol:
-    default: http
-    description: Protocol to use when connecting to glance, set to https for SSL.
-    type: string
  KeyName:
    default: default
-    description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+    description: Name of an existing Nova key pair to enable SSH access to the instances
    type: string
  RabbitPassword:
    default: 'guest'
@@ -78,13 +69,13 @@ parameters:
    description: The user name for SNMPd with readonly rights running on all Overcloud nodes
    type: string
  SnmpdReadonlyUserPassword:
-    default: unset
    description: The user password for SNMPd with readonly rights running on all Overcloud nodes
    type: string
    hidden: true
  NtpServer:
-    type: string
    default: ''
+    description: Comma-separated list of ntp servers
+    type: comma_delimited_list
  EnablePackageInstall:
    default: 'false'
    description: Set to true to enable package installation via Puppet
@@ -103,12 +94,46 @@ parameters:
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry.
    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
  GlanceApiVirtualIP:
    type: string
    default: ''
  MysqlVirtualIP:
    type: string
    default: ''
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat action when to apply network configuration changes
+    default: ['CREATE']
+  SoftwareConfigTransport:
+    default: POLL_SERVER_CFN
+    description: |
+      How the server should receive the metadata required for software configuration.
+    type: string
+    constraints:
+    - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
+  CloudDomain:
+    default: ''
+    type: string
+    description: >
+      The DNS domain used for the hosts. This should match the dhcp_domain
+      configured in the Undercloud neutron. Defaults to localdomain.
+  ServerMetadata:
+    default: {}
+    description: >
+      Extra properties or metadata passed to Nova for the created nodes in
+      the overcloud. It's accessible via the Nova metadata API.
+    type: json
+  SchedulerHints:
+    type: json
+    description: Optional scheduler hints to pass to nova
+    default: {}
+
resources:

  BlockStorage:
@@ -123,6 +148,9 @@ resources:
      user_data_format: SOFTWARE_CONFIG
      user_data: {get_resource: UserData}
      name: {get_param: Hostname}
+      software_config_transport: {get_param: SoftwareConfigTransport}
+      metadata: {get_param: ServerMetadata}
+      scheduler_hints: {get_param: SchedulerHints}

  # Combine the NodeAdminUserData and NodeUserData mime archives
  UserData:
@@ -144,6 +172,11 @@ resources:
  NodeUserData:
    type: OS::TripleO::NodeUserData

+  ExternalPort:
+    type: OS::TripleO::BlockStorage::Ports::ExternalPort
+    properties:
+      ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+
  InternalApiPort:
    type: OS::TripleO::BlockStorage::Ports::InternalApiPort
    properties:
@@ -159,27 +192,44 @@ resources:
    properties:
      ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}

+  TenantPort:
+    type: OS::TripleO::BlockStorage::Ports::TenantPort
+    properties:
+      ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+
+  ManagementPort:
+    type: OS::TripleO::BlockStorage::Ports::ManagementPort
+    properties:
+      ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+
  NetworkConfig:
    type: OS::TripleO::BlockStorage::Net::SoftwareConfig
    properties:
      ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}

  NetIpMap:
    type: OS::TripleO::Network::Ports::NetIpMap
    properties:
      ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+      ExternalIp: {get_attr: [ExternalPort, ip_address]}
      InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
      StorageIp: {get_attr: [StoragePort, ip_address]}
      StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+      TenantIp: {get_attr: [TenantPort, ip_address]}
+      ManagementIp: {get_attr: [ManagementPort, ip_address]}

  NetworkDeployment:
    type: OS::TripleO::SoftwareDeployment
    properties:
      config: {get_resource: NetworkConfig}
      server: {get_resource: BlockStorage}
+      actions: {get_param: NetworkDeploymentActions}

  BlockStorageDeployment:
    type: OS::Heat::StructuredDeployment
@@ -200,23 +250,12 @@ resources:
        cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
        cinder_iscsi_helper: {get_param: CinderISCSIHelper}
        cinder_iscsi_ip_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]}
-        glance_api_servers:
-          list_join:
-            - ''
-            - - {get_param: GlanceProtocol}
-              - '://'
-              - {get_param: GlanceApiVirtualIP}
-              - ':'
-              - {get_param: GlancePort}
+        glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
        rabbit_username: {get_param: RabbitUserName}
        rabbit_password: {get_param: RabbitPassword}
        rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
        rabbit_client_port: {get_param: RabbitClientPort}
-        ntp_servers:
-          str_replace:
-            template: '["server"]'
-            params:
-              server: {get_param: NtpServer}
+        ntp_servers: {get_param: NtpServer}
        enable_package_install: {get_param: EnablePackageInstall}
        enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
@@ -264,10 +303,17 @@ resources:
          snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
          snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}

+  # Resource for site-specific injection of root certificate
+  NodeTLSCAData:
+    depends_on: BlockStorageDeployment
+    type: OS::TripleO::NodeTLSCAData
+    properties:
+      server: {get_resource: BlockStorage}
+
  # Hook for site-specific additional pre-deployment config,
  # applying to all nodes, e.g node registration/unregistration
  NodeExtraConfig:
-    depends_on: BlockStorageDeployment
+    depends_on: NodeTLSCAData
    type: OS::TripleO::NodeExtraConfig
    properties:
      server: {get_resource: BlockStorage}
@@ -288,14 +334,18 @@ outputs:
  hosts_entry:
    value:
      str_replace:
-        template: "IP HOST.localdomain HOST"
+        template: "IP HOST.DOMAIN HOST"
        params:
          IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
          HOST: {get_attr: [BlockStorage, name]}
  nova_server_resource:
    description: Heat resource handle for the block storage server
    value: {get_resource: BlockStorage}
+  external_ip_address:
+    description: IP address of the server in the external network
+    value: {get_attr: [ExternalPort, ip_address]}
  internal_api_ip_address:
    description: IP address of the server in the internal_api network
    value: {get_attr: [InternalApiPort, ip_address]}
@@ -305,10 +355,17 @@ outputs:
  storage_mgmt_ip_address:
    description: IP address of the server in the storage_mgmt network
    value: {get_attr: [StorageMgmtPort, ip_address]}
+  tenant_ip_address:
+    description: IP address of the server in the tenant network
+    value: {get_attr: [TenantPort, ip_address]}
+  management_ip_address:
+    description: IP address of the server in the management network
+    value: {get_attr: [ManagementPort, ip_address]}
  config_identifier:
    description: identifier which changes if the node configuration may need re-applying
    value:
      list_join:
      - ''
      - - {get_attr: [BlockStorageDeployment, deploy_stdout]}
+        - {get_attr: [NodeTLSCAData, deploy_stdout]}
        - {get_param: UpdateIdentifier}
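glance_api_servers in cinder-storage.yaml (and the analogous lookups in the other role templates) is now resolved from the EndpointMap instead of being concatenated from GlanceProtocol/GlanceApiVirtualIP/GlancePort, which is why those parameters disappear. The map itself is generated elsewhere in the tree; as a sketch of the shape these lookups assume (the uri, uri_no_suffix and port keys are the ones used in this diff, host/protocol are assumptions, and the address is a placeholder):

    EndpointMap:
      GlanceInternal:
        protocol: http
        host: 192.0.2.10
        port: 9292
        uri: http://192.0.2.10:9292
        uri_no_suffix: http://192.0.2.10:9292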
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 2b635357..7269d736 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -1,11 +1,10 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15

description: >
  OpenStack hypervisor node configured via Puppet.

parameters:
  AdminPassword:
-    default: unset
    description: The password for the keystone admin account, used for monitoring, querying neutron etc.
    type: string
    hidden: true
@@ -16,12 +15,10 @@ parameters:
    constraints:
    - allowed_values: ['', Present]
  CeilometerMeteringSecret:
-    default: unset
    description: Secret shared by the ceilometer services.
    type: string
    hidden: true
  CeilometerPassword:
-    default: unset
    description: The password for the ceilometer service account.
    type: string
    hidden: true
@@ -51,14 +48,6 @@ parameters:
  GlanceHost:
    type: string
    default: ''  # Has to be here because of the ignored empty value bug
-  GlancePort:
-    default: "9292"
-    description: Glance port.
-    type: string
-  GlanceProtocol:
-    default: http
-    description: Protocol to use when connecting to glance, set to https for SSL.
-    type: string
  Image:
    type: string
    default: overcloud-compute
@@ -69,7 +58,7 @@ parameters:
    description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
    type: string
  KeyName:
-    description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+    description: Name of an existing Nova key pair to enable SSH access to the instances
    type: string
    default: default
    constraints:
@@ -88,13 +77,18 @@ parameters:
      to create provider networks (and we use this for the default floating
      network) - if changing this either use different post-install network
      scripts or be sure to keep 'datacentre' as a mapping network name.
-    type: string
+    type: comma_delimited_list
    default: "datacentre:br-ex"
  NeutronEnableTunnelling:
    type: string
    default: "True"
-  NeutronFlatNetworks:
+  NeutronEnableL2Pop:
    type: string
+    description: >
+      Enable/disable the L2 population feature in the Neutron agents.
+    default: "False"
+  NeutronFlatNetworks:
+    type: comma_delimited_list
    default: 'datacentre'
    description: >
      If set, flat networks to configure in neutron plugins.
@@ -102,18 +96,17 @@ parameters:
    type: string
    default: ''  # Has to be here because of the ignored empty value bug
  NeutronNetworkType:
-    type: string
-    description: The tenant network type for Neutron, either gre or vxlan.
+    type: comma_delimited_list
+    description: The tenant network type for Neutron.
    default: 'vxlan'
  NeutronNetworkVLANRanges:
-    default: 'datacentre'
+    default: 'datacentre:1:1000'
    description: >
      The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
      Neutron documentation for permitted values. Defaults to permitting any
      VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
    type: comma_delimited_list
  NeutronPassword:
-    default: unset
    description: The password for the neutron service account, used by neutron agents.
    type: string
    hidden: true
@@ -126,10 +119,9 @@ parameters:
    description: A port to add to the NeutronPhysicalBridge.
    type: string
  NeutronTunnelTypes:
-    type: string
+    type: comma_delimited_list
    description: |
-      The tunnel types for the Neutron tenant network. To specify multiple
-      values, use a comma separated string, like so: 'gre,vxlan'
+      The tunnel types for the Neutron tenant network.
    default: 'vxlan'
  NeutronTunnelIdRanges:
    description: |
@@ -150,7 +142,6 @@ parameters:
    default: 'False'
    type: string
  NeutronMetadataProxySharedSecret:
-    default: 'unset'
    description: Shared secret to prevent spoofing
    type: string
    hidden: true
@@ -174,9 +165,8 @@ parameters:
  NeutronMechanismDrivers:
    default: 'openvswitch'
    description: |
-      The mechanism drivers for the Neutron tenant network. To specify multiple
-      values, use a comma separated string, like so: 'openvswitch,l2_population'
-    type: string
+      The mechanism drivers for the Neutron tenant network.
+    type: comma_delimited_list
  # Not relevant for Computes, should be removed
  NeutronAllowL3AgentFailover:
    default: 'True'
@@ -205,22 +195,34 @@ parameters:
    type: json
  NovaComputeLibvirtType:
    type: string
+    default: kvm
+  NovaComputeLibvirtVifDriver:
    default: ''
+    description: Libvirt VIF driver configuration for the network
+    type: string
  NovaEnableRbdBackend:
    default: false
    description: Whether to enable or not the Rbd backend for Nova
    type: boolean
  NovaPassword:
-    default: unset
    description: The password for the nova service account, used by nova-api.
    type: string
    hidden: true
  NovaPublicIP:
    type: string
    default: ''  # Has to be here because of the ignored empty value bug
-  NtpServer:
+  NovaOVSBridge:
+    default: 'br-int'
+    description: Name of integration bridge used by Open vSwitch
+    type: string
+  NovaSecurityGroupAPI:
+    default: 'neutron'
+    description: The full class name of the security API class
    type: string
+  NtpServer:
    default: ''
+    description: Comma-separated list of ntp servers
+    type: comma_delimited_list
  RabbitHost:
    type: string
    default: ''  # Has to be here because of the ignored empty value bug
@@ -248,7 +250,6 @@ parameters:
    description: The user name for SNMPd with readonly rights running on all Overcloud nodes
    type: string
  SnmpdReadonlyUserPassword:
-    default: unset
    description: The user password for SNMPd with readonly rights running on all Overcloud nodes
    type: string
    hidden: true
@@ -261,6 +262,11 @@ parameters:
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry.
    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
  UpdateIdentifier:
    default: ''
    type: string
@@ -270,6 +276,34 @@ parameters:
  Hostname:
    type: string
    default: ''  # Defaults to Heat created hostname
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat action when to apply network configuration changes
+    default: ['CREATE']
+  SoftwareConfigTransport:
+    default: POLL_SERVER_CFN
+    description: |
+      How the server should receive the metadata required for software configuration.
+    type: string
+    constraints:
+    - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
+  CloudDomain:
+    default: ''
+    type: string
+    description: >
+      The DNS domain used for the hosts. This should match the dhcp_domain
+      configured in the Undercloud neutron. Defaults to localdomain.
+  ServerMetadata:
+    default: {}
+    description: >
+      Extra properties or metadata passed to Nova for the created nodes in
+      the overcloud. It's accessible via the Nova metadata API.
+    type: json
+  SchedulerHints:
+    type: json
+    description: Optional scheduler hints to pass to nova
+    default: {}

resources:

@@ -287,6 +321,9 @@ resources:
      user_data_format: SOFTWARE_CONFIG
      user_data: {get_resource: UserData}
      name: {get_param: Hostname}
+      software_config_transport: {get_param: SoftwareConfigTransport}
+      metadata: {get_param: ServerMetadata}
+      scheduler_hints: {get_param: SchedulerHints}

  # Combine the NodeAdminUserData and NodeUserData mime archives
  UserData:
@@ -308,6 +345,11 @@ resources:
  NodeUserData:
    type: OS::TripleO::NodeUserData

+  ExternalPort:
+    type: OS::TripleO::Compute::Ports::ExternalPort
+    properties:
+      ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+
  InternalApiPort:
    type: OS::TripleO::Compute::Ports::InternalApiPort
    properties:
@@ -318,32 +360,49 @@ resources:
    properties:
      ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}

+  StorageMgmtPort:
+    type: OS::TripleO::Compute::Ports::StorageMgmtPort
+    properties:
+      ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+
  TenantPort:
    type: OS::TripleO::Compute::Ports::TenantPort
    properties:
      ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}

+  ManagementPort:
+    type: OS::TripleO::Compute::Ports::ManagementPort
+    properties:
+      ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+
  NetIpMap:
    type: OS::TripleO::Network::Ports::NetIpMap
    properties:
      ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+      ExternalIp: {get_attr: [ExternalPort, ip_address]}
      InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
      StorageIp: {get_attr: [StoragePort, ip_address]}
+      StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
      TenantIp: {get_attr: [TenantPort, ip_address]}
+      ManagementIp: {get_attr: [ManagementPort, ip_address]}

  NetworkConfig:
    type: OS::TripleO::Compute::Net::SoftwareConfig
    properties:
      ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
+      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}

  NetworkDeployment:
    type: OS::TripleO::SoftwareDeployment
    properties:
      config: {get_resource: NetworkConfig}
      server: {get_resource: NovaCompute}
+      actions: {get_param: NetworkDeploymentActions}
      input_values:
        bridge_name: {get_param: NeutronPhysicalBridge}
        interface_name: {get_param: NeutronPublicInterface}
@@ -366,6 +425,8 @@ resources:
            - '"%{::osfamily}"'
            - common
            - cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre
+            - nova_nuage_data # Optionally provided by ComputeExtraConfigPre
+            - midonet_data # Optionally provided by AllNodesExtraConfig
          datafiles:
            compute_extraconfig:
              mapped_data: {get_param: NovaComputeExtraConfig}
@@ -386,12 +447,15 @@ resources:
                nova::rabbit_port: {get_input: rabbit_client_port}
                nova_compute_driver: {get_input: nova_compute_driver}
                nova::compute::libvirt::libvirt_virt_type: {get_input: nova_compute_libvirt_type}
+                nova::compute::neutron::libvirt_vif_driver: {get_input: nova_compute_libvirt_vif_driver}
                nova_api_host: {get_input: nova_api_host}
                nova::compute::vncproxy_host: {get_input: nova_public_ip}
                nova::compute::rbd::ephemeral_storage: {get_input: nova_enable_rbd_backend}
                rbd_persistent_storage: {get_input: cinder_enable_rbd_backend}
                nova_password: {get_input: nova_password}
                nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address}
+                nova::network::neutron::neutron_ovs_bridge: {get_input: nova_ovs_bridge}
+                nova::network::neutron::security_group_api: {get_input: nova_security_group_api}
                ceilometer::debug: {get_input: debug}
                ceilometer::rabbit_userid: {get_input: rabbit_username}
                ceilometer::rabbit_password: {get_input: rabbit_password}
@@ -406,24 +470,25 @@ resources:
                nova::glance_api_servers: {get_input: glance_api_servers}
                neutron::debug: {get_input: debug}
                neutron::rabbit_password: {get_input: rabbit_password}
-                neutron::rabbit_user: {get_input: rabbit_user}
+                neutron::rabbit_user: {get_input: rabbit_username}
                neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
                neutron::rabbit_port: {get_input: rabbit_client_port}
-                neutron_flat_networks: {get_input: neutron_flat_networks}
+                neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks}
                neutron_host: {get_input: neutron_host}
                neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
-                neutron_tenant_network_type: {get_input: neutron_tenant_network_type}
-                neutron_tunnel_types: {get_input: neutron_tunnel_types}
+                neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types}
+                neutron::agents::ml2::ovs:tunnel_types: {get_input: neutron_tunnel_types}
                neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
                neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
                neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
-                neutron_bridge_mappings: {get_input: neutron_bridge_mappings}
+                neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings}
                neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling}
+                neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
                neutron_physical_bridge: {get_input: neutron_physical_bridge}
                neutron_public_interface: {get_input: neutron_public_interface}
                nova::network::neutron::neutron_admin_password: {get_input: neutron_password}
-                nova::network::neutron::neutron_url: {get_input: neutron_url}
+                nova::network::neutron::neutron_url: {get_input: neutron_internal_url}
                nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url}
                neutron_router_distributed: {get_input: neutron_router_distributed}
                neutron_agent_mode: {get_input: neutron_agent_mode}
@@ -431,8 +496,9 @@ resources:
                neutron::core_plugin: {get_input: neutron_core_plugin}
                neutron::service_plugins: {get_input: neutron_service_plugins}
                neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers}
-                neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers}
+                neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers}
                neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device}
+                keystone_public_api_virtual_ip: {get_input: keystone_vip}
                admin_password: {get_input: admin_password}
                ntp::servers: {get_input: ntp_servers}
                tripleo::packages::enable_install: {get_input: enable_package_install}
@@ -449,62 +515,61 @@ resources:
        debug: {get_param: Debug}
        nova_compute_driver: {get_param: NovaComputeDriver}
        nova_compute_libvirt_type: {get_param: NovaComputeLibvirtType}
+        nova_compute_libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
        nova_public_ip: {get_param: NovaPublicIP}
        nova_api_host: {get_param: NovaApiHost}
        nova_password: {get_param: NovaPassword}
        nova_enable_rbd_backend: {get_param: NovaEnableRbdBackend}
        cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
        nova_vnc_proxyclient_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaVncProxyNetwork]}]}
+        nova_ovs_bridge: {get_param: NovaOVSBridge}
+        nova_security_group_api: {get_param: NovaSecurityGroupAPI}
        ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
        ceilometer_password: {get_param: CeilometerPassword}
        ceilometer_compute_agent: {get_param: CeilometerComputeAgent}
-        ceilometer_agent_auth_url:
-          list_join:
-            - ''
-            - - 'http://'
-              - {get_param: KeystonePublicApiVirtualIP}
-              - ':5000/v2.0'
+        ceilometer_agent_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
        snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
        snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
-        glance_api_servers:
-          list_join:
-            - ''
-            - - {get_param: GlanceProtocol}
-              - '://'
-              - {get_param: GlanceHost}
-              - ':'
-              - {get_param: GlancePort}
-        neutron_flat_networks: {get_param: NeutronFlatNetworks}
+        glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
+        neutron_flat_networks:
+          str_replace:
+            template: NETWORKS
+            params:
+              NETWORKS: {get_param: NeutronFlatNetworks}
        neutron_host: {get_param: NeutronHost}
        neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]}
-        neutron_tenant_network_type: {get_param: NeutronNetworkType}
-        neutron_tunnel_types: {get_param: NeutronTunnelTypes}
        neutron_tunnel_id_ranges:
          str_replace:
-            template: "['RANGES']"
+            template: RANGES
            params:
-              RANGES:
-                list_join:
-                - "','"
-                - {get_param: NeutronTunnelIdRanges}
+              RANGES: {get_param: NeutronTunnelIdRanges}
        neutron_vni_ranges:
          str_replace:
-            template: "['RANGES']"
+            template: RANGES
+            params:
+              RANGES: {get_param: NeutronVniRanges}
+        neutron_tenant_network_types:
+          str_replace:
+            template: TYPES
+            params:
+              TYPES: {get_param: NeutronNetworkType}
+        neutron_tunnel_types:
+          str_replace:
+            template: TYPES
            params:
-              RANGES:
-                list_join:
-                - "','"
-                - {get_param: NeutronVniRanges}
+              TYPES: {get_param: NeutronTunnelTypes}
        neutron_network_vlan_ranges:
          str_replace:
-            template: "['RANGES']"
+            template: RANGES
+            params:
+              RANGES: {get_param: NeutronNetworkVLANRanges}
+        neutron_bridge_mappings:
+          str_replace:
+            template: MAPPINGS
            params:
-              RANGES:
-                list_join:
-                - "','"
-                - {get_param: NeutronNetworkVLANRanges}
-        neutron_bridge_mappings: {get_param: NeutronBridgeMappings}
+              MAPPINGS: {get_param: NeutronBridgeMappings}
        neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
+        neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
        neutron_physical_bridge: {get_param: NeutronPhysicalBridge}
        neutron_public_interface: {get_param: NeutronPublicInterface}
        neutron_password: {get_param: NeutronPassword}
@@ -514,47 +579,39 @@ resources:
        neutron_core_plugin: {get_param: NeutronCorePlugin}
        neutron_service_plugins:
          str_replace:
-            template: "['PLUGINS']"
+            template: PLUGINS
            params:
-              PLUGINS:
-                list_join:
-                - "','"
-                - {get_param: NeutronServicePlugins}
+              PLUGINS: {get_param: NeutronServicePlugins}
        neutron_type_drivers:
          str_replace:
-            template: "['DRIVERS']"
+            template: DRIVERS
            params:
-              DRIVERS:
-                list_join:
-                - "','"
-                - {get_param: NeutronTypeDrivers}
-        neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers}
+              DRIVERS: {get_param: NeutronTypeDrivers}
+        neutron_mechanism_drivers:
+          str_replace:
+            template: MECHANISMS
+            params:
+              MECHANISMS: {get_param: NeutronMechanismDrivers}
        neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
-        neutron_url:
-          list_join:
-            - ''
-            - - 'http://'
-              - {get_param: NeutronHost}
-              - ':9696'
-        neutron_admin_auth_url:
-          list_join:
-            - ''
-            - - 'http://'
-              - {get_param: KeystoneAdminApiVirtualIP}
-              - ':35357/v2.0'
+        neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]}
+        neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri]}
+        keystone_vip: {get_param: KeystonePublicApiVirtualIP}
        admin_password: {get_param: AdminPassword}
        rabbit_username: {get_param: RabbitUserName}
        rabbit_password: {get_param: RabbitPassword}
        rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
        rabbit_client_port: {get_param: RabbitClientPort}
-        ntp_servers:
-          str_replace:
-            template: '["server"]'
-            params:
-              server: {get_param: NtpServer}
+        ntp_servers: {get_param: NtpServer}
        enable_package_install: {get_param: EnablePackageInstall}
        enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}

+  # Resource for site-specific injection of root certificate
+  NodeTLSCAData:
+    depends_on: NovaComputeDeployment
+    type: OS::TripleO::NodeTLSCAData
+    properties:
+      server: {get_resource: NovaCompute}
+
  # Hook for site-specific additional pre-deployment config, e.g extra hieradata
  ComputeExtraConfigPre:
    depends_on: NovaComputeDeployment
@@ -565,7 +622,7 @@ resources:
  # Hook for site-specific additional pre-deployment config,
  # applying to all nodes, e.g node registration/unregistration
  NodeExtraConfig:
-    depends_on: ComputeExtraConfigPre
+    depends_on: [ComputeExtraConfigPre, NodeTLSCAData]
    type: OS::TripleO::NodeExtraConfig
    properties:
      server: {get_resource: NovaCompute}
@@ -586,15 +643,24 @@ outputs:
  ip_address:
    description: IP address of the server in the ctlplane network
    value: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+  external_ip_address:
+    description: IP address of the server in the external network
+    value: {get_attr: [ExternalPort, ip_address]}
  internal_api_ip_address:
    description: IP address of the server in the internal_api network
    value: {get_attr: [InternalApiPort, ip_address]}
  storage_ip_address:
    description: IP address of the server in the storage network
    value: {get_attr: [StoragePort, ip_address]}
+  storage_mgmt_ip_address:
+    description: IP address of the server in the storage_mgmt network
+    value: {get_attr: [StorageMgmtPort, ip_address]}
  tenant_ip_address:
    description: IP address of the server in the tenant network
    value: {get_attr: [TenantPort, ip_address]}
+  management_ip_address:
+    description: IP address of the server in the management network
+    value: {get_attr: [ManagementPort, ip_address]}
  hostname:
    description: Hostname of the server
    value: {get_attr: [NovaCompute, name]}
@@ -603,9 +669,10 @@ outputs:
      Server's IP address and hostname in the /etc/hosts format
    value:
      str_replace:
-        template: "IP HOST.localdomain HOST"
+        template: "IP HOST.DOMAIN HOST"
        params:
          IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
          HOST: {get_attr: [NovaCompute, name]}
  nova_server_resource:
    description: Heat resource handle for the Nova compute server
@@ -617,5 +684,6 @@ outputs:
      list_join:
      - ','
      - - {get_attr: [NovaComputeDeployment, deploy_stdout]}
+        - {get_attr: [NodeTLSCAData, deploy_stdout]}
        - {get_attr: [ComputeExtraConfigPre, deploy_stdout]}
        - {get_param: UpdateIdentifier}
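A recurring pattern in the compute.yaml hunks above is a str_replace whose template is a bare token (NETWORKS, RANGES, TYPES, MAPPINGS) with the comma_delimited_list parameter as its only substitution. It looks like a no-op, but it appears to be the idiom these templates use to coerce the list back into a single comma-joined string for hiera, replacing the old hand-rolled "['RANGES']" quoting. A sketch of the intended effect, with hypothetical values:

    neutron_network_vlan_ranges:
      str_replace:
        template: RANGES
        params:
          RANGES: [datacentre:1:1000, tenant:2000:2999]
    # intended result: the string "datacentre:1:1000,tenant:2000:2999"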
diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml
index 941e1ac5..ed8129e7 100644
--- a/puppet/controller-post.yaml
+++ b/puppet/controller-post.yaml
@@ -17,6 +17,13 @@ parameters:

resources:

+  ControllerPrePuppet:
+    type: OS::TripleO::Tasks::ControllerPrePuppet
+    properties:
+      servers: {get_param: servers}
+      input_values:
+        update_identifier: {get_param: NodeConfigIdentifiers}
+
  ControllerPuppetConfig:
    type: OS::TripleO::ControllerConfig

@@ -26,6 +33,7 @@ resources:
  # e.g all Deployment resources should have a *Deployment_StepN suffix
  ControllerLoadBalancerDeployment_Step1:
    type: OS::Heat::StructuredDeployments
+    depends_on: ControllerPrePuppet
    properties:
      servers: {get_param: servers}
      config: {get_resource: ControllerPuppetConfig}
@@ -98,10 +106,18 @@ resources:
        step: 5
        update_identifier: {get_param: NodeConfigIdentifiers}

+  ControllerPostPuppet:
+    type: OS::TripleO::Tasks::ControllerPostPuppet
+    depends_on: ControllerOvercloudServicesDeployment_Step6
+    properties:
+      servers: {get_param: servers}
+      input_values:
+        update_identifier: {get_param: NodeConfigIdentifiers}
+
  # Note, this should come last, so use depends_on to ensure
  # this is created after any other resources.
  ExtraConfig:
-    depends_on: ControllerOvercloudServicesDeployment_Step5
+    depends_on: ControllerPostPuppet
    type: OS::TripleO::NodeExtraConfigPost
    properties:
      servers: {get_param: servers}
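ControllerPrePuppet and ControllerPostPuppet are OS::TripleO::Tasks::* hook points, so they resolve through the resource registry and an environment can wrap the whole controller puppet run with site-specific tasks (for example quiescing pacemaker before and restarting services after). A registry sketch; the template paths are illustrative, not mandated by this diff:

    resource_registry:
      OS::TripleO::Tasks::ControllerPrePuppet: extraconfig/tasks/pre_puppet_pacemaker.yaml
      OS::TripleO::Tasks::ControllerPostPuppet: extraconfig/tasks/post_puppet_pacemaker.yaml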
@@ -106,6 +118,10 @@ parameters: default: true description: Whether to use Galera instead of regular MariaDB. type: boolean + EnableLoadBalancer: + default: true + description: Whether to deploy a LoadBalancer on the Controller + type: boolean EnableCephStorage: default: false description: Whether to deploy Ceph Storage (OSD) on the Controller @@ -166,18 +182,9 @@ parameters: type: string default: '' GlancePassword: - default: unset description: The password for the glance service and db account, used by the glance services. type: string hidden: true - GlancePort: - default: "9292" - description: Glance port. - type: string - GlanceProtocol: - default: http - description: Protocol to use when connecting to glance, set to https for SSL. - type: string GlanceBackend: default: swift description: The short name of the Glance backend to use. Should be one @@ -213,15 +220,17 @@ parameters: default: /dev/log description: Syslog address where HAproxy will send its log type: string + GlanceWorkers: + default: 0 + description: Number of workers for Glance service. + type: number HeatPassword: - default: unset description: The password for the Heat service and db account, used by the Heat services. type: string hidden: true HeatStackDomainAdminPassword: description: Password for heat_domain_admin user. type: string - default: '' hidden: true HeatAuthEncryptionKey: description: Auth encryption key for heat-engine @@ -231,6 +240,10 @@ parameters: default: '*' description: A list of IP/Hostname allowed to connect to horizon type: comma_delimited_list + HeatWorkers: + default: 0 + description: Number of workers for Heat service. + type: number HorizonSecret: description: Secret key for Django type: string @@ -244,9 +257,13 @@ parameters: default: 'REBUILD_PRESERVE_EPHEMERAL' description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. type: string + InstanceNameTemplate: + default: 'instance-%08x' + description: Template string to be used to generate instance names + type: string KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string constraints: - custom_constraint: nova.keypair @@ -286,6 +303,18 @@ parameters: type: string default: 'regionOne' description: Keystone region for endpoint + ManageFirewall: + default: false + description: Whether to manage IPtables rules. + type: boolean + PurgeFirewallRules: + default: false + description: Whether IPtables rules should be purged before setting up the new ones. + type: boolean + KeystoneWorkers: + default: 0 + description: Number of workers for Keystone service. + type: number MysqlClusterUniquePart: description: A unique identifier of the MySQL cluster the controller is in. type: string @@ -320,12 +349,28 @@ parameters: to create provider networks (and we use this for the default floating network) - if changing this either use different post-install network scripts or be sure to keep 'datacentre' as a mapping network name. - type: string + type: comma_delimited_list default: "datacentre:br-ex" NeutronDnsmasqOptions: default: 'dhcp-option-force=26,1400' description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the gre tunnel overhead. 
type: string + NeutronEnableDHCPAgent: + description: Knob to enable/disable DHCP Agent + type: boolean + default: true + NeutronEnableL3Agent: + description: Knob to enable/disable L3 agent + type: boolean + default: true + NeutronEnableMetadataAgent: + description: Knob to enable/disable Metadata agent + type: boolean + default: true + NeutronEnableOVSAgent: + description: Knob to enable/disable OVS Agent + type: boolean + default: true NeutronAgentMode: default: 'dvr_snat' description: Agent mode for the neutron-l3-agent on the controller hosts @@ -343,7 +388,6 @@ parameters: description: Whether to configure Neutron Distributed Virtual Routers type: string NeutronMetadataProxySharedSecret: - default: 'unset' description: Shared secret to prevent spoofing type: string hidden: true @@ -367,18 +411,26 @@ parameters: NeutronMechanismDrivers: default: 'openvswitch' description: | - The mechanism drivers for the Neutron tenant network. To specify multiple - values, use a comma separated string, like so: 'openvswitch,l2_population' - type: string + The mechanism drivers for the Neutron tenant network. + type: comma_delimited_list NeutronAllowL3AgentFailover: default: 'True' description: Allow automatic l3-agent failover type: string + NeutronEnableIsolatedMetadata: + default: 'False' + description: If True, DHCP provide metadata route to VM. + type: string NeutronEnableTunnelling: type: string default: "True" - NeutronFlatNetworks: + NeutronEnableL2Pop: type: string + description: > + Enable/disable the L2 population feature in the Neutron agents. + default: "False" + NeutronFlatNetworks: + type: comma_delimited_list default: 'datacentre' description: If set, flat networks to configure in neutron plugins. NeutronL3HA: @@ -387,17 +439,16 @@ parameters: type: string NeutronNetworkType: default: 'vxlan' - description: The tenant network type for Neutron, either gre or vxlan. - type: string + description: The tenant network type for Neutron. + type: comma_delimited_list NeutronNetworkVLANRanges: - default: 'datacentre' + default: 'datacentre:1:1000' description: > The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the Neutron documentation for permitted values. Defaults to permitting any VLAN on the 'datacentre' physical network (See NeutronBridgeMappings). type: comma_delimited_list NeutronPassword: - default: unset description: The password for the neutron service and db account, used by neutron agents. type: string hidden: true @@ -430,9 +481,8 @@ parameters: NeutronTunnelTypes: default: 'vxlan' description: | - The tunnel types for the Neutron tenant network. To specify multiple - values, use a comma separated string, like so: 'gre,vxlan' - type: string + The tunnel types for the Neutron tenant network. + type: comma_delimited_list NeutronTunnelIdRanges: description: | Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges @@ -448,18 +498,31 @@ parameters: NovaApiVirtualIP: type: string default: '' + NeutronWorkers: + default: 0 + description: Number of workers for Neutron service. + type: number + NovaEnableDBPurge: + default: true + description: | + Whether to create cron job for purging soft deleted rows in Nova database. + type: boolean NovaPassword: - default: unset description: The password for the nova service and db account, used by nova-api. type: string hidden: true + NovaWorkers: + default: 0 + description: Number of workers for Nova service. 
+ type: number MongoDbNoJournal: default: false description: Should MongoDb journaling be disabled type: boolean NtpServer: - type: string default: '' + description: Comma-separated list of ntp servers + type: comma_delimited_list PcsdPassword: type: string description: The password for the 'pcsd' user. @@ -508,26 +571,10 @@ parameters: description: The user name for SNMPd with readonly rights running on all Overcloud nodes type: string SnmpdReadonlyUserPassword: - default: unset description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true - SSLCACertificate: - default: '' - description: If set, the contents of an SSL certificate authority file. - type: string - SSLCertificate: - default: '' - description: If set, the contents of an SSL certificate .crt file for encrypting SSL endpoints. - type: string - hidden: true - SSLKey: - default: '' - description: If set, the contents of an SSL certificate .key file for encrypting SSL endpoints. - type: string - hidden: true SwiftHashSuffix: - default: unset description: A random string to be used as a salt when hashing to determine mappings in the ring. hidden: true @@ -545,7 +592,6 @@ parameters: description: Partition Power to use when building Swift rings type: number SwiftPassword: - default: unset description: The password for the swift service account, used by the swift proxy services. hidden: true @@ -557,6 +603,10 @@ parameters: type: number default: 3 description: How many replicas to use in the swift rings. + SwiftWorkers: + default: 0 + description: Number of workers for Swift service. + type: number VirtualIP: # DEPRECATED: use per service settings instead type: string default: '' # Has to be here because of the ignored empty value bug @@ -590,6 +640,11 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json UpdateIdentifier: default: '' type: string @@ -599,6 +654,37 @@ parameters: Hostname: type: string default: '' # Defaults to Heat created hostname + NetworkDeploymentActions: + type: comma_delimited_list + description: > + Heat action when to apply network configuration changes + default: ['CREATE'] + NodeIndex: + type: number + default: 0 + SoftwareConfigTransport: + default: POLL_SERVER_CFN + description: | + How the server should receive the metadata required for software configuration. + type: string + constraints: + - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE] + CloudDomain: + default: '' + type: string + description: > + The DNS domain used for the hosts. This should match the dhcp_domain + configured in the Undercloud neutron. Defaults to localdomain. + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. 
+ type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -614,6 +700,9 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + software_config_transport: {get_param: SoftwareConfigTransport} + metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -638,26 +727,41 @@ resources: ExternalPort: type: OS::TripleO::Controller::Ports::ExternalPort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} InternalApiPort: type: OS::TripleO::Controller::Ports::InternalApiPort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} StoragePort: type: OS::TripleO::Controller::Ports::StoragePort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} StorageMgmtPort: type: OS::TripleO::Controller::Ports::StorageMgmtPort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} TenantPort: type: OS::TripleO::Controller::Ports::TenantPort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} + ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} + + ManagementPort: + type: OS::TripleO::Controller::Ports::ManagementPort + properties: ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} NetIpMap: @@ -669,6 +773,7 @@ resources: StorageIp: {get_attr: [StoragePort, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} TenantIp: {get_attr: [TenantPort, ip_address]} + ManagementIp: {get_attr: [ManagementPort, ip_address]} NetIpSubnetMap: type: OS::TripleO::Network::Ports::NetIpSubnetMap @@ -679,6 +784,7 @@ resources: StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetworkConfig: type: OS::TripleO::Controller::Net::SoftwareConfig @@ -689,16 +795,33 @@ resources: StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment properties: config: {get_resource: NetworkConfig} server: {get_resource: Controller} + actions: {get_param: NetworkDeploymentActions} input_values: bridge_name: br-ex interface_name: {get_param: NeutronPublicInterface} + # Resource for site-specific injection of root certificate + NodeTLSCAData: + depends_on: NetworkDeployment + type: OS::TripleO::NodeTLSCAData + properties: + server: {get_resource: Controller} + + # Resource for site-specific passing of private keys/certificates + NodeTLSData: + depends_on: NodeTLSCAData + type: OS::TripleO::NodeTLSData + properties: + server: {get_resource: Controller} + NodeIndex: {get_param: NodeIndex} + ControllerDeployment: type: OS::TripleO::SoftwareDeployment depends_on: NetworkDeployment @@ -707,7 +830,17 @@ resources: server: {get_resource: Controller} input_values: bootstack_nodeid: 
{get_attr: [Controller, name]} + ceilometer_workers: {get_param: CeilometerWorkers} + cinder_workers: {get_param: CinderWorkers} + glance_workers: {get_param: GlanceWorkers} + heat_workers: {get_param: HeatWorkers} + keystone_workers: {get_param: KeystoneWorkers} + nova_workers: {get_param: NovaWorkers} + neutron_workers: {get_param: NeutronWorkers} + swift_workers: {get_param: SwiftWorkers} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} + neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} + neutron_enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata} haproxy_log_address: {get_param: HAProxySyslogAddress} heat.watch_server_url: list_join: @@ -727,24 +860,6 @@ resources: - - 'http://' - {get_param: HeatApiVirtualIP} - ':8000/v1/waitcondition' - heat_public_url: - list_join: - - '' - - - 'http://' - - {get_param: PublicVirtualIP} - - ':8004/v1/%(tenant_id)s' - heat_internal_url: - list_join: - - '' - - - 'http://' - - {get_param: HeatApiVirtualIP} - - ':8004/v1/%(tenant_id)s' - heat_admin_url: - list_join: - - '' - - - 'http://' - - {get_param: HeatApiVirtualIP} - - ':8004/v1/%(tenant_id)s' heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey} horizon_allowed_hosts: {get_param: HorizonAllowedHosts} horizon_secret: {get_param: HorizonSecret} @@ -758,12 +873,9 @@ resources: cinder_nfs_mount_options: {get_param: CinderNfsMountOptions} cinder_nfs_servers: str_replace: - template: "['SERVERS']" + template: SERVERS params: - SERVERS: - list_join: - - "','" - - {get_param: CinderNfsServers} + SERVERS: {get_param: CinderNfsServers} cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize} cinder_password: {get_param: CinderPassword} cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend} @@ -777,43 +889,7 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/cinder' - cinder_public_url: - list_join: - - '' - - - 'http://' - - {get_param: PublicVirtualIP} - - ':8776/v1/%(tenant_id)s' - cinder_internal_url: - list_join: - - '' - - - 'http://' - - {get_param: CinderApiVirtualIP} - - ':8776/v1/%(tenant_id)s' - cinder_admin_url: - list_join: - - '' - - - 'http://' - - {get_param: CinderApiVirtualIP} - - ':8776/v1/%(tenant_id)s' - cinder_public_url_v2: - list_join: - - '' - - - 'http://' - - {get_param: PublicVirtualIP} - - ':8776/v2/%(tenant_id)s' - cinder_internal_url_v2: - list_join: - - '' - - - 'http://' - - {get_param: CinderApiVirtualIP} - - ':8776/v2/%(tenant_id)s' - cinder_admin_url_v2: - list_join: - - '' - - - 'http://' - - {get_param: CinderApiVirtualIP} - - ':8776/v2/%(tenant_id)s' - glance_port: {get_param: GlancePort} + glance_port: {get_param: [EndpointMap, GlanceInternal, port]} glance_password: {get_param: GlancePassword} glance_backend: {get_param: GlanceBackend} glance_file_pcmk_device: {get_param: GlanceFilePcmkDevice} @@ -840,7 +916,6 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/heat' - keystone_auth_address: {list_join: ['', ['http://', {get_param: KeystonePublicApiVirtualIP} , ':5000/v2.0']]} keystone_ca_certificate: {get_param: KeystoneCACertificate} keystone_signing_key: {get_param: KeystoneSigningKey} keystone_signing_certificate: {get_param: KeystoneSigningCertificate} @@ -856,40 +931,18 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/keystone' - keystone_identity_uri: - list_join: - - '' - - - 'http://' - - {get_param: KeystoneAdminApiVirtualIP} - - ':35357' - keystone_auth_uri: - list_join: - - '' - - - 'http://' - - {get_param: KeystonePublicApiVirtualIP} - - ':5000/v2.0/' - keystone_public_url: - 
list_join: - - '' - - - 'http://' - - {get_param: PublicVirtualIP} - - ':5000' - keystone_internal_url: - list_join: - - '' - - - 'http://' - - {get_param: KeystonePublicApiVirtualIP} - - ':5000' - keystone_ec2_uri: - list_join: - - '' - - - 'http://' - - {get_param: KeystonePublicApiVirtualIP} - - ':5000/v2.0/ec2tokens' + keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + keystone_public_url: { get_param: [EndpointMap, KeystonePublic, uri_no_suffix] } + keystone_internal_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } + keystone_ec2_uri: { get_param: [EndpointMap, KeystoneEC2, uri] } enable_fencing: {get_param: EnableFencing} enable_galera: {get_param: EnableGalera} + enable_load_balancer: {get_param: EnableLoadBalancer} enable_ceph_storage: {get_param: EnableCephStorage} enable_swift_storage: {get_param: EnableSwiftStorage} + manage_firewall: {get_param: ManageFirewall} + purge_firewall_rules: {get_param: PurgeFirewallRules} mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize} mysql_max_connections: {get_param: MysqlMaxConnections} mysql_root_password: {get_param: MysqlRootPassword} @@ -898,63 +951,72 @@ resources: template: tripleo-CLUSTER params: CLUSTER: {get_param: MysqlClusterUniquePart} - neutron_flat_networks: {get_param: NeutronFlatNetworks} + neutron_flat_networks: + str_replace: + template: NETWORKS + params: + NETWORKS: {get_param: NeutronFlatNetworks} neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} neutron_agent_mode: {get_param: NeutronAgentMode} neutron_router_distributed: {get_param: NeutronDVR} neutron_core_plugin: {get_param: NeutronCorePlugin} neutron_service_plugins: str_replace: - template: "['PLUGINS']" + template: PLUGINS params: - PLUGINS: - list_join: - - "','" - - {get_param: NeutronServicePlugins} + PLUGINS: {get_param: NeutronServicePlugins} neutron_type_drivers: str_replace: - template: "['DRIVERS']" + template: DRIVERS + params: + DRIVERS: {get_param: NeutronTypeDrivers} + neutron_enable_dhcp_agent: {get_param: NeutronEnableDHCPAgent} + neutron_enable_l3_agent: {get_param: NeutronEnableL3Agent} + neutron_enable_metadata_agent: {get_param: NeutronEnableMetadataAgent} + neutron_enable_ovs_agent: {get_param: NeutronEnableOVSAgent} + neutron_mechanism_drivers: + str_replace: + template: MECHANISMS params: - DRIVERS: - list_join: - - "','" - - {get_param: NeutronTypeDrivers} - neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers} + MECHANISMS: {get_param: NeutronMechanismDrivers} neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} neutron_l3_ha: {get_param: NeutronL3HA} neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} neutron_network_vlan_ranges: str_replace: - template: "['RANGES']" + template: RANGES + params: + RANGES: {get_param: NeutronNetworkVLANRanges} + neutron_bridge_mappings: + str_replace: + template: MAPPINGS params: - RANGES: - list_join: - - "','" - - {get_param: NeutronNetworkVLANRanges} - neutron_bridge_mappings: {get_param: NeutronBridgeMappings} + MAPPINGS: {get_param: NeutronBridgeMappings} neutron_external_network_bridge: {get_param: NeutronExternalNetworkBridge} neutron_public_interface: {get_param: NeutronPublicInterface} neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} neutron_public_interface_default_route: {get_param: NeutronPublicInterfaceDefaultRoute} 
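Note that the keystone_* values above are among the first consumers of the new EndpointMap parameter: instead of assembling each URL from 'http://', a VIP parameter and a hard-coded port via list_join, the template now reads pre-computed attributes (uri, uri_no_suffix, port) out of a single json map. As a rough sketch, one entry of such a map could look like the following; the keys match what the lookups above expect, but the host and values are illustrative only, not taken from a real deployment:

  parameter_defaults:
    EndpointMap:
      KeystoneInternal:
        protocol: http
        host: 192.0.2.5
        port: '5000'
        uri: http://192.0.2.5:5000/v2.0
        uri_no_suffix: http://192.0.2.5:5000

With an entry like that in place, {get_param: [EndpointMap, KeystoneInternal, uri]} resolves to the versioned URL and uri_no_suffix to the bare scheme://host:port form, which is why both variants appear in the hunks above.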
neutron_public_interface_tag: {get_param: NeutronPublicInterfaceTag} - neutron_tenant_network_type: {get_param: NeutronNetworkType} - neutron_tunnel_types: {get_param: NeutronTunnelTypes} neutron_tunnel_id_ranges: str_replace: - template: "['RANGES']" + template: RANGES params: - RANGES: - list_join: - - "','" - - {get_param: NeutronTunnelIdRanges} + RANGES: {get_param: NeutronTunnelIdRanges} neutron_vni_ranges: str_replace: - template: "['RANGES']" + template: RANGES + params: + RANGES: {get_param: NeutronVniRanges} + neutron_tenant_network_types: + str_replace: + template: TYPES + params: + TYPES: {get_param: NeutronNetworkType} + neutron_tunnel_types: + str_replace: + template: TYPES params: - RANGES: - list_join: - - "','" - - {get_param: NeutronVniRanges} + TYPES: {get_param: NeutronTunnelTypes} neutron_password: {get_param: NeutronPassword} neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions} neutron_dsn: @@ -965,30 +1027,11 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/ovs_neutron?charset=utf8' - neutron_internal_url: - list_join: - - '' - - - 'http://' - - {get_param: NeutronApiVirtualIP} - - ':9696' - neutron_public_url: - list_join: - - '' - - - 'http://' - - {get_param: PublicVirtualIP} - - ':9696' - neutron_admin_url: - list_join: - - '' - - - 'http://' - - {get_param: NeutronApiVirtualIP} - - ':9696' - neutron_admin_auth_url: - list_join: - - '' - - - 'http://' - - {get_param: KeystoneAdminApiVirtualIP} - - ':35357/v2.0' + neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] } + neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] } + neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] } + neutron_admin_auth_url: { get_param: [ EndpointMap, KeystoneAdmin, uri ] } + nova_internal_url: { get_param: [ EndpointMap, NovaInternal, uri ] } ceilometer_backend: {get_param: CeilometerBackend} ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} ceilometer_password: {get_param: CeilometerPassword} @@ -1006,26 +1049,9 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/ceilometer' - ceilometer_public_url: - list_join: - - '' - - - 'http://' - - {get_param: PublicVirtualIP} - - ':8777' - ceilometer_internal_url: - list_join: - - '' - - - 'http://' - - {get_param: CeilometerApiVirtualIP} - - ':8777' - ceilometer_admin_url: - list_join: - - '' - - - 'http://' - - {get_param: CeilometerApiVirtualIP} - - ':8777' snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} + nova_enable_db_purge: {get_param: NovaEnableDBPurge} nova_password: {get_param: NovaPassword} nova_dsn: list_join: @@ -1035,60 +1061,7 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/nova' - nova_public_url: - list_join: - - '' - - - 'http://' - - {get_param: PublicVirtualIP} - - ':8774/v2/%(tenant_id)s' - nova_internal_url: - list_join: - - '' - - - 'http://' - - {get_param: NovaApiVirtualIP} - - ':8774/v2/%(tenant_id)s' - nova_admin_url: - list_join: - - '' - - - 'http://' - - {get_param: NovaApiVirtualIP} - - ':8774/v2/%(tenant_id)s' - nova_v3_public_url: - list_join: - - '' - - - 'http://' - - {get_param: PublicVirtualIP} - - ':8774/v3' - nova_v3_internal_url: - list_join: - - '' - - - 'http://' - - {get_param: NovaApiVirtualIP} - - ':8774/v3' - nova_v3_admin_url: - list_join: - - '' - - - 'http://' - - {get_param: NovaApiVirtualIP} - - ':8774/v3' - nova_ec2_public_url: - list_join: - - '' - - - 'http://' - - {get_param: PublicVirtualIP} - - ':8773/services/Cloud' - 
nova_ec2_internal_url: - list_join: - - '' - - - 'http://' - - {get_param: NovaApiVirtualIP} - - ':8773/services/Cloud' - nova_ec2_admin_url: - list_join: - - '' - - - 'http://' - - {get_param: NovaApiVirtualIP} - - ':8773/services/Admin' + instance_name_template: {get_param: InstanceNameTemplate} fencing_config: {get_param: FencingConfig} pcsd_password: {get_param: PcsdPassword} rabbit_username: {get_param: RabbitUserName} @@ -1105,11 +1078,7 @@ resources: template: "'LIMIT'" params: LIMIT: {get_param: RabbitFDLimit} - ntp_servers: - str_replace: - template: '["server"]' - params: - server: {get_param: NtpServer} + ntp_servers: {get_param: NtpServer} control_virtual_interface: {get_param: ControlVirtualInterface} public_virtual_interface: {get_param: PublicVirtualInterface} swift_hash_suffix: {get_param: SwiftHashSuffix} @@ -1118,42 +1087,6 @@ resources: swift_replicas: {get_param: SwiftReplicas} swift_min_part_hours: {get_param: SwiftMinPartHours} swift_mount_check: {get_param: SwiftMountCheck} - swift_public_url: - list_join: - - '' - - - 'http://' - - {get_param: PublicVirtualIP} - - ':8080/v1/AUTH_%(tenant_id)s' - swift_internal_url: - list_join: - - '' - - - 'http://' - - {get_param: SwiftProxyVirtualIP} - - ':8080/v1/AUTH_%(tenant_id)s' - swift_admin_url: - list_join: - - '' - - - 'http://' - - {get_param: SwiftProxyVirtualIP} - - ':8080' - swift_public_url_s3: - list_join: - - '' - - - 'http://' - - {get_param: PublicVirtualIP} - - ':8080' - swift_internal_url_s3: - list_join: - - '' - - - 'http://' - - {get_param: SwiftProxyVirtualIP} - - ':8080' - swift_admin_url_s3: - list_join: - - '' - - - 'http://' - - {get_param: SwiftProxyVirtualIP} - - ':8080' enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} @@ -1162,39 +1095,8 @@ resources: cinder_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]} glance_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} glance_registry_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]} - glance_api_servers: - list_join: - - '' - - - {get_param: GlanceProtocol} - - '://' - - {get_param: GlanceApiVirtualIP} - - ':' - - {get_param: GlancePort} + glance_api_servers: { get_param: [EndpointMap, GlanceInternal, uri]} glance_registry_host: {get_param: GlanceRegistryVirtualIP} - glance_public_url: - list_join: - - '' - - - {get_param: GlanceProtocol} - - '://' - - {get_param: PublicVirtualIP} - - ':' - - {get_param: GlancePort} - glance_internal_url: - list_join: - - '' - - - {get_param: GlanceProtocol} - - '://' - - {get_param: GlanceApiVirtualIP} - - ':' - - {get_param: GlancePort} - glance_admin_url: - list_join: - - '' - - - {get_param: GlanceProtocol} - - '://' - - {get_param: GlanceApiVirtualIP} - - ':' - - {get_param: GlancePort} heat_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} keystone_public_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} keystone_admin_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} @@ -1244,6 +1146,8 @@ resources: - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre - neutron_cisco_data # Optionally provided by 
ControllerExtraConfigPre - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre + - neutron_nuage_data # Optionally provided by ControllerExtraConfigPre + - midonet_data #Optionally provided by AllNodesExtraConfig datafiles: controller_extraconfig: mapped_data: {get_param: ControllerExtraConfig} @@ -1268,6 +1172,7 @@ resources: # Pacemaker enable_fencing: {get_input: enable_fencing} + enable_load_balancer: {get_input: enable_load_balancer} hacluster_pwd: {get_input: pcsd_password} tripleo::fencing::config: {get_input: fencing_config} @@ -1278,18 +1183,11 @@ resources: swift::storage::all::storage_local_net_ip: {get_input: swift_management_network} swift::swift_hash_suffix: {get_input: swift_hash_suffix} swift::proxy::authtoken::admin_password: {get_input: swift_password} + swift::proxy::workers: {get_input: swift_workers} tripleo::ringbuilder::part_power: {get_input: swift_part_power} tripleo::ringbuilder::replicas: {get_input: swift_replicas} tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours} swift_mount_check: {get_input: swift_mount_check} - swift::keystone::auth::public_url: {get_input: swift_public_url } - swift::keystone::auth::internal_url: {get_input: swift_internal_url } - swift::keystone::auth::admin_url: {get_input: swift_admin_url } - swift::keystone::auth::public_url_s3: {get_input: swift_public_url_v3 } - swift::keystone::auth::internal_url_s3: {get_input: swift_internal_url_v3 } - swift::keystone::auth::admin_url_s3: {get_input: swift_admin_url_v3 } - swift::keystone::auth::password: {get_input: swift_password } - swift::keystone::auth::region: {get_input: keystone_region} # NOTE(dprince): build_ring support is currently not wired in. # See: https://review.openstack.org/#/c/109225/ @@ -1317,14 +1215,6 @@ resources: cinder::glance::glance_api_servers: {get_input: glance_api_servers} cinder_backend_config: {get_input: CinderBackendConfig} cinder::db::mysql::password: {get_input: cinder_password} - cinder::keystone::auth::public_url: {get_input: cinder_public_url } - cinder::keystone::auth::internal_url: {get_input: cinder_internal_url } - cinder::keystone::auth::admin_url: {get_input: cinder_admin_url } - cinder::keystone::auth::public_url_v2: {get_input: cinder_public_url_v2 } - cinder::keystone::auth::internal_url_v2: {get_input: cinder_internal_url_v2 } - cinder::keystone::auth::admin_url_v2: {get_input: cinder_admin_url_v2 } - cinder::keystone::auth::password: {get_input: cinder_password } - cinder::keystone::auth::region: {get_input: keystone_region} # Glance glance::api::bind_port: {get_input: glance_port} @@ -1334,6 +1224,7 @@ resources: glance::api::registry_host: {get_input: glance_registry_host} glance::api::keystone_password: {get_input: glance_password} glance::api::debug: {get_input: debug} + glance::api::workers: {get_input: glance_workers} glance_notifier_strategy: {get_input: glance_notifier_strategy} glance_log_file: {get_input: glance_log_file} glance_log_file: {get_input: glance_log_file} @@ -1344,16 +1235,12 @@ resources: glance::registry::auth_uri: {get_input: keystone_auth_uri} glance::registry::identity_uri: {get_input: keystone_identity_uri} glance::registry::debug: {get_input: debug} - glance::backend::swift::swift_store_auth_address: {get_input: keystone_auth_address} + glance::backend::swift::swift_store_auth_address: {get_input: keystone_auth_uri} + glance::registry::workers: {get_input: glance_workers} glance::backend::swift::swift_store_user: service:glance glance::backend::swift::swift_store_key: {get_input: 
glance_password} glance_backend: {get_input: glance_backend} glance::db::mysql::password: {get_input: glance_password} - glance::keystone::auth::public_url: {get_input: glance_public_url } - glance::keystone::auth::internal_url: {get_input: glance_internal_url } - glance::keystone::auth::admin_url: {get_input: glance_admin_url } - glance::keystone::auth::password: {get_input: glance_password } - glance::keystone::auth::region: {get_input: keystone_region} glance_file_pcmk_device: {get_input: glance_file_pcmk_device} glance_file_pcmk_fstype: {get_input: glance_file_pcmk_fstype} glance_file_pcmk_manage: {get_input: glance_file_pcmk_manage} @@ -1374,16 +1261,14 @@ resources: heat::identity_uri: {get_input: keystone_identity_uri} heat::keystone_password: {get_input: heat_password} heat::api::bind_host: {get_input: heat_api_network} + heat::api::workers: {get_input: heat_workers} heat::api_cloudwatch::bind_host: {get_input: heat_api_network} + heat::api_cloudwatch::workers: {get_input: heat_workers} heat::api_cfn::bind_host: {get_input: heat_api_network} + heat::api_cfn::workers: {get_input: heat_workers} heat::database_connection: {get_input: heat_dsn} heat::debug: {get_input: debug} heat::db::mysql::password: {get_input: heat_password} - heat::keystone::auth::public_url: {get_input: heat_public_url } - heat::keystone::auth::internal_url: {get_input: heat_internal_url } - heat::keystone::auth::admin_url: {get_input: heat_admin_url } - heat::keystone::auth::password: {get_input: heat_password } - heat::keystone::auth::region: {get_input: keystone_region} # Keystone keystone::admin_token: {get_input: admin_token} @@ -1409,6 +1294,9 @@ resources: keystone::endpoint::internal_url: {get_input: keystone_internal_url} keystone::endpoint::admin_url: {get_input: keystone_identity_uri} keystone::endpoint::region: {get_input: keystone_region} + keystone::admin_workers: {get_input: keystone_workers} + keystone::public_workers: {get_input: keystone_workers} + # MongoDB mongodb::server::bind_ip: {get_input: mongo_db_network} mongodb::server::nojournal: {get_input: mongodb_no_journal} @@ -1427,38 +1315,46 @@ resources: # Neutron neutron::bind_host: {get_input: neutron_api_network} neutron::rabbit_password: {get_input: rabbit_password} - neutron::rabbit_user: {get_input: rabbit_user} + neutron::rabbit_user: {get_input: rabbit_username} neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} neutron::rabbit_port: {get_input: rabbit_client_port} neutron::debug: {get_input: debug} neutron::server::auth_uri: {get_input: keystone_auth_uri} neutron::server::identity_uri: {get_input: keystone_identity_uri} neutron::server::database_connection: {get_input: neutron_dsn} + neutron::server::api_workers: {get_input: neutron_workers} neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} + neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} + neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} - neutron_flat_networks: {get_input: neutron_flat_networks} + neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks} neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret} neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network} + neutron::agents::metadata::metadata_workers: {get_input: neutron_workers} 
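All of these get_input references are resolved at deployment time from the ControllerDeployment input_values and written to disk by the os-apply-config hiera hook, so each mapped_data entry becomes an ordinary hiera key that the Puppet classes look up. A rough sketch of the rendered result (the file path is the hook's usual location and the values are illustrative, not from a real overcloud):

  # e.g. /etc/puppet/hieradata/controller.yaml (illustrative excerpt)
  neutron::rabbit_user: guest
  neutron::server::api_workers: 4
  neutron::agents::metadata::metadata_workers: 4
  neutron::plugins::ml2::flat_networks: datacentre

Changing one of the *Workers parameters therefore only requires a stack update that re-renders this datafile and re-runs Puppet; no template edit is involved.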
neutron_agent_mode: {get_input: neutron_agent_mode}
neutron_router_distributed: {get_input: neutron_router_distributed}
neutron::core_plugin: {get_input: neutron_core_plugin}
neutron::service_plugins: {get_input: neutron_service_plugins}
+ neutron::enable_dhcp_agent: {get_input: neutron_enable_dhcp_agent}
+ neutron::enable_l3_agent: {get_input: neutron_enable_l3_agent}
+ neutron::enable_metadata_agent: {get_input: neutron_enable_metadata_agent}
+ neutron::enable_ovs_agent: {get_input: neutron_enable_ovs_agent}
neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers}
- neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers}
+ neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers}
neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover}
neutron::server::l3_ha: {get_input: neutron_l3_ha}
neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network}
neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
- neutron_bridge_mappings: {get_input: neutron_bridge_mappings}
+ neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings}
neutron_public_interface: {get_input: neutron_public_interface}
neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device}
neutron_public_interface_default_route: {get_input: neutron_public_interface_default_route}
neutron_public_interface_tag: {get_input: neutron_public_interface_tag}
- neutron_tenant_network_type: {get_input: neutron_tenant_network_type}
- neutron_tunnel_types: {get_input: neutron_tunnel_types}
+ neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types}
+ neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
neutron::server::auth_password: {get_input: neutron_password}
neutron::agents::metadata::auth_password: {get_input: neutron_password}
neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options}
@@ -1470,6 +1366,10 @@ resources:
neutron::keystone::auth::admin_url: {get_input: neutron_admin_url }
neutron::keystone::auth::password: {get_input: neutron_password }
neutron::keystone::auth::region: {get_input: keystone_region}
+ neutron::server::notifications::nova_url: {get_input: nova_internal_url}
+ neutron::server::notifications::auth_url: {get_input: neutron_admin_auth_url}
+ neutron::server::notifications::tenant_name: 'service'
+ neutron::server::notifications::password: {get_input: nova_password}
# Ceilometer
ceilometer_backend: {get_input: ceilometer_backend}
ceilometer_metering_secret: {get_input: ceilometer_metering_secret}
ceilometer_password: {get_input: ceilometer_password}
@@ -1485,14 +1385,9 @@ resources:
ceilometer::api::keystone_auth_uri: {get_input: keystone_auth_uri}
ceilometer::api::keystone_identity_uri: {get_input: keystone_identity_uri}
ceilometer::agent::auth::auth_password: {get_input: ceilometer_password}
- ceilometer::agent::auth::auth_url: {get_input: keystone_auth_address}
+ ceilometer::agent::auth::auth_url: {get_input: keystone_auth_uri}
ceilometer::agent::central::coordination_url: {get_input: ceilometer_coordination_url}
ceilometer::db::mysql::password: {get_input: ceilometer_password}
- ceilometer::keystone::auth::public_url: {get_input: ceilometer_public_url }
- ceilometer::keystone::auth::internal_url: {get_input: ceilometer_internal_url }
- ceilometer::keystone::auth::admin_url: {get_input: ceilometer_admin_url }
- ceilometer::keystone::auth::password: {get_input:
ceilometer_password } - ceilometer::keystone::auth::region: {get_input: keystone_region} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} @@ -1507,25 +1402,19 @@ resources: nova::api::api_bind_address: {get_input: nova_api_network} nova::api::metadata_listen: {get_input: nova_metadata_network} nova::api::admin_password: {get_input: nova_password} + nova::api::osapi_compute_workers: {get_input: nova_workers} + nova::api::ec2_workers: {get_input: nova_workers} + nova::api::metadata_workers: {get_input: nova_workers} nova::database_connection: {get_input: nova_dsn} nova::glance_api_servers: {get_input: glance_api_servers} nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} + nova::api::instance_name_template: {get_input: instance_name_template} nova::network::neutron::neutron_admin_password: {get_input: neutron_password} nova::network::neutron::neutron_url: {get_input: neutron_internal_url} nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} nova::vncproxy::host: {get_input: nova_api_network} nova::db::mysql::password: {get_input: nova_password} - nova::keystone::auth::public_url: {get_input: nova_public_url} - nova::keystone::auth::internal_url: {get_input: nova_internal_url} - nova::keystone::auth::admin_url: {get_input: nova_admin_url} - nova::keystone::auth::public_url_v3: {get_input: nova_v3_public_url} - nova::keystone::auth::internal_url_v3: {get_input: nova_v3_internal_url} - nova::keystone::auth::admin_url_v3: {get_input: nova_v3_admin_url} - nova::keystone::auth::ec2_public_url: {get_input: nova_ec2_public_url} - nova::keystone::auth::ec2_internal_url: {get_input: nova_ec2_internal_url} - nova::keystone::auth::ec2_admin_url: {get_input: nova_ec2_admin_url} - nova::keystone::auth::password: {get_input: nova_password } - nova::keystone::auth::region: {get_input: keystone_region} + nova_enable_db_purge: {get_input: nova_enable_db_purge} # Horizon apache::ip: {get_input: horizon_network} @@ -1539,9 +1428,14 @@ resources: rabbitmq::node_ip_address: {get_input: rabbitmq_network} rabbitmq::erlang_cookie: {get_input: rabbit_cookie} rabbitmq::file_limit: {get_input: rabbit_fd_limit} + rabbitmq::default_user: {get_input: rabbit_username} + rabbitmq::default_pass: {get_input: rabbit_password} # Redis redis::bind: {get_input: redis_network} redis_vip: {get_input: redis_vip} + # Firewall + tripleo::firewall::manage_firewall: {get_input: manage_firewall} + tripleo::firewall::purge_firewall_rules: {get_input: purge_firewall_rules} # Misc memcached::listen_ip: {get_input: memcached_network} neutron_public_interface_ip: {get_input: neutron_public_interface_ip} @@ -1551,6 +1445,7 @@ resources: tripleo::loadbalancer::control_virtual_interface: {get_input: control_virtual_interface} tripleo::loadbalancer::public_virtual_interface: {get_input: public_virtual_interface} tripleo::loadbalancer::haproxy_log_address: {get_input: haproxy_log_address} + tripleo::loadbalancer::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]} tripleo::packages::enable_install: {get_input: enable_package_install} tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} @@ -1564,7 +1459,7 @@ resources: # Hook for site-specific additional pre-deployment config, # applying to all nodes, e.g node registration/unregistration NodeExtraConfig: - depends_on: ControllerExtraConfigPre + depends_on: [ControllerExtraConfigPre, 
NodeTLSData] type: OS::TripleO::NodeExtraConfig properties: server: {get_resource: Controller} @@ -1600,6 +1495,9 @@ outputs: tenant_ip_address: description: IP address of the server in the tenant network value: {get_attr: [TenantPort, ip_address]} + management_ip_address: + description: IP address of the server in the management network + value: {get_attr: [ManagementPort, ip_address]} hostname: description: Hostname of the server value: {get_attr: [Controller, name]} @@ -1615,9 +1513,10 @@ outputs: Server's IP address and hostname in the /etc/hosts format value: str_replace: - template: IP HOST.localdomain HOST CLOUDNAME + template: IP HOST.DOMAIN HOST CLOUDNAME params: IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]} + DOMAIN: {get_param: CloudDomain} HOST: {get_attr: [Controller, name]} CLOUDNAME: {get_param: CloudName} nova_server_resource: @@ -1644,5 +1543,13 @@ outputs: list_join: - ',' - - {get_attr: [ControllerDeployment, deploy_stdout]} + - {get_attr: [NodeTLSCAData, deploy_stdout]} + - {get_attr: [NodeTLSData, deploy_stdout]} - {get_attr: [ControllerExtraConfigPre, deploy_stdout]} - {get_param: UpdateIdentifier} + tls_key_modulus_md5: + description: MD5 checksum of the TLS Key Modulus + value: {get_attr: [NodeTLSData, key_modulus_md5]} + tls_cert_modulus_md5: + description: MD5 checksum of the TLS Certificate Modulus + value: {get_attr: [NodeTLSData, cert_modulus_md5]} diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml new file mode 100644 index 00000000..26ce7138 --- /dev/null +++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml @@ -0,0 +1,119 @@ +heat_template_version: 2015-10-15 + +description: Configure hieradata for all MidoNet nodes + +parameters: + # Parameters passed from the parent template + controller_servers: + type: json + compute_servers: + type: json + blockstorage_servers: + type: json + objectstorage_servers: + type: json + cephstorage_servers: + type: json + + EnableZookeeperOnController: + label: Enable Zookeeper On Controller + description: 'Whether enable Zookeeper cluster on Controller' + type: boolean + default: false + EnableCassandraOnController: + label: Enable Cassandra On Controller + description: 'Whether enable Cassandra cluster on Controller' + type: boolean + default: false + CassandraStoragePort: + label: Cassandra Storage Port + description: 'The Cassandra port for inter-node communication' + type: string + default: '7000' + CassandraSslStoragePort: + label: Cassandra SSL Storage Port + description: 'The SSL port for encrypted communication. Unused unless enabled in encryption_options' + type: string + default: '7001' + CassandraClientPort: + label: Cassandra Client Port + description: 'Native Transport Port' + type: string + default: '9042' + CassandraClientPortThrift: + label: Cassandra Client Thrift Port + description: 'The port for the Thrift RPC service, which is used for client connections' + type: string + default: '9160' + TunnelZoneName: + label: Name of the Tunnelzone + description: 'Name of the tunnel zone used to tunnel packages' + type: string + default: 'tunnelzone_tripleo' + TunnelZoneType: + label: Type of the Tunnel + description: 'Type of the tunnels on the overlay. 
Choose between `gre` and `vxlan`' + type: string + default: 'vxlan' + +resources: + + NetworkMidoNetConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + midonet_data: + mapped_data: + enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController} + enable_cassandra_on_controller: {get_param: EnableCassandraOnController} + midonet_tunnelzone_name: {get_param: TunnelZoneName} + midonet_tunnelzone_type: {get_param: TunnelZoneType} + midonet_libvirt_qemu_data: | + user = "root" + group = "root" + cgroup_device_acl = [ + "/dev/null", "/dev/full", "/dev/zero", + "/dev/random", "/dev/urandom", + "/dev/ptmx", "/dev/kvm", "/dev/kqemu", + "/dev/rtc","/dev/hpet", "/dev/vfio/vfio", + "/dev/net/tun" + ] + tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort} + tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort} + tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort} + tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift} + tripleo::loadbalancer::midonet_api: true + # Missed Neutron Puppet data + neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver' + neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver' + neutron::plugins::midonet::midonet_api_port: 8081 + neutron::params::midonet_server_package: 'python-networking-midonet' + + # Make sure the l3 agent does not run + l3_agent_service: false + neutron::agents::l3::manage_service: false + neutron::agents::l3::enabled: false + + + NetworkMidonetDeploymentControllers: + type: OS::Heat::StructuredDeploymentGroup + properties: + config: {get_resource: NetworkMidoNetConfig} + servers: {get_param: controller_servers} + + NetworkMidonetDeploymentComputes: + type: OS::Heat::StructuredDeploymentGroup + properties: + config: {get_resource: NetworkMidoNetConfig} + servers: {get_param: compute_servers} + +outputs: + config_identifier: + value: + list_join: + - ' ' + - - {get_attr: [NetworkMidonetDeploymentControllers, deploy_stdouts]} + - {get_attr: [NetworkMidonetDeploymentComputes, deploy_stdouts]} diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml index 62907104..7cefc24b 100644 --- a/puppet/extraconfig/ceph/ceph-external-config.yaml +++ b/puppet/extraconfig/ceph/ceph-external-config.yaml @@ -29,6 +29,18 @@ parameters: type: comma_delimited_list ceph_mon_ips: type: comma_delimited_list + NovaRbdPoolName: + default: vms + type: string + CinderRbdPoolName: + default: volumes + type: string + GlanceRbdPoolName: + default: images + type: string + CephClientUserName: + default: openstack + type: string resources: CephClusterConfigImpl: @@ -47,16 +59,34 @@ resources: ceph::profile::params::client_keys: str_replace: template: "{ - client.openstack: { + client.CLIENT_USER: { secret: 'CLIENT_KEY', mode: '0644', cap_mon: 'allow r', - cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images' + cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL' } }" params: + CLIENT_USER: {get_param: CephClientUserName} CLIENT_KEY: {get_param: ceph_client_key} - + NOVA_POOL: {get_param: NovaRbdPoolName} + CINDER_POOL: {get_param: CinderRbdPoolName} + GLANCE_POOL: {get_param: GlanceRbdPoolName} + 
nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
+ cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
+ glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+ nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
+ glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
+ nova::compute::rbd::rbd_keyring:
+   list_join:
+   - '.'
+   - - 'client'
+     - {get_param: CephClientUserName}
+ ceph_client_user_name: {get_param: CephClientUserName}
+ ceph_pools:
+   - {get_param: CinderRbdPoolName}
+   - {get_param: NovaRbdPoolName}
+   - {get_param: GlanceRbdPoolName}
outputs:
config_id:
diff --git a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
new file mode 100644
index 00000000..96368e37
--- /dev/null
+++ b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
@@ -0,0 +1,92 @@
+heat_template_version: 2015-04-30
+
+description: Configure hieradata for Nuage configuration on the Compute
+
+parameters:
+  server:
+    description: ID of the compute node to apply this config to
+    type: string
+
+  NuageActiveController:
+    description: IP address of the Active Virtualized Services Controller (VSC)
+    type: string
+  NuageStandbyController:
+    description: IP address of the Standby Virtualized Services Controller (VSC)
+    type: string
+  NuageMetadataPort:
+    description: TCP Port to listen for metadata server requests
+    type: string
+    default: '9697'
+  NuageNovaMetadataPort:
+    description: TCP Port used by Nova metadata server
+    type: string
+    default: '8775'
+  NuageMetadataProxySharedSecret:
+    description: Shared secret to sign the instance-id request
+    type: string
+  NuageNovaClientVersion:
+    description: Nova client version
+    type: string
+    default: '2'
+  NuageNovaOsUsername:
+    description: Nova username in keystone_authtoken
+    type: string
+    default: 'nova'
+  NuageMetadataAgentStartWithOvs:
+    description: Set to True if nuage-metadata-agent needs to be started with nuage-openvswitch-switch
+    type: string
+    default: 'True'
+  NuageNovaApiEndpoint:
+    description: One of publicURL, internalURL, adminURL in "keystone endpoint-list"
+    type: string
+    default: 'publicURL'
+  NuageNovaRegionName:
+    description: Region name in "keystone endpoint-list"
+    type: string
+    default: 'regionOne'
+
+# Declaration of resources for the template.
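In ceph-external-config.yaml above, ceph::profile::params::client_keys has to be a hash keyed by the literal client name, which a plain get_param cannot build, hence the quoted str_replace template: the CLIENT_USER, CLIENT_KEY and *_POOL placeholders are substituted into the string before it lands in hiera. With the parameter defaults shown in the hunk, the rendered value would look roughly like this (key material elided):

  ceph::profile::params::client_keys: "{
    client.openstack: {
      secret: '<ceph_client_key>',
      mode: '0644',
      cap_mon: 'allow r',
      cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images'
    }
  }"

Overriding CephClientUserName or any of the *RbdPoolName parameters via parameter_defaults thus updates the keyring name (client.<user>) and the OSD capability string together, and the same names feed the ceph_pools list and the nova/glance/cinder hiera keys above.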
+resources: + NovaNuageConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + nova_nuage_data: + mapped_data: + nuage::vrs::active_controller: {get_input: ActiveController} + nuage::vrs::standby_controller: {get_input: StandbyController} + nuage::metadataagent::metadata_port: {get_input: MetadataPort} + nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort} + nuage::metadataagent::metadata_secret: {get_input: SharedSecret} + nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion} + nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername} + nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs} + nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType} + nuage::metadataagent::nova_region_name: {get_input: NovaRegionName} + + NovaNuageDeployment: + type: OS::Heat::StructuredDeployment + properties: + config: {get_resource: NovaNuageConfig} + server: {get_param: server} + input_values: + ActiveController: {get_param: NuageActiveController} + StandbyController: {get_param: NuageStandbyController} + MetadataPort: {get_param: NuageMetadataPort} + NovaMetadataPort: {get_param: NuageNovaMetadataPort} + SharedSecret: {get_param: NuageMetadataProxySharedSecret} + NovaClientVersion: {get_param: NuageNovaClientVersion} + NovaOsUsername: {get_param: NuageNovaOsUsername} + MetadataAgentStartWithOvs: {get_param: NuageMetadataAgentStartWithOvs} + NovaApiEndpointType: {get_param: NuageNovaApiEndpoint} + NovaRegionName: {get_param: NuageNovaRegionName} + +# Specify output parameters that will be available +# after the template is instantiated. +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [NovaNuageDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml index 5985116b..6730ddf1 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml @@ -69,6 +69,9 @@ parameters: N1000vExistingBridge: type: boolean default: true + N1000vVSMHostMgmtIntfVlan: + type: number + default: 0 #Plugin Parameters N1000vVSMUser: type: string @@ -125,6 +128,7 @@ resources: n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask} n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip} n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip} + n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan} # Cisco N1KV driver Parameters neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username} @@ -159,6 +163,7 @@ resources: n1kv_vsm_password: {get_param: N1000vVSMPassword} n1kv_vsm_mgmt_netmask: {get_param: N1000vMgmtNetmask} n1kv_vsm_gateway_ip: {get_param: N1000vMgmtGatewayIP} + n1kv_phy_brige_vlan: {get_param: N1000vVSMHostMgmtIntfVlan} n1kv_vsm_pacemaker_ctrl: {get_param: N1000vPacemakerControl} n1kv_vsm_existing_br: {get_param: N1000vExistingBridge} n1kv_vsm_username: {get_param: N1000vVSMUser} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml new file mode 100644 index 00000000..8378d2fc --- /dev/null +++ b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml @@ -0,0 
+1,90 @@ +heat_template_version: 2015-04-30 + +description: Configure hieradata for Nuage configuration on the Controller + +parameters: + server: + description: ID of the controller node to apply this config to + type: string + + # Config specific parameters, to be provided via parameter_defaults + NeutronNuageOSControllerIp: + description: IP address of the OpenStack Controller + type: string + + NeutronNuageNetPartitionName: + description: Specifies the title that you will see on the VSD + type: string + default: 'default_name' + + NeutronNuageVSDIp: + description: IP address and port of the Virtual Services Directory + type: string + + NeutronNuageVSDUsername: + description: Username to be used to log into VSD + type: string + + NeutronNuageVSDPassword: + description: Password to be used to log into VSD + type: string + + NeutronNuageVSDOrganization: + description: Organization parameter required to log into VSD + type: string + default: 'organization' + + NeutronNuageBaseURIVersion: + description: URI version to be used based on the VSD release + type: string + default: 'default_uri_version' + + NeutronNuageCMSId: + description: Cloud Management System ID (CMS ID) to distinguish between OS instances on the same VSD + type: string + + UseForwardedFor: + description: Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. + type: boolean + default: false + +resources: + NeutronNuageConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + neutron_nuage_data: + mapped_data: + neutron::plugins::nuage::nuage_oscontroller_ip: {get_input: NuageOSControllerIp} + neutron::plugins::nuage::nuage_net_partition_name: {get_input: NuageNetPartitionName} + neutron::plugins::nuage::nuage_vsd_ip: {get_input: NuageVSDIp} + neutron::plugins::nuage::nuage_vsd_username: {get_input: NuageVSDUsername} + neutron::plugins::nuage::nuage_vsd_password: {get_input: NuageVSDPassword} + neutron::plugins::nuage::nuage_vsd_organization: {get_input: NuageVSDOrganization} + neutron::plugins::nuage::nuage_base_uri_version: {get_input: NuageBaseURIVersion} + neutron::plugins::nuage::nuage_cms_id: {get_input: NuageCMSId} + nova::api::use_forwarded_for: {get_input: NovaUseForwardedFor} + + NeutronNuageDeployment: + type: OS::Heat::StructuredDeployment + properties: + config: {get_resource: NeutronNuageConfig} + server: {get_param: server} + input_values: + NuageOSControllerIp: {get_param: NeutronNuageOSControllerIp} + NuageNetPartitionName: {get_param: NeutronNuageNetPartitionName} + NuageVSDIp: {get_param: NeutronNuageVSDIp} + NuageVSDUsername: {get_param: NeutronNuageVSDUsername} + NuageVSDPassword: {get_param: NeutronNuageVSDPassword} + NuageVSDOrganization: {get_param: NeutronNuageVSDOrganization} + NuageBaseURIVersion: {get_param: NeutronNuageBaseURIVersion} + NuageCMSId: {get_param: NeutronNuageCMSId} + NovaUseForwardedFor: {get_param: UseForwardedFor} + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [NeutronNuageDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/tls/ca-inject.yaml b/puppet/extraconfig/tls/ca-inject.yaml new file mode 100644 index 00000000..7e34f071 --- /dev/null +++ b/puppet/extraconfig/tls/ca-inject.yaml @@ -0,0 +1,66 @@ +heat_template_version: 2015-04-30 + +description: > + This is a template which will inject the trusted anchor. 
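This template is the opt-in counterpart to the no-op no-ca.yaml below: the controller's NodeTLSCAData resource points at the no-op by default, and a deployment that needs a site CA re-maps it in an environment file and supplies the PEM content. A minimal sketch (the registry path is an assumption, relative to wherever the environment file lives, and the certificate body is a placeholder):

  resource_registry:
    OS::TripleO::NodeTLSCAData: ../puppet/extraconfig/tls/ca-inject.yaml

  parameter_defaults:
    SSLRootCertificate: |
      -----BEGIN CERTIFICATE-----
      ... site CA certificate body (placeholder) ...
      -----END CERTIFICATE-----

Each controller then writes the PEM to SSLRootCertificatePath, runs the configured update-ca-trust command, and reports the file's md5sum back through deploy_stdout.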
+ +parameters: + # Can be overriden via parameter_defaults in the environment + SSLRootCertificate: + description: > + The content of a CA's SSL certificate file in PEM format. + This is evaluated on the client side. + type: string + SSLRootCertificatePath: + default: '/etc/pki/ca-trust/source/anchors/ca.crt.pem' + description: > + The filepath of the root certificate as it will be stored in the nodes. + Note that the path has to be one that can be picked up by the update + trust anchor command. e.g. in RHEL it would be + /etc/pki/ca-trust/source/anchors/ca.crt.pem + type: string + UpdateTrustAnchorsCommand: + default: update-ca-trust extract + description: > + command that will be executed to update the trust anchors. + type: string + + # Passed in by controller.yaml + server: + description: ID of the node to apply this config to + type: string + +resources: + CAConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: cacert_path + - name: cacert_content + - name: update_anchor_command + outputs: + - name: root_cert_md5sum + config: | + #!/bin/sh + cat > ${cacert_path} << EOF + ${cacert_content} + EOF + chmod 0440 ${cacert_path} + chown root:root ${cacert_path} + ${update_anchor_command} + md5sum ${cacert_path} > ${heat_outputs_path}.root_cert_md5sum + + CADeployment: + type: OS::Heat::SoftwareDeployment + properties: + config: {get_resource: CAConfig} + server: {get_param: server} + input_values: + cacert_path: {get_param: SSLRootCertificatePath} + cacert_content: {get_param: SSLRootCertificate} + update_anchor_command: {get_param: UpdateTrustAnchorsCommand} + +outputs: + deploy_stdout: + description: Deployment reference + value: {get_attr: [CADeployment, root_cert_md5sum]} diff --git a/puppet/extraconfig/tls/no-ca.yaml b/puppet/extraconfig/tls/no-ca.yaml new file mode 100644 index 00000000..5862a85c --- /dev/null +++ b/puppet/extraconfig/tls/no-ca.yaml @@ -0,0 +1,17 @@ +heat_template_version: 2015-04-30 + +description: > + This is a default no-op template which can be passed to the + OS::Nova::Server resources. This template can be replaced with + a different implementation via the resource registry, such that + deployers may customize their configuration. + +parameters: + server: # Here for compatibility with controller.yaml + description: ID of the controller node to apply this config to + type: string + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: 'Root CA cert injection not enabled.' diff --git a/puppet/extraconfig/tls/no-tls.yaml b/puppet/extraconfig/tls/no-tls.yaml new file mode 100644 index 00000000..a2b5c569 --- /dev/null +++ b/puppet/extraconfig/tls/no-tls.yaml @@ -0,0 +1,34 @@ +heat_template_version: 2015-04-30 + +description: > + This is a default no-op template. This defines the parameters that + need to be passed in order to have TLS enabled in the controller + nodes. This template can be replaced with a different + implementation via the resource registry, such that deployers + may customize their configuration. + +parameters: + DeployedSSLCertificatePath: + default: '' + description: > + The filepath of the certificate as it will be stored in the controller. 
+ type: string + NodeIndex: # Here for compatibility with puppet/controller.yaml + default: 0 + type: number + server: # Here for compatibility with puppet/controller.yaml + description: ID of the controller node to apply this config to + type: string + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: 'TLS not enabled.' + deployed_ssl_certificate_path: + value: '' + key_modulus_md5: + description: Key SSL Modulus + value: '' + cert_modulus_md5: + description: Certificate SSL Modulus + value: '' diff --git a/puppet/extraconfig/tls/tls-cert-inject.yaml b/puppet/extraconfig/tls/tls-cert-inject.yaml new file mode 100644 index 00000000..ce524ba9 --- /dev/null +++ b/puppet/extraconfig/tls/tls-cert-inject.yaml @@ -0,0 +1,93 @@ +heat_template_version: 2015-04-30 + +description: > + This is a template which will build the TLS Certificates necessary + for the load balancer using the given parameters. + +parameters: + # Can be overriden via parameter_defaults in the environment + SSLCertificate: + description: > + The content of the SSL certificate (without Key) in PEM format. + type: string + SSLIntermediateCertificate: + default: '' + description: > + The content of an SSL intermediate CA certificate in PEM format. + type: string + SSLKey: + description: > + The content of the SSL Key in PEM format. + type: string + hidden: true + + # Can be overriden by parameter_defaults if the user wants to try deploying + # this in a distro that doesn't support this path. + DeployedSSLCertificatePath: + default: '/etc/pki/tls/private/overcloud_endpoint.pem' + description: > + The filepath of the certificate as it will be stored in the controller. + type: string + + # Passed in by the controller + NodeIndex: + default: 0 + type: number + server: + description: ID of the controller node to apply this config to + type: string + +resources: + ControllerTLSConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: cert_path + - name: cert_chain_content + outputs: + - name: chain_md5sum + - name: cert_modulus + - name: key_modulus + config: | + #!/bin/sh + cat > ${cert_path} << EOF + ${cert_chain_content} + EOF + chmod 0440 ${cert_path} + chown root:haproxy ${cert_path} + md5sum ${cert_path} > ${heat_outputs_path}.chain_md5sum + openssl x509 -noout -modulus -in ${cert_path} \ + | openssl md5 | cut -c 10- \ + > ${heat_outputs_path}.cert_modulus + openssl rsa -noout -modulus -in ${cert_path} \ + | openssl md5 | cut -c 10- \ + > ${heat_outputs_path}.key_modulus + + ControllerTLSDeployment: + type: OS::Heat::SoftwareDeployment + properties: + config: {get_resource: ControllerTLSConfig} + server: {get_param: server} + input_values: + cert_path: {get_param: DeployedSSLCertificatePath} + cert_chain_content: + list_join: + - '' + - - {get_param: SSLCertificate} + - {get_param: SSLIntermediateCertificate} + - {get_param: SSLKey} + +outputs: + deploy_stdout: + description: Deployment reference + value: {get_attr: [ControllerTLSDeployment, chain_md5sum]} + deployed_ssl_certificate_path: + description: The location that the TLS certificate was deployed to. 
+ value: {get_param: DeployedSSLCertificatePath} + key_modulus_md5: + description: MD5 checksum of the Key SSL Modulus + value: {get_attr: [ControllerTLSDeployment, key_modulus]} + cert_modulus_md5: + description: MD5 checksum of the Certificate SSL Modulus + value: {get_attr: [ControllerTLSDeployment, cert_modulus]} diff --git a/puppet/hieradata/ceph.yaml b/puppet/hieradata/ceph.yaml index 18a48622..1e480e60 100644 --- a/puppet/hieradata/ceph.yaml +++ b/puppet/hieradata/ceph.yaml @@ -1,17 +1,12 @@ ceph::profile::params::osd_journal_size: 1024 -ceph::profile::params::osd_pool_default_pg_num: 128 -ceph::profile::params::osd_pool_default_pgp_num: 128 +ceph::profile::params::osd_pool_default_pg_num: 32 +ceph::profile::params::osd_pool_default_pgp_num: 32 ceph::profile::params::osd_pool_default_size: 3 ceph::profile::params::osd_pool_default_min_size: 1 ceph::profile::params::osds: {/srv/data: {}} ceph::profile::params::manage_repo: false ceph::profile::params::authentication_type: cephx -ceph_pools: - - volumes - - vms - - images - ceph_classes: [] ceph_osd_selinux_permissive: true diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index 030f661d..b4b51abf 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -7,10 +7,9 @@ ceilometer::agent::auth::auth_region: 'regionOne' # changes in the tripleo-incubator keystone role setup ceilometer::agent::auth::auth_tenant_name: 'admin' +nova::api::admin_tenant_name: 'service' nova::network::neutron::neutron_admin_tenant_name: 'service' nova::network::neutron::neutron_admin_username: 'neutron' -nova::network::neutron::vif_plugging_is_fatal: false -nova::network::neutron::vif_plugging_timeout: 30 nova::network::neutron::dhcp_domain: '' neutron::allow_overlapping_ips: true diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml index 16aeb98c..fa8dcc81 100644 --- a/puppet/hieradata/compute.yaml +++ b/puppet/hieradata/compute.yaml @@ -10,11 +10,14 @@ nova::compute::vnc_enabled: true nova::compute::libvirt::vncserver_listen: '0.0.0.0' nova::compute::libvirt::migration_support: true -nova::compute::rbd::libvirt_rbd_user: 'openstack' -nova::compute::rbd::rbd_keyring: 'client.openstack' -nova::compute::rbd::libvirt_images_rbd_pool: 'vms' nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}" +# Changing the default from 512MB. The current templates can not deploy +# overclouds with swap. On an idle compute node, we see ~1024MB of RAM +# used. 2048 is suggested to account for other possible operations for +# example openvswitch. 
+nova::compute::reserved_host_memory: 2048 + ceilometer::agent::auth::auth_tenant_name: 'service' ceilometer::agent::auth::auth_endpoint_type: 'internalURL' diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index 07bfe543..c9f3a417 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -30,7 +30,6 @@ redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}" redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh' # service tenant -nova::api::admin_tenant_name: 'service' glance::api::keystone_tenant: 'service' glance::registry::keystone_tenant: 'service' neutron::server::auth_tenant: 'service' @@ -39,13 +38,6 @@ cinder::api::keystone_tenant: 'service' swift::proxy::authtoken::admin_tenant_name: 'service' ceilometer::api::keystone_tenant: 'service' heat::keystone_tenant: 'service' -glance::keystone::auth::tenant: 'service' -nova::keystone::auth::tenant: 'service' -neutron::keystone::auth::tenant: 'service' -cinder::keystone::auth::tenant: 'service' -swift::keystone::auth::tenant: 'service' -ceilometer::keystone::auth::tenant: 'service' -heat::keystone::auth::tenant: 'service' # keystone keystone::cron::token_flush::maxdelay: 3600 @@ -67,13 +59,10 @@ swift::proxy::pipeline: - 'proxy-server' swift::proxy::account_autocreate: true -swift::keystone::auth::configure_s3_endpoint: false -swift::keystone::auth::operator_roles: - - admin - - swiftoperator # glance glance::api::pipeline: 'keystone' +glance::api::show_image_direct_url: true glance::registry::pipeline: 'keystone' glance::backend::swift::swift_store_create_container_on_put: true glance::backend::rbd::rbd_store_user: 'openstack' @@ -88,7 +77,8 @@ nova::notify_on_state_change: 'vm_and_task_state' nova::api::default_floating_pool: 'public' nova::api::osapi_v3: true nova::scheduler::filter::ram_allocation_ratio: '1.0' -nova::keystone::auth::configure_ec2_endpoint: false +nova::cron::archive_deleted_rows::hour: '*/12' +nova::cron::archive_deleted_rows::destination: '/dev/null' # ceilometer ceilometer::agent::auth::auth_endpoint_type: 'internalURL' @@ -138,3 +128,109 @@ tripleo::loadbalancer::heat_cfn: true tripleo::loadbalancer::horizon: true controller_classes: [] +# firewall +tripleo::firewall::firewall_rules: + '101 mongodb_config': + port: 27019 + '102 mongodb_sharding': + port: 27018 + '103 mongod': + port: 27017 + '104 mysql galera': + port: + - 873 + - 3306 + - 4444 + - 4567 + - 4568 + - 9200 + '105 ntp': + port: 123 + proto: udp + '106 vrrp': + proto: vrrp + '107 haproxy stats': + port: 1993 + '108 redis': + port: + - 6379 + - 26379 + '109 rabbitmq': + port: + - 5672 + - 35672 + '110 ceph': + port: + - 6789 + - '6800-6810' + '111 keystone': + port: + - 5000 + - 13000 + - 35357 + - 13357 + '112 glance': + port: + - 9292 + - 9191 + - 13292 + '113 nova': + port: + - 6080 + - 13080 + - 8773 + - 3773 + - 8774 + - 13774 + - 8775 + '114 neutron server': + port: + - 9696 + - 13696 + '115 neutron dhcp input': + proto: 'udp' + port: 67 + '116 neutron dhcp output': + proto: 'udp' + chain: 'OUTPUT' + port: 68 + '118 neutron vxlan networks': + proto: 'udp' + port: 4789 + '119 cinder': + port: + - 8776 + - 13776 + '120 iscsi initiator': + port: 3260 + '121 memcached': + port: 11211 + '122 swift proxy': + port: + - 8080 + - 13808 + '123 swift storage': + port: + - 873 + - 6000 + - 6001 + - 6002 + '124 ceilometer': + port: + - 8777 + - 13777 + '125 heat': + port: + - 8000 + - 13800 + - 8003 + - 13003 + - 8004 + - 13004 + '126 horizon': + port: + - 80 + - 443 + 
'127 snmp': + port: 161 + proto: 'udp' diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index a88ca2d9..7f8970cc 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -13,7 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. -include tripleo::packages +include ::tripleo::packages +include ::tripleo::firewall create_resources(sysctl::value, hiera('sysctl_settings'), {}) @@ -25,13 +26,13 @@ if str2bool(hiera('ceph_osd_selinux_permissive', true)) { exec { 'set selinux to permissive on boot': command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", - path => ["/usr/bin", "/usr/sbin"], + path => ['/usr/bin', '/usr/sbin'], } exec { 'set selinux to permissive': - command => "setenforce 0", + command => 'setenforce 0', onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ["/usr/bin", "/usr/sbin"], + path => ['/usr/bin', '/usr/sbin'], } -> Class['ceph::profile::osd'] } diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index 2150bab8..79a6abbb 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -13,7 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. -include tripleo::packages +include ::tripleo::packages +include ::tripleo::firewall create_resources(sysctl::value, hiera('sysctl_settings'), {}) @@ -24,14 +25,14 @@ if count(hiera('ntp::servers')) > 0 { file { ['/etc/libvirt/qemu/networks/autostart/default.xml', '/etc/libvirt/qemu/networks/default.xml']: ensure => absent, - before => Service['libvirt'] + before => Service['libvirt'], } # in case libvirt has been already running before the Puppet run, make # sure the default network is destroyed exec { 'libvirt-default-net-destroy': command => '/usr/bin/virsh net-destroy default', - onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"', - before => Service['libvirt'], + onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"', + before => Service['libvirt'], } include ::nova @@ -49,16 +50,17 @@ if $rbd_ephemeral_storage or $rbd_persistent_storage { include ::ceph::profile::client $client_keys = hiera('ceph::profile::params::client_keys') + $client_user = join(['client.', hiera('ceph_client_user_name')]) class { '::nova::compute::rbd': - libvirt_rbd_secret_key => $client_keys['client.openstack']['secret'], + libvirt_rbd_secret_key => $client_keys[$client_user]['secret'], } } if hiera('cinder_enable_nfs_backend', false) { - if ($::selinux != "false") { + if str2bool($::selinux) { selboolean { 'virt_use_nfs': - value => on, - persistent => true, + value => on, + persistent => true, } -> Package['nfs-utils'] } @@ -66,23 +68,52 @@ if hiera('cinder_enable_nfs_backend', false) { } include ::nova::compute::libvirt +if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + file {'/etc/libvirt/qemu.conf': + ensure => present, + content => hiera('midonet_libvirt_qemu_data') + } +} include ::nova::network::neutron include ::neutron -class { 'neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), - tenant_network_types => [hiera('neutron_tenant_network_type')], +# If the value of core plugin is set to 'nuage', +# include nuage agent, +# If the 
value of core plugin is set to 'midonet', +# include midonet agent, +# else use the default value of 'ml2' +if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { + include ::nuage::vrs + include ::nova::compute::neutron + + class { '::nuage::metadataagent': + nova_os_tenant_name => hiera('nova::api::admin_tenant_name'), + nova_os_password => hiera('nova_password'), + nova_metadata_ip => hiera('nova_metadata_node_ips'), + nova_auth_ip => hiera('keystone_public_api_virtual_ip'), + } } +elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + + # TODO(devvesa) provide non-controller ips for these services + $zookeeper_node_ips = hiera('neutron_api_node_ips') + $cassandra_node_ips = hiera('neutron_api_node_ips') -class { 'neutron::agents::ml2::ovs': - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), + class {'::tripleo::network::midonet::agent': + zookeeper_servers => $zookeeper_node_ips, + cassandra_seeds => $cassandra_node_ips + } } +else { + + include ::neutron::plugins::ml2 + include ::neutron::agents::ml2::ovs -if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { - class { 'neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), + } } } @@ -97,7 +128,7 @@ snmp::snmpv3_user { $snmpd_user: authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } -class { 'snmp': +class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index 280cc344..913bcb63 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -13,7 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. 
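Before the controller manifest's first hunk, it is worth restating the dispatch pattern overcloud_compute.pp adopts above: the compute-node agent stack is now selected by the value of hiera('neutron::core_plugin'). A case-statement rendering of the same logic, behavior-equivalent to the if/elsif chain and assuming the same hiera keys:

# Sketch: the compute-node core-plugin dispatch from the hunk above.
case hiera('neutron::core_plugin') {
  'neutron.plugins.nuage.plugin.NuagePlugin': {
    include ::nuage::vrs
    include ::nova::compute::neutron
  }
  'midonet.neutron.plugin_v1.MidonetPluginV2': {
    # As in the original TODO(devvesa): these are controller IPs for now.
    class { '::tripleo::network::midonet::agent':
      zookeeper_servers => hiera('neutron_api_node_ips'),
      cassandra_seeds   => hiera('neutron_api_node_ips'),
    }
  }
  default: {
    # The default remains the ML2 plugin with the OVS agent.
    include ::neutron::plugins::ml2
    include ::neutron::agents::ml2::ovs
  }
}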
-include tripleo::packages +include ::tripleo::packages +include ::tripleo::firewall + +$enable_load_balancer = hiera('enable_load_balancer', true) if hiera('step') >= 1 { @@ -21,9 +24,11 @@ if hiera('step') >= 1 { $controller_node_ips = split(hiera('controller_node_ips'), ',') - class { '::tripleo::loadbalancer' : - controller_hosts => $controller_node_ips, - manage_vip => true, + if $enable_load_balancer { + class { '::tripleo::loadbalancer' : + controller_hosts => $controller_node_ips, + manage_vip => true, + } } } @@ -70,18 +75,18 @@ if hiera('step') >= 2 { include ::tripleo::redis_notification } - if str2bool(hiera('enable_galera', 'true')) { + if str2bool(hiera('enable_galera', true)) { $mysql_config_file = '/etc/my.cnf.d/galera.cnf' } else { $mysql_config_file = '/etc/my.cnf.d/server.cnf' } # TODO Galara - class { 'mysql::server': - config_file => $mysql_config_file, - override_options => { + class { '::mysql::server': + config_file => $mysql_config_file, + override_options => { 'mysqld' => { - 'bind-address' => hiera('mysql_bind_host'), - 'max_connections' => hiera('mysql_max_connections'), + 'bind-address' => hiera('mysql_bind_host'), + 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', }, }, @@ -126,31 +131,31 @@ if hiera('step') >= 2 { $enable_ceph = hiera('ceph_storage_count', 0) > 0 if $enable_ceph { - class { 'ceph::profile::params': - mon_initial_members => downcase(hiera('ceph_mon_initial_members')) + class { '::ceph::profile::params': + mon_initial_members => downcase(hiera('ceph_mon_initial_members')), } include ::ceph::profile::mon } - if str2bool(hiera('enable_ceph_storage', 'false')) { + if str2bool(hiera('enable_ceph_storage', false)) { if str2bool(hiera('ceph_osd_selinux_permissive', true)) { exec { 'set selinux to permissive on boot': command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", - path => ["/usr/bin", "/usr/sbin"], + path => ['/usr/bin', '/usr/sbin'], } exec { 'set selinux to permissive': - command => "setenforce 0", + command => 'setenforce 0', onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ["/usr/bin", "/usr/sbin"], + path => ['/usr/bin', '/usr/sbin'], } -> Class['ceph::profile::osd'] } include ::ceph::profile::osd } - if str2bool(hiera('enable_external_ceph', 'false')) { + if str2bool(hiera('enable_external_ceph', false)) { include ::ceph::profile::client } @@ -196,9 +201,9 @@ if hiera('step') >= 3 { $glance_backend = downcase(hiera('glance_backend', 'swift')) case $glance_backend { - swift: { $backend_store = 'glance.store.swift.Store' } - file: { $backend_store = 'glance.store.filesystem.Store' } - rbd: { $backend_store = 'glance.store.rbd.Store' } + 'swift': { $backend_store = 'glance.store.swift.Store' } + 'file': { $backend_store = 'glance.store.filesystem.Store' } + 'rbd': { $backend_store = 'glance.store.rbd.Store' } default: { fail('Unrecognized glance_backend parameter.') } } $http_store = ['glance.store.http.Store'] @@ -206,8 +211,8 @@ if hiera('step') >= 3 { # TODO: notifications, scrubber, etc. 
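One detail in the glance hunk just above: quoting the case selectors ('swift', 'file', 'rbd') brings the manifest in line with puppet-lint and avoids bare-word ambiguity. The resulting store list is the HTTP store concatenated with the selected backend; a small illustration, assuming the default 'swift' backend and the stdlib concat function:

# Illustration: what $glance_store evaluates to for the default backend.
$glance_backend = downcase(hiera('glance_backend', 'swift'))
$backend_store  = 'glance.store.swift.Store'  # selected by the case above
$http_store     = ['glance.store.http.Store']
$glance_store   = concat($http_store, $backend_store)
# => ['glance.store.http.Store', 'glance.store.swift.Store']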
include ::glance - class { 'glance::api': - known_stores => $glance_store + class { '::glance::api': + known_stores => $glance_store, } include ::glance::registry include join(['::glance::backend::', $glance_backend]) @@ -225,73 +230,136 @@ if hiera('step') >= 3 { include ::nova::scheduler include ::nova::scheduler::filter - include ::neutron - include ::neutron::server - include ::neutron::agents::l3 - include ::neutron::agents::dhcp - include ::neutron::agents::metadata + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - file { '/etc/neutron/dnsmasq-neutron.conf': - content => hiera('neutron_dnsmasq_options'), - owner => 'neutron', - group => 'neutron', - notify => Service['neutron-dhcp-service'], - require => Package['neutron'], - } + # TODO(devvesa) provide non-controller ips for these services + $zookeeper_node_ips = hiera('neutron_api_node_ips') + $cassandra_node_ips = hiera('neutron_api_node_ips') - class { 'neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), - tenant_network_types => [hiera('neutron_tenant_network_type')], - mechanism_drivers => [hiera('neutron_mechanism_drivers')], - } - class { 'neutron::agents::ml2::ovs': - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), - } - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { - include neutron::plugins::ml2::cisco::nexus1000v + # Run zookeeper in the controller if configured + if hiera('enable_zookeeper_on_controller') { + class {'::tripleo::cluster::zookeeper': + zookeeper_server_ips => $zookeeper_node_ips, + zookeeper_client_ip => $ipaddress, + zookeeper_hostnames => hiera('controller_node_names') + } + } - class { 'neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), + # Run cassandra in the controller if configured + if hiera('enable_cassandra_on_controller') { + class {'::tripleo::cluster::cassandra': + cassandra_servers => $cassandra_node_ips, + cassandra_ip => $ipaddress + } } - class { 'n1k_vsm': - n1kv_source => hiera('n1kv_vsm_source', undef), - n1kv_version => hiera('n1kv_vsm_version', undef), - pacemaker_control => false, + class {'::tripleo::network::midonet::agent': + zookeeper_servers => $zookeeper_node_ips, + cassandra_seeds => $cassandra_node_ips } - } - if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::ucsm - } - if 'cisco_nexus' in hiera('neutron_mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::nexus - include ::neutron::plugins::ml2::cisco::type_nexus_vxlan - } + class {'::tripleo::network::midonet::api': + zookeeper_servers => $zookeeper_node_ips, + vip => $ipaddress, + keystone_ip => $ipaddress, + keystone_admin_token => hiera('keystone::admin_token'), + bind_address => $ipaddress, + admin_password => hiera('admin_password') + } + + # TODO: find a way to get an empty list from hiera + class {'::neutron': + service_plugins => [] + } - if hiera('neutron_enable_bigswitch_ml2', false) { - include neutron::plugins::ml2::bigswitch::restproxy - } - neutron_l3_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); } - neutron_dhcp_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); + else { + + # ML2 plugin + include ::neutron } - Service['neutron-server'] -> Service['neutron-dhcp-service'] - Service['neutron-server'] -> Service['neutron-l3'] - 
Service['neutron-server'] -> Service['neutron-ovs-agent-service'] - Service['neutron-server'] -> Service['neutron-metadata'] + include ::neutron::server + include ::neutron::server::notifications + + # If the value of core plugin is set to 'nuage', + # include nuage core plugin, and it does not + # need the l3, dhcp and metadata agents + if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { + include ::neutron::plugins::nuage + } else { + include ::neutron::agents::l3 + include ::neutron::agents::dhcp + include ::neutron::agents::metadata + + file { '/etc/neutron/dnsmasq-neutron.conf': + content => hiera('neutron_dnsmasq_options'), + owner => 'neutron', + group => 'neutron', + notify => Service['neutron-dhcp-service'], + require => Package['neutron'], + } + + # If the value of core plugin is set to 'midonet', + # skip all the ML2 configuration + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + + class {'::neutron::plugins::midonet': + midonet_api_ip => $ipaddress, + keystone_tenant => hiera('neutron::server::auth_tenant'), + keystone_password => hiera('neutron::server::auth_password') + } + } else { + + include ::neutron::plugins::ml2 + include ::neutron::agents::ml2::ovs + + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus1000v + + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), + } + + class { '::n1k_vsm': + n1kv_source => hiera('n1kv_vsm_source', undef), + n1kv_version => hiera('n1kv_vsm_version', undef), + pacemaker_control => false, + } + } + + if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::ucsm + } + if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus + include ::neutron::plugins::ml2::cisco::type_nexus_vxlan + } + + if hiera('neutron_enable_bigswitch_ml2', false) { + include ::neutron::plugins::ml2::bigswitch::restproxy + } + neutron_l3_agent_config { + 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); + } + neutron_dhcp_agent_config { + 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); + } + Service['neutron-server'] -> Service['neutron-ovs-agent-service'] + } + + Service['neutron-server'] -> Service['neutron-dhcp-service'] + Service['neutron-server'] -> Service['neutron-l3'] + Service['neutron-server'] -> Service['neutron-metadata'] + } include ::cinder include ::cinder::api include ::cinder::glance include ::cinder::scheduler include ::cinder::volume - class {'cinder::setup_test_volume': + class { '::cinder::setup_test_volume': size => join([hiera('cinder_lvm_loop_device_size'), 'M']), } @@ -307,16 +375,14 @@ if hiera('step') >= 3 { if $enable_ceph { - Ceph_pool { + $ceph_pools = hiera('ceph_pools') + ceph::pool { $ceph_pools : pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'), pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'), size => hiera('ceph::profile::params::osd_pool_default_size'), } - $ceph_pools = hiera('ceph_pools') - ceph::pool { $ceph_pools : } - - $cinder_pool_requires = [Ceph::Pool['volumes']] + $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]] } else { $cinder_pool_requires = [] @@ -326,8 +392,8 @@ if hiera('step') >= 3 { $cinder_rbd_backend = 'tripleo_ceph' cinder::backend::rbd { $cinder_rbd_backend : - rbd_pool => 
'volumes', - rbd_user => 'openstack', + rbd_pool => hiera('cinder_rbd_pool_name'), + rbd_user => hiera('ceph_client_user_name'), rbd_secret_uuid => hiera('ceph::profile::params::fsid'), require => $cinder_pool_requires, } @@ -392,18 +458,18 @@ if hiera('step') >= 3 { if hiera('cinder_enable_nfs_backend', false) { $cinder_nfs_backend = 'tripleo_nfs' - if ($::selinux != "false") { + if str2bool($::selinux) { selboolean { 'virt_use_nfs': - value => on, - persistent => true, + value => on, + persistent => true, } -> Package['nfs-utils'] } package {'nfs-utils': } -> cinder::backend::nfs { $cinder_nfs_backend : - nfs_servers => hiera('cinder_nfs_servers'), - nfs_mount_options => hiera('cinder_nfs_mount_options'), - nfs_shares_config => '/etc/cinder/shares-nfs.conf', + nfs_servers => hiera('cinder_nfs_servers'), + nfs_mount_options => hiera('cinder_nfs_mount_options',''), + nfs_shares_config => '/etc/cinder/shares-nfs.conf', } } @@ -427,9 +493,9 @@ if hiera('step') >= 3 { include ::swift::proxy::formpost # swift storage - if str2bool(hiera('enable_swift_storage', 'true')) { - class {'swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')) + if str2bool(hiera('enable_swift_storage', true)) { + class { '::swift::storage::all': + mount_check => str2bool(hiera('swift_mount_check')), } if(!defined(File['/srv/node'])) { file { '/srv/node': @@ -459,11 +525,9 @@ if hiera('step') >= 3 { include ::ceilometer::api include ::ceilometer::agent::notification include ::ceilometer::agent::central - include ::ceilometer::alarm::notifier - include ::ceilometer::alarm::evaluator include ::ceilometer::expirer include ::ceilometer::collector - include ceilometer::agent::auth + include ::ceilometer::agent::auth class { '::ceilometer::db' : database_connection => $ceilometer_database_connection, } @@ -478,15 +542,16 @@ if hiera('step') >= 3 { include ::heat::engine # Horizon - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { $_profile_support = 'cisco' } else { $_profile_support = 'None' } $neutron_options = {'profile_support' => $_profile_support } - class { 'horizon': - cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), - neutron_options => $neutron_options, + + class { '::horizon': + cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), + neutron_options => $neutron_options, } $snmpd_user = hiera('snmpd_readonly_user_name') @@ -494,7 +559,7 @@ if hiera('step') >= 3 { authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } - class { 'snmp': + class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } @@ -504,16 +569,12 @@ if hiera('step') >= 3 { } #END STEP 3 if hiera('step') >= 4 { - include ::keystone::cron::token_flush - - include ::ceilometer::keystone::auth - include ::cinder::keystone::auth - include ::glance::keystone::auth - include ::heat::keystone::auth - include ::neutron::keystone::auth - include ::nova::keystone::auth - include ::swift::keystone::auth + $nova_enable_db_purge = hiera('nova_enable_db_purge', true) + include ::keystone::cron::token_flush + if $nova_enable_db_purge { + include ::nova::cron::archive_deleted_rows + } } #END STEP 4 $package_manifest_name = 
join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')]) diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 5729ba00..e6ee85ae 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -18,7 +18,8 @@ Pcmk_resource <| |> { try_sleep => 3, } -include tripleo::packages +include ::tripleo::packages +include ::tripleo::firewall if $::hostname == downcase(hiera('bootstrap_nodeid')) { $pacemaker_master = true @@ -28,7 +29,8 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) { $sync_db = false } -$enable_fencing = str2bool(hiera('enable_fencing', 'false')) and hiera('step') >= 5 +$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5 +$enable_load_balancer = hiera('enable_load_balancer', true) # When to start and enable services which haven't been Pacemakerized # FIXME: remove when we start all OpenStack services using Pacemaker @@ -45,17 +47,19 @@ if hiera('step') >= 1 { $controller_node_ips = split(hiera('controller_node_ips'), ',') $controller_node_names = split(downcase(hiera('controller_node_names')), ',') - class { '::tripleo::loadbalancer' : - controller_hosts => $controller_node_ips, - controller_hosts_names => $controller_node_names, - manage_vip => false, - mysql_clustercheck => true, - haproxy_service_manage => false, + if $enable_load_balancer { + class { '::tripleo::loadbalancer' : + controller_hosts => $controller_node_ips, + controller_hosts_names => $controller_node_names, + manage_vip => false, + mysql_clustercheck => true, + haproxy_service_manage => false, + } } $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G')) user { 'hacluster': - ensure => present, + ensure => present, } -> class { '::pacemaker': hacluster_pwd => hiera('hacluster_pwd'), @@ -68,17 +72,17 @@ if hiera('step') >= 1 { disable => !$enable_fencing, } if $enable_fencing { - include tripleo::fencing + include ::tripleo::fencing # enable stonith after all fencing devices have been created Class['tripleo::fencing'] -> Class['pacemaker::stonith'] } - # FIXME(gfidente): sets 90secs as default start timeout op + # FIXME(gfidente): sets 200secs as default start timeout op # param; until we can use pcmk global defaults we'll still # need to add it to every resource which redefines op params Pacemaker::Resource::Service { - op_params => 'start timeout=90s', + op_params => 'start timeout=200s stop timeout=200s', } # Only configure RabbitMQ in this step, don't start it yet to @@ -93,7 +97,7 @@ if hiera('step') >= 1 { environment_variables => hiera('rabbitmq_environment'), } -> file { '/var/lib/rabbitmq/.erlang.cookie': - ensure => 'present', + ensure => file, owner => 'rabbitmq', group => 'rabbitmq', mode => '0400', @@ -120,7 +124,7 @@ if hiera('step') >= 1 { } # Galera - if str2bool(hiera('enable_galera', 'true')) { + if str2bool(hiera('enable_galera', true)) { $mysql_config_file = '/etc/my.cnf.d/galera.cnf' } else { $mysql_config_file = '/etc/my.cnf.d/server.cnf' @@ -154,7 +158,7 @@ if hiera('step') >= 1 { 'wsrep_causal_reads' => '0', 'wsrep_notify_cmd' => '', 'wsrep_sst_method' => 'rsync', - } + }, } class { '::mysql::server': @@ -178,160 +182,164 @@ if hiera('step') >= 2 { if $pacemaker_master { - include pacemaker::resource_defaults - - # FIXME: we should not have to access tripleo::loadbalancer class - # parameters here to configure pacemaker VIPs. 
The configuration - # of pacemaker VIPs could move into puppet-tripleo or we should - # make use of less specific hiera parameters here for the settings. - pacemaker::resource::service { 'haproxy': - clone_params => true, - } + if $enable_load_balancer { - $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip') - pacemaker::resource::ip { 'control_vip': - ip_address => $control_vip, - } - pacemaker::constraint::base { 'control_vip-then-haproxy': - constraint_type => 'order', - first_resource => "ip-${control_vip}", - second_resource => 'haproxy-clone', - first_action => 'start', - second_action => 'start', - constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['control_vip']], - } - pacemaker::constraint::colocation { 'control_vip-with-haproxy': - source => "ip-${control_vip}", - target => 'haproxy-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['control_vip']], - } + include ::pacemaker::resource_defaults - $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip') - if $public_vip and $public_vip != $control_vip { - pacemaker::resource::ip { 'public_vip': - ip_address => $public_vip, - } - pacemaker::constraint::base { 'public_vip-then-haproxy': - constraint_type => 'order', - first_resource => "ip-${public_vip}", - second_resource => 'haproxy-clone', - first_action => 'start', - second_action => 'start', - constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['public_vip']], - } - pacemaker::constraint::colocation { 'public_vip-with-haproxy': - source => "ip-${public_vip}", - target => 'haproxy-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['public_vip']], + # FIXME: we should not have to access tripleo::loadbalancer class + # parameters here to configure pacemaker VIPs. The configuration + # of pacemaker VIPs could move into puppet-tripleo or we should + # make use of less specific hiera parameters here for the settings. 
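Every VIP that follows (control, public, redis, internal_api, storage, storage_mgmt) repeats the same three resources: an IP, an order constraint before haproxy-clone, and an INFINITY colocation with haproxy-clone. A hypothetical defined type, sketched here only to show the shape of the repetition (it does not exist in puppet-tripleo as of this change):

# Hypothetical helper; illustrates the per-VIP pattern used below.
define tripleo::pacemaker::haproxy_vip ($ip_address) {
  pacemaker::resource::ip { $title:
    ip_address => $ip_address,
  }
  pacemaker::constraint::base { "${title}-then-haproxy":
    constraint_type   => 'order',
    first_resource    => "ip-${ip_address}",
    second_resource   => 'haproxy-clone',
    first_action      => 'start',
    second_action     => 'start',
    constraint_params => 'kind=Optional',
    require           => [Pacemaker::Resource::Service['haproxy'],
                          Pacemaker::Resource::Ip[$title]],
  }
  pacemaker::constraint::colocation { "${title}-with-haproxy":
    source  => "ip-${ip_address}",
    target  => 'haproxy-clone',
    score   => 'INFINITY',
    require => [Pacemaker::Resource::Service['haproxy'],
                Pacemaker::Resource::Ip[$title]],
  }
}

With a wrapper of this shape each VIP would collapse to a one-line declaration, which is roughly what the FIXME above asks for.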
+ pacemaker::resource::service { 'haproxy': + clone_params => true, } - } - $redis_vip = hiera('redis_vip') - if $redis_vip and $redis_vip != $control_vip { - pacemaker::resource::ip { 'redis_vip': - ip_address => $redis_vip, + $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip') + pacemaker::resource::ip { 'control_vip': + ip_address => $control_vip, } - pacemaker::constraint::base { 'redis_vip-then-haproxy': + pacemaker::constraint::base { 'control_vip-then-haproxy': constraint_type => 'order', - first_resource => "ip-${redis_vip}", + first_resource => "ip-${control_vip}", second_resource => 'haproxy-clone', first_action => 'start', second_action => 'start', constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['redis_vip']], + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['control_vip']], } - pacemaker::constraint::colocation { 'redis_vip-with-haproxy': - source => "ip-${redis_vip}", + pacemaker::constraint::colocation { 'control_vip-with-haproxy': + source => "ip-${control_vip}", target => 'haproxy-clone', score => 'INFINITY', require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['redis_vip']], + Pacemaker::Resource::Ip['control_vip']], } - } - $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip') - if $internal_api_vip and $internal_api_vip != $control_vip { - pacemaker::resource::ip { 'internal_api_vip': - ip_address => $internal_api_vip, - } - pacemaker::constraint::base { 'internal_api_vip-then-haproxy': - constraint_type => 'order', - first_resource => "ip-${internal_api_vip}", - second_resource => 'haproxy-clone', - first_action => 'start', - second_action => 'start', - constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['internal_api_vip']], - } - pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy': - source => "ip-${internal_api_vip}", - target => 'haproxy-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['internal_api_vip']], + $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip') + if $public_vip and $public_vip != $control_vip { + pacemaker::resource::ip { 'public_vip': + ip_address => $public_vip, + } + pacemaker::constraint::base { 'public_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${public_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['public_vip']], + } + pacemaker::constraint::colocation { 'public_vip-with-haproxy': + source => "ip-${public_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['public_vip']], + } } - } - $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip') - if $storage_vip and $storage_vip != $control_vip { - pacemaker::resource::ip { 'storage_vip': - ip_address => $storage_vip, - } - pacemaker::constraint::base { 'storage_vip-then-haproxy': - constraint_type => 'order', - first_resource => "ip-${storage_vip}", - second_resource => 'haproxy-clone', - first_action => 'start', - second_action => 'start', - constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['storage_vip']], - } - 
pacemaker::constraint::colocation { 'storage_vip-with-haproxy': - source => "ip-${storage_vip}", - target => 'haproxy-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['storage_vip']], + $redis_vip = hiera('redis_vip') + if $redis_vip and $redis_vip != $control_vip { + pacemaker::resource::ip { 'redis_vip': + ip_address => $redis_vip, + } + pacemaker::constraint::base { 'redis_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${redis_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['redis_vip']], + } + pacemaker::constraint::colocation { 'redis_vip-with-haproxy': + source => "ip-${redis_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['redis_vip']], + } } - } - $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip') - if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip { - pacemaker::resource::ip { 'storage_mgmt_vip': - ip_address => $storage_mgmt_vip, + $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip') + if $internal_api_vip and $internal_api_vip != $control_vip { + pacemaker::resource::ip { 'internal_api_vip': + ip_address => $internal_api_vip, + } + pacemaker::constraint::base { 'internal_api_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${internal_api_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['internal_api_vip']], + } + pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy': + source => "ip-${internal_api_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['internal_api_vip']], + } } - pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy': - constraint_type => 'order', - first_resource => "ip-${storage_mgmt_vip}", - second_resource => 'haproxy-clone', - first_action => 'start', - second_action => 'start', - constraint_params => 'kind=Optional', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Ip['storage_mgmt_vip']], + + $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip') + if $storage_vip and $storage_vip != $control_vip { + pacemaker::resource::ip { 'storage_vip': + ip_address => $storage_vip, + } + pacemaker::constraint::base { 'storage_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${storage_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_vip']], + } + pacemaker::constraint::colocation { 'storage_vip-with-haproxy': + source => "ip-${storage_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_vip']], + } } - pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy': - source => "ip-${storage_mgmt_vip}", - target => 'haproxy-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service['haproxy'], - 
Pacemaker::Resource::Ip['storage_mgmt_vip']], + + $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip') + if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip { + pacemaker::resource::ip { 'storage_mgmt_vip': + ip_address => $storage_mgmt_vip, + } + pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${storage_mgmt_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_mgmt_vip']], + } + pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy': + source => "ip-${storage_mgmt_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_mgmt_vip']], + } } + } pacemaker::resource::service { $::memcached::params::service_name : - clone_params => "interleave=true", + clone_params => 'interleave=true', require => Class['::memcached'], } @@ -344,7 +352,7 @@ if hiera('step') >= 2 { if downcase(hiera('ceilometer_backend')) == 'mongodb' { pacemaker::resource::service { $::mongodb::params::service_name : - op_params => 'start timeout=120s', + op_params => 'start timeout=370s stop timeout=200s', clone_params => true, require => Class['::mongodb::server'], } @@ -385,7 +393,7 @@ if hiera('step') >= 2 { timeout => 30, tries => 180, try_sleep => 10, - environment => ["AVAILABLE_WHEN_READONLY=0"], + environment => ['AVAILABLE_WHEN_READONLY=0'], require => File['/etc/sysconfig/clustercheck'], } @@ -411,28 +419,28 @@ MYSQL_HOST=localhost\n", # Create all the database schemas if $sync_db { - class { 'keystone::db::mysql': - require => Exec['galera-ready'], + class { '::keystone::db::mysql': + require => Exec['galera-ready'], } - class { 'glance::db::mysql': - require => Exec['galera-ready'], + class { '::glance::db::mysql': + require => Exec['galera-ready'], } - class { 'nova::db::mysql': - require => Exec['galera-ready'], + class { '::nova::db::mysql': + require => Exec['galera-ready'], } - class { 'neutron::db::mysql': - require => Exec['galera-ready'], + class { '::neutron::db::mysql': + require => Exec['galera-ready'], } - class { 'cinder::db::mysql': - require => Exec['galera-ready'], + class { '::cinder::db::mysql': + require => Exec['galera-ready'], } - class { 'heat::db::mysql': - require => Exec['galera-ready'], + class { '::heat::db::mysql': + require => Exec['galera-ready'], } if downcase(hiera('ceilometer_backend')) == 'mysql' { - class { 'ceilometer::db::mysql': - require => Exec['galera-ready'], + class { '::ceilometer::db::mysql': + require => Exec['galera-ready'], } } } @@ -444,31 +452,31 @@ MYSQL_HOST=localhost\n", $enable_ceph = hiera('ceph_storage_count', 0) > 0 if $enable_ceph { - class { 'ceph::profile::params': - mon_initial_members => downcase(hiera('ceph_mon_initial_members')) + class { '::ceph::profile::params': + mon_initial_members => downcase(hiera('ceph_mon_initial_members')), } include ::ceph::profile::mon } - if str2bool(hiera('enable_ceph_storage', 'false')) { + if str2bool(hiera('enable_ceph_storage', false)) { if str2bool(hiera('ceph_osd_selinux_permissive', true)) { exec { 'set selinux to permissive on boot': command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", onlyif => "test -f /etc/selinux/config && ! 
grep '^SELINUX=permissive' /etc/selinux/config", - path => ["/usr/bin", "/usr/sbin"], + path => ['/usr/bin', '/usr/sbin'], } exec { 'set selinux to permissive': - command => "setenforce 0", + command => 'setenforce 0', onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ["/usr/bin", "/usr/sbin"], + path => ['/usr/bin', '/usr/sbin'], } -> Class['ceph::profile::osd'] } include ::ceph::profile::osd } - if str2bool(hiera('enable_external_ceph', 'false')) { + if str2bool(hiera('enable_external_ceph', false)) { include ::ceph::profile::client } @@ -478,9 +486,9 @@ MYSQL_HOST=localhost\n", if hiera('step') >= 3 { class { '::keystone': - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } #TODO: need a cleanup-keystone-tokens.sh solution here @@ -517,35 +525,36 @@ if hiera('step') >= 3 { $glance_backend = downcase(hiera('glance_backend', 'swift')) case $glance_backend { - swift: { $backend_store = 'glance.store.swift.Store' } - file: { $backend_store = 'glance.store.filesystem.Store' } - rbd: { $backend_store = 'glance.store.rbd.Store' } + 'swift': { $backend_store = 'glance.store.swift.Store' } + 'file': { $backend_store = 'glance.store.filesystem.Store' } + 'rbd': { $backend_store = 'glance.store.rbd.Store' } default: { fail('Unrecognized glance_backend parameter.') } } $http_store = ['glance.store.http.Store'] $glance_store = concat($http_store, $backend_store) if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) { - pacemaker::resource::filesystem { "glance-fs": + $secontext = 'context="system_u:object_r:glance_var_lib_t:s0"' + pacemaker::resource::filesystem { 'glance-fs': device => hiera('glance_file_pcmk_device'), directory => hiera('glance_file_pcmk_directory'), fstype => hiera('glance_file_pcmk_fstype'), - fsoptions => hiera('glance_file_pcmk_options', ''), + fsoptions => join([$secontext, hiera('glance_file_pcmk_options', '')],','), clone_params => '', } } # TODO: notifications, scrubber, etc. 
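A side effect of the glance-fs change above: when glance_file_pcmk_options is unset, the join leaves a trailing comma after the SELinux context string. mount(8) generally ignores an empty option, but the resulting value is worth seeing; a small illustration with the hiera default of an empty string:

# Illustration: the fsoptions string produced by the join above.
$secontext = 'context="system_u:object_r:glance_var_lib_t:s0"'
$user_opts = hiera('glance_file_pcmk_options', '') # '' when unset
$fsoptions = join([$secontext, $user_opts], ',')
notice($fsoptions)
# => context="system_u:object_r:glance_var_lib_t:s0",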
include ::glance - class { 'glance::api': - known_stores => $glance_store, + class { '::glance::api': + known_stores => $glance_store, manage_service => false, - enabled => false, + enabled => false, } class { '::glance::registry' : - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } include join(['::glance::backend::', $glance_backend]) @@ -556,94 +565,151 @@ if hiera('step') >= 3 { include ::nova::config class { '::nova::api' : - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } class { '::nova::cert' : manage_service => false, - enabled => false, + enabled => false, } class { '::nova::conductor' : manage_service => false, - enabled => false, + enabled => false, } class { '::nova::consoleauth' : manage_service => false, - enabled => false, + enabled => false, } class { '::nova::vncproxy' : manage_service => false, - enabled => false, + enabled => false, } include ::nova::scheduler::filter class { '::nova::scheduler' : manage_service => false, - enabled => false, + enabled => false, } include ::nova::network::neutron - # Neutron class definitions - include ::neutron + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + + # TODO(devvesa) provide non-controller ips for these services + $zookeeper_node_ips = hiera('neutron_api_node_ips') + $cassandra_node_ips = hiera('neutron_api_node_ips') + + # Run zookeeper in the controller if configured + if hiera('enable_zookeeper_on_controller') { + class {'::tripleo::cluster::zookeeper': + zookeeper_server_ips => $zookeeper_node_ips, + zookeeper_client_ip => $ipaddress, + zookeeper_hostnames => hiera('controller_node_names') + } + } + + # Run cassandra in the controller if configured + if hiera('enable_cassandra_on_controller') { + class {'::tripleo::cluster::cassandra': + cassandra_servers => $cassandra_node_ips, + cassandra_ip => $ipaddress + } + } + + class {'::tripleo::network::midonet::agent': + zookeeper_servers => $zookeeper_node_ips, + cassandra_seeds => $cassandra_node_ips + } + + class {'::tripleo::network::midonet::api': + zookeeper_servers => hiera('neutron_api_node_ips'), + vip => $public_vip, + keystone_ip => $public_vip, + keystone_admin_token => hiera('keystone::admin_token'), + bind_address => $ipaddress, + admin_password => hiera('admin_password') + } + + # Configure Neutron + class {'::neutron': + service_plugins => [] + } + + } + else { + # Neutron class definitions + include ::neutron + } + class { '::neutron::server' : - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } - class { '::neutron::agents::dhcp' : - manage_service => false, - enabled => false, + include ::neutron::server::notifications + if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { + include ::neutron::plugins::nuage } - class { '::neutron::agents::l3' : - manage_service => false, - enabled => false, + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + class {'::neutron::plugins::midonet': + midonet_api_ip => $public_vip, + keystone_tenant => hiera('neutron::server::auth_tenant'), + keystone_password => hiera('neutron::server::auth_password') + } } - class { 'neutron::agents::metadata': - manage_service => false, - enabled => false, + if hiera('neutron::enable_dhcp_agent',true) { + class { '::neutron::agents::dhcp' : + manage_service => false, + enabled => false, + } + file { 
'/etc/neutron/dnsmasq-neutron.conf': + content => hiera('neutron_dnsmasq_options'), + owner => 'neutron', + group => 'neutron', + notify => Service['neutron-dhcp-service'], + require => Package['neutron'], + } } - file { '/etc/neutron/dnsmasq-neutron.conf': - content => hiera('neutron_dnsmasq_options'), - owner => 'neutron', - group => 'neutron', - notify => Service['neutron-dhcp-service'], - require => Package['neutron'], + if hiera('neutron::enable_l3_agent',true) { + class { '::neutron::agents::l3' : + manage_service => false, + enabled => false, + } } - class { 'neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), - tenant_network_types => [hiera('neutron_tenant_network_type')], - mechanism_drivers => [hiera('neutron_mechanism_drivers')], + if hiera('neutron::enable_metadata_agent',true) { + class { '::neutron::agents::metadata': + manage_service => false, + enabled => false, + } } - class { 'neutron::agents::ml2::ovs': - manage_service => false, - enabled => false, - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), + include ::neutron::plugins::ml2 + class { '::neutron::agents::ml2::ovs': + manage_service => false, + enabled => false, } - if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') { + if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::cisco::ucsm } - if 'cisco_nexus' in hiera('neutron_mechanism_drivers') { + if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::cisco::nexus include ::neutron::plugins::ml2::cisco::type_nexus_vxlan } - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { - include neutron::plugins::ml2::cisco::nexus1000v + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus1000v - class { 'neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), } - class { 'n1k_vsm': - n1kv_source => hiera('n1kv_vsm_source', undef), - n1kv_version => hiera('n1kv_vsm_version', undef), + class { '::n1k_vsm': + n1kv_source => hiera('n1kv_vsm_source', undef), + n1kv_version => hiera('n1kv_vsm_version', undef), } } if hiera('neutron_enable_bigswitch_ml2', false) { - include neutron::plugins::ml2::bigswitch::restproxy + include ::neutron::plugins::ml2::bigswitch::restproxy } neutron_l3_agent_config { 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); @@ -654,20 +720,20 @@ if hiera('step') >= 3 { include ::cinder class { '::cinder::api': - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } class { '::cinder::scheduler' : manage_service => false, - enabled => false, + enabled => false, } class { '::cinder::volume' : manage_service => false, - enabled => false, + enabled => false, } include ::cinder::glance - class {'cinder::setup_test_volume': + class { '::cinder::setup_test_volume': size => join([hiera('cinder_lvm_loop_device_size'), 'M']), } @@ -683,16 +749,14 @@ if hiera('step') >= 3 { if $enable_ceph { - Ceph_pool { + $ceph_pools = hiera('ceph_pools') + ceph::pool { $ceph_pools : pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'), pgp_num => 
hiera('ceph::profile::params::osd_pool_default_pgp_num'), size => hiera('ceph::profile::params::osd_pool_default_size'), } - $ceph_pools = hiera('ceph_pools') - ceph::pool { $ceph_pools : } - - $cinder_pool_requires = [Ceph::Pool['volumes']] + $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]] } else { $cinder_pool_requires = [] @@ -702,8 +766,8 @@ if hiera('step') >= 3 { $cinder_rbd_backend = 'tripleo_ceph' cinder::backend::rbd { $cinder_rbd_backend : - rbd_pool => 'volumes', - rbd_user => 'openstack', + rbd_pool => hiera('cinder_rbd_pool_name'), + rbd_user => hiera('ceph_client_user_name'), rbd_secret_uuid => hiera('ceph::profile::params::fsid'), require => $cinder_pool_requires, } @@ -768,18 +832,18 @@ if hiera('step') >= 3 { if hiera('cinder_enable_nfs_backend', false) { $cinder_nfs_backend = 'tripleo_nfs' - if ($::selinux != "false") { + if str2bool($::selinux) { selboolean { 'virt_use_nfs': - value => on, - persistent => true, + value => on, + persistent => true, } -> Package['nfs-utils'] } - package {'nfs-utils': } -> + package { 'nfs-utils': } -> cinder::backend::nfs { $cinder_nfs_backend: - nfs_servers => hiera('cinder_nfs_servers'), - nfs_mount_options => hiera('cinder_nfs_mount_options'), - nfs_shares_config => '/etc/cinder/shares-nfs.conf', + nfs_servers => hiera('cinder_nfs_servers'), + nfs_mount_options => hiera('cinder_nfs_mount_options',''), + nfs_shares_config => '/etc/cinder/shares-nfs.conf', } } @@ -791,7 +855,7 @@ if hiera('step') >= 3 { # swift proxy class { '::swift::proxy' : manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + enabled => $non_pcmk_start, } include ::swift::proxy::proxy_logging include ::swift::proxy::healthcheck @@ -805,21 +869,21 @@ if hiera('step') >= 3 { include ::swift::proxy::formpost # swift storage - if str2bool(hiera('enable_swift_storage', 'true')) { + if str2bool(hiera('enable_swift_storage', true)) { class {'::swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')) + mount_check => str2bool(hiera('swift_mount_check')), } class {'::swift::storage::account': manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + enabled => $non_pcmk_start, } class {'::swift::storage::container': manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + enabled => $non_pcmk_start, } class {'::swift::storage::object': manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + enabled => $non_pcmk_start, } if(!defined(File['/srv/node'])) { file { '/srv/node': @@ -848,34 +912,26 @@ if hiera('step') >= 3 { include ::ceilometer::config class { '::ceilometer::api' : manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::agent::notification' : manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::agent::central' : manage_service => false, - enabled => false, - } - class { '::ceilometer::alarm::notifier' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::alarm::evaluator' : - manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::collector' : manage_service => false, - enabled => false, + enabled => false, } include ::ceilometer::expirer class { '::ceilometer::db' : database_connection => $ceilometer_database_connection, sync_db => $sync_db, } - include ceilometer::agent::auth + include ::ceilometer::agent::auth Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && 
${::ceilometer::params::expirer_command}" } @@ -885,19 +941,19 @@ if hiera('step') >= 3 { } class { '::heat::api' : manage_service => false, - enabled => false, + enabled => false, } class { '::heat::api_cfn' : manage_service => false, - enabled => false, + enabled => false, } class { '::heat::api_cloudwatch' : manage_service => false, - enabled => false, + enabled => false, } class { '::heat::engine' : manage_service => false, - enabled => false, + enabled => false, } # httpd/apache and horizon @@ -907,15 +963,15 @@ if hiera('step') >= 3 { # service_manage => false, # <-- not supported with horizon&apache mod_wsgi? } include ::apache::mod::status - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { $_profile_support = 'cisco' } else { $_profile_support = 'None' } $neutron_options = {'profile_support' => $_profile_support } - class { 'horizon': - cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), - neutron_options => $neutron_options, + class { '::horizon': + cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), + neutron_options => $neutron_options, } $snmpd_user = hiera('snmpd_readonly_user_name') @@ -923,7 +979,7 @@ if hiera('step') >= 3 { authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } - class { 'snmp': + class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } @@ -933,31 +989,37 @@ if hiera('step') >= 3 { } #END STEP 3 if hiera('step') >= 4 { - include ::keystone::cron::token_flush + $nova_enable_db_purge = hiera('nova_enable_db_purge', true) + + include ::keystone::cron::token_flush + if $nova_enable_db_purge { + include ::nova::cron::archive_deleted_rows + } if $pacemaker_master { # Keystone pacemaker::resource::service { $::keystone::params::service_name : - clone_params => "interleave=true", + clone_params => 'interleave=true', verify_on_create => true, require => [File['/etc/keystone/ssl/certs/ca.pem'], - File['/etc/keystone/ssl/private/signing_key.pem'], - File['/etc/keystone/ssl/certs/signing_cert.pem']], + File['/etc/keystone/ssl/private/signing_key.pem'], + File['/etc/keystone/ssl/certs/signing_cert.pem']], } - - pacemaker::constraint::base { 'haproxy-then-keystone-constraint': - constraint_type => 'order', - first_resource => "haproxy-clone", - second_resource => "${::keystone::params::service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + if $enable_load_balancer { + pacemaker::constraint::base { 'haproxy-then-keystone-constraint': + constraint_type => 'order', + first_resource => 'haproxy-clone', + second_resource => "${::keystone::params::service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Service[$::keystone::params::service_name]], + } } pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint': constraint_type => 'order', - first_resource => "rabbitmq-clone", + first_resource => 'rabbitmq-clone', second_resource => "${::keystone::params::service_name}-clone", first_action => 'start', second_action => 'start', @@ -966,7 +1028,7 @@ if 
hiera('step') >= 4 { } pacemaker::constraint::base { 'memcached-then-keystone-constraint': constraint_type => 'order', - first_resource => "memcached-clone", + first_resource => 'memcached-clone', second_resource => "${::keystone::params::service_name}-clone", first_action => 'start', second_action => 'start', @@ -975,7 +1037,7 @@ if hiera('step') >= 4 { } pacemaker::constraint::base { 'galera-then-keystone-constraint': constraint_type => 'order', - first_resource => "galera-master", + first_resource => 'galera-master', second_resource => "${::keystone::params::service_name}-clone", first_action => 'promote', second_action => 'start', @@ -985,11 +1047,11 @@ if hiera('step') >= 4 { # Cinder pacemaker::resource::service { $::cinder::params::api_service : - clone_params => "interleave=true", + clone_params => 'interleave=true', require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::cinder::params::scheduler_service : - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::resource::service { $::cinder::params::volume_service : } @@ -1003,45 +1065,45 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::keystone::params::service_name]], } pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint': - constraint_type => "order", - first_resource => "${::cinder::params::api_service}-clone", + constraint_type => 'order', + first_resource => "${::cinder::params::api_service}-clone", second_resource => "${::cinder::params::scheduler_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::cinder::params::api_service], - Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::cinder::params::api_service], + Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], } pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation': - source => "${::cinder::params::scheduler_service}-clone", - target => "${::cinder::params::api_service}-clone", - score => "INFINITY", + source => "${::cinder::params::scheduler_service}-clone", + target => "${::cinder::params::api_service}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::cinder::params::api_service], Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], } pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint': - constraint_type => "order", - first_resource => "${::cinder::params::scheduler_service}-clone", - second_resource => "${::cinder::params::volume_service}", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], - Pacemaker::Resource::Service[$::cinder::params::volume_service]], + constraint_type => 'order', + first_resource => "${::cinder::params::scheduler_service}-clone", + second_resource => $::cinder::params::volume_service, + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], + Pacemaker::Resource::Service[$::cinder::params::volume_service]], } pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation': - source => "${::cinder::params::volume_service}", - target => "${::cinder::params::scheduler_service}-clone", - score => "INFINITY", + source => $::cinder::params::volume_service, + 
target => "${::cinder::params::scheduler_service}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], Pacemaker::Resource::Service[$::cinder::params::volume_service]], } # Glance pacemaker::resource::service { $::glance::params::registry_service_name : - clone_params => "interleave=true", + clone_params => 'interleave=true', require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::glance::params::api_service_name : - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::constraint::base { 'keystone-then-glance-registry-constraint': @@ -1054,177 +1116,253 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::keystone::params::service_name]], } pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::glance::params::registry_service_name}-clone", second_resource => "${::glance::params::api_service_name}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], - Pacemaker::Resource::Service[$::glance::params::api_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], + Pacemaker::Resource::Service[$::glance::params::api_service_name]], } pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation': source => "${::glance::params::api_service_name}-clone", target => "${::glance::params::registry_service_name}-clone", - score => "INFINITY", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], Pacemaker::Resource::Service[$::glance::params::api_service_name]], } - # Neutron - # NOTE(gfidente): Neutron will try to populate the database with some data - # as soon as neutron-server is started; to avoid races we want to make this - # happen only on one node, before normal Pacemaker initialization - # https://bugzilla.redhat.com/show_bug.cgi?id=1233061 - exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } -> - pacemaker::resource::service { $::neutron::params::server_service: - clone_params => "interleave=true", - require => Pacemaker::Resource::Service[$::keystone::params::service_name] - } - pacemaker::resource::service { $::neutron::params::l3_agent_service: - clone_params => "interleave=true", - } - pacemaker::resource::service { $::neutron::params::dhcp_agent_service: - clone_params => "interleave=true", - } - pacemaker::resource::service { $::neutron::params::ovs_agent_service: - clone_params => "interleave=true", - } - pacemaker::resource::service { $::neutron::params::metadata_agent_service: - clone_params => "interleave=true", - } - pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: - ocf_agent_name => "neutron:OVSCleanup", - clone_params => "interleave=true", - } - pacemaker::resource::ocf { 'neutron-netns-cleanup': - ocf_agent_name => "neutron:NetnsCleanup", - clone_params => "interleave=true", - } - - # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent - pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::ovs_cleanup_service}-clone", - second_resource => "neutron-netns-cleanup-clone", - first_action => "start", - second_action => 
"start", - require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], - Pacemaker::Resource::Ocf['neutron-netns-cleanup']], - } - pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': - source => "neutron-netns-cleanup-clone", - target => "${::neutron::params::ovs_cleanup_service}-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], - Pacemaker::Resource::Ocf['neutron-netns-cleanup']], - } - pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': - constraint_type => "order", - first_resource => "neutron-netns-cleanup-clone", - second_resource => "${::neutron::params::ovs_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], - Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], - } - pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': - source => "${::neutron::params::ovs_agent_service}-clone", - target => "neutron-netns-cleanup-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], - Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], - } - - #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3 + if hiera('step') == 4 { + # Neutron + # NOTE(gfidente): Neutron will try to populate the database with some data + # as soon as neutron-server is started; to avoid races we want to make this + # happen only on one node, before normal Pacemaker initialization + # https://bugzilla.redhat.com/show_bug.cgi?id=1233061 + # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec + # will try to start the service while it's already started by Pacemaker + # It would result to a deployment failure since systemd would return 1 to Puppet + # and the overcloud would fail to deploy (6 would be returned). + # This conditional prevents from a race condition during the deployment. 
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1290582 + exec { 'neutron-server-systemd-start-sleep' : + command => 'systemctl start neutron-server && /usr/bin/sleep 5', + path => '/usr/bin', + unless => '/sbin/pcs resource show neutron-server', + } -> + pacemaker::resource::service { $::neutron::params::server_service: + clone_params => 'interleave=true', + require => Pacemaker::Resource::Service[$::keystone::params::service_name] + } + } else { + pacemaker::resource::service { $::neutron::params::server_service: + clone_params => 'interleave=true', + require => Pacemaker::Resource::Service[$::keystone::params::service_name] + } + } + if hiera('neutron::enable_l3_agent', true) { + pacemaker::resource::service { $::neutron::params::l3_agent_service: + clone_params => 'interleave=true', + } + } + if hiera('neutron::enable_dhcp_agent', true) { + pacemaker::resource::service { $::neutron::params::dhcp_agent_service: + clone_params => 'interleave=true', + } + } + if hiera('neutron::enable_ovs_agent', true) { + pacemaker::resource::service { $::neutron::params::ovs_agent_service: + clone_params => 'interleave=true', + } + } + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + pacemaker::resource::service {'tomcat': + clone_params => 'interleave=true', + } + } + if hiera('neutron::enable_metadata_agent', true) { + pacemaker::resource::service { $::neutron::params::metadata_agent_service: + clone_params => 'interleave=true', + } + } + if hiera('neutron::enable_ovs_agent', true) { + pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: + ocf_agent_name => 'neutron:OVSCleanup', + clone_params => 'interleave=true', + } + pacemaker::resource::ocf { 'neutron-netns-cleanup': + ocf_agent_name => 'neutron:NetnsCleanup', + clone_params => 'interleave=true', + } + + # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent + pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::ovs_cleanup_service}-clone", + second_resource => 'neutron-netns-cleanup-clone', + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], + Pacemaker::Resource::Ocf['neutron-netns-cleanup']], + } + pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': + source => 'neutron-netns-cleanup-clone', + target => "${::neutron::params::ovs_cleanup_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], + Pacemaker::Resource::Ocf['neutron-netns-cleanup']], + } + pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': + constraint_type => 'order', + first_resource => 'neutron-netns-cleanup-clone', + second_resource => "${::neutron::params::ovs_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], + Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], + } + pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': + source => "${::neutron::params::ovs_agent_service}-clone", + target => 'neutron-netns-cleanup-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], + Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], + } + } + pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': - 
constraint_type => "order", - first_resource => "${::keystone::params::service_name}-clone", + constraint_type => 'order', + first_resource => "${::keystone::params::service_name}-clone", second_resource => "${::neutron::params::server_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::keystone::params::service_name], - Pacemaker::Resource::Service[$::neutron::params::server_service]], - } - pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::server_service}-clone", - second_resource => "${::neutron::params::ovs_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::neutron::params::server_service], - Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], - } - pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::ovs_agent_service}-clone", - second_resource => "${::neutron::params::dhcp_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], - - } - pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': - source => "${::neutron::params::dhcp_agent_service}-clone", - target => "${::neutron::params::ovs_agent_service}-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], - } - pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::dhcp_agent_service}-clone", - second_resource => "${::neutron::params::l3_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]] - } - pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation': - source => "${::neutron::params::l3_agent_service}-clone", - target => "${::neutron::params::dhcp_agent_service}-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]] - } - pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::l3_agent_service}-clone", - second_resource => "${::neutron::params::metadata_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]] - } - pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation': - source => "${::neutron::params::metadata_agent_service}-clone", - target => "${::neutron::params::l3_agent_service}-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]] + 
first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::keystone::params::service_name], + Pacemaker::Resource::Service[$::neutron::params::server_service]], + } + if hiera('neutron::enable_ovs_agent',true) { + pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::server_service}-clone", + second_resource => "${::neutron::params::ovs_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::server_service], + Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], + } + } + if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) { + pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::ovs_agent_service}-clone", + second_resource => "${::neutron::params::dhcp_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], + Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], + + } + pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': + source => "${::neutron::params::dhcp_agent_service}-clone", + target => "${::neutron::params::ovs_agent_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], + Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], + } + } + if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_l3_agent',true) { + pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::dhcp_agent_service}-clone", + second_resource => "${::neutron::params::l3_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], + Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]] + } + pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation': + source => "${::neutron::params::l3_agent_service}-clone", + target => "${::neutron::params::dhcp_agent_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], + Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]] + } + } + if hiera('neutron::enable_l3_agent',true) and hiera('neutron::enable_metadata_agent',true) { + pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::l3_agent_service}-clone", + second_resource => "${::neutron::params::metadata_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], + Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]] + } + pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation': + source => "${::neutron::params::metadata_agent_service}-clone", + target => "${::neutron::params::l3_agent_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], + 
Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]] + } + } + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + # midonet chain: keystone-->neutron-server-->dhcp-->metadata-->tomcat + pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::server_service}-clone", + second_resource => "${::neutron::params::dhcp_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::server_service], + Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], + } + pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::dhcp_agent_service}-clone", + second_resource => "${::neutron::params::metadata_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], + Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], + } + pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::metadata_agent_service}-clone", + second_resource => 'tomcat-clone', + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service], + Pacemaker::Resource::Service['tomcat']], + } + pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation': + source => "${::neutron::params::metadata_agent_service}-clone", + target => "${::neutron::params::dhcp_agent_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], + Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], + } } # Nova pacemaker::resource::service { $::nova::params::api_service_name : - clone_params => "interleave=true", - op_params => "start timeout=90s monitor start-delay=10s", + clone_params => 'interleave=true', + op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::conductor_service_name : - clone_params => "interleave=true", - op_params => "start timeout=90s monitor start-delay=10s", + clone_params => 'interleave=true', + op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::consoleauth_service_name : - clone_params => "interleave=true", - op_params => "start timeout=90s monitor start-delay=10s", - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + clone_params => 'interleave=true', + op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', + require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::nova::params::vncproxy_service_name : - clone_params => "interleave=true", - op_params => "start timeout=90s monitor start-delay=10s", + clone_params => 'interleave=true', + op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::scheduler_service_name : - clone_params => "interleave=true", - op_params => "start timeout=90s monitor start-delay=10s", + clone_params => 'interleave=true', + op_params => 'start timeout=200s stop 
timeout=200s monitor start-delay=10s', } pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint': @@ -1237,66 +1375,66 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::keystone::params::service_name]], } pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::nova::params::consoleauth_service_name}-clone", second_resource => "${::nova::params::vncproxy_service_name}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], + Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], } pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation': - source => "${::nova::params::vncproxy_service_name}-clone", - target => "${::nova::params::consoleauth_service_name}-clone", - score => "INFINITY", + source => "${::nova::params::vncproxy_service_name}-clone", + target => "${::nova::params::consoleauth_service_name}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], } pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::nova::params::vncproxy_service_name}-clone", second_resource => "${::nova::params::api_service_name}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], - Pacemaker::Resource::Service[$::nova::params::api_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], + Pacemaker::Resource::Service[$::nova::params::api_service_name]], } pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation': - source => "${::nova::params::api_service_name}-clone", - target => "${::nova::params::vncproxy_service_name}-clone", - score => "INFINITY", + source => "${::nova::params::api_service_name}-clone", + target => "${::nova::params::vncproxy_service_name}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], Pacemaker::Resource::Service[$::nova::params::api_service_name]], } pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::nova::params::api_service_name}-clone", second_resource => "${::nova::params::scheduler_service_name}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], - Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], + Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], } pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation': - source => "${::nova::params::scheduler_service_name}-clone", - target => 
"${::nova::params::api_service_name}-clone", - score => "INFINITY", + source => "${::nova::params::scheduler_service_name}-clone", + target => "${::nova::params::api_service_name}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], } pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::nova::params::scheduler_service_name}-clone", second_resource => "${::nova::params::conductor_service_name}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], - Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], + Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], } pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation': - source => "${::nova::params::conductor_service_name}-clone", - target => "${::nova::params::scheduler_service_name}-clone", - score => "INFINITY", + source => "${::nova::params::conductor_service_name}-clone", + target => "${::nova::params::scheduler_service_name}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], } @@ -1313,7 +1451,7 @@ if hiera('step') >= 4 { pacemaker::resource::service { $::ceilometer::params::agent_central_service_name : clone_params => 'interleave=true', require => [Pacemaker::Resource::Service[$::keystone::params::service_name], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], + Pacemaker::Resource::Service[$::mongodb::params::service_name]], } } } @@ -1323,12 +1461,6 @@ if hiera('step') >= 4 { pacemaker::resource::service { $::ceilometer::params::api_service_name : clone_params => 'interleave=true', } - pacemaker::resource::service { $::ceilometer::params::alarm_evaluator_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::ceilometer::params::alarm_notifier_service_name : - clone_params => 'interleave=true', - } pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name : clone_params => 'interleave=true', } @@ -1345,7 +1477,7 @@ if hiera('step') >= 4 { } pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint': constraint_type => 'order', - first_resource => "redis-master", + first_resource => 'redis-master', second_resource => "${::ceilometer::params::agent_central_service_name}-clone", first_action => 'promote', second_action => 'start', @@ -1403,54 +1535,6 @@ if hiera('step') >= 4 { require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], Pacemaker::Resource::Ocf['delay']], } - pacemaker::constraint::base { 'ceilometer-delay-then-ceilometer-alarm-evaluator-constraint': - constraint_type => 'order', - first_resource => 'delay-clone', - second_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::colocation { 
'ceilometer-alarm-evaluator-with-ceilometer-delay-colocation': - source => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - target => 'delay-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::base { 'ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - second_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } - pacemaker::constraint::colocation { 'ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation': - source => "${::ceilometer::params::alarm_notifier_service_name}-clone", - target => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } - pacemaker::constraint::base { 'ceilometer-alarm-notifier-then-ceilometer-notification-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone", - second_resource => "${::ceilometer::params::agent_notification_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } - pacemaker::constraint::colocation { 'ceilometer-notification-with-ceilometer-alarm-notifier-colocation': - source => "${::ceilometer::params::agent_notification_service_name}-clone", - target => "${::ceilometer::params::alarm_notifier_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } if downcase(hiera('ceilometer_backend')) == 'mongodb' { pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint': constraint_type => 'order', @@ -1491,8 +1575,8 @@ if hiera('step') >= 4 { second_resource => "${::heat::params::api_cfn_service_name}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], + require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], + Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], } pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation': source => "${::heat::params::api_cfn_service_name}-clone", @@ -1507,8 +1591,8 @@ if hiera('step') >= 4 { second_resource => "${::heat::params::api_cloudwatch_service_name}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], + require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], + 
Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], } pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation': source => "${::heat::params::api_cloudwatch_service_name}-clone", @@ -1523,8 +1607,8 @@ if hiera('step') >= 4 { second_resource => "${::heat::params::engine_service_name}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::engine_service_name]], + require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], + Pacemaker::Resource::Service[$::heat::params::engine_service_name]], } pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation': source => "${::heat::params::engine_service_name}-clone", @@ -1545,18 +1629,18 @@ if hiera('step') >= 4 { # Horizon pacemaker::resource::service { $::horizon::params::http_service: - clone_params => "interleave=true", + clone_params => 'interleave=true', } #VSM - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { pacemaker::resource::ocf { 'vsm-p' : ocf_agent_name => 'heartbeat:VirtualDomain', resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml', require => Class['n1k_vsm'], meta_params => 'resource-stickiness=INFINITY', } - if str2bool(hiera('n1k_vsm::pacemaker_control', 'true')) { + if str2bool(hiera('n1k_vsm::pacemaker_control', true)) { pacemaker::resource::ocf { 'vsm-s' : ocf_agent_name => 'heartbeat:VirtualDomain', resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml', @@ -1564,9 +1648,9 @@ if hiera('step') >= 4 { meta_params => 'resource-stickiness=INFINITY', } pacemaker::constraint::colocation { 'vsm-colocation-contraint': - source => "vsm-p", - target => "vsm-s", - score => "-INFINITY", + source => 'vsm-p', + target => 'vsm-s', + score => '-INFINITY', require => [Pacemaker::Resource::Ocf['vsm-p'], Pacemaker::Resource::Ocf['vsm-s']], } @@ -1586,27 +1670,6 @@ if hiera('step') >= 5 { } -> class {'::keystone::endpoint' : require => Pacemaker::Resource::Service[$::keystone::params::service_name], - } -> - class { '::ceilometer::keystone::auth' : - require => Pacemaker::Resource::Service[$::keystone::params::service_name], - } -> - class { '::cinder::keystone::auth' : - require => Pacemaker::Resource::Service[$::keystone::params::service_name], - } -> - class { '::glance::keystone::auth' : - require => Pacemaker::Resource::Service[$::keystone::params::service_name], - } -> - class { '::heat::keystone::auth' : - require => Pacemaker::Resource::Service[$::keystone::params::service_name], - } -> - class { '::neutron::keystone::auth' : - require => Pacemaker::Resource::Service[$::keystone::params::service_name], - } -> - class { '::nova::keystone::auth' : - require => Pacemaker::Resource::Service[$::keystone::params::service_name], - } -> - class { '::swift::keystone::auth' : - require => Pacemaker::Resource::Service[$::keystone::params::service_name], } } diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp index 5f4b070d..1eabddf1 100644 --- a/puppet/manifests/overcloud_object.pp +++ b/puppet/manifests/overcloud_object.pp @@ -13,7 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. 
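The object and volume manifest hunks below switch every class reference to an absolute, top-scope path (include ::tripleo::packages, class { '::snmp': ... }). Under Puppet 3.x relative namespacing, a bare name used inside a class can resolve against the enclosing namespace before the top-level module; the leading :: pins the lookup to top scope. A minimal sketch of the ambiguity this avoids (module and class names here are placeholders, not from the commit):

class mymodule::snmp { }

class mymodule::profile {
  include snmp    # may resolve to mymodule::snmp from inside this namespace
  include ::snmp  # always the top-level snmp module
}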
-include tripleo::packages +include ::tripleo::packages +include ::tripleo::firewall create_resources(sysctl::value, hiera('sysctl_settings'), {}) @@ -22,8 +23,8 @@ if count(hiera('ntp::servers')) > 0 { } include ::swift -class {'swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')) +class { '::swift::storage::all': + mount_check => str2bool(hiera('swift_mount_check')), } if(!defined(File['/srv/node'])) { file { '/srv/node': @@ -43,7 +44,7 @@ snmp::snmpv3_user { $snmpd_user: authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } -class { 'snmp': +class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp index eaaed66e..2bdd8a9c 100644 --- a/puppet/manifests/overcloud_volume.pp +++ b/puppet/manifests/overcloud_volume.pp @@ -13,7 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. -include tripleo::packages +include ::tripleo::packages +include ::tripleo::firewall create_resources(sysctl::value, hiera('sysctl_settings'), {}) @@ -47,7 +48,7 @@ snmp::snmpv3_user { $snmpd_user: authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } -class { 'snmp': +class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp index 1897dcd0..2d880d33 100644 --- a/puppet/manifests/ringbuilder.pp +++ b/puppet/manifests/ringbuilder.pp @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
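In the ringbuilder change that follows, $build_ring becomes a native boolean checked with validate_bool (from puppetlabs-stdlib) instead of a string pushed through str2bool, and the replica count handed to swift::ringbuilder::create is capped at the number of available devices, so a small deployment with fewer disks than the requested replicas still builds a usable ring. A rough sketch of the capping with invented values (min and count are stdlib functions):

$devices  = ['r1z1-192.0.2.10:6000/d1', 'r1z1-192.0.2.11:6000/d1']
$replicas = 3
# Two devices but three requested replicas: the ring is created with 2.
notice(min(count($devices), $replicas))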
-include tripleo::packages +include ::tripleo::packages define add_devices( $swift_zones = '1' @@ -37,44 +37,46 @@ define add_devices( $base = regsubst($name,'^r1.*-(.*)$','\1') $object = regsubst($base, '%PORT%', '6000') ring_object_device { $object: - zone => '1', - weight => 100, + zone => '1', + weight => 100, } $container = regsubst($base, '%PORT%', '6001') ring_container_device { $container: - zone => '1', - weight => 100, + zone => '1', + weight => 100, } $account = regsubst($base, '%PORT%', '6002') ring_account_device { $account: - zone => '1', - weight => 100, + zone => '1', + weight => 100, } } class tripleo::ringbuilder ( $swift_zones = '1', $devices = '', - $build_ring = 'True', + $build_ring = true, $part_power, $replicas, $min_part_hours, ) { - if str2bool(downcase("$build_ring")) { + validate_bool($build_ring) + + if $build_ring { $device_array = strip(split(rstrip($devices), ',')) # create local rings swift::ringbuilder::create{ ['object', 'account', 'container']: part_power => $part_power, - replicas => $replicas, + replicas => min(count($device_array), $replicas), min_part_hours => $min_part_hours, } -> # add all other devices add_devices {$device_array: - swift_zones => $swift_zones + swift_zones => $swift_zones, } -> # rebalance diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml index 22ec6096..b60664a1 100644 --- a/puppet/swift-storage.yaml +++ b/puppet/swift-storage.yaml @@ -7,7 +7,6 @@ parameters: constraints: - custom_constraint: nova.flavor HashSuffix: - default: unset description: A random string to be used as a salt when hashing to determine mappings in the ring. hidden: true @@ -17,7 +16,7 @@ parameters: type: string KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string MountCheck: default: 'false' @@ -40,13 +39,13 @@ parameters: description: The user name for SNMPd with readonly rights running on all Overcloud nodes type: string SnmpdReadonlyUserPassword: - default: unset description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true NtpServer: - type: string default: '' + description: Comma-separated list of ntp servers + type: comma_delimited_list EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -76,7 +75,34 @@ parameters: description: | Role specific additional hiera configuration to inject into the cluster. type: json - + NetworkDeploymentActions: + type: comma_delimited_list + description: > + Heat actions on which to apply network configuration changes + default: ['CREATE'] + SoftwareConfigTransport: + default: POLL_SERVER_CFN + description: | + How the server should receive the metadata required for software configuration. + type: string + constraints: + - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE] + CloudDomain: + default: '' + type: string + description: > + The DNS domain used for the hosts. This should match the dhcp_domain + configured in the Undercloud neutron. Defaults to localdomain. + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. 
+ type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -91,6 +117,9 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + software_config_transport: {get_param: SoftwareConfigTransport} + metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -112,6 +141,11 @@ resources: NodeUserData: type: OS::TripleO::NodeUserData + ExternalPort: + type: OS::TripleO::SwiftStorage::Ports::ExternalPort + properties: + ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + InternalApiPort: type: OS::TripleO::SwiftStorage::Ports::InternalApiPort properties: @@ -127,27 +161,44 @@ resources: properties: ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + TenantPort: + type: OS::TripleO::SwiftStorage::Ports::TenantPort + properties: + ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + + ManagementPort: + type: OS::TripleO::SwiftStorage::Ports::ManagementPort + properties: + ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + NetworkConfig: type: OS::TripleO::ObjectStorage::Net::SoftwareConfig properties: ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + ExternalIp: {get_attr: [ExternalPort, ip_address]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + TenantIp: {get_attr: [TenantPort, ip_address]} + ManagementIp: {get_attr: [ManagementPort, ip_address]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment properties: config: {get_resource: NetworkConfig} server: {get_resource: SwiftStorage} + actions: {get_param: NetworkDeploymentActions} SwiftStorageHieraConfig: type: OS::Heat::StructuredConfig @@ -207,19 +258,22 @@ resources: swift_min_part_hours: {get_param: MinPartHours} swift_part_power: {get_param: PartPower} swift_replicas: { get_param: Replicas} - ntp_servers: - str_replace: - template: '["server"]' - params: - server: {get_param: NtpServer} + ntp_servers: {get_param: NtpServer} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} + # Resource for site-specific injection of root certificate + NodeTLSCAData: + depends_on: SwiftStorageHieraDeploy + type: OS::TripleO::NodeTLSCAData + properties: + server: {get_resource: SwiftStorage} + # Hook for site-specific additional pre-deployment config, # applying to all nodes, e.g node registration/unregistration NodeExtraConfig: - depends_on: SwiftStorageHieraDeploy + depends_on: NodeTLSCAData type: OS::TripleO::NodeExtraConfig properties: server: {get_resource: SwiftStorage} @@ -240,9 +294,10 @@ outputs: hosts_entry: value: str_replace: - template: "IP 
HOST.localdomain HOST" + template: "IP HOST.DOMAIN HOST" params: IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]} + DOMAIN: {get_param: CloudDomain} HOST: {get_attr: [SwiftStorage, name]} nova_server_resource: description: Heat resource handle for the swift storage server @@ -255,6 +310,9 @@ outputs: template: 'r1z1-IP:%PORT%/d1' params: IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} + external_ip_address: + description: IP address of the server in the external network + value: {get_attr: [ExternalPort, ip_address]} internal_api_ip_address: description: IP address of the server in the internal_api network value: {get_attr: [InternalApiPort, ip_address]} @@ -264,10 +322,17 @@ outputs: storage_mgmt_ip_address: description: IP address of the server in the storage_mgmt network value: {get_attr: [StorageMgmtPort, ip_address]} + tenant_ip_address: + description: IP address of the server in the tenant network + value: {get_attr: [TenantPort, ip_address]} + management_ip_address: + description: IP address of the server in the management network + value: {get_attr: [ManagementPort, ip_address]} config_identifier: description: identifier which changes if the node configuration may need re-applying value: list_join: - ',' - - {get_attr: [SwiftStorageHieraDeploy, deploy_stdout]} + - {get_attr: [NodeTLSCAData, deploy_stdout]} - {get_param: UpdateIdentifier} |