29 files changed, 155 insertions, 172 deletions
diff --git a/environments/neutron-nuage-config.yaml b/environments/neutron-nuage-config.yaml index 4ba8d9cb..50ba8f53 100644 --- a/environments/neutron-nuage-config.yaml +++ b/environments/neutron-nuage-config.yaml @@ -12,3 +12,4 @@ parameter_defaults: NeutronNuageVSDOrganization: 'organization' NeutronNuageBaseURIVersion: 'default_uri_version' NeutronNuageCMSId: '' + UseForwardedFor: true diff --git a/firstboot/userdata_heat_admin.yaml b/firstboot/userdata_heat_admin.yaml index 73481c63..f8891b29 100644 --- a/firstboot/userdata_heat_admin.yaml +++ b/firstboot/userdata_heat_admin.yaml @@ -1,7 +1,7 @@ heat_template_version: 2014-10-16 parameters: - # Can be overriden via parameter_defaults in the environment + # Can be overridden via parameter_defaults in the environment node_admin_username: type: string default: heat-admin diff --git a/network/external.yaml b/network/external.yaml index e8f92a5e..3b24da7e 100644 --- a/network/external.yaml +++ b/network/external.yaml @@ -15,7 +15,7 @@ parameters: type: json ExternalNetAdminStateUp: default: false - description: This admin state of of the network. + description: This admin state of the network. type: boolean ExternalNetEnableDHCP: default: false diff --git a/network/internal_api.yaml b/network/internal_api.yaml index 69154bef..6f8aa3a8 100644 --- a/network/internal_api.yaml +++ b/network/internal_api.yaml @@ -15,7 +15,7 @@ parameters: type: json InternalApiNetAdminStateUp: default: false - description: This admin state of of the network. + description: This admin state of the network. type: boolean InternalApiNetEnableDHCP: default: false diff --git a/network/ports/ctlplane_vip.yaml b/network/ports/ctlplane_vip.yaml index 3e949f41..ab6b18f8 100644 --- a/network/ports/ctlplane_vip.yaml +++ b/network/ports/ctlplane_vip.yaml @@ -13,7 +13,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string diff --git a/network/ports/external.yaml b/network/ports/external.yaml index 1e2fff68..4180a223 100644 --- a/network/ports/external.yaml +++ b/network/ports/external.yaml @@ -13,7 +13,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string diff --git a/network/ports/internal_api.yaml b/network/ports/internal_api.yaml index d528b327..01cdfe9b 100644 --- a/network/ports/internal_api.yaml +++ b/network/ports/internal_api.yaml @@ -12,7 +12,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string diff --git a/network/ports/noop.yaml b/network/ports/noop.yaml index 31ee6f3c..028624fd 100644 --- a/network/ports/noop.yaml +++ b/network/ports/noop.yaml @@ -16,7 +16,7 @@ parameters: default: '' type: string NetworkName: - description: # Here for compatability with vip.yaml + description: # Here for compatibility with vip.yaml default: '' type: string FixedIPs: diff --git a/network/ports/storage.yaml b/network/ports/storage.yaml index 88fb537c..1d2384c5 100644 --- a/network/ports/storage.yaml +++ b/network/ports/storage.yaml @@ -12,7 +12,7 @@ parameters: description: Name 
of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string diff --git a/network/ports/storage_mgmt.yaml b/network/ports/storage_mgmt.yaml index c98a21ef..f10e3582 100644 --- a/network/ports/storage_mgmt.yaml +++ b/network/ports/storage_mgmt.yaml @@ -12,7 +12,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string diff --git a/network/ports/tenant.yaml b/network/ports/tenant.yaml index 94408ca2..ccdc57ee 100644 --- a/network/ports/tenant.yaml +++ b/network/ports/tenant.yaml @@ -12,7 +12,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string diff --git a/network/ports/vip.yaml b/network/ports/vip.yaml index 56efc178..ab6cd2c0 100644 --- a/network/ports/vip.yaml +++ b/network/ports/vip.yaml @@ -13,7 +13,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string diff --git a/network/storage.yaml b/network/storage.yaml index 60b779e0..dc9f35ea 100644 --- a/network/storage.yaml +++ b/network/storage.yaml @@ -15,7 +15,7 @@ parameters: type: json StorageNetAdminStateUp: default: false - description: This admin state of of the network. + description: This admin state of the network. type: boolean StorageNetEnableDHCP: default: false diff --git a/network/storage_mgmt.yaml b/network/storage_mgmt.yaml index 043bc87b..59933c8c 100644 --- a/network/storage_mgmt.yaml +++ b/network/storage_mgmt.yaml @@ -15,7 +15,7 @@ parameters: type: json StorageMgmtNetAdminStateUp: default: false - description: This admin state of of the network. + description: This admin state of the network. type: boolean StorageMgmtNetEnableDHCP: default: false diff --git a/network/tenant.yaml b/network/tenant.yaml index daf5cb75..6fe96121 100644 --- a/network/tenant.yaml +++ b/network/tenant.yaml @@ -15,7 +15,7 @@ parameters: type: json TenantNetAdminStateUp: default: false - description: This admin state of of the network. + description: This admin state of the network. 
type: boolean TenantNetEnableDHCP: default: false diff --git a/overcloud-resource-registry.yaml b/overcloud-resource-registry.yaml deleted file mode 100644 index 11a33599..00000000 --- a/overcloud-resource-registry.yaml +++ /dev/null @@ -1,81 +0,0 @@ -resource_registry: - OS::TripleO::BlockStorage: os-apply-config/cinder-storage.yaml - OS::TripleO::BlockStorage::Net::SoftwareConfig: net-config-noop.yaml - OS::TripleO::Compute: os-apply-config/compute.yaml - OS::TripleO::Compute::Net::SoftwareConfig: net-config-noop.yaml - OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment - OS::TripleO::Controller: os-apply-config/controller.yaml - OS::TripleO::Controller::Net::SoftwareConfig: net-config-noop.yaml - OS::TripleO::ObjectStorage: os-apply-config/swift-storage.yaml - OS::TripleO::ObjectStorage::Net::SoftwareConfig: net-config-noop.yaml - OS::TripleO::CephStorage: os-apply-config/ceph-storage.yaml - OS::TripleO::CephStorage::Net::SoftwareConfig: net-config-noop.yaml - OS::TripleO::ControllerPostDeployment: os-apply-config/controller-post.yaml - OS::TripleO::ComputePostDeployment: os-apply-config/compute-post.yaml - OS::TripleO::ObjectStoragePostDeployment: os-apply-config/swift-storage-post.yaml - OS::TripleO::BlockStoragePostDeployment: os-apply-config/cinder-storage-post.yaml - OS::TripleO::CephStoragePostDeployment: os-apply-config/ceph-storage-post.yaml - OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig: os-apply-config/swift-devices-and-proxy-config.yaml - OS::TripleO::CephClusterConfig::SoftwareConfig: os-apply-config/ceph-cluster-config.yaml - OS::TripleO::AllNodes::SoftwareConfig: os-apply-config/all-nodes-config.yaml - OS::TripleO::BootstrapNode::SoftwareConfig: bootstrap-config.yaml - OS::TripleO::NodeUserData: firstboot/userdata_default.yaml - OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml - OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml - - # "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy - # phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when - # configuration with knowledge of all nodes in the cluster is required vs single - # node configuration in the pre_deploy step. 
- OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/default.yaml - - # TripleO overcloud networks - OS::TripleO::Network: network/networks.yaml - OS::TripleO::VipConfig: os-apply-config/vip-config.yaml - - OS::TripleO::Network::External: network/noop.yaml - OS::TripleO::Network::InternalApi: network/noop.yaml - OS::TripleO::Network::StorageMgmt: network/noop.yaml - OS::TripleO::Network::Storage: network/noop.yaml - OS::TripleO::Network::Tenant: network/noop.yaml - - OS::TripleO::Network::Ports::NetVipMap: network/ports/net_ip_map.yaml - OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml - OS::TripleO::Network::Ports::NetIpSubnetMap: network/ports/net_ip_subnet_map.yaml - OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml - - # Port assignments for the controller role - OS::TripleO::Controller::Ports::ExternalPort: network/ports/noop.yaml - OS::TripleO::Controller::Ports::InternalApiPort: network/ports/noop.yaml - OS::TripleO::Controller::Ports::StoragePort: network/ports/noop.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: network/ports/noop.yaml - OS::TripleO::Controller::Ports::TenantPort: network/ports/noop.yaml - - # Port assignments for the compute role - OS::TripleO::Compute::Ports::InternalApiPort: network/ports/noop.yaml - OS::TripleO::Compute::Ports::StoragePort: network/ports/noop.yaml - OS::TripleO::Compute::Ports::TenantPort: network/ports/noop.yaml - - # Port assignments for the ceph storage role - OS::TripleO::CephStorage::Ports::StoragePort: network/ports/noop.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: network/ports/noop.yaml - - # Port assignments for the swift storage role - OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml - OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml - OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml - - # Port assignments for the block storage role - OS::TripleO::BlockStorage::Ports::InternalApiPort: network/ports/noop.yaml - OS::TripleO::BlockStorage::Ports::StoragePort: network/ports/noop.yaml - OS::TripleO::BlockStorage::Ports::StorageMgmtPort: network/ports/noop.yaml - - # Port assignments for service virtual IPs for the controller role - OS::TripleO::Controller::Ports::RedisVipPort: network/ports/noop.yaml - - # Service Endpoint Mappings - OS::TripleO::Endpoint: network/endpoints/endpoint.yaml - OS::TripleO::EndpointMap: network/endpoints/endpoint_map.yaml - - # validation resources - OS::TripleO::AllNodes::Validation: os-apply-config/all-nodes-validation.yaml diff --git a/overcloud.yaml b/overcloud.yaml index 82b5f408..a4f4578b 100644 --- a/overcloud.yaml +++ b/overcloud.yaml @@ -93,7 +93,7 @@ parameters: type: string KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string constraints: - custom_constraint: nova.keypair @@ -289,6 +289,12 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. 
+ type: json # Controller-specific params AdminToken: @@ -362,6 +368,10 @@ parameters: default: true description: Whether to enable Swift Storage on the Controller type: boolean + ControllerSchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} ExtraConfig: default: {} description: | @@ -431,6 +441,10 @@ parameters: type: string default: '' hidden: true + InstanceNameTemplate: + default: 'instance-%08x' + description: Template string to be used to generate instance names + type: string KeystoneCACertificate: default: '' description: Keystone self-signed certificate authority certificate. @@ -575,6 +589,10 @@ parameters: default: '' description: Libvirt VIF driver configuration for the network type: string + NovaComputeSchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} NovaEnableRbdBackend: default: false description: Whether to enable or not the Rbd backend for Nova @@ -649,6 +667,11 @@ parameters: BlockStorage specific configuration to inject into the cluster. Same structure as ExtraConfig. type: json + BlockStorageSchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} + # Object storage specific parameters ObjectStorageCount: @@ -668,7 +691,10 @@ parameters: ObjectStorage specific configuration to inject into the cluster. Same structure as ExtraConfig. type: json - + ObjectStorageSchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} # Ceph storage specific parameters CephStorageCount: @@ -689,6 +715,11 @@ parameters: CephStorage specific configuration to inject into the cluster. Same structure as ExtraConfig. type: json + CephStorageSchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} + # Hostname format for each role # Note %index% is translated into the index of the node, e.g 0/1/2 etc @@ -844,6 +875,7 @@ resources: HorizonSecret: {get_resource: HorizonSecret} Image: {get_param: controllerImage} ImageUpdatePolicy: {get_param: ImageUpdatePolicy} + InstanceNameTemplate: {get_param: InstanceNameTemplate} KeyName: {get_param: KeyName} KeystoneCACertificate: {get_param: KeystoneCACertificate} KeystoneSigningCertificate: {get_param: KeystoneSigningCertificate} @@ -924,6 +956,8 @@ resources: params: '%stackname%': {get_param: 'OS::stack_name'} NodeIndex: '%index%' + ServerMetadata: {get_param: ServerMetadata} + SchedulerHints: {get_param: ControllerSchedulerHints} Compute: type: OS::Heat::ResourceGroup @@ -998,6 +1032,8 @@ resources: params: '%stackname%': {get_param: 'OS::stack_name'} CloudDomain: {get_param: CloudDomain} + ServerMetadata: {get_param: ServerMetadata} + SchedulerHints: {get_param: NovaComputeSchedulerHints} BlockStorage: type: OS::Heat::ResourceGroup @@ -1036,6 +1072,8 @@ resources: ExtraConfig: {get_param: ExtraConfig} BlockStorageExtraConfig: {get_param: BlockStorageExtraConfig} CloudDomain: {get_param: CloudDomain} + ServerMetadata: {get_param: ServerMetadata} + SchedulerHints: {get_param: BlockStorageSchedulerHints} ObjectStorage: type: OS::Heat::ResourceGroup @@ -1065,6 +1103,8 @@ resources: ExtraConfig: {get_param: ExtraConfig} ObjectStorageExtraConfig: {get_param: ObjectStorageExtraConfig} CloudDomain: {get_param: CloudDomain} + ServerMetadata: {get_param: ServerMetadata} + SchedulerHints: {get_param: ObjectStorageSchedulerHints} CephStorage: type: OS::Heat::ResourceGroup @@ -1089,6 +1129,8 @@ resources: ExtraConfig: {get_param: ExtraConfig} 
CephStorageExtraConfig: {get_param: CephStorageExtraConfig} CloudDomain: {get_param: CloudDomain} + ServerMetadata: {get_param: ServerMetadata} + SchedulerHints: {get_param: CephStorageSchedulerHints} ControllerIpListMap: type: OS::TripleO::Network::Ports::NetIpListMap @@ -1498,3 +1540,9 @@ outputs: SwiftInternalVip: description: VIP for Swift Proxy internal endpoint value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} + HostsEntry: + description: | + The content that should be appended to your /etc/hosts if you want do get + hostname-based access to the deployed nodes (useful for testing without + setting up a DNS). + value: {get_attr: [allNodesConfig, hosts_entries]} diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index 1147b856..895ddc3d 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -259,3 +259,9 @@ outputs: description: The ID of the allNodesConfigImpl resource. value: {get_resource: allNodesConfigImpl} + hosts_entries: + description: | + The content that should be appended to your /etc/hosts if you want do get + hostname-based access to the deployed nodes (useful for testing without + setting up a DNS). + value: {get_attr: [allNodesConfigImpl, config, hosts]} diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml index 1dc20a50..b6a1007a 100644 --- a/puppet/ceph-storage.yaml +++ b/puppet/ceph-storage.yaml @@ -16,7 +16,7 @@ parameters: description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. type: string KeyName: - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string default: default constraints: @@ -65,7 +65,16 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. - + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. + type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: CephStorage: @@ -80,6 +89,8 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml index f1d25e78..fc197059 100644 --- a/puppet/cinder-storage.yaml +++ b/puppet/cinder-storage.yaml @@ -46,7 +46,7 @@ parameters: - custom_constraint: nova.flavor KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string RabbitPassword: default: 'guest' @@ -118,6 +118,16 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. 
+ type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -133,6 +143,8 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: diff --git a/puppet/compute.yaml b/puppet/compute.yaml index c33373d1..247c0326 100644 --- a/puppet/compute.yaml +++ b/puppet/compute.yaml @@ -61,7 +61,7 @@ parameters: description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. type: string KeyName: - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string default: default constraints: @@ -296,7 +296,16 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. - + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. + type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -314,6 +323,8 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -466,6 +477,7 @@ resources: neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers} neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers} neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device} + keystone_public_api_virtual_ip: {get_input: keystone_vip} admin_password: {get_input: admin_password} ntp::servers: {get_input: ntp_servers} tripleo::packages::enable_install: {get_input: enable_package_install} @@ -557,6 +569,7 @@ resources: neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]} neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri]} + keystone_vip: {get_param: KeystonePublicApiVirtualIP} admin_password: {get_param: AdminPassword} rabbit_username: {get_param: RabbitUserName} rabbit_password: {get_param: RabbitPassword} diff --git a/puppet/controller.yaml b/puppet/controller.yaml index 2c1c18a3..5d39462c 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -240,9 +240,13 @@ parameters: default: 'REBUILD_PRESERVE_EPHEMERAL' description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. type: string + InstanceNameTemplate: + default: 'instance-%08x' + description: Template string to be used to generate instance names + type: string KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string constraints: - custom_constraint: nova.keypair @@ -634,6 +638,16 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. 
Defaults to localdomain. + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. + type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -649,6 +663,8 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -978,6 +994,7 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/nova' + instance_name_template: {get_param: InstanceNameTemplate} fencing_config: {get_param: FencingConfig} pcsd_password: {get_param: PcsdPassword} rabbit_username: {get_param: RabbitUserName} @@ -1308,6 +1325,7 @@ resources: nova::database_connection: {get_input: nova_dsn} nova::glance_api_servers: {get_input: glance_api_servers} nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} + nova::api::instance_name_template: {get_input: instance_name_template} nova::network::neutron::neutron_admin_password: {get_input: neutron_password} nova::network::neutron::neutron_url: {get_input: neutron_internal_url} nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml index 60f02bf8..8378d2fc 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml @@ -43,6 +43,11 @@ parameters: description: Cloud Management System ID (CMS ID) to distinguish between OS instances on the same VSD type: string + UseForwardedFor: + description: Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. 
+ type: boolean + default: false + resources: NeutronNuageConfig: type: OS::Heat::StructuredConfig @@ -61,6 +66,7 @@ resources: neutron::plugins::nuage::nuage_vsd_organization: {get_input: NuageVSDOrganization} neutron::plugins::nuage::nuage_base_uri_version: {get_input: NuageBaseURIVersion} neutron::plugins::nuage::nuage_cms_id: {get_input: NuageCMSId} + nova::api::use_forwarded_for: {get_input: NovaUseForwardedFor} NeutronNuageDeployment: type: OS::Heat::StructuredDeployment @@ -76,6 +82,7 @@ resources: NuageVSDOrganization: {get_param: NeutronNuageVSDOrganization} NuageBaseURIVersion: {get_param: NeutronNuageBaseURIVersion} NuageCMSId: {get_param: NeutronNuageCMSId} + NovaUseForwardedFor: {get_param: UseForwardedFor} outputs: deploy_stdout: diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index 95f5ccb8..b4b51abf 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -7,6 +7,7 @@ ceilometer::agent::auth::auth_region: 'regionOne' # changes in the tripleo-incubator keystone role setup ceilometer::agent::auth::auth_tenant_name: 'admin' +nova::api::admin_tenant_name: 'service' nova::network::neutron::neutron_admin_tenant_name: 'service' nova::network::neutron::neutron_admin_username: 'neutron' nova::network::neutron::dhcp_domain: '' diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index f42ddf6c..b0e6ae96 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -30,7 +30,6 @@ redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}" redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh' # service tenant -nova::api::admin_tenant_name: 'service' glance::api::keystone_tenant: 'service' glance::registry::keystone_tenant: 'service' neutron::server::auth_tenant: 'service' diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index a8abbb77..683c1213 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -450,8 +450,6 @@ if hiera('step') >= 3 { include ::ceilometer::api include ::ceilometer::agent::notification include ::ceilometer::agent::central - include ::ceilometer::alarm::notifier - include ::ceilometer::alarm::evaluator include ::ceilometer::expirer include ::ceilometer::collector include ::ceilometer::agent::auth diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 1a66c5ea..6c8530ff 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -534,11 +534,12 @@ if hiera('step') >= 3 { $glance_store = concat($http_store, $backend_store) if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) { + $secontext = 'context="system_u:object_r:glance_var_lib_t:s0"' pacemaker::resource::filesystem { 'glance-fs': device => hiera('glance_file_pcmk_device'), directory => hiera('glance_file_pcmk_directory'), fstype => hiera('glance_file_pcmk_fstype'), - fsoptions => hiera('glance_file_pcmk_options', ''), + fsoptions => join([$secontext, hiera('glance_file_pcmk_options', '')],','), clone_params => '', } } @@ -854,14 +855,6 @@ if hiera('step') >= 3 { manage_service => false, enabled => false, } - class { '::ceilometer::alarm::notifier' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::alarm::evaluator' : - manage_service => false, - enabled => false, - } class { '::ceilometer::collector' : 
manage_service => false, enabled => false, @@ -1338,12 +1331,6 @@ if hiera('step') >= 4 { pacemaker::resource::service { $::ceilometer::params::api_service_name : clone_params => 'interleave=true', } - pacemaker::resource::service { $::ceilometer::params::alarm_evaluator_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::ceilometer::params::alarm_notifier_service_name : - clone_params => 'interleave=true', - } pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name : clone_params => 'interleave=true', } @@ -1418,54 +1405,6 @@ if hiera('step') >= 4 { require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], Pacemaker::Resource::Ocf['delay']], } - pacemaker::constraint::base { 'ceilometer-delay-then-ceilometer-alarm-evaluator-constraint': - constraint_type => 'order', - first_resource => 'delay-clone', - second_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::colocation { 'ceilometer-alarm-evaluator-with-ceilometer-delay-colocation': - source => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - target => 'delay-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::base { 'ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - second_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } - pacemaker::constraint::colocation { 'ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation': - source => "${::ceilometer::params::alarm_notifier_service_name}-clone", - target => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } - pacemaker::constraint::base { 'ceilometer-alarm-notifier-then-ceilometer-notification-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone", - second_resource => "${::ceilometer::params::agent_notification_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } - pacemaker::constraint::colocation { 'ceilometer-notification-with-ceilometer-alarm-notifier-colocation': - source => "${::ceilometer::params::agent_notification_service_name}-clone", - target => "${::ceilometer::params::alarm_notifier_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name], - 
Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } if downcase(hiera('ceilometer_backend')) == 'mongodb' { pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint': constraint_type => 'order', diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp index 4296208b..2d880d33 100644 --- a/puppet/manifests/ringbuilder.pp +++ b/puppet/manifests/ringbuilder.pp @@ -70,7 +70,7 @@ class tripleo::ringbuilder ( # create local rings swift::ringbuilder::create{ ['object', 'account', 'container']: part_power => $part_power, - replicas => $replicas, + replicas => min(count($device_array), $replicas), min_part_hours => $min_part_hours, } -> diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml index fbb2b878..721dcba4 100644 --- a/puppet/swift-storage.yaml +++ b/puppet/swift-storage.yaml @@ -17,7 +17,7 @@ parameters: type: string KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string MountCheck: default: 'false' @@ -88,7 +88,16 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. - + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. + type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -103,6 +112,8 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: |
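The new knobs threaded through the role templates above (ServerMetadata, the per-role SchedulerHints parameters, InstanceNameTemplate, UseForwardedFor, and the HostsEntry output) are all driven through parameter_defaults. A minimal sketch of an environment file that exercises them follows; the file name, keys and values are illustrative placeholders and are not part of this change.

# scheduler-and-metadata.yaml -- hypothetical environment file, passed to the
# deployment with an extra -e argument; every value below is a placeholder.
parameter_defaults:
  # Extra properties attached to every overcloud Nova server; readable from
  # inside each node via the Nova metadata API.
  ServerMetadata:
    owner: 'ci-team'
    lifetime: 'short'
  # Opaque hints forwarded to the Nova scheduler when the role's servers are
  # created, e.g. capability hints used for node placement.
  ControllerSchedulerHints:
    'capabilities:node': 'controller-0'
  NovaComputeSchedulerHints:
    'capabilities:node': 'compute-0'
  # Template the overcloud nova-api uses when generating instance names
  # (wired to nova::api::instance_name_template on the controllers).
  InstanceNameTemplate: 'overcloud-inst-%08x'

The Nuage environment file in this change already sets UseForwardedFor: true for Nuage deployments. The new HostsEntry output can be read back after the deploy (for example with heat output-show overcloud HostsEntry) and appended to /etc/hosts for hostname-based access to the nodes when no DNS is set up.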