Diffstat (limited to 'puppet')
-rw-r--r--  puppet/all-nodes-config.yaml  13
-rw-r--r--  puppet/ceph-storage-post.yaml  1
-rw-r--r--  puppet/ceph-storage.yaml  60
-rw-r--r--  puppet/cinder-storage-post.yaml  9
-rw-r--r--  puppet/cinder-storage.yaml  51
-rw-r--r--  puppet/compute-post.yaml  1
-rw-r--r--  puppet/compute.yaml  74
-rw-r--r--  puppet/controller-post.yaml  6
-rw-r--r--  puppet/controller.yaml  238
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml  119
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml  8
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml  1
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml  87
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml  86
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml  1
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml  1
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml  1
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml  1
-rw-r--r--  puppet/extraconfig/pre_deploy/per_node.yaml  1
-rw-r--r--  puppet/extraconfig/tls/ca-inject.yaml  1
-rw-r--r--  puppet/extraconfig/tls/tls-cert-inject.yaml  1
-rw-r--r--  puppet/hieradata/controller.yaml  11
-rw-r--r--  puppet/hieradata/database.yaml  7
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp  2
-rw-r--r--  puppet/manifests/overcloud_compute.pp  25
-rw-r--r--  puppet/manifests/overcloud_controller.pp  197
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp  252
-rw-r--r--  puppet/manifests/overcloud_object.pp  2
-rw-r--r--  puppet/manifests/overcloud_volume.pp  2
-rw-r--r--  puppet/swift-storage-post.yaml  2
-rw-r--r--  puppet/swift-storage.yaml  48
-rw-r--r--  puppet/vip-config.yaml  1
32 files changed, 1192 insertions, 118 deletions
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 895ddc3d..3dd3d5c9 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -50,6 +50,8 @@ parameters:
type: comma_delimited_list
keystone_admin_api_node_ips:
type: comma_delimited_list
+ sahara_api_node_ips:
+ type: comma_delimited_list
DeployIdentifier:
type: string
@@ -241,6 +243,14 @@ resources:
list_join:
- "','"
- {get_param: keystone_admin_api_node_ips}
+ sahara_api_node_ips:
+ str_replace:
+ template: "['SERVERS_LIST']"
+ params:
+ SERVERS_LIST:
+ list_join:
+ - "','"
+ - {get_param: sahara_api_node_ips}
# NOTE(gfidente): interpolation with %{} in the
# hieradata file can't be used as it returns string
@@ -250,6 +260,7 @@ resources:
neutron::rabbit_hosts: *rabbit_nodes_array
nova::rabbit_hosts: *rabbit_nodes_array
keystone::rabbit_hosts: *rabbit_nodes_array
+ sahara::rabbit_hosts: *rabbit_nodes_array
deploy_identifier: {get_param: DeployIdentifier}
update_identifier: {get_param: UpdateIdentifier}
@@ -261,7 +272,7 @@ outputs:
{get_resource: allNodesConfigImpl}
hosts_entries:
description: |
- The content that should be appended to your /etc/hosts if you want do get
+ The content that should be appended to your /etc/hosts if you want to get
hostname-based access to the deployed nodes (useful for testing without
setting up a DNS).
value: {get_attr: [allNodesConfigImpl, config, hosts]}
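The sahara_api_node_ips wiring above follows the same str_replace/list_join pattern as the other service node lists: the comma_delimited_list is joined with "','" and wrapped in ['...'] so that hiera receives a Puppet-style array string. A sketch with hypothetical addresses:

    # Hypothetical input: sahara_api_node_ips = [192.0.2.10, 192.0.2.11]
    # list_join with "','" produces:  192.0.2.10','192.0.2.11
    # str_replace then yields the hiera value:
    sahara_api_node_ips: "['192.0.2.10','192.0.2.11']"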
diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml
index 0f7dd36f..f9c53465 100644
--- a/puppet/ceph-storage-post.yaml
+++ b/puppet/ceph-storage-post.yaml
@@ -30,6 +30,7 @@ resources:
CephStorageDeployment_Step1:
type: OS::Heat::StructuredDeployments
properties:
+ name: CephStorageDeployment_Step1
servers: {get_param: servers}
config: {get_resource: CephStoragePuppetConfig}
input_values:
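This name: property, added here and to the other OS::Heat deployment resources throughout the change, gives each deployment an explicit, stable name instead of a Heat-derived one. A minimal sketch of the recurring pattern (resource names are illustrative):

    ExampleDeployment:
      type: OS::Heat::StructuredDeployments
      properties:
        name: ExampleDeployment  # explicit name matching the resource
        servers: {get_param: servers}
        config: {get_resource: ExamplePuppetConfig}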
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index b6a1007a..e310e1f5 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -34,6 +34,10 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
+ TimeZone:
+ default: 'UTC'
+ description: The timezone to be set on Ceph nodes.
+ type: string
UpdateIdentifier:
default: ''
type: string
@@ -59,6 +63,13 @@ parameters:
description: >
Heat action when to apply network configuration changes
default: ['CREATE']
+ SoftwareConfigTransport:
+ default: POLL_SERVER_CFN
+ description: |
+ How the server should receive the metadata required for software configuration.
+ type: string
+ constraints:
+ - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
CloudDomain:
default: ''
type: string
@@ -89,6 +100,7 @@ resources:
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
name: {get_param: Hostname}
+ software_config_transport: {get_param: SoftwareConfigTransport}
metadata: {get_param: ServerMetadata}
scheduler_hints: {get_param: SchedulerHints}
@@ -112,6 +124,16 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ ExternalPort:
+ type: OS::TripleO::CephStorage::Ports::ExternalPort
+ properties:
+ ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
+ InternalApiPort:
+ type: OS::TripleO::CephStorage::Ports::InternalApiPort
+ properties:
+ ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
StoragePort:
type: OS::TripleO::CephStorage::Ports::StoragePort
properties:
@@ -122,30 +144,53 @@ resources:
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ TenantPort:
+ type: OS::TripleO::CephStorage::Ports::TenantPort
+ properties:
+ ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
+ ManagementPort:
+ type: OS::TripleO::CephStorage::Ports::ManagementPort
+ properties:
+ ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
NetworkConfig:
type: OS::TripleO::CephStorage::Net::SoftwareConfig
properties:
ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetIpMap:
type: OS::TripleO::Network::Ports::NetIpMap
properties:
ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
StorageIp: {get_attr: [StoragePort, ip_address]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ TenantIp: {get_attr: [TenantPort, ip_address]}
+ ManagementIp: {get_attr: [ManagementPort, ip_address]}
NetIpSubnetMap:
type: OS::TripleO::Network::Ports::NetIpSubnetMap
properties:
ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
properties:
+ name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: CephStorage}
actions: {get_param: NetworkDeploymentActions}
@@ -154,10 +199,12 @@ resources:
type: OS::Heat::StructuredDeployment
depends_on: NetworkDeployment
properties:
+ name: CephStorageDeployment
config: {get_resource: CephStorageConfig}
server: {get_resource: CephStorage}
input_values:
ntp_servers: {get_param: NtpServer}
+ timezone: {get_param: TimeZone}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
@@ -189,6 +236,7 @@ resources:
raw_data: {get_file: hieradata/ceph.yaml}
mapped_data:
ntp::servers: {get_input: ntp_servers}
+ timezone::timezone: {get_input: timezone}
tripleo::packages::enable_install: {get_input: enable_package_install}
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
ceph::profile::params::cluster_network: {get_input: ceph_cluster_network}
@@ -241,12 +289,24 @@ outputs:
description: Heat resource handle for the ceph storage server
value:
{get_resource: CephStorage}
+ external_ip_address:
+ description: IP address of the server in the external network
+ value: {get_attr: [ExternalPort, ip_address]}
+ internal_api_ip_address:
+ description: IP address of the server in the internal_api network
+ value: {get_attr: [InternalApiPort, ip_address]}
storage_ip_address:
description: IP address of the server in the storage network
value: {get_attr: [StoragePort, ip_address]}
storage_mgmt_ip_address:
description: IP address of the server in the storage_mgmt network
value: {get_attr: [StorageMgmtPort, ip_address]}
+ tenant_ip_address:
+ description: IP address of the server in the tenant network
+ value: {get_attr: [TenantPort, ip_address]}
+ management_ip_address:
+ description: IP address of the server in the management network
+ value: {get_attr: [ManagementPort, ip_address]}
config_identifier:
description: identifier which changes if the node configuration may need re-applying
value:
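Both parameters introduced for the Ceph role above, TimeZone and SoftwareConfigTransport, are meant to be overridden via parameter_defaults; a hypothetical environment file might read:

    # Hypothetical environment file; the transport value must be one of the
    # declared allowed_values
    parameter_defaults:
      TimeZone: 'Europe/Madrid'
      SoftwareConfigTransport: POLL_SERVER_HEAT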
diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml
index c97cfcf9..9b7c752a 100644
--- a/puppet/cinder-storage-post.yaml
+++ b/puppet/cinder-storage-post.yaml
@@ -28,5 +28,14 @@ resources:
VolumeDeployment_Step1:
type: OS::Heat::StructuredDeployments
properties:
+ name: VolumeDeployment_Step1
servers: {get_param: servers}
config: {get_resource: VolumePuppetConfig}
+
+ # Note, this should come last, so use depends_on to ensure
+ # this is created after any other resources.
+ ExtraConfig:
+ depends_on: VolumeDeployment_Step1
+ type: OS::TripleO::NodeExtraConfigPost
+ properties:
+ servers: {get_param: servers}
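The new ExtraConfig resource exposes the OS::TripleO::NodeExtraConfigPost interface on block-storage nodes, running after VolumeDeployment_Step1. A minimal sketch of a template that could be registered for that hook (all names illustrative):

    # Hypothetical NodeExtraConfigPost implementation
    heat_template_version: 2015-04-30
    parameters:
      servers:
        type: json
    resources:
      ExtraConfigImpl:
        type: OS::Heat::SoftwareConfig
        properties:
          group: script
          config: |
            #!/bin/bash
            echo "post-deploy hook ran" > /tmp/extraconfig-post
      ExtraDeployments:
        type: OS::Heat::SoftwareDeployments
        properties:
          servers: {get_param: servers}
          config: {get_resource: ExtraConfigImpl}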
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index fc197059..f7e8f907 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -17,7 +17,6 @@ parameters:
description: The size of the loopback file used by the cinder LVM driver.
type: number
CinderPassword:
- default: unset
description: The password for the cinder service and db account, used by cinder-api.
type: string
hidden: true
@@ -70,7 +69,6 @@ parameters:
description: The user name for SNMPd with readonly rights running on all Overcloud nodes
type: string
SnmpdReadonlyUserPassword:
- default: unset
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
@@ -101,6 +99,10 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ TimeZone:
+ default: 'UTC'
+ description: The timezone to be set on Cinder nodes.
+ type: string
GlanceApiVirtualIP:
type: string
default: ''
@@ -112,6 +114,13 @@ parameters:
description: >
Heat action when to apply network configuration changes
default: ['CREATE']
+ SoftwareConfigTransport:
+ default: POLL_SERVER_CFN
+ description: |
+ How the server should receive the metadata required for software configuration.
+ type: string
+ constraints:
+ - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
CloudDomain:
default: ''
type: string
@@ -143,6 +152,7 @@ resources:
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
name: {get_param: Hostname}
+ software_config_transport: {get_param: SoftwareConfigTransport}
metadata: {get_param: ServerMetadata}
scheduler_hints: {get_param: SchedulerHints}
@@ -166,6 +176,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ ExternalPort:
+ type: OS::TripleO::BlockStorage::Ports::ExternalPort
+ properties:
+ ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+
InternalApiPort:
type: OS::TripleO::BlockStorage::Ports::InternalApiPort
properties:
@@ -181,25 +196,42 @@ resources:
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ TenantPort:
+ type: OS::TripleO::BlockStorage::Ports::TenantPort
+ properties:
+ ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+
+ ManagementPort:
+ type: OS::TripleO::BlockStorage::Ports::ManagementPort
+ properties:
+ ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+
NetworkConfig:
type: OS::TripleO::BlockStorage::Net::SoftwareConfig
properties:
ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetIpMap:
type: OS::TripleO::Network::Ports::NetIpMap
properties:
ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ ExternalIp: {get_attr: [ExternalPort, ip_address]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
StorageIp: {get_attr: [StoragePort, ip_address]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ TenantIp: {get_attr: [TenantPort, ip_address]}
+ ManagementIp: {get_attr: [ManagementPort, ip_address]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
properties:
+ name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: BlockStorage}
actions: {get_param: NetworkDeploymentActions}
@@ -208,11 +240,12 @@ resources:
type: OS::Heat::StructuredDeployment
depends_on: NetworkDeployment
properties:
+ name: BlockStorageDeployment
server: {get_resource: BlockStorage}
config: {get_resource: BlockStorageConfig}
input_values:
debug: {get_param: Debug}
- cinder_dsn: {list_join: ['', ['mysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIP} , '/cinder']]}
+ cinder_dsn: {list_join: ['', ['mysql+pymysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIP} , '/cinder']]}
snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
cinder_lvm_loop_device_size:
@@ -229,6 +262,7 @@ resources:
rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
rabbit_client_port: {get_param: RabbitClientPort}
ntp_servers: {get_param: NtpServer}
+ timezone: {get_param: TimeZone}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
@@ -271,6 +305,7 @@ resources:
cinder_iscsi_ip_address: {get_input: cinder_iscsi_ip_address}
cinder::glance::glance_api_servers: {get_input: glance_api_servers}
ntp::servers: {get_input: ntp_servers}
+ timezone::timezone: {get_input: timezone}
tripleo::packages::enable_install: {get_input: enable_package_install}
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
@@ -297,6 +332,7 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
properties:
+ name: UpdateDeployment
config: {get_resource: UpdateConfig}
server: {get_resource: BlockStorage}
input_values:
@@ -316,6 +352,9 @@ outputs:
description: Heat resource handle for the block storage server
value:
{get_resource: BlockStorage}
+ external_ip_address:
+ description: IP address of the server in the external network
+ value: {get_attr: [ExternalPort, ip_address]}
internal_api_ip_address:
description: IP address of the server in the internal_api network
value: {get_attr: [InternalApiPort, ip_address]}
@@ -325,6 +364,12 @@ outputs:
storage_mgmt_ip_address:
description: IP address of the server in the storage_mgmt network
value: {get_attr: [StorageMgmtPort, ip_address]}
+ tenant_ip_address:
+ description: IP address of the server in the tenant network
+ value: {get_attr: [TenantPort, ip_address]}
+ management_ip_address:
+ description: IP address of the server in the management network
+ value: {get_attr: [ManagementPort, ip_address]}
config_identifier:
description: identifier which changes if the node configuration may need re-applying
value:
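The cinder_dsn edit above switches the SQLAlchemy dialect from the implicit MySQLdb driver to PyMySQL; with the parameters substituted at deploy time, the rendered value takes the form:

    mysql+pymysql://cinder:<CinderPassword>@<MysqlVirtualIP>/cinder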
diff --git a/puppet/compute-post.yaml b/puppet/compute-post.yaml
index b63b06b4..3861e50c 100644
--- a/puppet/compute-post.yaml
+++ b/puppet/compute-post.yaml
@@ -31,6 +31,7 @@ resources:
ComputePuppetDeployment:
type: OS::Heat::StructuredDeployments
properties:
+ name: ComputePuppetDeployment
servers: {get_param: servers}
config: {get_resource: ComputePuppetConfig}
input_values:
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 6498b485..8b2bcd33 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -5,7 +5,6 @@ description: >
parameters:
AdminPassword:
- default: unset
description: The password for the keystone admin account, used for monitoring, querying neutron etc.
type: string
hidden: true
@@ -16,12 +15,10 @@ parameters:
constraints:
- allowed_values: ['', Present]
CeilometerMeteringSecret:
- default: unset
description: Secret shared by the ceilometer services.
type: string
hidden: true
CeilometerPassword:
- default: unset
description: The password for the ceilometer service account.
type: string
hidden: true
@@ -103,14 +100,13 @@ parameters:
description: The tenant network type for Neutron.
default: 'vxlan'
NeutronNetworkVLANRanges:
- default: 'datacentre'
+ default: 'datacentre:1:1000'
description: >
The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
Neutron documentation for permitted values. Defaults to permitting any
VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
type: comma_delimited_list
NeutronPassword:
- default: unset
description: The password for the neutron service account, used by neutron agents.
type: string
hidden: true
@@ -146,7 +142,6 @@ parameters:
default: 'False'
type: string
NeutronMetadataProxySharedSecret:
- default: 'unset'
description: Shared secret to prevent spoofing
type: string
hidden: true
@@ -157,7 +152,7 @@ parameters:
from neutron.core_plugins namespace.
type: string
NeutronServicePlugins:
- default: "router"
+ default: "router,qos"
description: |
Comma-separated list of service plugin entrypoints to be loaded from the
neutron.service_plugins namespace.
@@ -172,6 +167,11 @@ parameters:
description: |
The mechanism drivers for the Neutron tenant network.
type: comma_delimited_list
+ NeutronAgentExtensions:
+ default: "qos"
+ description: |
+ Comma-separated list of extensions enabled for the Neutron agents.
+ type: comma_delimited_list
# Not relevant for Computes, should be removed
NeutronAllowL3AgentFailover:
default: 'True'
@@ -200,7 +200,7 @@ parameters:
type: json
NovaComputeLibvirtType:
type: string
- default: ''
+ default: kvm
NovaComputeLibvirtVifDriver:
default: ''
description: Libvirt VIF driver configuration for the network
@@ -210,7 +210,6 @@ parameters:
description: Whether to enable or not the Rbd backend for Nova
type: boolean
NovaPassword:
- default: unset
description: The password for the nova service account, used by nova-api.
type: string
hidden: true
@@ -256,7 +255,6 @@ parameters:
description: The user name for SNMPd with readonly rights running on all Overcloud nodes
type: string
SnmpdReadonlyUserPassword:
- default: unset
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
@@ -274,6 +272,10 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ TimeZone:
+ default: 'UTC'
+ description: The timezone to be set on compute nodes.
+ type: string
UpdateIdentifier:
default: ''
type: string
@@ -288,6 +290,13 @@ parameters:
description: >
Heat action when to apply network configuration changes
default: ['CREATE']
+ SoftwareConfigTransport:
+ default: POLL_SERVER_CFN
+ description: |
+ How the server should receive the metadata required for software configuration.
+ type: string
+ constraints:
+ - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
CloudDomain:
default: ''
type: string
@@ -321,6 +330,7 @@ resources:
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
name: {get_param: Hostname}
+ software_config_transport: {get_param: SoftwareConfigTransport}
metadata: {get_param: ServerMetadata}
scheduler_hints: {get_param: SchedulerHints}
@@ -344,6 +354,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ ExternalPort:
+ type: OS::TripleO::Compute::Ports::ExternalPort
+ properties:
+ ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+
InternalApiPort:
type: OS::TripleO::Compute::Ports::InternalApiPort
properties:
@@ -354,30 +369,47 @@ resources:
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ StorageMgmtPort:
+ type: OS::TripleO::Compute::Ports::StorageMgmtPort
+ properties:
+ ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+
TenantPort:
type: OS::TripleO::Compute::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ ManagementPort:
+ type: OS::TripleO::Compute::Ports::ManagementPort
+ properties:
+ ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+
NetIpMap:
type: OS::TripleO::Network::Ports::NetIpMap
properties:
ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ ExternalIp: {get_attr: [ExternalPort, ip_address]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ ManagementIp: {get_attr: [ManagementPort, ip_address]}
NetworkConfig:
type: OS::TripleO::Compute::Net::SoftwareConfig
properties:
ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
+ StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
properties:
+ name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: NovaCompute}
actions: {get_param: NetworkDeploymentActions}
@@ -404,6 +436,7 @@ resources:
- common
- cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre
- nova_nuage_data # Optionally provided by ComputeExtraConfigPre
+ - midonet_data # Optionally provided by AllNodesExtraConfig
datafiles:
compute_extraconfig:
mapped_data: {get_param: NovaComputeExtraConfig}
@@ -455,7 +488,8 @@ resources:
neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types}
- neutron::agents::ml2::ovs:tunnel_types: {get_input: neutron_tunnel_types}
+ neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
+ neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions}
neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
@@ -478,6 +512,7 @@ resources:
keystone_public_api_virtual_ip: {get_input: keystone_vip}
admin_password: {get_input: admin_password}
ntp::servers: {get_input: ntp_servers}
+ timezone::timezone: {get_input: timezone}
tripleo::packages::enable_install: {get_input: enable_package_install}
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
@@ -485,6 +520,7 @@ resources:
type: OS::TripleO::SoftwareDeployment
depends_on: NetworkDeployment
properties:
+ name: NovaComputeDeployment
config: {get_resource: NovaComputeConfig}
server: {get_resource: NovaCompute}
input_values:
@@ -569,6 +605,11 @@ resources:
template: MECHANISMS
params:
MECHANISMS: {get_param: NeutronMechanismDrivers}
+ neutron_agent_extensions:
+ str_replace:
+ template: AGENT_EXTENSIONS
+ params:
+ AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions}
neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]}
neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri]}
@@ -579,6 +620,7 @@ resources:
rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
rabbit_client_port: {get_param: RabbitClientPort}
ntp_servers: {get_param: NtpServer}
+ timezone: {get_param: TimeZone}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
@@ -610,6 +652,7 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
properties:
+ name: UpdateDeployment
config: {get_resource: UpdateConfig}
server: {get_resource: NovaCompute}
input_values:
@@ -620,15 +663,24 @@ outputs:
ip_address:
description: IP address of the server in the ctlplane network
value: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ external_ip_address:
+ description: IP address of the server in the external network
+ value: {get_attr: [ExternalPort, ip_address]}
internal_api_ip_address:
description: IP address of the server in the internal_api network
value: {get_attr: [InternalApiPort, ip_address]}
storage_ip_address:
description: IP address of the server in the storage network
value: {get_attr: [StoragePort, ip_address]}
+ storage_mgmt_ip_address:
+ description: IP address of the server in the storage_mgmt network
+ value: {get_attr: [StorageMgmtPort, ip_address]}
tenant_ip_address:
description: IP address of the server in the tenant network
value: {get_attr: [TenantPort, ip_address]}
+ management_ip_address:
+ description: IP address of the server in the management network
+ value: {get_attr: [ManagementPort, ip_address]}
hostname:
description: Hostname of the server
value: {get_attr: [NovaCompute, name]}
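The compute changes wire QoS through by default: NeutronServicePlugins now includes qos, and the new NeutronAgentExtensions parameter feeds the OVS agent (note the accompanying ovs:tunnel_types -> ovs::tunnel_types namespace fix). Under the new defaults, the relevant hieradata resolves to, roughly:

    # Sketch of the resolved hiera value under the new defaults
    neutron::agents::ml2::ovs::extensions: qos
    # server side, from NeutronServicePlugins: "router,qos"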
diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml
index ed8129e7..d250dd70 100644
--- a/puppet/controller-post.yaml
+++ b/puppet/controller-post.yaml
@@ -35,6 +35,7 @@ resources:
type: OS::Heat::StructuredDeployments
depends_on: ControllerPrePuppet
properties:
+ name: ControllerLoadBalancerDeployment_Step1
servers: {get_param: servers}
config: {get_resource: ControllerPuppetConfig}
input_values:
@@ -46,6 +47,7 @@ resources:
type: OS::Heat::StructuredDeployments
depends_on: ControllerLoadBalancerDeployment_Step1
properties:
+ name: ControllerServicesBaseDeployment_Step2
servers: {get_param: servers}
config: {get_resource: ControllerPuppetConfig}
input_values:
@@ -71,6 +73,7 @@ resources:
type: OS::Heat::StructuredDeployments
depends_on: ControllerServicesBaseDeployment_Step2
properties:
+ name: ControllerRingbuilderDeployment_Step3
servers: {get_param: servers}
config: {get_resource: ControllerRingbuilderPuppetConfig}
input_values:
@@ -80,6 +83,7 @@ resources:
type: OS::Heat::StructuredDeployments
depends_on: ControllerRingbuilderDeployment_Step3
properties:
+ name: ControllerOvercloudServicesDeployment_Step4
servers: {get_param: servers}
config: {get_resource: ControllerPuppetConfig}
input_values:
@@ -90,6 +94,7 @@ resources:
type: OS::Heat::StructuredDeployments
depends_on: ControllerOvercloudServicesDeployment_Step4
properties:
+ name: ControllerOvercloudServicesDeployment_Step5
servers: {get_param: servers}
config: {get_resource: ControllerPuppetConfig}
input_values:
@@ -100,6 +105,7 @@ resources:
type: OS::Heat::StructuredDeployments
depends_on: ControllerOvercloudServicesDeployment_Step5
properties:
+ name: ControllerOvercloudServicesDeployment_Step6
servers: {get_param: servers}
config: {get_resource: ControllerPuppetConfig}
input_values:
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index d139003b..ea19c711 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -10,12 +10,10 @@ parameters:
type: string
hidden: true
AdminPassword:
- default: unset
description: The password for the keystone admin account, used for monitoring, querying neutron etc.
type: string
hidden: true
AdminToken:
- default: unset
description: The keystone auth secret and db password.
type: string
hidden: true
@@ -27,18 +25,25 @@ parameters:
description: The ceilometer backend type.
type: string
CeilometerMeteringSecret:
- default: unset
description: Secret shared by the ceilometer services.
type: string
hidden: true
CeilometerPassword:
- default: unset
description: The password for the ceilometer service and db account.
type: string
hidden: true
CinderApiVirtualIP:
type: string
default: ''
+ CeilometerWorkers:
+ default: 0
+ description: Number of workers for Ceilometer service.
+ type: number
+ CinderEnableDBPurge:
+ default: true
+ description: |
+ Whether to create cron job for purging soft deleted rows in Cinder database.
+ type: boolean
CinderEnableNfsBackend:
default: false
description: Whether to enable or not the NFS backend for Cinder
@@ -72,7 +77,6 @@ parameters:
CinderEnableNfsBackend is true.
type: comma_delimited_list
CinderPassword:
- default: unset
description: The password for the cinder service and db account, used by cinder-api.
type: string
hidden: true
@@ -81,15 +85,24 @@ parameters:
description: Contains parameters to configure Cinder backends. Typically
set via parameter_defaults in the resource registry.
type: json
- CloudName:
- default: ''
- description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
- type: string
+ CinderWorkers:
+ default: 0
+ description: Number of workers for Cinder service.
+ type: number
ControllerExtraConfig:
default: {}
description: |
Controller specific hiera configuration data to inject into the cluster.
type: json
+ ControllerIPs:
+ default: {}
+ description: >
+ A network mapped list of IPs to assign to Controllers in the following form:
+ {
+ "internal_api": ["a.b.c.d", "e.f.g.h"],
+ ...
+ }
+ type: json
ControlVirtualInterface:
default: 'br-ex'
description: Interface where virtual ip will be assigned.
@@ -170,7 +183,6 @@ parameters:
type: string
default: ''
GlancePassword:
- default: unset
description: The password for the glance service and db account, used by the glance services.
type: string
hidden: true
@@ -209,15 +221,17 @@ parameters:
default: /dev/log
description: Syslog address where HAproxy will send its log
type: string
+ GlanceWorkers:
+ default: 0
+ description: Number of workers for Glance service.
+ type: number
HeatPassword:
- default: unset
description: The password for the Heat service and db account, used by the Heat services.
type: string
hidden: true
HeatStackDomainAdminPassword:
description: Password for heat_domain_admin user.
type: string
- default: ''
hidden: true
HeatAuthEncryptionKey:
description: Auth encryption key for heat-engine
@@ -227,6 +241,10 @@ parameters:
default: '*'
description: A list of IP/Hostname allowed to connect to horizon
type: comma_delimited_list
+ HeatWorkers:
+ default: 0
+ description: Number of workers for Heat service.
+ type: number
HorizonSecret:
description: Secret key for Django
type: string
@@ -254,6 +272,11 @@ parameters:
default: ''
description: Keystone self-signed certificate authority certificate.
type: string
+ KeystoneEnableDBPurge:
+ default: true
+ description: |
+ Whether to create cron job for purging soft deleted rows in Keystone database.
+ type: boolean
KeystoneSigningCertificate:
default: ''
description: Keystone certificate for verifying token validity.
@@ -294,6 +317,18 @@ parameters:
default: false
description: Whether IPtables rules should be purged before setting up the new ones.
type: boolean
+ KeystoneWorkers:
+ default: 0
+ description: Number of workers for Keystone service.
+ type: number
+ SaharaApiVirtualIP:
+ type: string
+ default: ''
+ SaharaPassword:
+ default: unset
+ description: The password for the sahara service account, used by sahara-api.
+ type: string
+ hidden: true
MysqlClusterUniquePart:
description: A unique identifier of the MySQL cluster the controller is in.
type: string
@@ -367,7 +402,6 @@ parameters:
description: Whether to configure Neutron Distributed Virtual Routers
type: string
NeutronMetadataProxySharedSecret:
- default: 'unset'
description: Shared secret to prevent spoofing
type: string
hidden: true
@@ -378,7 +412,7 @@ parameters:
from neutron.core_plugins namespace.
type: string
NeutronServicePlugins:
- default: "router"
+ default: "router,qos"
description: |
Comma-separated list of service plugin entrypoints to be loaded from the
neutron.service_plugins namespace.
@@ -422,14 +456,13 @@ parameters:
description: The tenant network type for Neutron.
type: comma_delimited_list
NeutronNetworkVLANRanges:
- default: 'datacentre'
+ default: 'datacentre:1:1000'
description: >
The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
Neutron documentation for permitted values. Defaults to permitting any
VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
type: comma_delimited_list
NeutronPassword:
- default: unset
description: The password for the neutron service and db account, used by neutron agents.
type: string
hidden: true
@@ -476,14 +509,36 @@ parameters:
of VXLAN VNI IDs that are available for tenant network allocation
default: ["1:4094", ]
type: comma_delimited_list
+ NeutronPluginExtensions:
+ default: "qos"
+ description: |
+ Comma-separated list of extensions enabled for the Neutron plugin.
+ type: comma_delimited_list
+ NeutronAgentExtensions:
+ default: "qos"
+ description: |
+ Comma-separated list of extensions enabled for the Neutron agents.
+ type: comma_delimited_list
NovaApiVirtualIP:
type: string
default: ''
+ NeutronWorkers:
+ default: 0
+ description: Number of workers for Neutron service.
+ type: number
+ NovaEnableDBPurge:
+ default: true
+ description: |
+ Whether to create cron job for purging soft deleted rows in Nova database.
+ type: boolean
NovaPassword:
- default: unset
description: The password for the nova service and db account, used by nova-api.
type: string
hidden: true
+ NovaWorkers:
+ default: 0
+ description: Number of workers for Nova service.
+ type: number
MongoDbNoJournal:
default: false
description: Should MongoDb journaling be disabled
@@ -540,12 +595,10 @@ parameters:
description: The user name for SNMPd with readonly rights running on all Overcloud nodes
type: string
SnmpdReadonlyUserPassword:
- default: unset
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
SwiftHashSuffix:
- default: unset
description: A random string to be used as a salt when hashing to determine mappings
in the ring.
hidden: true
@@ -563,7 +616,6 @@ parameters:
description: Partition Power to use when building Swift rings
type: number
SwiftPassword:
- default: unset
description: The password for the swift service account, used by the swift proxy
services.
hidden: true
@@ -575,6 +627,14 @@ parameters:
type: number
default: 3
description: How many replicas to use in the swift rings.
+ SwiftWorkers:
+ default: 0
+ description: Number of workers for Swift service.
+ type: number
+ TimeZone:
+ default: 'UTC'
+ description: The timezone to be set on controller nodes.
+ type: string
VirtualIP: # DEPRECATED: use per service settings instead
type: string
default: '' # Has to be here because of the ignored empty value bug
@@ -630,6 +690,13 @@ parameters:
NodeIndex:
type: number
default: 0
+ SoftwareConfigTransport:
+ default: POLL_SERVER_CFN
+ description: |
+ How the server should receive the metadata required for software configuration.
+ type: string
+ constraints:
+ - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
CloudDomain:
default: ''
type: string
@@ -661,6 +728,7 @@ resources:
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
name: {get_param: Hostname}
+ software_config_transport: {get_param: SoftwareConfigTransport}
metadata: {get_param: ServerMetadata}
scheduler_hints: {get_param: SchedulerHints}
@@ -687,26 +755,41 @@ resources:
ExternalPort:
type: OS::TripleO::Controller::Ports::ExternalPort
properties:
+ IPPool: {get_param: ControllerIPs}
+ NodeIndex: {get_param: NodeIndex}
ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
InternalApiPort:
type: OS::TripleO::Controller::Ports::InternalApiPort
properties:
+ IPPool: {get_param: ControllerIPs}
+ NodeIndex: {get_param: NodeIndex}
ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
StoragePort:
type: OS::TripleO::Controller::Ports::StoragePort
properties:
+ IPPool: {get_param: ControllerIPs}
+ NodeIndex: {get_param: NodeIndex}
ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
StorageMgmtPort:
type: OS::TripleO::Controller::Ports::StorageMgmtPort
properties:
+ IPPool: {get_param: ControllerIPs}
+ NodeIndex: {get_param: NodeIndex}
ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
TenantPort:
type: OS::TripleO::Controller::Ports::TenantPort
properties:
+ IPPool: {get_param: ControllerIPs}
+ NodeIndex: {get_param: NodeIndex}
+ ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
+
+ ManagementPort:
+ type: OS::TripleO::Controller::Ports::ManagementPort
+ properties:
ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
NetIpMap:
@@ -718,6 +801,7 @@ resources:
StorageIp: {get_attr: [StoragePort, ip_address]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ ManagementIp: {get_attr: [ManagementPort, ip_address]}
NetIpSubnetMap:
type: OS::TripleO::Network::Ports::NetIpSubnetMap
@@ -728,6 +812,7 @@ resources:
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetworkConfig:
type: OS::TripleO::Controller::Net::SoftwareConfig
@@ -738,10 +823,12 @@ resources:
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
properties:
+ name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: Controller}
actions: {get_param: NetworkDeploymentActions}
@@ -764,14 +851,24 @@ resources:
server: {get_resource: Controller}
NodeIndex: {get_param: NodeIndex}
+
ControllerDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: NetworkDeployment
properties:
+ name: ControllerDeployment
config: {get_resource: ControllerConfig}
server: {get_resource: Controller}
input_values:
bootstack_nodeid: {get_attr: [Controller, name]}
+ ceilometer_workers: {get_param: CeilometerWorkers}
+ cinder_workers: {get_param: CinderWorkers}
+ glance_workers: {get_param: GlanceWorkers}
+ heat_workers: {get_param: HeatWorkers}
+ keystone_workers: {get_param: KeystoneWorkers}
+ nova_workers: {get_param: NovaWorkers}
+ neutron_workers: {get_param: NeutronWorkers}
+ swift_workers: {get_param: SwiftWorkers}
neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
neutron_enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
@@ -802,6 +899,7 @@ resources:
admin_token: {get_param: AdminToken}
neutron_public_interface_ip: {get_param: NeutronPublicInterfaceIP}
debug: {get_param: Debug}
+ cinder_enable_db_purge: {get_param: CinderEnableDBPurge}
cinder_enable_nfs_backend: {get_param: CinderEnableNfsBackend}
cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
@@ -818,7 +916,7 @@ resources:
cinder_dsn:
list_join:
- ''
- - - 'mysql://cinder:'
+ - - 'mysql+pymysql://cinder:'
- {get_param: CinderPassword}
- '@'
- {get_param: MysqlVirtualIP}
@@ -835,7 +933,7 @@ resources:
glance_dsn:
list_join:
- ''
- - - 'mysql://glance:'
+ - - 'mysql+pymysql://glance:'
- {get_param: GlancePassword}
- '@'
- {get_param: MysqlVirtualIP}
@@ -845,7 +943,7 @@ resources:
heat_dsn:
list_join:
- ''
- - - 'mysql://heat:'
+ - - 'mysql+pymysql://heat:'
- {get_param: HeatPassword}
- '@'
- {get_param: MysqlVirtualIP}
@@ -857,10 +955,11 @@ resources:
keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
keystone_notification_driver: {get_param: KeystoneNotificationDriver}
keystone_notification_format: {get_param: KeystoneNotificationFormat}
+ keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
keystone_dsn:
list_join:
- ''
- - - 'mysql://keystone:'
+ - - 'mysql+pymysql://keystone:'
- {get_param: AdminToken}
- '@'
- {get_param: MysqlVirtualIP}
@@ -951,12 +1050,22 @@ resources:
template: TYPES
params:
TYPES: {get_param: NeutronTunnelTypes}
+ neutron_plugin_extensions:
+ str_replace:
+ template: PLUGIN_EXTENSIONS
+ params:
+ PLUGIN_EXTENSIONS: {get_param: NeutronPluginExtensions}
+ neutron_agent_extensions:
+ str_replace:
+ template: AGENT_EXTENSIONS
+ params:
+ AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions}
neutron_password: {get_param: NeutronPassword}
neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions}
neutron_dsn:
list_join:
- ''
- - - 'mysql://neutron:'
+ - - 'mysql+pymysql://neutron:'
- {get_param: NeutronPassword}
- '@'
- {get_param: MysqlVirtualIP}
@@ -964,7 +1073,7 @@ resources:
neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] }
neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] }
neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] }
- neutron_admin_auth_url: { get_param: [ EndpointMap, KeystoneAdmin, uri ] }
+ neutron_admin_auth_url: { get_param: [ EndpointMap, KeystoneAdmin, uri_no_suffix ] }
nova_internal_url: { get_param: [ EndpointMap, NovaInternal, uri ] }
ceilometer_backend: {get_param: CeilometerBackend}
ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
@@ -978,18 +1087,19 @@ resources:
ceilometer_dsn:
list_join:
- ''
- - - 'mysql://ceilometer:'
+ - - 'mysql+pymysql://ceilometer:'
- {get_param: CeilometerPassword}
- '@'
- {get_param: MysqlVirtualIP}
- '/ceilometer'
snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
+ nova_enable_db_purge: {get_param: NovaEnableDBPurge}
nova_password: {get_param: NovaPassword}
nova_dsn:
list_join:
- ''
- - - 'mysql://nova:'
+ - - 'mysql+pymysql://nova:'
- {get_param: NovaPassword}
- '@'
- {get_param: MysqlVirtualIP}
@@ -1012,6 +1122,7 @@ resources:
params:
LIMIT: {get_param: RabbitFDLimit}
ntp_servers: {get_param: NtpServer}
+ timezone: {get_param: TimeZone}
control_virtual_interface: {get_param: ControlVirtualInterface}
public_virtual_interface: {get_param: PublicVirtualInterface}
swift_hash_suffix: {get_param: SwiftHashSuffix}
@@ -1022,6 +1133,15 @@ resources:
swift_mount_check: {get_param: SwiftMountCheck}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+ sahara_password: {get_param: SaharaPassword}
+ sahara_dsn:
+ list_join:
+ - ''
+ - - 'mysql://sahara:'
+ - {get_param: SaharaPassword}
+ - '@'
+ - {get_param: MysqlVirtualIP}
+ - '/sahara'
swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
cinder_iscsi_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]}
@@ -1044,6 +1164,7 @@ resources:
rabbitmq_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
redis_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RedisNetwork]}]}
redis_vip: {get_param: RedisVirtualIP}
+ sahara_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
memcached_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
mysql_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
mysql_virtual_ip: {get_param: MysqlVirtualIP}
@@ -1074,11 +1195,14 @@ resources:
- vip_data # provided by vip-config
- '"%{::osfamily}"'
- common
+ - cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre
- cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
+ - cinder_eqlx_data # Optionally provided by ControllerExtraConfigPre
- neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
- neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
- cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
- neutron_nuage_data # Optionally provided by ControllerExtraConfigPre
+ - midonet_data #Optionally provided by AllNodesExtraConfig
datafiles:
controller_extraconfig:
mapped_data: {get_param: ControllerExtraConfig}
@@ -1114,6 +1238,7 @@ resources:
swift::storage::all::storage_local_net_ip: {get_input: swift_management_network}
swift::swift_hash_suffix: {get_input: swift_hash_suffix}
swift::proxy::authtoken::admin_password: {get_input: swift_password}
+ swift::proxy::workers: {get_input: swift_workers}
tripleo::ringbuilder::part_power: {get_input: swift_part_power}
tripleo::ringbuilder::replicas: {get_input: swift_replicas}
tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours}
@@ -1124,6 +1249,7 @@ resources:
tripleo::ringbuilder::build_ring: True
# Cinder
+ cinder_enable_db_purge: {get_input: cinder_enable_db_purge}
cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend}
cinder_enable_rbd_backend: {get_input: cinder_enable_rbd_backend}
cinder_nfs_mount_options: {get_input: cinder_nfs_mount_options}
@@ -1154,6 +1280,7 @@ resources:
glance::api::registry_host: {get_input: glance_registry_host}
glance::api::keystone_password: {get_input: glance_password}
glance::api::debug: {get_input: debug}
+ glance::api::workers: {get_input: glance_workers}
glance_notifier_strategy: {get_input: glance_notifier_strategy}
      glance_log_file: {get_input: glance_log_file}
@@ -1165,6 +1292,7 @@ resources:
glance::registry::identity_uri: {get_input: keystone_identity_uri}
glance::registry::debug: {get_input: debug}
glance::backend::swift::swift_store_auth_address: {get_input: keystone_auth_uri}
+ glance::registry::workers: {get_input: glance_workers}
glance::backend::swift::swift_store_user: service:glance
glance::backend::swift::swift_store_key: {get_input: glance_password}
glance_backend: {get_input: glance_backend}
@@ -1189,8 +1317,11 @@ resources:
heat::identity_uri: {get_input: keystone_identity_uri}
heat::keystone_password: {get_input: heat_password}
heat::api::bind_host: {get_input: heat_api_network}
+ heat::api::workers: {get_input: heat_workers}
heat::api_cloudwatch::bind_host: {get_input: heat_api_network}
+ heat::api_cloudwatch::workers: {get_input: heat_workers}
heat::api_cfn::bind_host: {get_input: heat_api_network}
+ heat::api_cfn::workers: {get_input: heat_workers}
heat::database_connection: {get_input: heat_dsn}
heat::debug: {get_input: debug}
heat::db::mysql::password: {get_input: heat_password}
@@ -1219,6 +1350,10 @@ resources:
keystone::endpoint::internal_url: {get_input: keystone_internal_url}
keystone::endpoint::admin_url: {get_input: keystone_identity_uri}
keystone::endpoint::region: {get_input: keystone_region}
+ keystone::admin_workers: {get_input: keystone_workers}
+ keystone::public_workers: {get_input: keystone_workers}
+ keystone_enable_db_purge: {get_input: keystone_enable_db_purge}
+
# MongoDB
mongodb::server::bind_ip: {get_input: mongo_db_network}
mongodb::server::nojournal: {get_input: mongodb_no_journal}
@@ -1244,6 +1379,7 @@ resources:
neutron::server::auth_uri: {get_input: keystone_auth_uri}
neutron::server::identity_uri: {get_input: keystone_identity_uri}
neutron::server::database_connection: {get_input: neutron_dsn}
+ neutron::server::api_workers: {get_input: neutron_workers}
neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge}
neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling}
neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
@@ -1252,6 +1388,7 @@ resources:
neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks}
neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network}
+ neutron::agents::metadata::metadata_workers: {get_input: neutron_workers}
neutron_agent_mode: {get_input: neutron_agent_mode}
neutron_router_distributed: {get_input: neutron_router_distributed}
neutron::core_plugin: {get_input: neutron_core_plugin}
@@ -1262,6 +1399,7 @@ resources:
neutron::enable_ovs_agent: {get_input: neutron_enable_ovs_agent}
neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers}
neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers}
+ neutron::plugins::ml2::extension_drivers: {get_input: neutron_plugin_extensions}
neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover}
neutron::server::l3_ha: {get_input: neutron_l3_ha}
neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network}
@@ -1275,6 +1413,7 @@ resources:
neutron_public_interface_tag: {get_input: neutron_public_interface_tag}
neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types}
neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
+ neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions}
neutron::server::auth_password: {get_input: neutron_password}
neutron::agents::metadata::auth_password: {get_input: neutron_password}
neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options}
@@ -1289,6 +1428,7 @@ resources:
neutron::server::notifications::nova_url: {get_input: nova_internal_url}
neutron::server::notifications::auth_url: {get_input: neutron_admin_auth_url}
neutron::server::notifications::tenant_name: 'service'
+ neutron::server::notifications::project_name: 'service'
neutron::server::notifications::password: {get_input: nova_password}
# Ceilometer
@@ -1322,6 +1462,9 @@ resources:
nova::api::api_bind_address: {get_input: nova_api_network}
nova::api::metadata_listen: {get_input: nova_metadata_network}
nova::api::admin_password: {get_input: nova_password}
+ nova::api::osapi_compute_workers: {get_input: nova_workers}
+ nova::api::ec2_workers: {get_input: nova_workers}
+ nova::api::metadata_workers: {get_input: nova_workers}
nova::database_connection: {get_input: nova_dsn}
nova::glance_api_servers: {get_input: glance_api_servers}
nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
@@ -1331,6 +1474,7 @@ resources:
nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url}
nova::vncproxy::host: {get_input: nova_api_network}
nova::db::mysql::password: {get_input: nova_password}
+ nova_enable_db_purge: {get_input: nova_enable_db_purge}
# Horizon
apache::ip: {get_input: horizon_network}
@@ -1340,6 +1484,29 @@ resources:
horizon::bind_address: {get_input: horizon_network}
horizon::keystone_url: {get_input: keystone_auth_uri}
+ # Sahara
+ sahara::host: {get_input: sahara_api_network}
+ sahara::plugins:
+ - cdh
+ - hdp
+ - mapr
+ - vanilla
+ - spark
+ - storm
+ sahara::admin_password: {get_input: sahara_password}
+ sahara::auth_uri: {get_input: keystone_auth_uri}
+ sahara::admin_user: sahara
+ sahara::identity_uri: {get_input: keystone_identity_uri}
+ sahara::use_neutron: true
+ sahara::database_connection: {get_input: sahara_dsn}
+ sahara::debug: {get_input: debug}
+ sahara::rpc_backend: rabbit
+ sahara::rabbit_userid: {get_input: rabbit_username}
+ sahara::rabbit_password: {get_input: rabbit_password}
+ sahara::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
+ sahara::rabbit_port: {get_input: rabbit_client_port}
+ sahara::db::mysql::password: {get_input: sahara_password}
+
# Rabbit
rabbitmq::node_ip_address: {get_input: rabbitmq_network}
rabbitmq::erlang_cookie: {get_input: rabbit_cookie}
@@ -1356,17 +1523,13 @@ resources:
memcached::listen_ip: {get_input: memcached_network}
neutron_public_interface_ip: {get_input: neutron_public_interface_ip}
ntp::servers: {get_input: ntp_servers}
+ timezone::timezone: {get_input: timezone}
control_virtual_interface: {get_input: control_virtual_interface}
public_virtual_interface: {get_input: public_virtual_interface}
tripleo::loadbalancer::control_virtual_interface: {get_input: control_virtual_interface}
tripleo::loadbalancer::public_virtual_interface: {get_input: public_virtual_interface}
tripleo::loadbalancer::haproxy_log_address: {get_input: haproxy_log_address}
- # NOTE(jaosorior): The service certificate configuration for
- # HAProxy was left commented because to properly use this, we
- # need to be able to set up the keystone endpoints. And
- # currently that is not possible, but is being addressed by
- # other commits. A subsequent commit will uncomment this.
- #tripleo::loadbalancer::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]}
+ tripleo::loadbalancer::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]}
tripleo::packages::enable_install: {get_input: enable_package_install}
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
@@ -1391,6 +1554,7 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
properties:
+ name: UpdateDeployment
config: {get_resource: UpdateConfig}
server: {get_resource: Controller}
input_values:
@@ -1416,6 +1580,9 @@ outputs:
tenant_ip_address:
description: IP address of the server in the tenant network
value: {get_attr: [TenantPort, ip_address]}
+ management_ip_address:
+ description: IP address of the server in the management network
+ value: {get_attr: [ManagementPort, ip_address]}
hostname:
description: Hostname of the server
value: {get_attr: [Controller, name]}
@@ -1431,12 +1598,11 @@ outputs:
Server's IP address and hostname in the /etc/hosts format
value:
str_replace:
- template: IP HOST.DOMAIN HOST CLOUDNAME
+ template: IP HOST.DOMAIN HOST
params:
IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]}
DOMAIN: {get_param: CloudDomain}
HOST: {get_attr: [Controller, name]}
- CLOUDNAME: {get_param: CloudName}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
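With CLOUDNAME dropped from the hosts_entry template, each rendered entry now carries only the per-node names; a hypothetical rendering of "IP HOST.DOMAIN HOST":

    192.0.2.20 overcloud-controller-0.example.com overcloud-controller-0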
diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
new file mode 100644
index 00000000..26ce7138
--- /dev/null
+++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
@@ -0,0 +1,119 @@
+heat_template_version: 2015-10-15
+
+description: Configure hieradata for all MidoNet nodes
+
+parameters:
+ # Parameters passed from the parent template
+ controller_servers:
+ type: json
+ compute_servers:
+ type: json
+ blockstorage_servers:
+ type: json
+ objectstorage_servers:
+ type: json
+ cephstorage_servers:
+ type: json
+
+ EnableZookeeperOnController:
+ label: Enable Zookeeper On Controller
+    description: 'Whether to enable Zookeeper cluster on Controller'
+ type: boolean
+ default: false
+ EnableCassandraOnController:
+ label: Enable Cassandra On Controller
+    description: 'Whether to enable Cassandra cluster on Controller'
+ type: boolean
+ default: false
+ CassandraStoragePort:
+ label: Cassandra Storage Port
+ description: 'The Cassandra port for inter-node communication'
+ type: string
+ default: '7000'
+ CassandraSslStoragePort:
+ label: Cassandra SSL Storage Port
+ description: 'The SSL port for encrypted communication. Unused unless enabled in encryption_options'
+ type: string
+ default: '7001'
+ CassandraClientPort:
+ label: Cassandra Client Port
+ description: 'Native Transport Port'
+ type: string
+ default: '9042'
+ CassandraClientPortThrift:
+ label: Cassandra Client Thrift Port
+ description: 'The port for the Thrift RPC service, which is used for client connections'
+ type: string
+ default: '9160'
+ TunnelZoneName:
+ label: Name of the Tunnelzone
+    description: 'Name of the tunnel zone used to tunnel packets'
+ type: string
+ default: 'tunnelzone_tripleo'
+ TunnelZoneType:
+ label: Type of the Tunnel
+ description: 'Type of the tunnels on the overlay. Choose between `gre` and `vxlan`'
+ type: string
+ default: 'vxlan'
+
+resources:
+
+ NetworkMidoNetConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ hiera:
+ datafiles:
+ midonet_data:
+ mapped_data:
+ enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController}
+ enable_cassandra_on_controller: {get_param: EnableCassandraOnController}
+ midonet_tunnelzone_name: {get_param: TunnelZoneName}
+ midonet_tunnelzone_type: {get_param: TunnelZoneType}
+ midonet_libvirt_qemu_data: |
+ user = "root"
+ group = "root"
+ cgroup_device_acl = [
+ "/dev/null", "/dev/full", "/dev/zero",
+ "/dev/random", "/dev/urandom",
+ "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+ "/dev/rtc","/dev/hpet", "/dev/vfio/vfio",
+ "/dev/net/tun"
+ ]
+ tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort}
+ tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort}
+ tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort}
+ tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift}
+ tripleo::loadbalancer::midonet_api: true
+ # Missing Neutron Puppet data
+ neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver'
+ neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver'
+ neutron::plugins::midonet::midonet_api_port: 8081
+ neutron::params::midonet_server_package: 'python-networking-midonet'
+
+ # Make sure the l3 agent does not run
+ l3_agent_service: false
+ neutron::agents::l3::manage_service: false
+ neutron::agents::l3::enabled: false
+
+
+ NetworkMidonetDeploymentControllers:
+ type: OS::Heat::StructuredDeploymentGroup
+ properties:
+ config: {get_resource: NetworkMidoNetConfig}
+ servers: {get_param: controller_servers}
+
+ NetworkMidonetDeploymentComputes:
+ type: OS::Heat::StructuredDeploymentGroup
+ properties:
+ config: {get_resource: NetworkMidoNetConfig}
+ servers: {get_param: compute_servers}
+
+outputs:
+ config_identifier:
+ value:
+ list_join:
+ - ' '
+ - - {get_attr: [NetworkMidonetDeploymentControllers, deploy_stdouts]}
+ - {get_attr: [NetworkMidonetDeploymentComputes, deploy_stdouts]}
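A template like this is normally wired in through the resource registry rather
than instantiated directly. A minimal environment-file sketch, assuming the
all-nodes hook is OS::TripleO::AllNodesExtraConfig (parameter values are
illustrative):

    resource_registry:
      OS::TripleO::AllNodesExtraConfig: puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml

    parameter_defaults:
      EnableZookeeperOnController: true
      EnableCassandraOnController: true
      TunnelZoneType: 'gre'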
diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
index 2413f5a4..655fd0f2 100644
--- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
@@ -131,6 +131,7 @@ resources:
NetworkCiscoDeployment:
type: OS::Heat::StructuredDeployments
properties:
+ name: NetworkCiscoDeployment
config: {get_resource: NetworkCiscoConfig}
servers: {get_param: controller_servers}
input_values:
@@ -178,6 +179,7 @@ resources:
CollectMacDeploymentsController:
type: OS::Heat::SoftwareDeployments
properties:
+ name: CollectMacDeploymentsController
servers: {get_param: controller_servers}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -185,6 +187,7 @@ resources:
CollectMacDeploymentsCompute:
type: OS::Heat::SoftwareDeployments
properties:
+ name: CollectMacDeploymentsCompute
servers: {get_param: compute_servers}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -192,6 +195,7 @@ resources:
CollectMacDeploymentsBlockStorage:
type: OS::Heat::SoftwareDeployments
properties:
+ name: CollectMacDeploymentsBlockStorage
servers: {get_param: blockstorage_servers}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -199,6 +203,7 @@ resources:
CollectMacDeploymentsObjectStorage:
type: OS::Heat::SoftwareDeployments
properties:
+ name: CollectMacDeploymentsObjectStorage
servers: {get_param: objectstorage_servers}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -206,6 +211,7 @@ resources:
CollectMacDeploymentsCephStorage:
type: OS::Heat::SoftwareDeployments
properties:
+ name: CollectMacDeploymentsCephStorage
servers: {get_param: cephstorage_servers}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -280,6 +286,7 @@ resources:
MappingToNexusDeploymentsController:
type: OS::Heat::SoftwareDeployment
properties:
+ name: MappingToNexusDeploymentsController
server: {get_param: [controller_servers, '0']}
config: {get_resource: MappingToNexusConfig}
input_values:
@@ -323,6 +330,7 @@ resources:
type: OS::Heat::SoftwareDeployment
depends_on: MappingToNexusDeploymentsController
properties:
+ name: MappingToUCSMDeploymentsController
server: {get_param: [controller_servers, '0']}
config: {get_resource: MappingToUCSMConfig}
input_values:
diff --git a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
index 96368e37..5561c74a 100644
--- a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
@@ -70,6 +70,7 @@ resources:
NovaNuageDeployment:
type: OS::Heat::StructuredDeployment
properties:
+ name: NovaNuageDeployment
config: {get_resource: NovaNuageConfig}
server: {get_param: server}
input_values:
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
new file mode 100644
index 00000000..905f196d
--- /dev/null
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
@@ -0,0 +1,87 @@
+heat_template_version: 2015-11-12
+
+description: Configure hieradata for the Cinder Dell Storage Center backend
+
+parameters:
+ server:
+ description: ID of the controller node to apply this config to
+ type: string
+
+ # Config specific parameters, to be provided via parameter_defaults
+ CinderEnableDellScBackend:
+ type: boolean
+ default: true
+ CinderDellScBackendName:
+ type: string
+ default: 'tripleo_dellsc'
+ CinderDellScSanIp:
+ type: string
+ CinderDellScSanLogin:
+ type: string
+ default: 'Admin'
+ CinderDellScSanPassword:
+ type: string
+ hidden: true
+ CinderDellScSsn:
+ type: string
+ default: '64702'
+ CinderDellScIscsiIpAddress:
+ type: string
+ default: ''
+ CinderDellScIscsiPort:
+ type: string
+ default: '3260'
+ CinderDellScApiPort:
+ type: string
+ default: '3033'
+ CinderDellScServerFolder:
+ type: string
+ default: 'dellsc_server'
+ CinderDellScVolumeFolder:
+ type: string
+ default: 'dellsc_volume'
+
+resources:
+ CinderDellScConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ hiera:
+ datafiles:
+ cinder_dellsc_data:
+ mapped_data:
+ cinder_enable_dellsc_backend: {get_input: EnableDellScBackend}
+ cinder::backend::dellsc_iscsi::volume_backend_name: {get_input: DellScBackendName}
+ cinder::backend::dellsc_iscsi::san_ip: {get_input: DellScSanIp}
+ cinder::backend::dellsc_iscsi::san_login: {get_input: DellScSanLogin}
+ cinder::backend::dellsc_iscsi::san_password: {get_input: DellScSanPassword}
+ cinder::backend::dellsc_iscsi::dell_sc_ssn: {get_input: DellScSsn}
+ cinder::backend::dellsc_iscsi::iscsi_ip_address: {get_input: DellScIscsiIpAddress}
+ cinder::backend::dellsc_iscsi::iscsi_port: {get_input: DellScIscsiPort}
+ cinder::backend::dellsc_iscsi::dell_sc_api_port: {get_input: DellScApiPort}
+ cinder::backend::dellsc_iscsi::dell_sc_server_folder: {get_input: DellScServerFolder}
+ cinder::backend::dellsc_iscsi::dell_sc_volume_folder: {get_input: DellScVolumeFolder}
+
+ CinderDellScDeployment:
+ type: OS::Heat::StructuredDeployment
+ properties:
+ config: {get_resource: CinderDellScConfig}
+ server: {get_param: server}
+ input_values:
+ EnableDellScBackend: {get_param: CinderEnableDellScBackend}
+ DellScBackendName: {get_param: CinderDellScBackendName}
+ DellScSanIp: {get_param: CinderDellScSanIp}
+ DellScSanLogin: {get_param: CinderDellScSanLogin}
+ DellScSanPassword: {get_param: CinderDellScSanPassword}
+ DellScSsn: {get_param: CinderDellScSsn}
+ DellScIscsiIpAddress: {get_param: CinderDellScIscsiIpAddress}
+ DellScIscsiPort: {get_param: CinderDellScIscsiPort}
+ DellScApiPort: {get_param: CinderDellScApiPort}
+ DellScServerFolder: {get_param: CinderDellScServerFolder}
+ DellScVolumeFolder: {get_param: CinderDellScVolumeFolder}
+
+outputs:
+ deploy_stdout:
+ description: Deployment reference, used to trigger puppet apply on changes
+ value: {get_attr: [CinderDellScDeployment, deploy_stdout]}
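Note the indirection in this template: parameter_defaults populate the
Cinder* parameters, the deployment copies them into input_values under
shorter names, and the StructuredConfig reads them back with get_input. A
minimal environment sketch, assuming the per-controller hook is
OS::TripleO::ControllerExtraConfigPre (values illustrative):

    resource_registry:
      OS::TripleO::ControllerExtraConfigPre: puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml

    parameter_defaults:
      CinderDellScSanIp: '192.0.2.50'
      CinderDellScSanPassword: 'secret'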
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
new file mode 100644
index 00000000..c73608f1
--- /dev/null
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
@@ -0,0 +1,86 @@
+heat_template_version: 2015-11-06
+
+description: Configure hieradata for the Cinder eqlx backend
+
+parameters:
+ server:
+ description: ID of the controller node to apply this config to
+ type: string
+
+ # Config specific parameters, to be provided via parameter_defaults
+ CinderEnableEqlxBackend:
+ type: boolean
+ default: true
+ CinderEqlxBackendName:
+ type: string
+ default: 'tripleo_eqlx'
+ CinderEqlxSanIp:
+ type: string
+ CinderEqlxSanLogin:
+ type: string
+ CinderEqlxSanPassword:
+ type: string
+ hidden: true
+ CinderEqlxSanThinProvision:
+ type: boolean
+ default: true
+ CinderEqlxGroupname:
+ type: string
+ default: 'group-0'
+ CinderEqlxPool:
+ type: string
+ default: 'default'
+ CinderEqlxChapLogin:
+ type: string
+ default: ''
+ CinderEqlxChapPassword:
+ type: string
+ default: ''
+ CinderEqlxUseChap:
+ type: boolean
+ default: false
+
+resources:
+ CinderEqlxConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ hiera:
+ datafiles:
+ cinder_eqlx_data:
+ mapped_data:
+ cinder_enable_eqlx_backend: {get_input: EnableEqlxBackend}
+ cinder::backend::eqlx::volume_backend_name: {get_input: EqlxBackendName}
+ cinder::backend::eqlx::san_ip: {get_input: EqlxSanIp}
+ cinder::backend::eqlx::san_login: {get_input: EqlxSanLogin}
+ cinder::backend::eqlx::san_password: {get_input: EqlxSanPassword}
+ cinder::backend::eqlx::san_thin_provision: {get_input: EqlxSanThinProvision}
+ cinder::backend::eqlx::eqlx_group_name: {get_input: EqlxGroupname}
+ cinder::backend::eqlx::eqlx_pool: {get_input: EqlxPool}
+ cinder::backend::eqlx::eqlx_use_chap: {get_input: EqlxUseChap}
+ cinder::backend::eqlx::eqlx_chap_login: {get_input: EqlxChapLogin}
+ cinder::backend::eqlx::eqlx_chap_password: {get_input: EqlxChapPassword}
+
+ CinderEqlxDeployment:
+ type: OS::Heat::StructuredDeployment
+ properties:
+ config: {get_resource: CinderEqlxConfig}
+ server: {get_param: server}
+ input_values:
+ EnableEqlxBackend: {get_param: CinderEnableEqlxBackend}
+ EqlxBackendName: {get_param: CinderEqlxBackendName}
+ EqlxSanIp: {get_param: CinderEqlxSanIp}
+ EqlxSanLogin: {get_param: CinderEqlxSanLogin}
+ EqlxSanPassword: {get_param: CinderEqlxSanPassword}
+ EqlxSanThinProvision: {get_param: CinderEqlxSanThinProvision}
+ EqlxGroupname: {get_param: CinderEqlxGroupname}
+ EqlxPool: {get_param: CinderEqlxPool}
+ EqlxUseChap: {get_param: CinderEqlxUseChap}
+ EqlxChapLogin: {get_param: CinderEqlxChapLogin}
+ EqlxChapPassword: {get_param: CinderEqlxChapPassword}
+
+outputs:
+ deploy_stdout:
+ description: Deployment reference, used to trigger puppet apply on changes
+ value: {get_attr: [CinderEqlxDeployment, deploy_stdout]}
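On the node, the os-apply-config hiera hook renders each datafiles entry into
its own YAML file that the step manifests later read with hiera(). A sketch of
the resulting data, assuming a file such as
/etc/puppet/hieradata/cinder_eqlx_data.yaml (path and values illustrative):

    cinder_enable_eqlx_backend: true
    cinder::backend::eqlx::volume_backend_name: 'tripleo_eqlx'
    cinder::backend::eqlx::san_ip: '192.0.2.60'
    cinder::backend::eqlx::eqlx_group_name: 'group-0'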
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
index 7ec2190f..ab442f2b 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
@@ -114,6 +114,7 @@ resources:
CinderNetappDeployment:
type: OS::Heat::StructuredDeployment
properties:
+ name: CinderNetappDeployment
config: {get_resource: CinderNetappConfig}
server: {get_param: server}
input_values:
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
index bf06d25d..1e652960 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
@@ -56,6 +56,7 @@ resources:
NeutronBigswitchDeployment:
type: OS::Heat::StructuredDeployment
properties:
+ name: NeutronBigswitchDeployment
config: {get_resource: NeutronBigswitchConfig}
server: {get_param: server}
input_values:
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
index 6730ddf1..cec885cd 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
@@ -142,6 +142,7 @@ resources:
CiscoN1kvDeployment:
type: OS::Heat::StructuredDeployment
properties:
+ name: CiscoN1kvDeployment
config: {get_resource: CiscoN1kvConfig}
server: {get_param: server}
input_values:
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml
index 8378d2fc..a4cfea07 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml
@@ -71,6 +71,7 @@ resources:
NeutronNuageDeployment:
type: OS::Heat::StructuredDeployment
properties:
+ name: NeutronNuageDeployment
config: {get_resource: NeutronNuageConfig}
server: {get_param: server}
input_values:
diff --git a/puppet/extraconfig/pre_deploy/per_node.yaml b/puppet/extraconfig/pre_deploy/per_node.yaml
index 80c8ad6e..e236e336 100644
--- a/puppet/extraconfig/pre_deploy/per_node.yaml
+++ b/puppet/extraconfig/pre_deploy/per_node.yaml
@@ -45,6 +45,7 @@ resources:
NodeSpecificDeployment:
type: OS::Heat::SoftwareDeployment
properties:
+ name: NodeSpecificDeployment
config: {get_resource: NodeSpecificConfig}
server: {get_param: server}
input_values:
diff --git a/puppet/extraconfig/tls/ca-inject.yaml b/puppet/extraconfig/tls/ca-inject.yaml
index 7e34f071..5a36e951 100644
--- a/puppet/extraconfig/tls/ca-inject.yaml
+++ b/puppet/extraconfig/tls/ca-inject.yaml
@@ -53,6 +53,7 @@ resources:
CADeployment:
type: OS::Heat::SoftwareDeployment
properties:
+ name: CADeployment
config: {get_resource: CAConfig}
server: {get_param: server}
input_values:
diff --git a/puppet/extraconfig/tls/tls-cert-inject.yaml b/puppet/extraconfig/tls/tls-cert-inject.yaml
index ce524ba9..20bb3737 100644
--- a/puppet/extraconfig/tls/tls-cert-inject.yaml
+++ b/puppet/extraconfig/tls/tls-cert-inject.yaml
@@ -67,6 +67,7 @@ resources:
ControllerTLSDeployment:
type: OS::Heat::SoftwareDeployment
properties:
+ name: ControllerTLSDeployment
config: {get_resource: ControllerTLSConfig}
server: {get_param: server}
input_values:
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index b0e6ae96..f8ef6408 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -38,11 +38,18 @@ cinder::api::keystone_tenant: 'service'
swift::proxy::authtoken::admin_tenant_name: 'service'
ceilometer::api::keystone_tenant: 'service'
heat::keystone_tenant: 'service'
+sahara::admin_tenant_name: 'service'
# keystone
keystone::cron::token_flush::maxdelay: 3600
keystone::roles::admin::service_tenant: 'service'
keystone::roles::admin::admin_tenant: 'admin'
+keystone::cron::token_flush::destination: '/dev/null'
+keystone::config::keystone_config:
+ DEFAULT/secure_proxy_ssl_header:
+ value: 'HTTP_X_FORWARDED_PROTO'
+ ec2/driver:
+ value: 'keystone.contrib.ec2.backends.sql.Ec2'
#swift
swift::proxy::pipeline:
@@ -77,12 +84,15 @@ nova::notify_on_state_change: 'vm_and_task_state'
nova::api::default_floating_pool: 'public'
nova::api::osapi_v3: true
nova::scheduler::filter::ram_allocation_ratio: '1.0'
+nova::cron::archive_deleted_rows::hour: '*/12'
+nova::cron::archive_deleted_rows::destination: '/dev/null'
# ceilometer
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
# cinder
cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler
+cinder::cron::db_purge::destination: '/dev/null'
# heat
heat::engine::configure_delegated_roles: false
@@ -118,6 +128,7 @@ tripleo::loadbalancer::nova_metadata: true
tripleo::loadbalancer::nova_novncproxy: true
tripleo::loadbalancer::mysql: true
tripleo::loadbalancer::redis: true
+tripleo::loadbalancer::sahara: true
tripleo::loadbalancer::swift_proxy_server: true
tripleo::loadbalancer::ceilometer: true
tripleo::loadbalancer::heat_api: true
diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml
index 7e925d90..89577505 100644
--- a/puppet/hieradata/database.yaml
+++ b/puppet/hieradata/database.yaml
@@ -53,3 +53,10 @@ ceilometer::db::mysql::dbname: ceilometer
ceilometer::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+
+sahara::db::mysql::user: sahara
+sahara::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
+sahara::db::mysql::dbname: sahara
+sahara::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index 7f8970cc..7444155c 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -22,6 +22,8 @@ if count(hiera('ntp::servers')) > 0 {
include ::ntp
}
+include ::timezone
+
if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
exec { 'set selinux to permissive on boot':
command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index e0566ac1..bb3575cf 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -22,6 +22,8 @@ if count(hiera('ntp::servers')) > 0 {
include ::ntp
}
+include ::timezone
+
file { ['/etc/libvirt/qemu/networks/autostart/default.xml',
'/etc/libvirt/qemu/networks/default.xml']:
ensure => absent,
@@ -68,11 +70,19 @@ if hiera('cinder_enable_nfs_backend', false) {
}
include ::nova::compute::libvirt
+if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+ file {'/etc/libvirt/qemu.conf':
+ ensure => present,
+ content => hiera('midonet_libvirt_qemu_data')
+ }
+}
include ::nova::network::neutron
include ::neutron
# If the value of core plugin is set to 'nuage',
# include nuage agent,
+# if the value of core plugin is set to 'midonet',
+# include the midonet agent,
# else use the default value of 'ml2'
if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
include ::nuage::vrs
@@ -84,7 +94,20 @@ if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
nova_metadata_ip => hiera('nova_metadata_node_ips'),
nova_auth_ip => hiera('keystone_public_api_virtual_ip'),
}
-} else {
+}
+elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+
+ # TODO(devvesa) provide non-controller ips for these services
+ $zookeeper_node_ips = hiera('neutron_api_node_ips')
+ $cassandra_node_ips = hiera('neutron_api_node_ips')
+
+ class {'::tripleo::network::midonet::agent':
+ zookeeper_servers => $zookeeper_node_ips,
+ cassandra_seeds => $cassandra_node_ips
+ }
+}
+else {
+
include ::neutron::plugins::ml2
include ::neutron::agents::ml2::ovs
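The branch above keys purely off the neutron::core_plugin value in hiera, so
switching a compute node onto the MidoNet agent is a data-only change. The
hiera value the midonet branch matches (how it is delivered, e.g. via a Heat
parameter, is deployment-specific):

    neutron::core_plugin: 'midonet.neutron.plugin_v1.MidonetPluginV2'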
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 7d3012e5..ea63b1a8 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -39,6 +39,8 @@ if hiera('step') >= 2 {
include ::ntp
}
+ include ::timezone
+
# MongoDB
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
include ::mongodb::globals
@@ -101,6 +103,7 @@ if hiera('step') >= 2 {
include ::neutron::db::mysql
include ::cinder::db::mysql
include ::heat::db::mysql
+ include ::sahara::db::mysql
if downcase(hiera('ceilometer_backend')) == 'mysql' {
include ::ceilometer::db::mysql
}
@@ -128,7 +131,7 @@ if hiera('step') >= 2 {
# pre-install swift here so we can build rings
include ::swift
- $enable_ceph = hiera('ceph_storage_count', 0) > 0
+ $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
if $enable_ceph {
class { '::ceph::profile::params':
@@ -164,13 +167,12 @@ if hiera('step') >= 2 {
if hiera('step') >= 3 {
include ::keystone
+ include ::keystone::config
include ::keystone::roles::admin
include ::keystone::endpoint
#TODO: need a cleanup-keystone-tokens.sh solution here
- keystone_config {
- 'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2';
- }
+
file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
ensure => 'directory',
owner => 'keystone',
@@ -230,13 +232,61 @@ if hiera('step') >= 3 {
include ::nova::scheduler
include ::nova::scheduler::filter
- include ::neutron
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+
+ # TODO(devvesa) provide non-controller ips for these services
+ $zookeeper_node_ips = hiera('neutron_api_node_ips')
+ $cassandra_node_ips = hiera('neutron_api_node_ips')
+
+ # Run zookeeper in the controller if configured
+ if hiera('enable_zookeeper_on_controller') {
+ class {'::tripleo::cluster::zookeeper':
+ zookeeper_server_ips => $zookeeper_node_ips,
+ zookeeper_client_ip => $ipaddress,
+ zookeeper_hostnames => hiera('controller_node_names')
+ }
+ }
+
+ # Run cassandra in the controller if configured
+ if hiera('enable_cassandra_on_controller') {
+ class {'::tripleo::cluster::cassandra':
+ cassandra_servers => $cassandra_node_ips,
+ cassandra_ip => $ipaddress
+ }
+ }
+
+ class {'::tripleo::network::midonet::agent':
+ zookeeper_servers => $zookeeper_node_ips,
+ cassandra_seeds => $cassandra_node_ips
+ }
+
+ class {'::tripleo::network::midonet::api':
+ zookeeper_servers => $zookeeper_node_ips,
+ vip => $ipaddress,
+ keystone_ip => $ipaddress,
+ keystone_admin_token => hiera('keystone::admin_token'),
+ bind_address => $ipaddress,
+ admin_password => hiera('admin_password')
+ }
+
+ # TODO: find a way to get an empty list from hiera
+ class {'::neutron':
+ service_plugins => []
+ }
+
+ }
+ else {
+
+ # ML2 plugin
+ include ::neutron
+ }
+
include ::neutron::server
include ::neutron::server::notifications
# If the value of core plugin is set to 'nuage',
- # include nuage core plugin,
- # else use the default value of 'ml2'
+ # include the nuage core plugin, which does not
+ # need the l3, dhcp and metadata agents
if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
include ::neutron::plugins::nuage
} else {
@@ -252,45 +302,57 @@ if hiera('step') >= 3 {
require => Package['neutron'],
}
- include ::neutron::plugins::ml2
- include ::neutron::agents::ml2::ovs
+ # If the value of core plugin is set to 'midonet',
+ # skip all the ML2 configuration
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
- include ::neutron::plugins::ml2::cisco::nexus1000v
-
- class { '::neutron::agents::n1kv_vem':
- n1kv_source => hiera('n1kv_vem_source', undef),
- n1kv_version => hiera('n1kv_vem_version', undef),
+ class {'::neutron::plugins::midonet':
+ midonet_api_ip => $ipaddress,
+ keystone_tenant => hiera('neutron::server::auth_tenant'),
+ keystone_password => hiera('neutron::server::auth_password')
}
+ } else {
+
+ include ::neutron::plugins::ml2
+ include ::neutron::agents::ml2::ovs
+
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+ include ::neutron::plugins::ml2::cisco::nexus1000v
- class { '::n1k_vsm':
- n1kv_source => hiera('n1kv_vsm_source', undef),
- n1kv_version => hiera('n1kv_vsm_version', undef),
- pacemaker_control => false,
+ class { '::neutron::agents::n1kv_vem':
+ n1kv_source => hiera('n1kv_vem_source', undef),
+ n1kv_version => hiera('n1kv_vem_version', undef),
+ }
+
+ class { '::n1k_vsm':
+ n1kv_source => hiera('n1kv_vsm_source', undef),
+ n1kv_version => hiera('n1kv_vsm_version', undef),
+ pacemaker_control => false,
+ }
}
- }
- if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
- include ::neutron::plugins::ml2::cisco::ucsm
- }
- if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
- include ::neutron::plugins::ml2::cisco::nexus
- include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
- }
+ if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+ include ::neutron::plugins::ml2::cisco::ucsm
+ }
+ if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+ include ::neutron::plugins::ml2::cisco::nexus
+ include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
+ }
- if hiera('neutron_enable_bigswitch_ml2', false) {
- include ::neutron::plugins::ml2::bigswitch::restproxy
- }
- neutron_l3_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
- }
- neutron_dhcp_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
+ if hiera('neutron_enable_bigswitch_ml2', false) {
+ include ::neutron::plugins::ml2::bigswitch::restproxy
+ }
+ neutron_l3_agent_config {
+ 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
+ }
+ neutron_dhcp_agent_config {
+ 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
+ }
+ Service['neutron-server'] -> Service['neutron-ovs-agent-service']
}
Service['neutron-server'] -> Service['neutron-dhcp-service']
Service['neutron-server'] -> Service['neutron-l3']
- Service['neutron-server'] -> Service['neutron-ovs-agent-service']
Service['neutron-server'] -> Service['neutron-metadata']
}
@@ -339,6 +401,48 @@ if hiera('step') >= 3 {
}
}
+ if hiera('cinder_enable_eqlx_backend', false) {
+ $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
+
+ cinder_config {
+ "${cinder_eqlx_backend}/host": value => 'hostgroup';
+ }
+
+ cinder::backend::eqlx { $cinder_eqlx_backend :
+ volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
+ san_ip => hiera('cinder::backend::eqlx::san_ip', undef),
+ san_login => hiera('cinder::backend::eqlx::san_login', undef),
+ san_password => hiera('cinder::backend::eqlx::san_password', undef),
+ san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef),
+ eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
+ eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef),
+ eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
+ eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
+ eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_chap_password', undef),
+ }
+ }
+
+ if hiera('cinder_enable_dellsc_backend', false) {
+ $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
+
+ cinder_config {
+ "${cinder_dellsc_backend}/host": value => 'hostgroup';
+ }
+
+ cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
+ volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
+ san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
+ san_login => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
+ san_password => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
+ dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
+ iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
+ iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
+ dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef),
+ dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
+ dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
+ }
+ }
+
if hiera('cinder_enable_netapp_backend', false) {
$cinder_netapp_backend = hiera('cinder::backend::netapp::title')
@@ -392,7 +496,7 @@ if hiera('step') >= 3 {
}
}
- $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend, $cinder_nfs_backend])
+ $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
class { '::cinder::backends' :
enabled_backends => $cinder_enabled_backends,
}
@@ -460,6 +564,11 @@ if hiera('step') >= 3 {
include ::heat::api_cloudwatch
include ::heat::engine
+ # Sahara
+ include ::sahara
+ include ::sahara::service::api
+ include ::sahara::service::engine
+
# Horizon
if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
$_profile_support = 'cisco'
@@ -488,7 +597,19 @@ if hiera('step') >= 3 {
} #END STEP 3
if hiera('step') >= 4 {
- include ::keystone::cron::token_flush
+ $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
+ $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
+ $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
+
+ if $keystone_enable_db_purge {
+ include ::keystone::cron::token_flush
+ }
+ if $nova_enable_db_purge {
+ include ::nova::cron::archive_deleted_rows
+ }
+ if $cinder_enable_db_purge {
+ include ::cinder::cron::db_purge
+ }
} #END STEP 4
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')])
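Each of the new cron jobs can now be switched off per deployment from hiera
alone; the keys below are exactly those read by the step 4 block above
(defaults are true):

    keystone_enable_db_purge: false
    nova_enable_db_purge: false
    cinder_enable_db_purge: false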
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index cf607e53..f8d3fd76 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -41,6 +41,8 @@ if hiera('step') >= 1 {
create_resources(sysctl::value, hiera('sysctl_settings'), {})
+ include ::timezone
+
if count(hiera('ntp::servers')) > 0 {
include ::ntp
}
@@ -78,11 +80,11 @@ if hiera('step') >= 1 {
Class['tripleo::fencing'] -> Class['pacemaker::stonith']
}
- # FIXME(gfidente): sets 100secs as default start timeout op
+ # FIXME(gfidente): sets 200secs as default start timeout op
# param; until we can use pcmk global defaults we'll still
# need to add it to every resource which redefines op params
Pacemaker::Resource::Service {
- op_params => 'start timeout=100s stop timeout=100s',
+ op_params => 'start timeout=200s stop timeout=200s',
}
# Only configure RabbitMQ in this step, don't start it yet to
@@ -352,7 +354,7 @@ if hiera('step') >= 2 {
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
pacemaker::resource::service { $::mongodb::params::service_name :
- op_params => 'start timeout=120s stop timeout=100s',
+ op_params => 'start timeout=370s stop timeout=200s',
clone_params => true,
require => Class['::mongodb::server'],
}
@@ -443,13 +445,17 @@ MYSQL_HOST=localhost\n",
require => Exec['galera-ready'],
}
}
+
+ class { '::sahara::db::mysql':
+ require => Exec['galera-ready'],
+ }
}
# pre-install swift here so we can build rings
include ::swift
# Ceph
- $enable_ceph = hiera('ceph_storage_count', 0) > 0
+ $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
if $enable_ceph {
class { '::ceph::profile::params':
@@ -490,11 +496,10 @@ if hiera('step') >= 3 {
manage_service => false,
enabled => false,
}
+ include ::keystone::config
#TODO: need a cleanup-keystone-tokens.sh solution here
- keystone_config {
- 'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2';
- }
+
file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
ensure => 'directory',
owner => 'keystone',
@@ -592,8 +597,54 @@ if hiera('step') >= 3 {
}
include ::nova::network::neutron
- # Neutron class definitions
- include ::neutron
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+
+ # TODO(devvesa) provide non-controller ips for these services
+ $zookeeper_node_ips = hiera('neutron_api_node_ips')
+ $cassandra_node_ips = hiera('neutron_api_node_ips')
+
+ # Run zookeeper in the controller if configured
+ if hiera('enable_zookeeper_on_controller') {
+ class {'::tripleo::cluster::zookeeper':
+ zookeeper_server_ips => $zookeeper_node_ips,
+ zookeeper_client_ip => $ipaddress,
+ zookeeper_hostnames => hiera('controller_node_names')
+ }
+ }
+
+ # Run cassandra in the controller if configured
+ if hiera('enable_cassandra_on_controller') {
+ class {'::tripleo::cluster::cassandra':
+ cassandra_servers => $cassandra_node_ips,
+ cassandra_ip => $ipaddress
+ }
+ }
+
+ class {'::tripleo::network::midonet::agent':
+ zookeeper_servers => $zookeeper_node_ips,
+ cassandra_seeds => $cassandra_node_ips
+ }
+
+ class {'::tripleo::network::midonet::api':
+ zookeeper_servers => hiera('neutron_api_node_ips'),
+ vip => $public_vip,
+ keystone_ip => $public_vip,
+ keystone_admin_token => hiera('keystone::admin_token'),
+ bind_address => $ipaddress,
+ admin_password => hiera('admin_password')
+ }
+
+ # Configure Neutron
+ class {'::neutron':
+ service_plugins => []
+ }
+
+ }
+ else {
+ # Neutron class definitions
+ include ::neutron
+ }
+
class { '::neutron::server' :
sync_db => $sync_db,
manage_service => false,
@@ -603,6 +654,13 @@ if hiera('step') >= 3 {
if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
include ::neutron::plugins::nuage
}
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+ class {'::neutron::plugins::midonet':
+ midonet_api_ip => $public_vip,
+ keystone_tenant => hiera('neutron::server::auth_tenant'),
+ keystone_password => hiera('neutron::server::auth_password')
+ }
+ }
if hiera('neutron::enable_dhcp_agent',true) {
class { '::neutron::agents::dhcp' :
manage_service => false,
@@ -720,6 +778,48 @@ if hiera('step') >= 3 {
}
}
+ if hiera('cinder_enable_eqlx_backend', false) {
+ $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
+
+ cinder_config {
+ "${cinder_eqlx_backend}/host": value => 'hostgroup';
+ }
+
+ cinder::backend::eqlx { $cinder_eqlx_backend :
+ volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
+ san_ip => hiera('cinder::backend::eqlx::san_ip', undef),
+ san_login => hiera('cinder::backend::eqlx::san_login', undef),
+ san_password => hiera('cinder::backend::eqlx::san_password', undef),
+ san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef),
+ eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
+ eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef),
+ eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
+ eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
+ eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_chap_password', undef),
+ }
+ }
+
+ if hiera('cinder_enable_dellsc_backend', false) {
+ $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
+
+ cinder_config {
+ "${cinder_dellsc_backend}/host": value => 'hostgroup';
+ }
+
+ cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
+ volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
+ san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
+ san_login => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
+ san_password => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
+ dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
+ iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
+ iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
+ dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef),
+ dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
+ dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
+ }
+ }
+
if hiera('cinder_enable_netapp_backend', false) {
$cinder_netapp_backend = hiera('cinder::backend::netapp::title')
@@ -773,11 +873,23 @@ if hiera('step') >= 3 {
}
}
- $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend, $cinder_nfs_backend])
+ $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
class { '::cinder::backends' :
enabled_backends => $cinder_enabled_backends,
}
+ class { '::sahara':
+ sync_db => $sync_db,
+ }
+ class { '::sahara::service::api':
+ manage_service => false,
+ enabled => false,
+ }
+ class { '::sahara::service::engine':
+ manage_service => false,
+ enabled => false,
+ }
+
# swift proxy
class { '::swift::proxy' :
manage_service => $non_pcmk_start,
@@ -915,7 +1027,19 @@ if hiera('step') >= 3 {
} #END STEP 3
if hiera('step') >= 4 {
- include ::keystone::cron::token_flush
+ $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
+ $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
+ $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
+
+ if $keystone_enable_db_purge {
+ include ::keystone::cron::token_flush
+ }
+ if $nova_enable_db_purge {
+ include ::nova::cron::archive_deleted_rows
+ }
+ if $cinder_enable_db_purge {
+ include ::cinder::cron::db_purge
+ }
if $pacemaker_master {
@@ -1018,6 +1142,24 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::cinder::params::volume_service]],
}
+ # Sahara
+ pacemaker::resource::service { $::sahara::params::api_service_name :
+ clone_params => 'interleave=true',
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ }
+ pacemaker::resource::service { $::sahara::params::engine_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
+ constraint_type => 'order',
+ first_resource => "${::keystone::params::service_name}-clone",
+ second_resource => "${::sahara::params::api_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
+ Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ }
+
# Glance
pacemaker::resource::service { $::glance::params::registry_service_name :
clone_params => 'interleave=true',
@@ -1053,15 +1195,32 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::glance::params::api_service_name]],
}
- # Neutron
- # NOTE(gfidente): Neutron will try to populate the database with some data
- # as soon as neutron-server is started; to avoid races we want to make this
- # happen only on one node, before normal Pacemaker initialization
- # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
- exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } ->
- pacemaker::resource::service { $::neutron::params::server_service:
- clone_params => 'interleave=true',
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ if hiera('step') == 4 {
+ # Neutron
+ # NOTE(gfidente): Neutron will try to populate the database with some data
+ # as soon as neutron-server is started; to avoid races we want to make this
+ # happen only on one node, before normal Pacemaker initialization
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
+ # NOTE(emilien): we need to run this Exec only at Step 4, otherwise it
+ # would try to start the service while it is already started by Pacemaker,
+ # which would result in a deployment failure since systemd would return 1
+ # to Puppet and the overcloud would fail to deploy (6 would be returned).
+ # This conditional prevents that race condition during the deployment.
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
+ exec { 'neutron-server-systemd-start-sleep' :
+ command => 'systemctl start neutron-server && /usr/bin/sleep 5',
+ path => '/usr/bin',
+ unless => '/sbin/pcs resource show neutron-server',
+ } ->
+ pacemaker::resource::service { $::neutron::params::server_service:
+ clone_params => 'interleave=true',
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name]
+ }
+ } else {
+ pacemaker::resource::service { $::neutron::params::server_service:
+ clone_params => 'interleave=true',
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name]
+ }
}
if hiera('neutron::enable_l3_agent', true) {
pacemaker::resource::service { $::neutron::params::l3_agent_service:
@@ -1078,6 +1237,11 @@ if hiera('step') >= 4 {
clone_params => 'interleave=true',
}
}
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+ pacemaker::resource::service {'tomcat':
+ clone_params => 'interleave=true',
+ }
+ }
if hiera('neutron::enable_metadata_agent', true) {
pacemaker::resource::service { $::neutron::params::metadata_agent_service:
clone_params => 'interleave=true',
@@ -1128,7 +1292,6 @@ if hiera('step') >= 4 {
}
}
- #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3
pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
constraint_type => 'order',
first_resource => "${::keystone::params::service_name}-clone",
@@ -1204,28 +1367,65 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
}
}
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+ #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
+ pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::server_service}-clone",
+ second_resource => "${::neutron::params::dhcp_agent_service}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
+ Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
+ }
+ pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::dhcp_agent_service}-clone",
+ second_resource => "${::neutron::params::metadata_agent_service}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
+ }
+ pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::metadata_agent_service}-clone",
+ second_resource => 'tomcat-clone',
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
+ Pacemaker::Resource::Service['tomcat']],
+ }
+ pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
+ source => "${::neutron::params::metadata_agent_service}-clone",
+ target => "${::neutron::params::dhcp_agent_service}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
+ }
+ }
# Nova
pacemaker::resource::service { $::nova::params::api_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::resource::service { $::nova::params::conductor_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::resource::service { $::nova::params::consoleauth_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
require => Pacemaker::Resource::Service[$::keystone::params::service_name],
}
pacemaker::resource::service { $::nova::params::vncproxy_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::resource::service { $::nova::params::scheduler_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
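Everything in this manifest is gated on hiera('step'): the controller
converges the same manifest over several passes, each pass delivering a larger
step value as plain hieradata, which is what makes guards such as the step 4
neutron-server Exec above possible. Conceptually, the data for the pass shown
here is simply (illustrative; the actual delivery is handled by the *-post.yaml
deployments):

    step: 4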
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
index 1eabddf1..63ac396e 100644
--- a/puppet/manifests/overcloud_object.pp
+++ b/puppet/manifests/overcloud_object.pp
@@ -22,6 +22,8 @@ if count(hiera('ntp::servers')) > 0 {
include ::ntp
}
+include ::timezone
+
include ::swift
class { '::swift::storage::all':
mount_check => str2bool(hiera('swift_mount_check')),
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index 2bdd8a9c..5a69725a 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -22,6 +22,8 @@ if count(hiera('ntp::servers')) > 0 {
include ::ntp
}
+include ::timezone
+
include ::cinder
include ::cinder::config
include ::cinder::glance
diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml
index d22f5386..a55b3959 100644
--- a/puppet/swift-storage-post.yaml
+++ b/puppet/swift-storage-post.yaml
@@ -29,6 +29,7 @@ resources:
StorageDeployment_Step1:
type: OS::Heat::StructuredDeployments
properties:
+ name: StorageDeployment_Step1
servers: {get_param: servers}
config: {get_resource: StoragePuppetConfig}
input_values:
@@ -49,6 +50,7 @@ resources:
type: OS::Heat::StructuredDeployments
depends_on: StorageDeployment_Step1
properties:
+ name: StorageRingbuilderDeployment_Step2
servers: {get_param: servers}
config: {get_resource: StorageRingbuilderPuppetConfig}
input_values:
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 721dcba4..142e47cc 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -7,7 +7,6 @@ parameters:
constraints:
- custom_constraint: nova.flavor
HashSuffix:
- default: unset
description: A random string to be used as a salt when hashing to determine mappings
in the ring.
hidden: true
@@ -40,7 +39,6 @@ parameters:
description: The user name for SNMPd with readonly rights running on all Overcloud nodes
type: string
SnmpdReadonlyUserPassword:
- default: unset
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
@@ -63,6 +61,10 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
+ TimeZone:
+ default: 'UTC'
+ description: The timezone to be set on ObjectStorage nodes.
+ type: string
Hostname:
type: string
default: '' # Defaults to Heat created hostname
@@ -82,6 +84,13 @@ parameters:
description: >
      Heat action on which to apply network configuration changes
default: ['CREATE']
+ SoftwareConfigTransport:
+ default: POLL_SERVER_CFN
+ description: |
+ How the server should receive the metadata required for software configuration.
+ type: string
+ constraints:
+ - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
CloudDomain:
default: ''
type: string
@@ -112,6 +121,7 @@ resources:
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
name: {get_param: Hostname}
+ software_config_transport: {get_param: SoftwareConfigTransport}
metadata: {get_param: ServerMetadata}
scheduler_hints: {get_param: SchedulerHints}
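The new transport is selectable per deployment; a minimal environment sketch
choosing the temporary-URL polling transport (one of the allowed_values
above):

    parameter_defaults:
      SoftwareConfigTransport: POLL_TEMP_URL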
@@ -135,6 +145,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ ExternalPort:
+ type: OS::TripleO::SwiftStorage::Ports::ExternalPort
+ properties:
+ ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+
InternalApiPort:
type: OS::TripleO::SwiftStorage::Ports::InternalApiPort
properties:
@@ -150,25 +165,42 @@ resources:
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ TenantPort:
+ type: OS::TripleO::SwiftStorage::Ports::TenantPort
+ properties:
+ ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+
+ ManagementPort:
+ type: OS::TripleO::SwiftStorage::Ports::ManagementPort
+ properties:
+ ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+
NetworkConfig:
type: OS::TripleO::ObjectStorage::Net::SoftwareConfig
properties:
ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetIpMap:
type: OS::TripleO::Network::Ports::NetIpMap
properties:
ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ ExternalIp: {get_attr: [ExternalPort, ip_address]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
StorageIp: {get_attr: [StoragePort, ip_address]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ TenantIp: {get_attr: [TenantPort, ip_address]}
+ ManagementIp: {get_attr: [ManagementPort, ip_address]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
properties:
+ name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: SwiftStorage}
actions: {get_param: NetworkDeploymentActions}
@@ -207,6 +239,7 @@ resources:
swift_mount_check: {get_input: swift_mount_check }
tripleo::ringbuilder::min_part_hours: { get_input: swift_min_part_hours }
ntp::servers: {get_input: ntp_servers}
+ timezone::timezone: {get_input: timezone}
# NOTE(dprince): build_ring support is currently not wired in.
# See: https://review.openstack.org/#/c/109225/
tripleo::ringbuilder::build_ring: True
@@ -220,6 +253,7 @@ resources:
type: OS::Heat::StructuredDeployment
depends_on: NetworkDeployment
properties:
+ name: SwiftStorageHieraDeploy
server: {get_resource: SwiftStorage}
config: {get_resource: SwiftStorageHieraConfig}
input_values:
@@ -232,6 +266,7 @@ resources:
swift_part_power: {get_param: PartPower}
swift_replicas: { get_param: Replicas}
ntp_servers: {get_param: NtpServer}
+ timezone: {get_param: TimeZone}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
@@ -283,6 +318,9 @@ outputs:
template: 'r1z1-IP:%PORT%/d1'
params:
IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
+ external_ip_address:
+ description: IP address of the server in the external network
+ value: {get_attr: [ExternalPort, ip_address]}
internal_api_ip_address:
description: IP address of the server in the internal_api network
value: {get_attr: [InternalApiPort, ip_address]}
@@ -292,6 +330,12 @@ outputs:
storage_mgmt_ip_address:
description: IP address of the server in the storage_mgmt network
value: {get_attr: [StorageMgmtPort, ip_address]}
+ tenant_ip_address:
+ description: IP address of the server in the tenant network
+ value: {get_attr: [TenantPort, ip_address]}
+ management_ip_address:
+ description: IP address of the server in the management network
+ value: {get_attr: [ManagementPort, ip_address]}
config_identifier:
description: identifier which changes if the node configuration may need re-applying
value:
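The added ports stay no-ops until an environment maps them to real port
templates. A sketch, assuming port templates along the lines of
network/ports/management.yaml exist for each network:

    resource_registry:
      OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml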
diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml
index 1dec489c..c49a1047 100644
--- a/puppet/vip-config.yaml
+++ b/puppet/vip-config.yaml
@@ -19,6 +19,7 @@ resources:
cinder_api_vip: {get_input: cinder_api_vip}
glance_api_vip: {get_input: glance_api_vip}
glance_registry_vip: {get_input: glance_registry_vip}
+ sahara_api_vip: {get_input: sahara_api_vip}
swift_proxy_vip: {get_input: swift_proxy_vip}
nova_api_vip: {get_input: nova_api_vip}
nova_metadata_vip: {get_input: nova_metadata_vip}