Diffstat (limited to 'puppet')
-rw-r--r--  puppet/ceph-cluster-config.yaml                               15
-rw-r--r--  puppet/ceph-storage.yaml                                      18
-rw-r--r--  puppet/cinder-storage.yaml                                    18
-rw-r--r--  puppet/compute.yaml                                           33
-rw-r--r--  puppet/controller.yaml                                         6
-rw-r--r--  puppet/extraconfig/ceph/ceph-external-config.yaml              8
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml    2
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml      2
-rw-r--r--  puppet/hieradata/compute.yaml                                  1
-rw-r--r--  puppet/hieradata/controller.yaml                               7
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp                      8
-rw-r--r--  puppet/manifests/overcloud_compute.pp                         51
-rw-r--r--  puppet/manifests/overcloud_controller.pp                      48
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp            44
-rw-r--r--  puppet/swift-storage.yaml                                     18
15 files changed, 257 insertions(+), 22 deletions(-)
diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml
index 96198c3f..dc2f98ed 100644
--- a/puppet/ceph-cluster-config.yaml
+++ b/puppet/ceph-cluster-config.yaml
@@ -39,6 +39,9 @@ parameters:
CephClientUserName:
default: openstack
type: string
+ CephIPv6:
+ default: False
+ type: boolean
resources:
CephClusterConfigImpl:
@@ -50,15 +53,25 @@ resources:
datafiles:
ceph_cluster:
mapped_data:
+ ceph_ipv6: {get_param: CephIPv6}
ceph_storage_count: {get_param: ceph_storage_count}
ceph_mon_initial_members:
list_join:
- ','
- {get_param: ceph_mon_names}
- ceph::profile::params::mon_host:
+ ceph_mon_host:
list_join:
- ','
- {get_param: ceph_mon_ips}
+ ceph_mon_host_v6:
+ str_replace:
+ template: "'[IPS_LIST]'"
+ params:
+ IPS_LIST:
+ list_join:
+ - '],['
+ - {get_param: ceph_mon_ips}
+ ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
ceph::profile::params::fsid: {get_param: ceph_fsid}
ceph::profile::params::mon_key: {get_param: ceph_mon_key}
# We should use a separated key for the non-admin clients
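[Editor's note] A minimal sketch (plain Python, hypothetical monitor addresses) of the string the new ceph_mon_host_v6 mapping above ends up producing: each IP gets wrapped in brackets and the whole list is single-quoted, presumably the form the Ceph configuration needs for IPv6 monitors.

    # Hypothetical ceph_mon_ips values; the real list comes from the deployment.
    mon_ips = ["fd00:fd00:fd00:3000::10", "fd00:fd00:fd00:3000::11"]
    ips_list = "],[".join(mon_ips)              # list_join with the '],[' separator
    ceph_mon_host_v6 = "'[" + ips_list + "]'"   # template "'[IPS_LIST]'"
    print(ceph_mon_host_v6)
    # '[fd00:fd00:fd00:3000::10],[fd00:fd00:fd00:3000::11]'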
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index 88120b9c..d2988926 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -62,6 +62,9 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
+ CephStorageIPs:
+ default: {}
+ type: json
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -90,6 +93,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ NodeIndex:
+ type: number
+ default: 0
resources:
CephStorage:
@@ -135,31 +141,43 @@ resources:
type: OS::TripleO::CephStorage::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::CephStorage::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::CephStorage::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::CephStorage::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::CephStorage::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::CephStorage::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
NetworkConfig:
type: OS::TripleO::CephStorage::Net::SoftwareConfig
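[Editor's note] The IPPool/NodeIndex pair added to every port resource here (and in the cinder-storage, compute, and swift-storage templates below) lets an externally supplied map of per-network IP lists pin each node to a predictable address. A rough sketch of the lookup the pluggable Ports::* implementations are expected to perform; the pool contents and network keys are hypothetical and depend on the port template actually plugged in.

    # Hypothetical per-role pool; real pools come from e.g. CephStorageIPs.
    ip_pool = {
        "storage":      ["172.16.1.100", "172.16.1.101", "172.16.1.102"],
        "storage_mgmt": ["172.16.3.100", "172.16.3.101", "172.16.3.102"],
    }
    node_index = 1                              # NodeIndex of this CephStorage node
    fixed_ip = ip_pool["storage"][node_index]   # one fixed IP per node per network
    print(fixed_ip)                             # 172.16.1.101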
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index 0c22b575..888f3cf8 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -38,6 +38,9 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
+ BlockStorageIPs:
+ default: {}
+ type: json
Flavor:
description: Flavor for block storage nodes to request when deploying.
type: string
@@ -141,6 +144,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ NodeIndex:
+ type: number
+ default: 0
resources:
@@ -187,31 +193,43 @@ resources:
type: OS::TripleO::BlockStorage::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::BlockStorage::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::BlockStorage::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::BlockStorage::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::BlockStorage::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::BlockStorage::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
NetworkConfig:
type: OS::TripleO::BlockStorage::Net::SoftwareConfig
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 58ca71e7..ee5bced6 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -195,6 +195,9 @@ parameters:
default: 'dvr_snat'
description: Agent mode for the neutron-l3-agent on the controller hosts
type: string
+ NodeIndex:
+ type: number
+ default: 0
NovaApiHost:
type: string
default: '' # Has to be here because of the ignored empty value bug
@@ -207,6 +210,9 @@ parameters:
NovaCompute specific configuration to inject into the cluster. Same
structure as ExtraConfig.
type: json
+ NovaComputeIPs:
+ default: {}
+ type: json
NovaComputeLibvirtType:
type: string
default: kvm
@@ -218,6 +224,10 @@ parameters:
default: false
description: Whether to enable or not the Rbd backend for Nova
type: boolean
+ NovaIPv6:
+ default: false
+ description: Enable IPv6 features in Nova
+ type: boolean
NovaPassword:
description: The password for the nova service account, used by nova-api.
type: string
@@ -378,31 +388,43 @@ resources:
type: OS::TripleO::Compute::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::Compute::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::Compute::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::Compute::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::Compute::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::Compute::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
NetIpMap:
type: OS::TripleO::Network::Ports::NetIpMap
@@ -478,6 +500,7 @@ resources:
raw_data: {get_file: hieradata/compute.yaml}
mapped_data:
cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend}
+ nova::use_ipv6: {get_input: nova_ipv6}
nova::debug: {get_input: debug}
nova::rabbit_userid: {get_input: rabbit_username}
nova::rabbit_password: {get_input: rabbit_password}
@@ -567,10 +590,18 @@ resources:
nova_api_host: {get_param: NovaApiHost}
nova_password: {get_param: NovaPassword}
nova_enable_rbd_backend: {get_param: NovaEnableRbdBackend}
+ nova_ipv6: {get_param: NovaIPv6}
cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
nova_vnc_proxyclient_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaVncProxyNetwork]}]}
nova_vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
- nova_vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host]}
+ # Remove any brackets that may be present if the IP address is IPv6.
+ # For DNS names and IPv4 addresses, this simply yields the NovaVNCProxyPublic value.
+ nova_vncproxy_host:
+ str_replace:
+ template: {get_param: [EndpointMap, NovaVNCProxyPublic, host]}
+ params:
+ '[': ''
+ ']': ''
nova_vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
nova_ovs_bridge: {get_param: NovaOVSBridge}
nova_security_group_api: {get_param: NovaSecurityGroupAPI}
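[Editor's note] A small sketch of the bracket stripping applied to nova_vncproxy_host above: str_replace with '[' and ']' mapped to empty strings only unwraps IPv6 literals and leaves DNS names and IPv4 addresses untouched (example values are hypothetical).

    def strip_brackets(host):
        # Mirrors the str_replace params above: drop any '[' or ']' characters.
        return host.replace("[", "").replace("]", "")

    print(strip_brackets("[2001:db8::5]"))           # 2001:db8::5
    print(strip_brackets("192.0.2.10"))              # unchanged
    print(strip_brackets("overcloud.example.com"))   # unchanged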
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index 0cc3fd71..a873ce8a 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -238,7 +238,7 @@ parameters:
type: string
hidden: true
HeatStackDomainAdminPassword:
- description: Password for heat_domain_admin user.
+ description: Password for heat_stack_domain_admin user.
type: string
hidden: true
HeatAuthEncryptionKey:
@@ -1301,7 +1301,7 @@ resources:
mapped_data:
ceph::profile::params::cluster_network: {get_input: ceph_cluster_network}
ceph::profile::params::public_network: {get_input: ceph_public_network}
- ceph::mon::public_addr: {get_input: ceph_public_ip}
+ ceph::profile::params::public_addr: {get_input: ceph_public_ip}
database:
raw_data: {get_file: hieradata/database.yaml}
object:
@@ -1416,9 +1416,11 @@ resources:
heat::debug: {get_input: debug}
heat::db::mysql::password: {get_input: heat_password}
heat_enable_db_purge: {get_input: heat_enable_db_purge}
+ heat::keystone::domain::domain_password: {get_input: heat_stack_domain_admin_password}
# Keystone
keystone::admin_token: {get_input: admin_token}
+ keystone::roles::admin::password: {get_input: admin_password}
keystone_ca_certificate: {get_input: keystone_ca_certificate}
keystone_signing_key: {get_input: keystone_signing_key}
keystone_signing_certificate: {get_input: keystone_signing_certificate}
diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml
index ebd6c251..312d49a0 100644
--- a/puppet/extraconfig/ceph/ceph-external-config.yaml
+++ b/puppet/extraconfig/ceph/ceph-external-config.yaml
@@ -41,6 +41,9 @@ parameters:
CephClientUserName:
default: openstack
type: string
+ CephIPv6:
+ default: False
+ type: boolean
resources:
CephClusterConfigImpl:
@@ -54,7 +57,9 @@ resources:
mapped_data:
ceph_storage_count: {get_param: ceph_storage_count}
enable_external_ceph: true
- ceph::profile::params::mon_host: {get_param: ceph_external_mon_ips}
+ ceph_ipv6: {get_param: CephIPv6}
+ ceph_mon_host: {get_param: ceph_external_mon_ips}
+ ceph_mon_host_v6: {get_param: ceph_external_mon_ips}
ceph::profile::params::fsid: {get_param: ceph_fsid}
ceph::profile::params::client_keys:
str_replace:
@@ -72,6 +77,7 @@ resources:
NOVA_POOL: {get_param: NovaRbdPoolName}
CINDER_POOL: {get_param: CinderRbdPoolName}
GLANCE_POOL: {get_param: GlanceRbdPoolName}
+ ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
index 905f196d..9b6981bb 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-11-12
+heat_template_version: 2015-10-15
description: Configure hieradata for Cinder Dell Storage Center configuration
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
index c73608f1..36db334e 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-11-06
+heat_template_version: 2015-10-15
description: Configure hieradata for Cinder Eqlx configuration
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index 572eef9f..865210c9 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -7,7 +7,6 @@ nova::compute::instance_usage_audit: true
nova::compute::instance_usage_audit_period: 'hour'
nova::compute::vnc_enabled: true
-nova::compute::libvirt::vncserver_listen: '0.0.0.0'
nova::compute::libvirt::migration_support: true
nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index e80bee07..288d224f 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -1,4 +1,5 @@
# Hiera data here applies to all controller nodes
+
nova::api::enabled: true
nova::conductor::enabled: true
nova::consoleauth::enabled: true
@@ -29,6 +30,9 @@ redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
+# keystone
+keystone::roles::admin::email: 'root@localhost'
+
# service tenant
glance::api::keystone_tenant: 'service'
glance::registry::keystone_tenant: 'service'
@@ -110,6 +114,9 @@ heat::cron::purge_deleted::age: 30
heat::cron::purge_deleted::age_type: 'days'
heat::cron::purge_deleted::maxdelay: 3600
heat::cron::purge_deleted::destination: '/dev/null'
+heat::keystone::domain::domain_name: 'heat_stack'
+heat::keystone::domain::domain_admin: 'heat_stack_domain_admin'
+heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost'
# pacemaker
pacemaker::corosync::cluster_name: 'tripleo_cluster'
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index 0db5b45a..fd7faff1 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -40,6 +40,14 @@ if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
} -> Class['ceph::profile::osd']
}
+if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+} else {
+ $mon_host = hiera('ceph_mon_host')
+}
+class { '::ceph::profile::params':
+ mon_host => $mon_host,
+}
include ::ceph::conf
include ::ceph::profile::client
include ::ceph::profile::osd
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index 99220ffd..b7f65f53 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -53,15 +53,17 @@ include ::nova
include ::nova::config
include ::nova::compute
-nova_config {
- 'DEFAULT/my_ip': value => $ipaddress;
- 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
- 'DEFAULT/host': value => $fqdn;
-}
-
$rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false)
$rbd_persistent_storage = hiera('rbd_persistent_storage', false)
if $rbd_ephemeral_storage or $rbd_persistent_storage {
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
include ::ceph::conf
include ::ceph::profile::client
@@ -83,7 +85,42 @@ if hiera('cinder_enable_nfs_backend', false) {
package {'nfs-utils': } -> Service['nova-compute']
}
-include ::nova::compute::libvirt
+if str2bool(hiera('nova::use_ipv6', false)) {
+ $vncserver_listen = '::0'
+} else {
+ $vncserver_listen = '0.0.0.0'
+}
+class { '::nova::compute::libvirt' :
+ vncserver_listen => $vncserver_listen,
+}
+
+# TUNNELLED mode provides a security enhancement when using shared storage but is not
+# supported when not using shared storage.
+# See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
+if $rbd_ephemeral_storage {
+ $block_migration_flag = 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, VIR_MIGRATE_NON_SHARED_INC'
+ $live_migration_flag = 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED'
+} else {
+ $block_migration_flag = 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC'
+ $live_migration_flag = 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE'
+}
+
+nova_config {
+ 'DEFAULT/my_ip': value => $ipaddress;
+ 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
+ 'DEFAULT/host': value => $fqdn;
+ # In future versions of Nova, the live/block migration flags will be deprecated [1].
+ # Tunnelling (encryption) will be handled via a single _new_ Nova
+ # config attribute 'live_migration_tunnelled'[2], sparing users
+ # from having to supply libvirt flags themselves.
+ # In future versions of QEMU (2.6, mostly), Dan's native encryption
+ # work will obsolete the need to use TUNNELLED transport mode.
+ # [1] https://review.openstack.org/#/c/263436/
+ # [2] https://review.openstack.org/#/c/263434/
+ 'libvirt/block_migration_flag': value => $block_migration_flag;
+ 'libvirt/live_migration_flag': value => $live_migration_flag;
+}
+
if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
file {'/etc/libvirt/qemu.conf':
ensure => present,
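[Editor's note] To make the effect of the migration flag selection above concrete, a small Python rendering of the two values that end up in nova.conf; the rbd setting is hypothetical, the flag strings are copied from the manifest.

    rbd_ephemeral_storage = True   # hypothetical deployment with RBD-backed ephemeral storage
    base = ["VIR_MIGRATE_UNDEFINE_SOURCE", "VIR_MIGRATE_PEER2PEER", "VIR_MIGRATE_LIVE"]
    tunnelled = ["VIR_MIGRATE_TUNNELLED"] if rbd_ephemeral_storage else []
    live_migration_flag = ", ".join(base + tunnelled)
    block_migration_flag = ", ".join(base + tunnelled + ["VIR_MIGRATE_NON_SHARED_INC"])
    print(live_migration_flag)
    # VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED
    print(block_migration_flag)
    # VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, VIR_MIGRATE_NON_SHARED_INC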
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index ecab5b5a..9e5c556a 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -48,14 +48,24 @@ if hiera('step') >= 2 {
include ::mongodb::globals
include ::mongodb::server
- $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ # NOTE(gfidente): We need to pass the list of IPv6 addresses *with* port and
+ # without the brackets as the 'members' argument for the 'mongodb_replset'
+ # resource.
+ if str2bool(hiera('mongodb::server::ipv6', false)) {
+ $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
+ $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ } else {
+ $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ }
$mongo_node_string = join($mongo_node_ips_with_port, ',')
$mongodb_replset = hiera('mongodb::server::replset')
$ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
if downcase(hiera('bootstrap_nodeid')) == $::hostname {
mongodb_replset { $mongodb_replset :
- members => $mongo_node_ips_with_port,
+ members => $mongo_node_ips_with_port_nobr,
}
}
}
@@ -151,8 +161,15 @@ if hiera('step') >= 2 {
$enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
if $enable_ceph {
+ $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
class { '::ceph::profile::params':
- mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
+ mon_initial_members => $mon_initial_members,
+ mon_host => $mon_host,
}
include ::ceph::conf
include ::ceph::profile::mon
@@ -178,6 +195,14 @@ if hiera('step') >= 2 {
}
if str2bool(hiera('enable_external_ceph', false)) {
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
include ::ceph::conf
include ::ceph::profile::client
}
@@ -638,6 +663,23 @@ if hiera('step') >= 4 {
if $heat_enable_db_purge {
include ::heat::cron::purge_deleted
}
+
+ if downcase(hiera('bootstrap_nodeid')) == $::hostname {
+ include ::keystone::roles::admin
+ # Class ::heat::keystone::domain has to run on bootstrap node
+ # because it creates DB entities via API calls.
+ include ::heat::keystone::domain
+
+ Class['::keystone::roles::admin'] -> Class['::heat::keystone::domain']
+ } else {
+ # On non-bootstrap node we don't need to create Keystone resources again
+ class { '::heat::keystone::domain':
+ manage_domain => false,
+ manage_user => false,
+ manage_role => false,
+ }
+ }
+
} #END STEP 4
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')])
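[Editor's note] A compact sketch of the MongoDB address handling above (the same logic appears again in the pacemaker manifest below): prefix()/suffix() wrap each IPv6 address in brackets for the connection string, while the "_nobr" variant keeps bare host:port pairs for the mongodb_replset 'members' argument. Addresses and replset name here are hypothetical.

    mongo_node_ips = ["fd00:fd00:fd00:2000::10", "fd00:fd00:fd00:2000::11"]
    with_port      = ["[%s]:27017" % ip for ip in mongo_node_ips]   # prefix('[') + suffix(']:27017')
    with_port_nobr = ["%s:27017" % ip for ip in mongo_node_ips]     # suffix(':27017') only
    replset = "tripleo"
    conn = "mongodb://%s/ceilometer?replicaSet=%s" % (",".join(with_port), replset)
    print(conn)             # brackets are required inside the URI
    print(with_port_nobr)   # bare addresses for the mongodb_replset resource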
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index abc0543f..402a3bc8 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -201,8 +201,19 @@ if hiera('step') >= 1 {
if hiera('step') >= 2 {
# NOTE(gfidente): the following vars are needed on all nodes so they
- # need to stay out of pacemaker_master conditional
- $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ # need to stay out of pacemaker_master conditional.
+ # The address mangling will hopefully go away once we can configure the
+ # connection string via hostnames; until then, we need to pass the list of
+ # IPv6 addresses *with* port and without the brackets as the 'members'
+ # argument for the 'mongodb_replset' resource.
+ if str2bool(hiera('mongodb::server::ipv6', false)) {
+ $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
+ $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ } else {
+ $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ }
$mongodb_replset = hiera('mongodb::server::replset')
if $pacemaker_master {
@@ -431,7 +442,7 @@ if hiera('step') >= 2 {
before => Mongodb_replset[$mongodb_replset],
}
mongodb_replset { $mongodb_replset :
- members => $mongo_node_ips_with_port,
+ members => $mongo_node_ips_with_port_nobr,
}
}
@@ -526,8 +537,15 @@ MYSQL_HOST=localhost\n",
$enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
if $enable_ceph {
+ $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
class { '::ceph::profile::params':
- mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
+ mon_initial_members => $mon_initial_members,
+ mon_host => $mon_host,
}
include ::ceph::conf
include ::ceph::profile::mon
@@ -553,6 +571,14 @@ MYSQL_HOST=localhost\n",
}
if str2bool(hiera('enable_external_ceph', false)) {
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
include ::ceph::conf
include ::ceph::profile::client
}
@@ -1825,6 +1851,16 @@ if hiera('step') >= 5 {
class {'::keystone::endpoint' :
require => Pacemaker::Resource::Service[$::apache::params::service_name],
}
+ include ::heat::keystone::domain
+ Class['::keystone::roles::admin'] -> Class['::heat::keystone::domain']
+
+ } else {
+ # On non-master controller we don't need to create Keystone resources again
+ class { '::heat::keystone::domain':
+ manage_domain => false,
+ manage_user => false,
+ manage_role => false,
+ }
}
} #END STEP 5
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 3b04be83..c26aca77 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -83,6 +83,9 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
+ SwiftStorageIPs:
+ default: {}
+ type: json
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -111,6 +114,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ NodeIndex:
+ type: number
+ default: 0
resources:
@@ -156,31 +162,43 @@ resources:
type: OS::TripleO::SwiftStorage::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::SwiftStorage::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::SwiftStorage::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::SwiftStorage::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::SwiftStorage::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::SwiftStorage::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
NetworkConfig:
type: OS::TripleO::ObjectStorage::Net::SoftwareConfig