-rw-r--r--  environments/ips-from-pool-all.yaml                           75
-rw-r--r--  environments/major-upgrade-pacemaker-init.yaml (renamed from environments/major-upgrade-script-delivery.yaml)   2
-rw-r--r--  environments/network-isolation-v6.yaml                         2
-rw-r--r--  extraconfig/tasks/major_upgrade_ceph_storage.sh               35
-rw-r--r--  extraconfig/tasks/major_upgrade_object_storage.sh              7
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_init.yaml          128
-rw-r--r--  extraconfig/tasks/major_upgrade_script_delivery.yaml          65
-rw-r--r--  overcloud.yaml                                                 5
-rw-r--r--  puppet/ceph-cluster-config.yaml                               15
-rw-r--r--  puppet/ceph-storage.yaml                                      18
-rw-r--r--  puppet/cinder-storage.yaml                                    18
-rw-r--r--  puppet/compute.yaml                                           33
-rw-r--r--  puppet/extraconfig/ceph/ceph-external-config.yaml              8
-rw-r--r--  puppet/hieradata/compute.yaml                                  1
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp                      8
-rw-r--r--  puppet/manifests/overcloud_compute.pp                         17
-rw-r--r--  puppet/manifests/overcloud_controller.pp                      31
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp            34
-rw-r--r--  puppet/swift-storage.yaml                                     18
19 files changed, 439 insertions, 81 deletions
diff --git a/environments/ips-from-pool-all.yaml b/environments/ips-from-pool-all.yaml
new file mode 100644
index 00000000..f660d501
--- /dev/null
+++ b/environments/ips-from-pool-all.yaml
@@ -0,0 +1,75 @@
+# Environment file demonstrating how to pre-assign IPs to all node types
+resource_registry:
+ OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
+ OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+
+ OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+
+ OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
+
+ OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
+
+ OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
+
+parameter_defaults:
+ ControllerIPs:
+ # Each controller will get an IP from the lists below, first controller, first IP
+ external:
+ - 10.0.0.251
+ internal_api:
+ - 172.16.2.251
+ storage:
+ - 172.16.1.251
+ storage_mgmt:
+ - 172.16.3.251
+ tenant:
+ - 172.16.0.251
+ NovaComputeIPs:
+ # Each compute will get an IP from the lists below, first compute, first IP
+ internal_api:
+ - 172.16.2.252
+ storage:
+ - 172.16.1.252
+ tenant:
+ - 172.16.0.252
+ CephStorageIPs:
+ # Each ceph node will get an IP from the lists below, first node, first IP
+ storage:
+ - 172.16.1.253
+ storage_mgmt:
+ - 172.16.3.253
+ SwiftStorageIPs:
+ # Each swift node will get an IP from the lists below, first node, first IP
+ internal_api:
+ - 172.16.2.254
+ storage:
+ - 172.16.1.254
+ storage_mgmt:
+ - 172.16.3.254
+ BlockStorageIPs:
+ # Each cinder node will get an IP from the lists below, first node, first IP
+ internal_api:
+ - 172.16.2.250
+ storage:
+ - 172.16.1.250
+ storage_mgmt:
+ - 172.16.3.250
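The environment above is only a demonstration of the pre-assignment mechanism; the addresses would normally be replaced with values from the target networks, with one list entry per node of each role. A minimal usage sketch, assuming the standard tripleoclient CLI and that the file is referenced relative to the templates tree:

    # Sketch only: layer the predictable-IP environment onto a deployment.
    # The -e option adds an environment file; later files override earlier ones.
    openstack overcloud deploy --templates \
      -e environments/ips-from-pool-all.yaml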
diff --git a/environments/major-upgrade-script-delivery.yaml b/environments/major-upgrade-pacemaker-init.yaml
index ba128d84..d98a9cdd 100644
--- a/environments/major-upgrade-script-delivery.yaml
+++ b/environments/major-upgrade-pacemaker-init.yaml
@@ -2,7 +2,7 @@ parameter_defaults:
UpgradeLevelNovaCompute: liberty
resource_registry:
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_script_delivery.yaml
+ OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker_init.yaml
OS::TripleO::Tasks::PackageUpdate: ../extraconfig/tasks/yum_update_noop.yaml
OS::TripleO::ControllerPostDeployment: OS::Heat::None
OS::TripleO::ComputePostDeployment: OS::Heat::None
diff --git a/environments/network-isolation-v6.yaml b/environments/network-isolation-v6.yaml
index 4c071747..599a08b1 100644
--- a/environments/network-isolation-v6.yaml
+++ b/environments/network-isolation-v6.yaml
@@ -43,6 +43,8 @@ resource_registry:
OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml
parameter_defaults:
+ # Enable IPv6 for Ceph.
+ CephIPv6: True
# Enable IPv6 for Corosync. This is required when Corosync is using an IPv6 IP in the cluster.
CorosyncIPv6: True
# Enable IPv6 for MongoDB. This is required when MongoDB is using an IPv6 IP.
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
new file mode 100644
index 00000000..de42b16d
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# This delivers the ceph-storage upgrade script to be invoked as part of the tripleo
+# major upgrade workflow.
+#
+set -eu
+
+UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
+
+cat > $UPGRADE_SCRIPT << ENDOFCAT
+### DO NOT MODIFY THIS FILE
+### This file is automatically delivered to the ceph-storage nodes as part of the
+### tripleo upgrades workflow
+
+
+function systemctl_ceph {
+ action=\$1
+ systemctl \$action ceph
+}
+
+# "so that mirrors aren't rebalanced as if the OSD died" - gfidente
+ceph osd set noout
+
+systemctl_ceph stop
+yum -y install python-zaqarclient # needed for os-collect-config
+yum -y update
+systemctl_ceph start
+
+ceph osd unset noout
+
+ENDOFCAT
+
+# ensure the permissions are OK
+chmod 0755 $UPGRADE_SCRIPT
+
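As with the existing object-storage variant below, this config only writes the upgrade script onto the node; nothing executes it here. A hedged sketch of the follow-up operator step, assuming the default heat-admin node user and the /root/tripleo_upgrade_node.sh path written above:

    # Sketch only: run the delivered upgrade script on one ceph-storage node.
    # heat-admin is the usual TripleO overcloud user; adjust if your images differ.
    ssh heat-admin@<ceph-node-ip> sudo /root/tripleo_upgrade_node.sh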
diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh
index 0f6d091e..931f4f42 100644
--- a/extraconfig/tasks/major_upgrade_object_storage.sh
+++ b/extraconfig/tasks/major_upgrade_object_storage.sh
@@ -14,17 +14,18 @@ cat > $UPGRADE_SCRIPT << ENDOFCAT
function systemctl_swift {
- action=$1
+ action=\$1
for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \
openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \
- openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy; do
- systemctl $action $S
+ openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object; do
+ systemctl \$action \$S
done
}
systemctl_swift stop
+yum -y install python-zaqarclient # needed for os-collect-config
yum -y update
systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.yaml
new file mode 100644
index 00000000..f662bc3d
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_pacemaker_init.yaml
@@ -0,0 +1,128 @@
+heat_template_version: 2014-10-16
+description: 'Upgrade for Pacemaker deployments'
+
+parameters:
+
+ controller_servers:
+ type: json
+ compute_servers:
+ type: json
+ blockstorage_servers:
+ type: json
+ objectstorage_servers:
+ type: json
+ cephstorage_servers:
+ type: json
+ input_values:
+ type: json
+ description: input values for the software deployments
+
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
+ UpgradeLevelNovaCompute:
+ type: string
+ description: Nova Compute upgrade level
+ default: ''
+
+resources:
+
+ UpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - get_param: UpgradeInitCommand
+
+ UpgradeInitControllerDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: controller_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ UpgradeInitComputeDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: compute_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ UpgradeInitBlockStorageDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: blockstorage_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ UpgradeInitObjectStorageDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: objectstorage_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ UpgradeInitCephStorageDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: cephstorage_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ # TODO(jistr): for Mitaka->Newton upgrades and further we can use
+ # map_merge with input_values instead of feeding params into scripts
+ # via str_replace on bash snippets
+
+ ComputeDeliverUpgradeScriptConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - str_replace:
+ template: |
+ #!/bin/bash
+ upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
+ params:
+ UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
+ - get_file: major_upgrade_compute.sh
+
+ ComputeDeliverUpgradeScriptDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: compute_servers}
+ config: {get_resource: ComputeDeliverUpgradeScriptConfig}
+ input_values: {get_param: input_values}
+
+ ObjectStorageDeliverUpgradeScriptConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: major_upgrade_object_storage.sh}
+
+ ObjectStorageDeliverUpgradeScriptDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: objectstorage_servers}
+ config: {get_resource: ObjectStorageDeliverUpgradeScriptConfig}
+ input_values: {get_param: input_values}
+
+ CephStorageDeliverUpgradeScriptConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: major_upgrade_ceph_storage.sh}
+
+ CephStorageDeliverUpgradeScriptDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: cephstorage_servers}
+ config: {get_resource: CephStorageDeliverUpgradeScriptConfig}
+ input_values: {get_param: input_values}
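UpgradeInitCommand is deliberately free-form: the UpgradeInitConfig resource above only prepends a #!/bin/bash header to whatever is supplied. A hedged sketch of the kind of snippet an operator might pass in (for example via parameter_defaults in an environment file), assuming yum-based nodes; the repository IDs are placeholders, not real repositories:

    # Sketch only: switch every node to the new release repositories before upgrading.
    # 'old-openstack-repo' and 'new-openstack-repo' are illustrative repo IDs.
    yum-config-manager --disable 'old-openstack-repo'
    yum-config-manager --enable 'new-openstack-repo'
    yum clean all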
diff --git a/extraconfig/tasks/major_upgrade_script_delivery.yaml b/extraconfig/tasks/major_upgrade_script_delivery.yaml
deleted file mode 100644
index f7faa7fc..00000000
--- a/extraconfig/tasks/major_upgrade_script_delivery.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-heat_template_version: 2014-10-16
-description: 'Upgrade for Pacemaker deployments'
-
-parameters:
-
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-
- UpgradeLevelNovaCompute:
- type: string
- description: Nova Compute upgrade level
- default: ''
-
-resources:
- # TODO(jistr): for Mitaka->Newton upgrades and further we can use
- # map_merge with input_values instead of feeding params into scripts
- # via str_replace on bash snippets
-
- ComputeDeliverUpgradeScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - get_file: major_upgrade_compute.sh
-
- ComputeDeliverUpgradeScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: compute_servers}
- config: {get_resource: ComputeDeliverUpgradeScriptConfig}
- input_values: {get_param: input_values}
-
-
- ObjectStoreDeliverUpgradeScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: major_upgrade_object_storage.sh}
-
- ObjectStoreDeliverUpgradeScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: objectstorage_servers}
- config: {get_resource: ObjectStoreDeliverUpgradeScriptConfig}
- input_values: {get_param: input_values}
-
diff --git a/overcloud.yaml b/overcloud.yaml
index e540b9c4..cceb2018 100644
--- a/overcloud.yaml
+++ b/overcloud.yaml
@@ -1083,6 +1083,7 @@ resources:
NovaComputeLibvirtType: {get_param: NovaComputeLibvirtType}
NovaComputeLibvirtVifDriver: {get_param: NovaComputeLibvirtVifDriver}
NovaEnableRbdBackend: {get_param: NovaEnableRbdBackend}
+ NovaIPv6: {get_param: NovaIPv6}
NovaPublicIP: {get_attr: [VipMap, net_ip_map, external]}
NovaPassword: {get_param: NovaPassword}
NovaOVSBridge: {get_param: NovaOVSBridge}
@@ -1107,6 +1108,7 @@ resources:
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: NovaComputeSchedulerHints}
+ NodeIndex: '%index%'
BlockStorage:
type: OS::Heat::ResourceGroup
@@ -1148,6 +1150,7 @@ resources:
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: BlockStorageSchedulerHints}
+ NodeIndex: '%index%'
ObjectStorage:
type: OS::Heat::ResourceGroup
@@ -1180,6 +1183,7 @@ resources:
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: ObjectStorageSchedulerHints}
+ NodeIndex: '%index%'
CephStorage:
type: OS::Heat::ResourceGroup
@@ -1207,6 +1211,7 @@ resources:
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: CephStorageSchedulerHints}
+ NodeIndex: '%index%'
ControllerIpListMap:
type: OS::TripleO::Network::Ports::NetIpListMap
diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml
index 96198c3f..dc2f98ed 100644
--- a/puppet/ceph-cluster-config.yaml
+++ b/puppet/ceph-cluster-config.yaml
@@ -39,6 +39,9 @@ parameters:
CephClientUserName:
default: openstack
type: string
+ CephIPv6:
+ default: False
+ type: boolean
resources:
CephClusterConfigImpl:
@@ -50,15 +53,25 @@ resources:
datafiles:
ceph_cluster:
mapped_data:
+ ceph_ipv6: {get_param: CephIPv6}
ceph_storage_count: {get_param: ceph_storage_count}
ceph_mon_initial_members:
list_join:
- ','
- {get_param: ceph_mon_names}
- ceph::profile::params::mon_host:
+ ceph_mon_host:
list_join:
- ','
- {get_param: ceph_mon_ips}
+ ceph_mon_host_v6:
+ str_replace:
+ template: "'[IPS_LIST]'"
+ params:
+ IPS_LIST:
+ list_join:
+ - '],['
+ - {get_param: ceph_mon_ips}
+ ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
ceph::profile::params::fsid: {get_param: ceph_fsid}
ceph::profile::params::mon_key: {get_param: ceph_mon_key}
# We should use a separated key for the non-admin clients
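The ceph_mon_host_v6 value is built purely at the string level: the monitor IPs are joined with '],[' and the result is wrapped in quotes and brackets, giving the bracketed form used for IPv6 monitor addresses. A hedged illustration with made-up addresses:

    # Illustration only (addresses are made up): for ceph_mon_ips of
    #   fd00:fd00:fd00:3000::10 and fd00:fd00:fd00:3000::11
    # the str_replace/list_join above yields this ceph_mon_host_v6 string:
    echo "'[fd00:fd00:fd00:3000::10],[fd00:fd00:fd00:3000::11]'"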
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index 88120b9c..d2988926 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -62,6 +62,9 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
+ CephStorageIPs:
+ default: {}
+ type: json
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -90,6 +93,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ NodeIndex:
+ type: number
+ default: 0
resources:
CephStorage:
@@ -135,31 +141,43 @@ resources:
type: OS::TripleO::CephStorage::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::CephStorage::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::CephStorage::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::CephStorage::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::CephStorage::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::CephStorage::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
NetworkConfig:
type: OS::TripleO::CephStorage::Net::SoftwareConfig
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index 0c22b575..888f3cf8 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -38,6 +38,9 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
+ BlockStorageIPs:
+ default: {}
+ type: json
Flavor:
description: Flavor for block storage nodes to request when deploying.
type: string
@@ -141,6 +144,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ NodeIndex:
+ type: number
+ default: 0
resources:
@@ -187,31 +193,43 @@ resources:
type: OS::TripleO::BlockStorage::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::BlockStorage::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::BlockStorage::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::BlockStorage::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::BlockStorage::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::BlockStorage::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
NetworkConfig:
type: OS::TripleO::BlockStorage::Net::SoftwareConfig
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 58ca71e7..ee5bced6 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -195,6 +195,9 @@ parameters:
default: 'dvr_snat'
description: Agent mode for the neutron-l3-agent on the controller hosts
type: string
+ NodeIndex:
+ type: number
+ default: 0
NovaApiHost:
type: string
default: '' # Has to be here because of the ignored empty value bug
@@ -207,6 +210,9 @@ parameters:
NovaCompute specific configuration to inject into the cluster. Same
structure as ExtraConfig.
type: json
+ NovaComputeIPs:
+ default: {}
+ type: json
NovaComputeLibvirtType:
type: string
default: kvm
@@ -218,6 +224,10 @@ parameters:
default: false
description: Whether to enable or not the Rbd backend for Nova
type: boolean
+ NovaIPv6:
+ default: false
+ description: Enable IPv6 features in Nova
+ type: boolean
NovaPassword:
description: The password for the nova service account, used by nova-api.
type: string
@@ -378,31 +388,43 @@ resources:
type: OS::TripleO::Compute::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::Compute::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::Compute::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::Compute::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::Compute::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::Compute::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
NetIpMap:
type: OS::TripleO::Network::Ports::NetIpMap
@@ -478,6 +500,7 @@ resources:
raw_data: {get_file: hieradata/compute.yaml}
mapped_data:
cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend}
+ nova::use_ipv6: {get_input: nova_ipv6}
nova::debug: {get_input: debug}
nova::rabbit_userid: {get_input: rabbit_username}
nova::rabbit_password: {get_input: rabbit_password}
@@ -567,10 +590,18 @@ resources:
nova_api_host: {get_param: NovaApiHost}
nova_password: {get_param: NovaPassword}
nova_enable_rbd_backend: {get_param: NovaEnableRbdBackend}
+ nova_ipv6: {get_param: NovaIPv6}
cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
nova_vnc_proxyclient_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaVncProxyNetwork]}]}
nova_vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
- nova_vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host]}
+ # Remove brackets that may come if the IP address is IPv6.
+ # For DNS names and IPv4, this will just get the NovaVNCProxyPublic value
+ nova_vncproxy_host:
+ str_replace:
+ template: {get_param: [EndpointMap, NovaVNCProxyPublic, host]}
+ params:
+ '[': ''
+ ']': ''
nova_vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
nova_ovs_bridge: {get_param: NovaOVSBridge}
nova_security_group_api: {get_param: NovaSecurityGroupAPI}
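The str_replace above only deletes literal square brackets, so a bracketed IPv6 endpoint host becomes a bare address while DNS names and IPv4 addresses pass through unchanged. A hedged shell equivalent, purely as an illustration:

    # Illustration only: the bracket removal is equivalent to deleting '[' and ']'.
    echo '[2001:db8::5]' | tr -d '[]'            # -> 2001:db8::5
    echo 'overcloud.example.com' | tr -d '[]'    # -> unchanged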
diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml
index ebd6c251..312d49a0 100644
--- a/puppet/extraconfig/ceph/ceph-external-config.yaml
+++ b/puppet/extraconfig/ceph/ceph-external-config.yaml
@@ -41,6 +41,9 @@ parameters:
CephClientUserName:
default: openstack
type: string
+ CephIPv6:
+ default: False
+ type: boolean
resources:
CephClusterConfigImpl:
@@ -54,7 +57,9 @@ resources:
mapped_data:
ceph_storage_count: {get_param: ceph_storage_count}
enable_external_ceph: true
- ceph::profile::params::mon_host: {get_param: ceph_external_mon_ips}
+ ceph_ipv6: {get_param: CephIPv6}
+ ceph_mon_host: {get_param: ceph_external_mon_ips}
+ ceph_mon_host_v6: {get_param: ceph_external_mon_ips}
ceph::profile::params::fsid: {get_param: ceph_fsid}
ceph::profile::params::client_keys:
str_replace:
@@ -72,6 +77,7 @@ resources:
NOVA_POOL: {get_param: NovaRbdPoolName}
CINDER_POOL: {get_param: CinderRbdPoolName}
GLANCE_POOL: {get_param: GlanceRbdPoolName}
+ ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index 572eef9f..865210c9 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -7,7 +7,6 @@ nova::compute::instance_usage_audit: true
nova::compute::instance_usage_audit_period: 'hour'
nova::compute::vnc_enabled: true
-nova::compute::libvirt::vncserver_listen: '0.0.0.0'
nova::compute::libvirt::migration_support: true
nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index 0db5b45a..fd7faff1 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -40,6 +40,14 @@ if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
} -> Class['ceph::profile::osd']
}
+if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+} else {
+ $mon_host = hiera('ceph_mon_host')
+}
+class { '::ceph::profile::params':
+ mon_host => $mon_host,
+}
include ::ceph::conf
include ::ceph::profile::client
include ::ceph::profile::osd
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index 99220ffd..7c8cda71 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -62,6 +62,14 @@ nova_config {
$rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false)
$rbd_persistent_storage = hiera('rbd_persistent_storage', false)
if $rbd_ephemeral_storage or $rbd_persistent_storage {
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
include ::ceph::conf
include ::ceph::profile::client
@@ -83,7 +91,14 @@ if hiera('cinder_enable_nfs_backend', false) {
package {'nfs-utils': } -> Service['nova-compute']
}
-include ::nova::compute::libvirt
+if str2bool(hiera('nova::use_ipv6', false)) {
+ $vncserver_listen = '::0'
+} else {
+ $vncserver_listen = '0.0.0.0'
+}
+class { '::nova::compute::libvirt' :
+ vncserver_listen => $vncserver_listen,
+}
if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
file {'/etc/libvirt/qemu.conf':
ensure => present,
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index ecab5b5a..4a03fefa 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -48,14 +48,24 @@ if hiera('step') >= 2 {
include ::mongodb::globals
include ::mongodb::server
- $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ # NOTE(gfidente): We need to pass the list of IPv6 addresses *with* port and
+ # without the brackets as 'members' argument for the 'mongodb_replset'
+ # resource.
+ if str2bool(hiera('mongodb::server::ipv6', false)) {
+ $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
+ $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ } else {
+ $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ }
$mongo_node_string = join($mongo_node_ips_with_port, ',')
$mongodb_replset = hiera('mongodb::server::replset')
$ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
if downcase(hiera('bootstrap_nodeid')) == $::hostname {
mongodb_replset { $mongodb_replset :
- members => $mongo_node_ips_with_port,
+ members => $mongo_node_ips_with_port_nobr,
}
}
}
@@ -151,8 +161,15 @@ if hiera('step') >= 2 {
$enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
if $enable_ceph {
+ $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
class { '::ceph::profile::params':
- mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
+ mon_initial_members => $mon_initial_members,
+ mon_host => $mon_host,
}
include ::ceph::conf
include ::ceph::profile::mon
@@ -178,6 +195,14 @@ if hiera('step') >= 2 {
}
if str2bool(hiera('enable_external_ceph', false)) {
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
include ::ceph::conf
include ::ceph::profile::client
}
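The two mongo address lists built in the hunk above differ only in bracketing: the bracketed entries feed the Ceilometer connection string via $mongo_node_string, while the bracket-free *_nobr list is what the mongodb_replset resource expects as 'members', per the NOTE. A hedged illustration with a made-up address:

    # Illustration only (address is made up): for a mongo node at fd00:fd00:fd00:2000::10
    echo '[fd00:fd00:fd00:2000::10]:27017'    # entry in $mongo_node_ips_with_port (connection string)
    echo 'fd00:fd00:fd00:2000::10:27017'      # entry in $mongo_node_ips_with_port_nobr (replset members)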
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index abc0543f..7637029c 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -201,8 +201,19 @@ if hiera('step') >= 1 {
if hiera('step') >= 2 {
# NOTE(gfidente): the following vars are needed on all nodes so they
- # need to stay out of pacemaker_master conditional
- $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ # need to stay out of pacemaker_master conditional.
+ # The addresses mangling will hopefully go away when we'll be able to
+ # configure the connection string via hostnames, until then, we need to pass
+ # the list of IPv6 addresses *with* port and without the brackets as 'members'
+ # argument for the 'mongodb_replset' resource.
+ if str2bool(hiera('mongodb::server::ipv6', false)) {
+ $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
+ $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ } else {
+ $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ }
$mongodb_replset = hiera('mongodb::server::replset')
if $pacemaker_master {
@@ -431,7 +442,7 @@ if hiera('step') >= 2 {
before => Mongodb_replset[$mongodb_replset],
}
mongodb_replset { $mongodb_replset :
- members => $mongo_node_ips_with_port,
+ members => $mongo_node_ips_with_port_nobr,
}
}
@@ -526,8 +537,15 @@ MYSQL_HOST=localhost\n",
$enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
if $enable_ceph {
+ $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
class { '::ceph::profile::params':
- mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
+ mon_initial_members => $mon_initial_members,
+ mon_host => $mon_host,
}
include ::ceph::conf
include ::ceph::profile::mon
@@ -553,6 +571,14 @@ MYSQL_HOST=localhost\n",
}
if str2bool(hiera('enable_external_ceph', false)) {
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
include ::ceph::conf
include ::ceph::profile::client
}
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 3b04be83..c26aca77 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -83,6 +83,9 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
+ SwiftStorageIPs:
+ default: {}
+ type: json
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -111,6 +114,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ NodeIndex:
+ type: number
+ default: 0
resources:
@@ -156,31 +162,43 @@ resources:
type: OS::TripleO::SwiftStorage::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::SwiftStorage::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::SwiftStorage::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::SwiftStorage::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::SwiftStorage::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::SwiftStorage::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
NetworkConfig:
type: OS::TripleO::ObjectStorage::Net::SoftwareConfig