-rw-r--r--  capabilities-map.yaml  |  4
-rw-r--r--  docker/compute-post.yaml  |  44
-rw-r--r--  docker/firstboot/start_docker_agents.sh  |  19
-rw-r--r--  environments/docker-network-isolation.yaml  |  6
-rw-r--r--  environments/docker.yaml  |  2
-rw-r--r--  environments/ips-from-pool-all.yaml  |  75
-rw-r--r--  environments/major-upgrade-pacemaker-init.yaml (renamed from environments/major-upgrade-script-delivery.yaml)  |  2
-rw-r--r--  environments/network-isolation-v6.yaml  |  2
-rw-r--r--  extraconfig/tasks/major_upgrade_block_storage.sh  |  8
-rw-r--r--  extraconfig/tasks/major_upgrade_ceph_storage.sh  |  35
-rw-r--r--  extraconfig/tasks/major_upgrade_object_storage.sh  |  7
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml  |  16
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_init.yaml  |  128
-rw-r--r--  extraconfig/tasks/major_upgrade_script_delivery.yaml  |  65
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh  |  21
-rw-r--r--  network/management.yaml  |  2
-rw-r--r--  network/ports/external_from_pool.yaml  |  2
-rw-r--r--  network/ports/from_service.yaml  |  8
-rw-r--r--  network/ports/internal_api_from_pool.yaml  |  2
-rw-r--r--  network/ports/storage_from_pool.yaml  |  2
-rw-r--r--  network/ports/storage_mgmt_from_pool.yaml  |  2
-rw-r--r--  network/ports/tenant_from_pool.yaml  |  2
-rw-r--r--  overcloud.yaml  |  14
-rw-r--r--  puppet/ceph-cluster-config.yaml  |  15
-rw-r--r--  puppet/ceph-storage.yaml  |  18
-rw-r--r--  puppet/cinder-storage.yaml  |  19
-rw-r--r--  puppet/compute.yaml  |  34
-rw-r--r--  puppet/controller.yaml  |  27
-rw-r--r--  puppet/extraconfig/ceph/ceph-external-config.yaml  |  8
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml  |  2
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml  |  2
-rw-r--r--  puppet/extraconfig/tls/ca-inject.yaml  |  2
-rw-r--r--  puppet/extraconfig/tls/tls-cert-inject.yaml  |  4
-rw-r--r--  puppet/hieradata/compute.yaml  |  1
-rw-r--r--  puppet/hieradata/controller.yaml  |  7
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp  |  8
-rw-r--r--  puppet/manifests/overcloud_compute.pp  |  36
-rw-r--r--  puppet/manifests/overcloud_controller.pp  |  50
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp  |  47
-rw-r--r--  puppet/swift-storage.yaml  |  18
40 files changed, 597 insertions, 169 deletions
diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index f47eb9ad..c7816b7e 100644
--- a/capabilities-map.yaml
+++ b/capabilities-map.yaml
@@ -5,7 +5,7 @@
# root_template: identifies repository's root template
# root_environment: identifies root_environment, this one is special in terms of
# order in which the environments are merged before deploying. This one serves as
-# a base and it's parameters/resource_registry gets overriden by other environments
+# a base and it's parameters/resource_registry gets overridden by other environments
# if used.
# topics:
@@ -21,7 +21,7 @@
# Attributes:
# title: (optional)
# description: (optional)
-# tags: a list of tags to provide aditional information for e.g. filtering (optional)
+# tags: a list of tags to provide additional information for e.g. filtering (optional)
# environments: (required)
# environments:
diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml
index 8f9e9627..4532549f 100644
--- a/docker/compute-post.yaml
+++ b/docker/compute-post.yaml
@@ -17,8 +17,6 @@ parameters:
type: string
DockerLibvirtImage:
type: string
- DockerNeutronAgentImage:
- type: string
DockerOpenvswitchImage:
type: string
DockerOvsVswitchdImage:
@@ -33,17 +31,12 @@ parameters:
default: "/etc/nova/nova.conf"
NeutronOpenvswitchAgentConfig:
type: string
- default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/ml2/ml2_conf.ini"
- NeutronAgentConfig:
- type: string
default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
- NeutronAgentPluginVolume:
+ NeutronOpenvswitchAgentPluginVolume:
type: string
- description: The neutron agent plugin to mount into the neutron-agents container
default: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/ovs_neutron_plugin.ini:ro"
- NeutronAgentOvsVolume:
+ NeutronOpenvswitchAgentOvsVolume:
type: string
- description: The neutron agent ovs agents to mount into the neutron-agents container
default: " "
resources:
@@ -99,7 +92,6 @@ resources:
- name: libvirt_config
- name: nova_config
- name: neutron_openvswitch_agent_config
- - name: neutron_agent_config
config: |
#!/bin/python
import json
@@ -112,13 +104,11 @@ resources:
libvirt_config = os.getenv('libvirt_config').split(',')
nova_config = os.getenv('nova_config').split(',')
neutron_openvswitch_agent_config = os.getenv('neutron_openvswitch_agent_config').split(',')
- neutron_agent_config = os.getenv('neutron_agent_config').split(',')
# Command, Config_files, Owner, Perms
services = {'nova-libvirt': ['/usr/sbin/libvirtd', libvirt_config, 'root', libvirt_perms],
'nova-compute': ['/usr/bin/nova-compute', nova_config, 'nova', file_perms],
'neutron-openvswitch-agent': ['/usr/bin/neutron-openvswitch-agent', neutron_openvswitch_agent_config, 'neutron', file_perms],
- 'neutron-agent': ['/usr/bin/neutron-openvswitch-agent', neutron_agent_config, 'neutron', file_perms],
'ovs-vswitchd': ['/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/openvswitch/ovs-vswitchd.log'],
'ovsdb-server': ['/usr/sbin/ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --log-file=/var/log/openvswitch/ovsdb-server.log']
}
@@ -171,7 +161,6 @@ resources:
libvirt_config: {get_param: LibvirtConfig}
nova_config: {get_param: NovaConfig}
neutron_openvswitch_agent_config: {get_param: NeutronOpenvswitchAgentConfig}
- neutron_agent_config: {get_param: NeutronAgentConfig}
NovaComputeContainersDeploymentOVS:
type: OS::Heat::StructuredDeployments
@@ -291,27 +280,7 @@ resources:
properties:
group: docker-compose
config:
- openvswitch:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
- net: host
- privileged: true
- restart: always
- volumes:
- - /run:/run
- - /lib/modules:/lib/modules:ro
- - /var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json
- - /var/lib/etc-data/neutron/neutron.conf:/etc/kolla/neutron-openvswitch-agent/:ro
- - /var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro
- - /var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- volumes_from:
- - computedata
-
- neutronagent:
+ neutronovsagent:
image:
list_join:
- '/'
@@ -326,10 +295,11 @@ resources:
- list_join:
- ","
- [ "/run:/run", "/lib/modules:/lib/modules:ro",
- "/var/lib/etc-data/json-config/neutron-agent.json:/var/lib/kolla/config_files/config.json",
+ "/var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json",
"/var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro",
- {get_param: NeutronAgentPluginVolume},
- {get_param: NeutronAgentOvsVolume} ]
+ "/var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro",
+ {get_param: NeutronOpenvswitchAgentPluginVolume},
+ {get_param: NeutronOpenvswitchAgentOvsVolume} ]
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
volumes_from:
diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh
index 963c7eee..bb458a68 100644
--- a/docker/firstboot/start_docker_agents.sh
+++ b/docker/firstboot/start_docker_agents.sh
@@ -73,3 +73,22 @@ chmod 0640 /etc/systemd/system/heat-docker-agents.service
# Disable NetworkManager and let the ifup/down scripts work properly.
/usr/bin/systemctl disable NetworkManager
/usr/bin/systemctl stop NetworkManager
+
+# Atomic's root partition & logical volume defaults to 3G. In order to launch
+# larger VMs, we need to enlarge the root logical volume and scale down the
+# docker_pool logical volume. We are allocating 80% of the disk space for
+# vm data and the remaining 20% for docker images.
+ATOMIC_ROOT='/dev/mapper/atomicos-root'
+ROOT_DEVICE=`pvs -o vg_name,pv_name --no-headings | grep atomicos | awk '{ print $2}'`
+
+growpart $( echo "${ROOT_DEVICE}" | sed -r 's/([^0-9]*)([0-9]+)/\1 \2/' )
+pvresize "${ROOT_DEVICE}"
+lvresize -l +80%FREE "${ATOMIC_ROOT}"
+xfs_growfs "${ATOMIC_ROOT}"
+
+cat <<EOF > /etc/sysconfig/docker-storage-setup
+GROWPART=true
+AUTO_EXTEND_POOL=yes
+POOL_AUTOEXTEND_PERCENT=30
+POOL_AUTOEXTEND_THRESHOLD=70
+EOF
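
The growpart call added above splits the physical-volume path into the separate disk and partition-number arguments that growpart expects. A minimal sketch of that sed transformation, using a hypothetical /dev/vda2 root PV rather than the Atomic host's actual device:

```
# Illustration only, not part of the change: how the sed expression above
# feeds growpart. Assumes a hypothetical root PV on /dev/vda2.
ROOT_DEVICE='/dev/vda2'

# 's/([^0-9]*)([0-9]+)/\1 \2/' splits the trailing partition number off the
# disk name, so "/dev/vda2" becomes "/dev/vda 2".
growpart_args=$( echo "${ROOT_DEVICE}" | sed -r 's/([^0-9]*)([0-9]+)/\1 \2/' )
echo "growpart ${growpart_args}"   # -> growpart /dev/vda 2
```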
diff --git a/environments/docker-network-isolation.yaml b/environments/docker-network-isolation.yaml
index 257d03dc..87c81d0b 100644
--- a/environments/docker-network-isolation.yaml
+++ b/environments/docker-network-isolation.yaml
@@ -1,4 +1,4 @@
parameter_defaults:
- NeutronAgentConfig: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/openvswitch_agent.ini"
- NeutronAgentPluginVolume: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro"
- NeutronAgentOvsVolume: "/var/lib/etc-data/neutron/conf.d/neutron-openvswitch-agent:/etc/neutron/conf.d/neutron-openvswitch-agent:ro"
+ NeutronOpenvswitchAgentConfig: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/openvswitch_agent.ini"
+ NeutronOpenvswitchAgentPluginVolume: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro"
+ NeutronOpenvswitchAgentOvsVolume: "/var/lib/etc-data/neutron/conf.d/neutron-openvswitch-agent:/etc/neutron/conf.d/neutron-openvswitch-agent:ro"
diff --git a/environments/docker.yaml b/environments/docker.yaml
index 7c6dc407..be21d842 100644
--- a/environments/docker.yaml
+++ b/environments/docker.yaml
@@ -14,9 +14,9 @@ parameter_defaults:
DockerNamespaceIsRegistry: false
# Compute Node Images
DockerComputeImage: centos-binary-nova-compute:latest
+ DockerAgentImage: heat-docker-agents:latest
DockerComputeDataImage: centos-binary-data:latest
DockerLibvirtImage: centos-binary-nova-libvirt:latest
- DockerNeutronAgentImage: centos-binary-neutron-agents:latest
DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:latest
DockerOvsVswitchdImage: centos-binary-openvswitch-vswitchd:latest
DockerOpenvswitchDBImage: centos-binary-openvswitch-db-server:latest
diff --git a/environments/ips-from-pool-all.yaml b/environments/ips-from-pool-all.yaml
new file mode 100644
index 00000000..f660d501
--- /dev/null
+++ b/environments/ips-from-pool-all.yaml
@@ -0,0 +1,75 @@
+# Environment file demonstrating how to pre-assign IPs to all node types
+resource_registry:
+ OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
+ OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+
+ OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+
+ OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
+
+ OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
+
+ OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
+
+parameter_defaults:
+ ControllerIPs:
+ # Each controller will get an IP from the lists below, first controller, first IP
+ external:
+ - 10.0.0.251
+ internal_api:
+ - 172.16.2.251
+ storage:
+ - 172.16.1.251
+ storage_mgmt:
+ - 172.16.3.251
+ tenant:
+ - 172.16.0.251
+ NovaComputeIPs:
+ # Each compute will get an IP from the lists below, first compute, first IP
+ internal_api:
+ - 172.16.2.252
+ storage:
+ - 172.16.1.252
+ tenant:
+ - 172.16.0.252
+ CephStorageIPs:
+ # Each ceph node will get an IP from the lists below, first node, first IP
+ storage:
+ - 172.16.1.253
+ storage_mgmt:
+ - 172.16.3.253
+ SwiftStorageIPs:
+ # Each swift node will get an IP from the lists below, first node, first IP
+ internal_api:
+ - 172.16.2.254
+ storage:
+ - 172.16.1.254
+ storage_mgmt:
+ - 172.16.3.254
+ BlockStorageIPs:
+ # Each cinder node will get an IP from the lists below, first node, first IP
+ internal_api:
+ - 172.16.2.250
+ storage:
+ - 172.16.1.250
+ storage_mgmt:
+ - 172.16.3.250
diff --git a/environments/major-upgrade-script-delivery.yaml b/environments/major-upgrade-pacemaker-init.yaml
index ba128d84..d98a9cdd 100644
--- a/environments/major-upgrade-script-delivery.yaml
+++ b/environments/major-upgrade-pacemaker-init.yaml
@@ -2,7 +2,7 @@ parameter_defaults:
UpgradeLevelNovaCompute: liberty
resource_registry:
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_script_delivery.yaml
+ OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker_init.yaml
OS::TripleO::Tasks::PackageUpdate: ../extraconfig/tasks/yum_update_noop.yaml
OS::TripleO::ControllerPostDeployment: OS::Heat::None
OS::TripleO::ComputePostDeployment: OS::Heat::None
diff --git a/environments/network-isolation-v6.yaml b/environments/network-isolation-v6.yaml
index 4c071747..599a08b1 100644
--- a/environments/network-isolation-v6.yaml
+++ b/environments/network-isolation-v6.yaml
@@ -43,6 +43,8 @@ resource_registry:
OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml
parameter_defaults:
+ # Enable IPv6 for Ceph.
+ CephIPv6: True
# Enable IPv6 for Corosync. This is required when Corosync is using an IPv6 IP in the cluster.
CorosyncIPv6: True
# Enable IPv6 for MongoDB. This is required when MongoDB is using an IPv6 IP.
diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh
new file mode 100644
index 00000000..07666245
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_block_storage.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+#
+# This runs an upgrade of Cinder Block Storage nodes.
+#
+set -eu
+
+yum -y install python-zaqarclient # needed for os-collect-config
+yum -y -q update
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
new file mode 100644
index 00000000..de42b16d
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# This delivers the ceph-storage upgrade script to be invoked as part of the tripleo
+# major upgrade workflow.
+#
+set -eu
+
+UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
+
+cat > $UPGRADE_SCRIPT << ENDOFCAT
+### DO NOT MODIFY THIS FILE
+### This file is automatically delivered to the ceph-storage nodes as part of the
+### tripleo upgrades workflow
+
+
+function systemctl_ceph {
+ action=\$1
+ systemctl \$action ceph
+}
+
+# "so that mirrors aren't rebalanced as if the OSD died" - gfidente
+ceph osd set noout
+
+systemctl_ceph stop
+yum -y install python-zaqarclient # needed for os-collect-config
+yum -y update
+systemctl_ceph start
+
+ceph osd unset noout
+
+ENDOFCAT
+
+# ensure the permissions are OK
+chmod 0755 $UPGRADE_SCRIPT
+
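Note the backslash-escaped `\$` occurrences inside the heredoc above: they keep `$1` and `$action` literal in the generated /root/tripleo_upgrade_node.sh, so those variables expand only when the script is later run on the ceph-storage node, while any unescaped expansion would be resolved at delivery time. A minimal sketch of that pattern, with hypothetical file and variable names:

```
# Minimal sketch of the heredoc-escaping pattern used above (hypothetical names).
OUT=/tmp/delivered_upgrade.sh
DELIVERY_HOST=$(hostname)            # expanded now, while the script is generated

cat > "$OUT" << ENDOFCAT
#!/bin/bash
# generated on ${DELIVERY_HOST}      (baked in at generation time)
action=\$1                           # \$1 stays literal until the node runs this
echo "running \$action on \$(hostname)"
ENDOFCAT

chmod 0755 "$OUT"
bash "$OUT" stop                     # prints: running stop on <target hostname>
```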
diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh
index 0f6d091e..931f4f42 100644
--- a/extraconfig/tasks/major_upgrade_object_storage.sh
+++ b/extraconfig/tasks/major_upgrade_object_storage.sh
@@ -14,17 +14,18 @@ cat > $UPGRADE_SCRIPT << ENDOFCAT
function systemctl_swift {
- action=$1
+ action=\$1
for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \
openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \
- openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy; do
- systemctl $action $S
+ openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object; do
+ systemctl \$action \$S
done
}
systemctl_swift stop
+yum -y install python-zaqarclient # needed for os-collect-config
yum -y update
systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index b867d107..4af3186c 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -50,6 +50,20 @@ resources:
config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
input_values: {get_param: input_values}
+ BlockStorageUpgradeConfig:
+ type: OS::Heat::SoftwareConfig
+ depends_on: ControllerPacemakerUpgradeDeployment_Step1
+ properties:
+ group: script
+ config: {get_file: major_upgrade_block_storage.sh}
+
+ BlockStorageUpgradeDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: blockstorage_servers}
+ config: {get_resource: BlockStorageUpgradeConfig}
+ input_values: {get_param: input_values}
+
ControllerPacemakerUpgradeConfig_Step2:
type: OS::Heat::SoftwareConfig
properties:
@@ -63,7 +77,7 @@ resources:
ControllerPacemakerUpgradeDeployment_Step2:
type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step1
+ depends_on: BlockStorageUpgradeDeployment
properties:
servers: {get_param: controller_servers}
config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.yaml
new file mode 100644
index 00000000..f662bc3d
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_pacemaker_init.yaml
@@ -0,0 +1,128 @@
+heat_template_version: 2014-10-16
+description: 'Upgrade for Pacemaker deployments'
+
+parameters:
+
+ controller_servers:
+ type: json
+ compute_servers:
+ type: json
+ blockstorage_servers:
+ type: json
+ objectstorage_servers:
+ type: json
+ cephstorage_servers:
+ type: json
+ input_values:
+ type: json
+ description: input values for the software deployments
+
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
+ UpgradeLevelNovaCompute:
+ type: string
+ description: Nova Compute upgrade level
+ default: ''
+
+resources:
+
+ UpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - get_param: UpgradeInitCommand
+
+ UpgradeInitControllerDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: controller_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ UpgradeInitComputeDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: compute_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ UpgradeInitBlockStorageDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: blockstorage_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ UpgradeInitObjectStorageDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: objectstorage_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ UpgradeInitCephStorageDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: cephstorage_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ # TODO(jistr): for Mitaka->Newton upgrades and further we can use
+ # map_merge with input_values instead of feeding params into scripts
+ # via str_replace on bash snippets
+
+ ComputeDeliverUpgradeScriptConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - str_replace:
+ template: |
+ #!/bin/bash
+ upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
+ params:
+ UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
+ - get_file: major_upgrade_compute.sh
+
+ ComputeDeliverUpgradeScriptDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: compute_servers}
+ config: {get_resource: ComputeDeliverUpgradeScriptConfig}
+ input_values: {get_param: input_values}
+
+ ObjectStorageDeliverUpgradeScriptConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: major_upgrade_object_storage.sh}
+
+ ObjectStorageDeliverUpgradeScriptDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: objectstorage_servers}
+ config: {get_resource: ObjectStorageDeliverUpgradeScriptConfig}
+ input_values: {get_param: input_values}
+
+ CephStorageDeliverUpgradeScriptConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: major_upgrade_ceph_storage.sh}
+
+ CephStorageDeliverUpgradeScriptDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: cephstorage_servers}
+ config: {get_resource: CephStorageDeliverUpgradeScriptConfig}
+ input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/major_upgrade_script_delivery.yaml b/extraconfig/tasks/major_upgrade_script_delivery.yaml
deleted file mode 100644
index f7faa7fc..00000000
--- a/extraconfig/tasks/major_upgrade_script_delivery.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-heat_template_version: 2014-10-16
-description: 'Upgrade for Pacemaker deployments'
-
-parameters:
-
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-
- UpgradeLevelNovaCompute:
- type: string
- description: Nova Compute upgrade level
- default: ''
-
-resources:
- # TODO(jistr): for Mitaka->Newton upgrades and further we can use
- # map_merge with input_values instead of feeding params into scripts
- # via str_replace on bash snippets
-
- ComputeDeliverUpgradeScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - get_file: major_upgrade_compute.sh
-
- ComputeDeliverUpgradeScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: compute_servers}
- config: {get_resource: ComputeDeliverUpgradeScriptConfig}
- input_values: {get_param: input_values}
-
-
- ObjectStoreDeliverUpgradeScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: major_upgrade_object_storage.sh}
-
- ObjectStoreDeliverUpgradeScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: objectstorage_servers}
- config: {get_resource: ObjectStoreDeliverUpgradeScriptConfig}
- input_values: {get_param: input_values}
-
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index ee3216e4..0808763e 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -39,10 +39,23 @@ function echo_error {
}
function systemctl_swift {
+ services=( openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \
+ openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \
+ openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy )
action=$1
- for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \
- openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \
- openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy; do
- systemctl $action $S
+ case $action in
+ stop)
+ services=$(systemctl | grep swift | grep running | awk '{print $1}')
+ ;;
+ start)
+ enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml 'enable_swift_storage')
+ if [[ $enable_swift_storage != "true" ]]; then
+ services=( openstack-swift-proxy )
+ fi
+ ;;
+ *) services=() ;; # for safetly, should never happen
+ esac
+ for S in ${services[@]}; do
+ systemctl $action $S
done
}
diff --git a/network/management.yaml b/network/management.yaml
index 9bfaafa2..1800b57a 100644
--- a/network/management.yaml
+++ b/network/management.yaml
@@ -16,7 +16,7 @@ parameters:
type: string
ManagementNetAdminStateUp:
default: false
- description: This admin state of of the network.
+ description: The admin state of the network.
type: boolean
ManagementNetEnableDHCP:
default: false
diff --git a/network/ports/external_from_pool.yaml b/network/ports/external_from_pool.yaml
index 98f2aa35..867176e3 100644
--- a/network/ports/external_from_pool.yaml
+++ b/network/ports/external_from_pool.yaml
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
diff --git a/network/ports/from_service.yaml b/network/ports/from_service.yaml
index 359d77a7..3d61910e 100644
--- a/network/ports/from_service.yaml
+++ b/network/ports/from_service.yaml
@@ -8,19 +8,19 @@ parameters:
description: Name of the service to lookup
default: ''
type: string
- NetworkName: # Here for compatability with ctlplane_vip.yaml
+ NetworkName: # Here for compatibility with ctlplane_vip.yaml
description: Name of the network where the VIP will be created
default: ctlplane
type: string
- PortName: # Here for compatability with ctlplane_vip.yaml
+ PortName: # Here for compatibility with ctlplane_vip.yaml
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with ctlplane_vip.yaml
+ ControlPlaneIP: # Here for compatibility with ctlplane_vip.yaml
description: IP address on the control plane
default: ''
type: string
- ControlPlaneNetwork: # Here for compatability with ctlplane_vip.yaml
+ ControlPlaneNetwork: # Here for compatibility with ctlplane_vip.yaml
description: The name of the undercloud Neutron control plane
default: ctlplane
type: string
diff --git a/network/ports/internal_api_from_pool.yaml b/network/ports/internal_api_from_pool.yaml
index c7b04847..d7b67e26 100644
--- a/network/ports/internal_api_from_pool.yaml
+++ b/network/ports/internal_api_from_pool.yaml
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
diff --git a/network/ports/storage_from_pool.yaml b/network/ports/storage_from_pool.yaml
index dfc9e752..0a3d394c 100644
--- a/network/ports/storage_from_pool.yaml
+++ b/network/ports/storage_from_pool.yaml
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
diff --git a/network/ports/storage_mgmt_from_pool.yaml b/network/ports/storage_mgmt_from_pool.yaml
index 9c757a6e..c3f0f4e2 100644
--- a/network/ports/storage_mgmt_from_pool.yaml
+++ b/network/ports/storage_mgmt_from_pool.yaml
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
diff --git a/network/ports/tenant_from_pool.yaml b/network/ports/tenant_from_pool.yaml
index d5f3156e..d5fd7080 100644
--- a/network/ports/tenant_from_pool.yaml
+++ b/network/ports/tenant_from_pool.yaml
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
diff --git a/overcloud.yaml b/overcloud.yaml
index e540b9c4..5ad68913 100644
--- a/overcloud.yaml
+++ b/overcloud.yaml
@@ -278,13 +278,11 @@ parameters:
type: string
default: unset
description: Salt for the rabbit cookie, change this to force the randomly generated rabbit cookie to change.
- # FIXME: 'guest' is provisioned in RabbitMQ by default, we should create a user if these are changed
RabbitUserName:
default: guest
description: The username for RabbitMQ
type: string
RabbitPassword:
- default: guest
description: The password for RabbitMQ
type: string
hidden: true
@@ -307,6 +305,10 @@ parameters:
default: false
description: Enable IPv6 in RabbitMQ
type: boolean
+ RedisPassword:
+ description: The password for Redis
+ type: string
+ hidden: true
SnmpdReadonlyUserName:
default: ro_snmp_user
description: The user name for SNMPd with readonly rights running on all Overcloud nodes
@@ -482,7 +484,7 @@ parameters:
type: string
hidden: true
HeatStackDomainAdminPassword:
- description: Password for heat_domain_admin user.
+ description: Password for heat_stack_domain_admin user.
type: string
hidden: true
InstanceNameTemplate:
@@ -988,6 +990,7 @@ resources:
RabbitClientPort: {get_param: RabbitClientPort}
RabbitFDLimit: {get_param: RabbitFDLimit}
RabbitIPv6: {get_param: RabbitIPv6}
+ RedisPassword: {get_param: RedisPassword}
SaharaPassword: {get_param: SaharaPassword}
SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName}
SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword}
@@ -1083,6 +1086,7 @@ resources:
NovaComputeLibvirtType: {get_param: NovaComputeLibvirtType}
NovaComputeLibvirtVifDriver: {get_param: NovaComputeLibvirtVifDriver}
NovaEnableRbdBackend: {get_param: NovaEnableRbdBackend}
+ NovaIPv6: {get_param: NovaIPv6}
NovaPublicIP: {get_attr: [VipMap, net_ip_map, external]}
NovaPassword: {get_param: NovaPassword}
NovaOVSBridge: {get_param: NovaOVSBridge}
@@ -1107,6 +1111,7 @@ resources:
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: NovaComputeSchedulerHints}
+ NodeIndex: '%index%'
BlockStorage:
type: OS::Heat::ResourceGroup
@@ -1148,6 +1153,7 @@ resources:
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: BlockStorageSchedulerHints}
+ NodeIndex: '%index%'
ObjectStorage:
type: OS::Heat::ResourceGroup
@@ -1180,6 +1186,7 @@ resources:
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: ObjectStorageSchedulerHints}
+ NodeIndex: '%index%'
CephStorage:
type: OS::Heat::ResourceGroup
@@ -1207,6 +1214,7 @@ resources:
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: CephStorageSchedulerHints}
+ NodeIndex: '%index%'
ControllerIpListMap:
type: OS::TripleO::Network::Ports::NetIpListMap
diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml
index 96198c3f..dc2f98ed 100644
--- a/puppet/ceph-cluster-config.yaml
+++ b/puppet/ceph-cluster-config.yaml
@@ -39,6 +39,9 @@ parameters:
CephClientUserName:
default: openstack
type: string
+ CephIPv6:
+ default: False
+ type: boolean
resources:
CephClusterConfigImpl:
@@ -50,15 +53,25 @@ resources:
datafiles:
ceph_cluster:
mapped_data:
+ ceph_ipv6: {get_param: CephIPv6}
ceph_storage_count: {get_param: ceph_storage_count}
ceph_mon_initial_members:
list_join:
- ','
- {get_param: ceph_mon_names}
- ceph::profile::params::mon_host:
+ ceph_mon_host:
list_join:
- ','
- {get_param: ceph_mon_ips}
+ ceph_mon_host_v6:
+ str_replace:
+ template: "'[IPS_LIST]'"
+ params:
+ IPS_LIST:
+ list_join:
+ - '],['
+ - {get_param: ceph_mon_ips}
+ ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
ceph::profile::params::fsid: {get_param: ceph_fsid}
ceph::profile::params::mon_key: {get_param: ceph_mon_key}
# We should use a separated key for the non-admin clients
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index 88120b9c..d2988926 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -62,6 +62,9 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
+ CephStorageIPs:
+ default: {}
+ type: json
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -90,6 +93,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ NodeIndex:
+ type: number
+ default: 0
resources:
CephStorage:
@@ -135,31 +141,43 @@ resources:
type: OS::TripleO::CephStorage::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::CephStorage::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::CephStorage::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::CephStorage::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::CephStorage::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::CephStorage::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
NetworkConfig:
type: OS::TripleO::CephStorage::Net::SoftwareConfig
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index 0c22b575..878b31c2 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -38,6 +38,9 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
+ BlockStorageIPs:
+ default: {}
+ type: json
Flavor:
description: Flavor for block storage nodes to request when deploying.
type: string
@@ -48,7 +51,6 @@ parameters:
description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
RabbitPassword:
- default: 'guest'
type: string
hidden: true
RabbitUserName:
@@ -141,6 +143,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ NodeIndex:
+ type: number
+ default: 0
resources:
@@ -187,31 +192,43 @@ resources:
type: OS::TripleO::BlockStorage::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::BlockStorage::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::BlockStorage::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::BlockStorage::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::BlockStorage::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::BlockStorage::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
NetworkConfig:
type: OS::TripleO::BlockStorage::Net::SoftwareConfig
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 58ca71e7..f5c848c8 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -195,6 +195,9 @@ parameters:
default: 'dvr_snat'
description: Agent mode for the neutron-l3-agent on the controller hosts
type: string
+ NodeIndex:
+ type: number
+ default: 0
NovaApiHost:
type: string
default: '' # Has to be here because of the ignored empty value bug
@@ -207,6 +210,9 @@ parameters:
NovaCompute specific configuration to inject into the cluster. Same
structure as ExtraConfig.
type: json
+ NovaComputeIPs:
+ default: {}
+ type: json
NovaComputeLibvirtType:
type: string
default: kvm
@@ -218,6 +224,10 @@ parameters:
default: false
description: Whether to enable or not the Rbd backend for Nova
type: boolean
+ NovaIPv6:
+ default: false
+ description: Enable IPv6 features in Nova
+ type: boolean
NovaPassword:
description: The password for the nova service account, used by nova-api.
type: string
@@ -241,7 +251,6 @@ parameters:
type: string
default: '' # Has to be here because of the ignored empty value bug
RabbitPassword:
- default: guest
description: The password for RabbitMQ
type: string
hidden: true
@@ -378,31 +387,43 @@ resources:
type: OS::TripleO::Compute::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::Compute::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::Compute::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::Compute::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::Compute::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::Compute::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
NetIpMap:
type: OS::TripleO::Network::Ports::NetIpMap
@@ -478,6 +499,7 @@ resources:
raw_data: {get_file: hieradata/compute.yaml}
mapped_data:
cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend}
+ nova::use_ipv6: {get_input: nova_ipv6}
nova::debug: {get_input: debug}
nova::rabbit_userid: {get_input: rabbit_username}
nova::rabbit_password: {get_input: rabbit_password}
@@ -567,10 +589,18 @@ resources:
nova_api_host: {get_param: NovaApiHost}
nova_password: {get_param: NovaPassword}
nova_enable_rbd_backend: {get_param: NovaEnableRbdBackend}
+ nova_ipv6: {get_param: NovaIPv6}
cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
nova_vnc_proxyclient_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaVncProxyNetwork]}]}
nova_vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
- nova_vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host]}
+ # Remove brackets that may come if the IP address is IPv6.
+ # For DNS names and IPv4, this will just get the NovaVNCProxyPublic value
+ nova_vncproxy_host:
+ str_replace:
+ template: {get_param: [EndpointMap, NovaVNCProxyPublic, host]}
+ params:
+ '[': ''
+ ']': ''
nova_vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
nova_ovs_bridge: {get_param: NovaOVSBridge}
nova_security_group_api: {get_param: NovaSecurityGroupAPI}
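
The str_replace in the compute.yaml hunk above removes the square brackets that an IPv6 NovaVNCProxyPublic host would carry (for example "[2001:db8::1]"); DNS names and IPv4 addresses pass through unchanged. A rough bash equivalent of the same transformation, for illustration only (the example hosts are hypothetical):

```
# Rough bash equivalent of the str_replace above; illustration only.
strip_brackets() {
    local host="$1"
    host="${host//\[/}"    # drop every '['
    host="${host//\]/}"    # drop every ']'
    echo "${host}"
}

strip_brackets '[2001:db8::1]'          # -> 2001:db8::1
strip_brackets 'overcloud.example.com'  # -> unchanged
strip_brackets '192.0.2.10'             # -> unchanged
```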
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index adb55896..28aac934 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -245,7 +245,7 @@ parameters:
type: string
hidden: true
HeatStackDomainAdminPassword:
- description: Password for heat_domain_admin user.
+ description: Password for heat_stack_domain_admin user.
type: string
hidden: true
HeatAuthEncryptionKey:
@@ -602,7 +602,6 @@ parameters:
default: '' # Has to be here because of the ignored empty value bug
hidden: true
RabbitPassword:
- default: guest
description: The password for RabbitMQ
type: string
hidden: true
@@ -628,6 +627,10 @@ parameters:
default: false
description: Enable IPv6 in RabbitMQ
type: boolean
+ RedisPassword:
+ type: string
+ description: The password to access the Redis service
+ hidden: true
RedisVirtualIP:
type: string
default: '' # Has to be here because of the ignored empty value bug
@@ -1158,7 +1161,8 @@ resources:
- ''
- - 'redis://'
- {get_param: RedisVirtualIPUri}
- - ':6379'
+ - ':6379/?password='
+ - {get_param: RedisPassword}
ceilometer_dsn:
list_join:
- ''
@@ -1199,16 +1203,9 @@ resources:
rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
rabbit_client_port: {get_param: RabbitClientPort}
rabbit_ipv6: {get_param: RabbitIPv6}
+ rabbit_fd_limit: {get_param: RabbitFDLimit}
mongodb_no_journal: {get_param: MongoDbNoJournal}
mongodb_ipv6: {get_param: MongoDbIPv6}
- # We need to force this into quotes or hiera will return integer causing
- # the puppet module validation regexp to fail.
- # Remove when: https://github.com/puppetlabs/puppetlabs-rabbitmq/pull/401
- rabbit_fd_limit:
- str_replace:
- template: "'LIMIT'"
- params:
- LIMIT: {get_param: RabbitFDLimit}
ntp_servers: {get_param: NtpServer}
timezone: {get_param: TimeZone}
control_virtual_interface: {get_param: ControlVirtualInterface}
@@ -1251,6 +1248,7 @@ resources:
horizon_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
rabbitmq_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
redis_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RedisNetwork]}]}
+ redis_password: {get_param: RedisPassword}
redis_vip: {get_param: RedisVirtualIP}
sahara_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
memcached_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
@@ -1304,7 +1302,7 @@ resources:
mapped_data:
ceph::profile::params::cluster_network: {get_input: ceph_cluster_network}
ceph::profile::params::public_network: {get_input: ceph_public_network}
- ceph::mon::public_addr: {get_input: ceph_public_ip}
+ ceph::profile::params::public_addr: {get_input: ceph_public_ip}
database:
raw_data: {get_file: hieradata/database.yaml}
object:
@@ -1419,9 +1417,11 @@ resources:
heat::debug: {get_input: debug}
heat::db::mysql::password: {get_input: heat_password}
heat_enable_db_purge: {get_input: heat_enable_db_purge}
+ heat::keystone::domain::domain_password: {get_input: heat_stack_domain_admin_password}
# Keystone
keystone::admin_token: {get_input: admin_token}
+ keystone::roles::admin::password: {get_input: admin_password}
keystone_ca_certificate: {get_input: keystone_ca_certificate}
keystone_signing_key: {get_input: keystone_signing_key}
keystone_signing_certificate: {get_input: keystone_signing_certificate}
@@ -1620,6 +1620,9 @@ resources:
rabbit_ipv6: {get_input: rabbit_ipv6}
# Redis
redis::bind: {get_input: redis_network}
+ redis::requirepass: {get_input: redis_password}
+ redis::masterauth: {get_input: redis_password}
+ redis::sentinel_auth_pass: {get_input: redis_password}
redis_vip: {get_input: redis_vip}
# Firewall
tripleo::firewall::manage_firewall: {get_input: manage_firewall}
diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml
index ebd6c251..312d49a0 100644
--- a/puppet/extraconfig/ceph/ceph-external-config.yaml
+++ b/puppet/extraconfig/ceph/ceph-external-config.yaml
@@ -41,6 +41,9 @@ parameters:
CephClientUserName:
default: openstack
type: string
+ CephIPv6:
+ default: False
+ type: boolean
resources:
CephClusterConfigImpl:
@@ -54,7 +57,9 @@ resources:
mapped_data:
ceph_storage_count: {get_param: ceph_storage_count}
enable_external_ceph: true
- ceph::profile::params::mon_host: {get_param: ceph_external_mon_ips}
+ ceph_ipv6: {get_param: CephIPv6}
+ ceph_mon_host: {get_param: ceph_external_mon_ips}
+ ceph_mon_host_v6: {get_param: ceph_external_mon_ips}
ceph::profile::params::fsid: {get_param: ceph_fsid}
ceph::profile::params::client_keys:
str_replace:
@@ -72,6 +77,7 @@ resources:
NOVA_POOL: {get_param: NovaRbdPoolName}
CINDER_POOL: {get_param: CinderRbdPoolName}
GLANCE_POOL: {get_param: GlanceRbdPoolName}
+ ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
index 905f196d..9b6981bb 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-11-12
+heat_template_version: 2015-10-15
description: Configure hieradata for Cinder Dell Storage Center configuration
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
index c73608f1..36db334e 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-11-06
+heat_template_version: 2015-10-15
description: Configure hieradata for Cinder Eqlx configuration
diff --git a/puppet/extraconfig/tls/ca-inject.yaml b/puppet/extraconfig/tls/ca-inject.yaml
index aab42849..f955034d 100644
--- a/puppet/extraconfig/tls/ca-inject.yaml
+++ b/puppet/extraconfig/tls/ca-inject.yaml
@@ -4,7 +4,7 @@ description: >
This is a template which will inject the trusted anchor.
parameters:
- # Can be overriden via parameter_defaults in the environment
+ # Can be overridden via parameter_defaults in the environment
SSLRootCertificate:
description: >
The content of a CA's SSL certificate file in PEM format.
diff --git a/puppet/extraconfig/tls/tls-cert-inject.yaml b/puppet/extraconfig/tls/tls-cert-inject.yaml
index 20bb3737..77b11378 100644
--- a/puppet/extraconfig/tls/tls-cert-inject.yaml
+++ b/puppet/extraconfig/tls/tls-cert-inject.yaml
@@ -5,7 +5,7 @@ description: >
for the load balancer using the given parameters.
parameters:
- # Can be overriden via parameter_defaults in the environment
+ # Can be overridden via parameter_defaults in the environment
SSLCertificate:
description: >
The content of the SSL certificate (without Key) in PEM format.
@@ -21,7 +21,7 @@ parameters:
type: string
hidden: true
- # Can be overriden by parameter_defaults if the user wants to try deploying
+ # Can be overridden by parameter_defaults if the user wants to try deploying
# this in a distro that doesn't support this path.
DeployedSSLCertificatePath:
default: '/etc/pki/tls/private/overcloud_endpoint.pem'
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index 572eef9f..865210c9 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -7,7 +7,6 @@ nova::compute::instance_usage_audit: true
nova::compute::instance_usage_audit_period: 'hour'
nova::compute::vnc_enabled: true
-nova::compute::libvirt::vncserver_listen: '0.0.0.0'
nova::compute::libvirt::migration_support: true
nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index e80bee07..288d224f 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -1,4 +1,5 @@
# Hiera data here applies to all controller nodes
+
nova::api::enabled: true
nova::conductor::enabled: true
nova::consoleauth::enabled: true
@@ -29,6 +30,9 @@ redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
+# keystone
+keystone::roles::admin::email: 'root@localhost'
+
# service tenant
glance::api::keystone_tenant: 'service'
glance::registry::keystone_tenant: 'service'
@@ -110,6 +114,9 @@ heat::cron::purge_deleted::age: 30
heat::cron::purge_deleted::age_type: 'days'
heat::cron::purge_deleted::maxdelay: 3600
heat::cron::purge_deleted::destination: '/dev/null'
+heat::keystone::domain::domain_name: 'heat_stack'
+heat::keystone::domain::domain_admin: 'heat_stack_domain_admin'
+heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost'
# pacemaker
pacemaker::corosync::cluster_name: 'tripleo_cluster'
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index 0db5b45a..fd7faff1 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -40,6 +40,14 @@ if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
} -> Class['ceph::profile::osd']
}
+if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+} else {
+ $mon_host = hiera('ceph_mon_host')
+}
+class { '::ceph::profile::params':
+ mon_host => $mon_host,
+}
include ::ceph::conf
include ::ceph::profile::client
include ::ceph::profile::osd
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index 99220ffd..13ae31c5 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -53,15 +53,17 @@ include ::nova
include ::nova::config
include ::nova::compute
-nova_config {
- 'DEFAULT/my_ip': value => $ipaddress;
- 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
- 'DEFAULT/host': value => $fqdn;
-}
-
$rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false)
$rbd_persistent_storage = hiera('rbd_persistent_storage', false)
if $rbd_ephemeral_storage or $rbd_persistent_storage {
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
include ::ceph::conf
include ::ceph::profile::client
@@ -83,7 +85,27 @@ if hiera('cinder_enable_nfs_backend', false) {
package {'nfs-utils': } -> Service['nova-compute']
}
-include ::nova::compute::libvirt
+if str2bool(hiera('nova::use_ipv6', false)) {
+ $vncserver_listen = '::0'
+} else {
+ $vncserver_listen = '0.0.0.0'
+}
+class { '::nova::compute::libvirt' :
+ vncserver_listen => $vncserver_listen,
+}
+
+nova_config {
+ 'DEFAULT/my_ip': value => $ipaddress;
+ 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
+ 'DEFAULT/host': value => $fqdn;
+ # TUNNELLED mode provides a security enhancement when using shared storage but is not
+ # supported when not using shared storage.
+ # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
+ # In future versions of QEMU (2.6, mostly), Dan's native encryption
+ # work will obsolete the need to use TUNNELLED transport mode.
+ 'libvirt/live_migration_tunnelled': value => $rbd_ephemeral_storage;
+}
+
if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
file {'/etc/libvirt/qemu.conf':
ensure => present,
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index ecab5b5a..5556a40c 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -46,16 +46,26 @@ if hiera('step') >= 2 {
# MongoDB
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
include ::mongodb::globals
-
+ include ::mongodb::client
include ::mongodb::server
- $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ # NOTE(gfidente): We need to pass the list of IPv6 addresses *with* port and
+ # without the brackets as 'members' argument for the 'mongodb_replset'
+ # resource.
+ if str2bool(hiera('mongodb::server::ipv6', false)) {
+ $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
+ $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ } else {
+ $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ }
$mongo_node_string = join($mongo_node_ips_with_port, ',')
$mongodb_replset = hiera('mongodb::server::replset')
$ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
if downcase(hiera('bootstrap_nodeid')) == $::hostname {
mongodb_replset { $mongodb_replset :
- members => $mongo_node_ips_with_port,
+ members => $mongo_node_ips_with_port_nobr,
}
}
}
@@ -151,8 +161,15 @@ if hiera('step') >= 2 {
$enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
if $enable_ceph {
+ $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
class { '::ceph::profile::params':
- mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
+ mon_initial_members => $mon_initial_members,
+ mon_host => $mon_host,
}
include ::ceph::conf
include ::ceph::profile::mon
@@ -178,6 +195,14 @@ if hiera('step') >= 2 {
}
if str2bool(hiera('enable_external_ceph', false)) {
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
include ::ceph::conf
include ::ceph::profile::client
}
@@ -638,6 +663,23 @@ if hiera('step') >= 4 {
if $heat_enable_db_purge {
include ::heat::cron::purge_deleted
}
+
+ if downcase(hiera('bootstrap_nodeid')) == $::hostname {
+ include ::keystone::roles::admin
+ # Class ::heat::keystone::domain has to run on bootstrap node
+ # because it creates DB entities via API calls.
+ include ::heat::keystone::domain
+
+ Class['::keystone::roles::admin'] -> Class['::heat::keystone::domain']
+ } else {
+ # On non-bootstrap node we don't need to create Keystone resources again
+ class { '::heat::keystone::domain':
+ manage_domain => false,
+ manage_user => false,
+ manage_role => false,
+ }
+ }
+
} #END STEP 4
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')])
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index abc0543f..db3d8652 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -34,7 +34,7 @@ $enable_load_balancer = hiera('enable_load_balancer', true)
# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
-# (occurences of this variable will be gradually replaced with false)
+# (occurrences of this variable will be gradually replaced with false)
$non_pcmk_start = hiera('step') >= 4
if hiera('step') >= 1 {
@@ -127,6 +127,7 @@ if hiera('step') >= 1 {
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
include ::mongodb::globals
+ include ::mongodb::client
class { '::mongodb::server' :
service_manage => false,
}
@@ -201,8 +202,19 @@ if hiera('step') >= 1 {
if hiera('step') >= 2 {
# NOTE(gfidente): the following vars are needed on all nodes so they
- # need to stay out of pacemaker_master conditional
- $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ # need to stay out of pacemaker_master conditional.
+ # The addresses mangling will hopefully go away when we'll be able to
+ # configure the connection string via hostnames, until then, we need to pass
+ # the list of IPv6 addresses *with* port and without the brackets as 'members'
+ # argument for the 'mongodb_replset' resource.
+ if str2bool(hiera('mongodb::server::ipv6', false)) {
+ $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
+ $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ } else {
+ $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ }
$mongodb_replset = hiera('mongodb::server::replset')
if $pacemaker_master {
@@ -431,7 +443,7 @@ if hiera('step') >= 2 {
before => Mongodb_replset[$mongodb_replset],
}
mongodb_replset { $mongodb_replset :
- members => $mongo_node_ips_with_port,
+ members => $mongo_node_ips_with_port_nobr,
}
}
@@ -526,8 +538,15 @@ MYSQL_HOST=localhost\n",
$enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
if $enable_ceph {
+ $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
class { '::ceph::profile::params':
- mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
+ mon_initial_members => $mon_initial_members,
+ mon_host => $mon_host,
}
include ::ceph::conf
include ::ceph::profile::mon
@@ -553,6 +572,14 @@ MYSQL_HOST=localhost\n",
}
if str2bool(hiera('enable_external_ceph', false)) {
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
include ::ceph::conf
include ::ceph::profile::client
}
@@ -1825,6 +1852,16 @@ if hiera('step') >= 5 {
class {'::keystone::endpoint' :
require => Pacemaker::Resource::Service[$::apache::params::service_name],
}
+ include ::heat::keystone::domain
+ Class['::keystone::roles::admin'] -> Class['::heat::keystone::domain']
+
+ } else {
+ # On non-master controller we don't need to create Keystone resources again
+ class { '::heat::keystone::domain':
+ manage_domain => false,
+ manage_user => false,
+ manage_role => false,
+ }
}
} #END STEP 5
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 3b04be83..c26aca77 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -83,6 +83,9 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
+ SwiftStorageIPs:
+ default: {}
+ type: json
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -111,6 +114,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ NodeIndex:
+ type: number
+ default: 0
resources:
@@ -156,31 +162,43 @@ resources:
type: OS::TripleO::SwiftStorage::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::SwiftStorage::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::SwiftStorage::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::SwiftStorage::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::SwiftStorage::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::SwiftStorage::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
NetworkConfig:
type: OS::TripleO::ObjectStorage::Net::SoftwareConfig