-rw-r--r--  capabilities-map.yaml (renamed from capabilities_map.yaml) | 6
-rw-r--r--  docker/compute-post.yaml | 52
-rw-r--r--  docker/firstboot/install_docker_agents.yaml | 7
-rw-r--r--  docker/firstboot/start_docker_agents.sh | 40
-rw-r--r--  environments/docker-network-isolation.yaml | 6
-rw-r--r--  environments/docker.yaml | 6
-rw-r--r--  environments/enable-tls.yaml | 14
-rw-r--r--  environments/external-loadbalancer-vip-v6.yaml | 38
-rw-r--r--  environments/external-loadbalancer-vip.yaml | 4
-rw-r--r--  environments/ips-from-pool-all.yaml | 75
-rw-r--r--  environments/major-upgrade-pacemaker-converge.yaml | 6
-rw-r--r--  environments/major-upgrade-pacemaker-init.yaml | 11
-rw-r--r--  environments/major-upgrade-pacemaker.yaml | 11
-rw-r--r--  environments/manage-firewall.yaml | 2
-rw-r--r--  environments/mongodb-nojournal.yaml | 2
-rw-r--r--  environments/net-bond-with-vlans-no-external.yaml | 2
-rw-r--r--  environments/net-bond-with-vlans-v6.yaml | 20
-rw-r--r--  environments/net-bond-with-vlans.yaml | 4
-rw-r--r--  environments/net-multiple-nics-v6.yaml | 13
-rw-r--r--  environments/net-single-nic-linux-bridge-with-vlans.yaml | 19
-rw-r--r--  environments/net-single-nic-with-vlans-no-external.yaml | 2
-rw-r--r--  environments/net-single-nic-with-vlans-v6.yaml | 19
-rw-r--r--  environments/net-single-nic-with-vlans.yaml | 4
-rw-r--r--  environments/network-environment.yaml | 50
-rw-r--r--  environments/network-isolation-v6.yaml | 57
-rw-r--r--  environments/network-isolation.yaml | 2
-rw-r--r--  environments/neutron-ml2-bigswitch.yaml | 4
-rw-r--r--  environments/neutron-ml2-cisco-nexus-ucsm.yaml | 2
-rw-r--r--  environments/neutron-opencontrail.yaml | 22
-rwxr-xr-x  environments/neutron-plumgrid.yaml | 29
-rw-r--r--  environments/puppet-ceph-devel.yaml | 8
-rw-r--r--  environments/puppet-ceph-external.yaml | 4
-rw-r--r--  environments/puppet-pacemaker.yaml | 6
-rw-r--r--  environments/puppet-tenant-vlan.yaml | 4
-rw-r--r--  environments/storage-environment.yaml | 2
-rw-r--r--  environments/updates/update-from-keystone-admin-internal-api.yaml | 2
-rw-r--r--  extraconfig/all_nodes/swap-partition.yaml | 90
-rw-r--r--  extraconfig/all_nodes/swap.yaml | 108
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml | 1
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml | 4
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration | 44
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/scripts/rhel-unregistration | 2
-rw-r--r--  extraconfig/tasks/major_upgrade_block_storage.sh | 8
-rw-r--r--  extraconfig/tasks/major_upgrade_ceph_storage.sh | 35
-rw-r--r--  extraconfig/tasks/major_upgrade_compute.sh | 26
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh | 55
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh | 67
-rw-r--r--  extraconfig/tasks/major_upgrade_object_storage.sh | 39
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml | 85
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_init.yaml | 131
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh | 75
-rw-r--r--  extraconfig/tasks/noop.yaml | 10
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh | 62
-rwxr-xr-x  extraconfig/tasks/pacemaker_maintenance_mode.sh | 19
-rwxr-xr-x  extraconfig/tasks/pacemaker_resource_restart.sh | 32
-rw-r--r--  extraconfig/tasks/post_puppet_pacemaker.yaml | 6
-rw-r--r--  extraconfig/tasks/pre_puppet_pacemaker.yaml | 9
-rwxr-xr-x  extraconfig/tasks/yum_update.sh | 15
-rw-r--r--  extraconfig/tasks/yum_update_noop.yaml | 29
-rw-r--r--  net-config-bond.yaml | 6
-rw-r--r--  network/config/bond-with-vlans/controller-v6.yaml | 180
-rw-r--r--  network/config/multiple-nics/controller-v6.yaml | 174
-rw-r--r--  network/config/single-nic-linux-bridge-vlans/README.md | 19
-rw-r--r--  network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml | 106
-rw-r--r--  network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml | 117
-rw-r--r--  network/config/single-nic-linux-bridge-vlans/compute.yaml | 118
-rw-r--r--  network/config/single-nic-linux-bridge-vlans/controller.yaml | 149
-rw-r--r--  network/config/single-nic-linux-bridge-vlans/swift-storage.yaml | 117
-rw-r--r--  network/config/single-nic-vlans/controller-v6.yaml | 158
-rw-r--r--  network/config/single-nic-vlans/controller.yaml | 2
-rwxr-xr-x  network/endpoints/build_endpoint_map.py | 277
-rw-r--r--  network/endpoints/endpoint.yaml | 60
-rw-r--r--  network/endpoints/endpoint_data.yaml | 192
-rw-r--r--  network/endpoints/endpoint_map.yaml | 2508
-rw-r--r--  network/external_v6.yaml | 69
-rw-r--r--  network/internal_api_v6.yaml | 69
-rw-r--r--  network/management.yaml | 2
-rw-r--r--  network/noop.yaml | 3
-rw-r--r--  network/ports/ctlplane_vip.yaml | 9
-rw-r--r--  network/ports/external.yaml | 9
-rw-r--r--  network/ports/external_from_pool.yaml | 11
-rw-r--r--  network/ports/external_from_pool_v6.yaml | 54
-rw-r--r--  network/ports/external_v6.yaml | 68
-rw-r--r--  network/ports/from_service.yaml | 11
-rw-r--r--  network/ports/from_service_v6.yaml | 42
-rw-r--r--  network/ports/internal_api.yaml | 10
-rw-r--r--  network/ports/internal_api_from_pool.yaml | 11
-rw-r--r--  network/ports/internal_api_from_pool_v6.yaml | 54
-rw-r--r--  network/ports/internal_api_v6.yaml | 63
-rw-r--r--  network/ports/management.yaml | 15
-rw-r--r--  network/ports/management_from_pool.yaml | 46
-rw-r--r--  network/ports/management_v6.yaml | 54
-rw-r--r--  network/ports/net_ip_map.yaml | 36
-rw-r--r--  network/ports/net_vip_map_external.yaml | 25
-rw-r--r--  network/ports/net_vip_map_external_v6.yaml | 95
-rw-r--r--  network/ports/noop.yaml | 3
-rw-r--r--  network/ports/storage.yaml | 9
-rw-r--r--  network/ports/storage_from_pool.yaml | 11
-rw-r--r--  network/ports/storage_from_pool_v6.yaml | 54
-rw-r--r--  network/ports/storage_mgmt.yaml | 10
-rw-r--r--  network/ports/storage_mgmt_from_pool.yaml | 11
-rw-r--r--  network/ports/storage_mgmt_from_pool_v6.yaml | 54
-rw-r--r--  network/ports/storage_mgmt_v6.yaml | 63
-rw-r--r--  network/ports/storage_v6.yaml | 63
-rw-r--r--  network/ports/tenant.yaml | 9
-rw-r--r--  network/ports/tenant_from_pool.yaml | 11
-rw-r--r--  network/ports/tenant_from_pool_v6.yaml | 53
-rw-r--r--  network/ports/tenant_v6.yaml | 63
-rw-r--r--  network/ports/vip.yaml | 9
-rw-r--r--  network/ports/vip_v6.yaml | 65
-rw-r--r--  network/storage_mgmt_v6.yaml | 69
-rw-r--r--  network/storage_v6.yaml | 69
-rw-r--r--  network/tenant_v6.yaml | 69
-rw-r--r--  overcloud-resource-registry-puppet.yaml | 24
-rw-r--r--  overcloud.yaml | 261
-rw-r--r--  puppet/all-nodes-config.yaml | 41
-rw-r--r--  puppet/ceph-cluster-config.yaml | 32
-rw-r--r--  puppet/ceph-storage-post.yaml | 14
-rw-r--r--  puppet/ceph-storage.yaml | 82
-rw-r--r--  puppet/cinder-storage-post.yaml | 12
-rw-r--r--  puppet/cinder-storage.yaml | 97
-rw-r--r--  puppet/compute-post.yaml | 12
-rw-r--r--  puppet/compute.yaml | 128
-rw-r--r--  puppet/controller-config-pacemaker.yaml | 10
-rw-r--r--  puppet/controller-config.yaml | 10
-rw-r--r--  puppet/controller-post.yaml | 60
-rw-r--r--  puppet/controller.yaml | 443
-rw-r--r--  puppet/deploy-artifacts.sh | 26
-rw-r--r--  puppet/deploy-artifacts.yaml | 32
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml | 11
-rw-r--r--  puppet/extraconfig/ceph/ceph-external-config.yaml | 24
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml | 45
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml | 47
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml | 2
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml | 2
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml | 1
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml | 62
-rwxr-xr-x  puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml | 113
-rw-r--r--  puppet/extraconfig/tls/ca-inject.yaml | 4
-rw-r--r--  puppet/extraconfig/tls/tls-cert-inject.yaml | 12
-rw-r--r--  puppet/hieradata/common.yaml | 28
-rw-r--r--  puppet/hieradata/compute.yaml | 3
-rw-r--r--  puppet/hieradata/controller.yaml | 44
-rw-r--r--  puppet/hieradata/database.yaml | 15
-rw-r--r--  puppet/hieradata/volume.yaml | 2
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp | 10
-rw-r--r--  puppet/manifests/overcloud_compute.pp | 89
-rw-r--r--  puppet/manifests/overcloud_controller.pp | 243
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp | 746
-rw-r--r--  puppet/manifests/overcloud_object.pp | 4
-rw-r--r--  puppet/manifests/overcloud_volume.pp | 6
-rw-r--r--  puppet/manifests/ringbuilder.pp | 8
-rw-r--r--  puppet/services/README.rst | 50
-rw-r--r--  puppet/services/keystone.yaml | 135
-rw-r--r--  puppet/services/pacemaker/keystone.yaml | 34
-rw-r--r--  puppet/services/services.yaml | 40
-rw-r--r--  puppet/swift-storage-post.yaml | 13
-rw-r--r--  puppet/swift-storage.yaml | 94
-rw-r--r--  puppet/vip-config.yaml | 4
-rwxr-xr-x  tools/yaml-validate.py | 52
-rw-r--r--  tox.ini | 2
-rw-r--r--  validation-scripts/all-nodes.sh | 58
162 files changed, 8923 insertions, 1505 deletions
diff --git a/capabilities_map.yaml b/capabilities-map.yaml
index 30ee211e..c7816b7e 100644
--- a/capabilities_map.yaml
+++ b/capabilities-map.yaml
@@ -5,7 +5,7 @@
# root_template: identifies repository's root template
# root_environment: identifies root_environment, this one is special in terms of
# order in which the environments are merged before deploying. This one serves as
-# a base and it's parameters/resource_registry gets overriden by other environments
+# a base and its parameters/resource_registry get overridden by other environments
# if used.
# topics:
@@ -21,7 +21,7 @@
# Attributes:
# title: (optional)
# description: (optional)
-# tags: a list of tags to provide aditional information for e.g. filtering (optional)
+# tags: a list of tags to provide additional information for e.g. filtering (optional)
# environments: (required)
# environments:
@@ -66,7 +66,7 @@ topics:
description: >
Docker container with heat agents for containerized compute node
environments:
- - file: environments/docker-rdo.yaml
+ - file: environments/docker.yaml
title: Docker RDO
description:
requires:
diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml
index 8f9e9627..09d0e9fd 100644
--- a/docker/compute-post.yaml
+++ b/docker/compute-post.yaml
@@ -17,8 +17,6 @@ parameters:
type: string
DockerLibvirtImage:
type: string
- DockerNeutronAgentImage:
- type: string
DockerOpenvswitchImage:
type: string
DockerOvsVswitchdImage:
@@ -30,20 +28,15 @@ parameters:
default: "/etc/libvirt/libvirtd.conf"
NovaConfig:
type: string
- default: "/etc/nova/nova.conf"
+ default: "/etc/nova/nova.conf,/etc/nova/rootwrap.conf"
NeutronOpenvswitchAgentConfig:
type: string
- default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/ml2/ml2_conf.ini"
- NeutronAgentConfig:
- type: string
default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
- NeutronAgentPluginVolume:
+ NeutronOpenvswitchAgentPluginVolume:
type: string
- description: The neutron agent plugin to mount into the neutron-agents container
default: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/ovs_neutron_plugin.ini:ro"
- NeutronAgentOvsVolume:
+ NeutronOpenvswitchAgentOvsVolume:
type: string
- description: The neutron agent ovs agents to mount into the neutron-agents container
default: " "
resources:
@@ -99,7 +92,6 @@ resources:
- name: libvirt_config
- name: nova_config
- name: neutron_openvswitch_agent_config
- - name: neutron_agent_config
config: |
#!/bin/python
import json
@@ -112,13 +104,11 @@ resources:
libvirt_config = os.getenv('libvirt_config').split(',')
nova_config = os.getenv('nova_config').split(',')
neutron_openvswitch_agent_config = os.getenv('neutron_openvswitch_agent_config').split(',')
- neutron_agent_config = os.getenv('neutron_agent_config').split(',')
# Command, Config_files, Owner, Perms
services = {'nova-libvirt': ['/usr/sbin/libvirtd', libvirt_config, 'root', libvirt_perms],
'nova-compute': ['/usr/bin/nova-compute', nova_config, 'nova', file_perms],
'neutron-openvswitch-agent': ['/usr/bin/neutron-openvswitch-agent', neutron_openvswitch_agent_config, 'neutron', file_perms],
- 'neutron-agent': ['/usr/bin/neutron-openvswitch-agent', neutron_agent_config, 'neutron', file_perms],
'ovs-vswitchd': ['/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/openvswitch/ovs-vswitchd.log'],
'ovsdb-server': ['/usr/sbin/ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --log-file=/var/log/openvswitch/ovsdb-server.log']
}
@@ -171,7 +161,6 @@ resources:
libvirt_config: {get_param: LibvirtConfig}
nova_config: {get_param: NovaConfig}
neutron_openvswitch_agent_config: {get_param: NeutronOpenvswitchAgentConfig}
- neutron_agent_config: {get_param: NeutronAgentConfig}
NovaComputeContainersDeploymentOVS:
type: OS::Heat::StructuredDeployments
@@ -270,6 +259,8 @@ resources:
volumes:
- /run:/run
- /lib/modules:/lib/modules:ro
+ - /dev:/dev
+ - /lib/udev:/lib/udev
- /sys/fs/cgroup:/sys/fs/cgroup
- /var/lib/etc-data/json-config/nova-libvirt.json:/var/lib/kolla/config_files/config.json
- /var/lib/etc-data/libvirt/libvirtd.conf:/var/lib/kolla/config_files/libvirtd.conf
@@ -291,27 +282,7 @@ resources:
properties:
group: docker-compose
config:
- openvswitch:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
- net: host
- privileged: true
- restart: always
- volumes:
- - /run:/run
- - /lib/modules:/lib/modules:ro
- - /var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json
- - /var/lib/etc-data/neutron/neutron.conf:/etc/kolla/neutron-openvswitch-agent/:ro
- - /var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro
- - /var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- volumes_from:
- - computedata
-
- neutronagent:
+ neutronovsagent:
image:
list_join:
- '/'
@@ -326,10 +297,11 @@ resources:
- list_join:
- ","
- [ "/run:/run", "/lib/modules:/lib/modules:ro",
- "/var/lib/etc-data/json-config/neutron-agent.json:/var/lib/kolla/config_files/config.json",
+ "/var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json",
"/var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro",
- {get_param: NeutronAgentPluginVolume},
- {get_param: NeutronAgentOvsVolume} ]
+ "/var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro",
+ {get_param: NeutronOpenvswitchAgentPluginVolume},
+ {get_param: NeutronOpenvswitchAgentOvsVolume} ]
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
volumes_from:
@@ -346,8 +318,12 @@ resources:
volumes:
- /run:/run
- /lib/modules:/lib/modules:ro
+ - /dev:/dev
+ - /lib/udev:/lib/udev
+ - /etc/iscsi:/etc/iscsi
- /var/lib/etc-data/json-config/nova-compute.json:/var/lib/kolla/config_files/config.json
- /var/lib/etc-data/nova/nova.conf:/var/lib/kolla/config_files/nova.conf:ro
+ - /var/lib/etc-data/nova/rootwrap.conf:/var/lib/kolla/config_files/rootwrap.conf:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
volumes_from:
diff --git a/docker/firstboot/install_docker_agents.yaml b/docker/firstboot/install_docker_agents.yaml
index 348c1755..2858552f 100644
--- a/docker/firstboot/install_docker_agents.yaml
+++ b/docker/firstboot/install_docker_agents.yaml
@@ -3,7 +3,7 @@ heat_template_version: 2014-10-16
parameters:
DockerAgentImage:
type: string
- default: tripleoupstream/heat-docker-agents
+ default: heat-docker-agents
DockerNamespace:
type: string
default: kollaglue
@@ -26,7 +26,10 @@ resources:
config:
str_replace:
params:
- $agent_image: {get_param: DockerAgentImage}
+ $agent_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAgentImage} ]
$docker_registry: {get_param: DockerNamespace}
$docker_namespace_is_registry: {get_param: DockerNamespaceIsRegistry}
template: {get_file: ./start_docker_agents.sh}
diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh
index a0e95d11..027aed40 100644
--- a/docker/firstboot/start_docker_agents.sh
+++ b/docker/firstboot/start_docker_agents.sh
@@ -39,31 +39,22 @@ EOF
# Local docker registry 1.8
if [ $docker_namespace_is_registry ]; then
- /bin/sed -i "s/# INSECURE_REGISTRY='--insecure-registry '/INSECURE_REGISTRY='--insecure-registry $docker_registry'/g" /etc/sysconfig/docker
+ # if namespace is used with local registry, trim all namespacing
+ trim_var=$docker_registry
+ registry_host="${trim_var%%/*}"
+ /bin/sed -i "s/# INSECURE_REGISTRY='--insecure-registry'/INSECURE_REGISTRY='--insecure-registry $registry_host'/g" /etc/sysconfig/docker
fi
/sbin/setenforce 0
/sbin/modprobe ebtables
-echo nameserver 8.8.8.8 > /etc/resolv.conf
+# CentOS sets ptmx to 000. Without it being 666, we can't use Cinder volumes
+chmod 666 /dev/pts/ptmx
# We need hostname -f to return in a centos container for the puppet hook
HOSTNAME=$(hostname)
echo "127.0.0.1 $HOSTNAME.localdomain $HOSTNAME" >> /etc/hosts
-# Another hack.. we need a different docker version
-# (should obviously be dropped once the atomic image contains docker 1.8.2)
-/usr/bin/systemctl stop docker.service
-/bin/curl -o /tmp/docker https://get.docker.com/builds/Linux/x86_64/docker-1.8.2
-/bin/mount -o remount,rw /usr
-/bin/rm /bin/docker
-/bin/cp /tmp/docker /bin/docker
-/bin/chmod 755 /bin/docker
-
-# enable and start docker
-/usr/bin/systemctl enable docker.service
-/usr/bin/systemctl restart --no-block docker.service
-
# enable and start heat-docker-agents
chmod 0640 /etc/systemd/system/heat-docker-agents.service
/usr/bin/systemctl enable heat-docker-agents.service
@@ -72,3 +63,22 @@ chmod 0640 /etc/systemd/system/heat-docker-agents.service
# Disable NetworkManager and let the ifup/down scripts work properly.
/usr/bin/systemctl disable NetworkManager
/usr/bin/systemctl stop NetworkManager
+
+# Atomic's root partition & logical volume defaults to 3G. In order to launch
+# larger VMs, we need to enlarge the root logical volume and scale down the
+# docker_pool logical volume. We are allocating 80% of the disk space for
+# vm data and the remaining 20% for docker images.
+ATOMIC_ROOT='/dev/mapper/atomicos-root'
+ROOT_DEVICE=`pvs -o vg_name,pv_name --no-headings | grep atomicos | awk '{ print $2}'`
+
+growpart $( echo "${ROOT_DEVICE}" | sed -r 's/([^0-9]*)([0-9]+)/\1 \2/' )
+pvresize "${ROOT_DEVICE}"
+lvresize -l +80%FREE "${ATOMIC_ROOT}"
+xfs_growfs "${ATOMIC_ROOT}"
+
+cat <<EOF > /etc/sysconfig/docker-storage-setup
+GROWPART=true
+AUTO_EXTEND_POOL=yes
+POOL_AUTOEXTEND_PERCENT=30
+POOL_AUTOEXTEND_THRESHOLD=70
+EOF
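
The registry handling added above relies on bash suffix stripping: `${trim_var%%/*}` deletes the longest match of `/*`, so only the host:port portion of the namespace is passed to the INSECURE_REGISTRY option. A minimal sketch of that expansion, reusing the example namespace value shown later in environments/docker.yaml:

```
#!/bin/bash
# Sketch of the parameter expansion used in start_docker_agents.sh above:
# strip everything from the first '/' onward so only the registry host:port remains.
docker_registry="192.0.2.1:8787/tripleoupstream"  # example value from environments/docker.yaml
trim_var=$docker_registry
registry_host="${trim_var%%/*}"                   # longest-match suffix removal
echo "$registry_host"                             # prints: 192.0.2.1:8787
```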
diff --git a/environments/docker-network-isolation.yaml b/environments/docker-network-isolation.yaml
index 257d03dc..87c81d0b 100644
--- a/environments/docker-network-isolation.yaml
+++ b/environments/docker-network-isolation.yaml
@@ -1,4 +1,4 @@
parameter_defaults:
- NeutronAgentConfig: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/openvswitch_agent.ini"
- NeutronAgentPluginVolume: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro"
- NeutronAgentOvsVolume: "/var/lib/etc-data/neutron/conf.d/neutron-openvswitch-agent:/etc/neutron/conf.d/neutron-openvswitch-agent:ro"
+ NeutronOpenvswitchAgentConfig: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/openvswitch_agent.ini"
+ NeutronOpenvswitchAgentPluginVolume: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro"
+ NeutronOpenvswitchAgentOvsVolume: "/var/lib/etc-data/neutron/conf.d/neutron-openvswitch-agent:/etc/neutron/conf.d/neutron-openvswitch-agent:ro"
diff --git a/environments/docker.yaml b/environments/docker.yaml
index 6376b749..a7e2504c 100644
--- a/environments/docker.yaml
+++ b/environments/docker.yaml
@@ -3,20 +3,20 @@ resource_registry:
OS::TripleO::ComputePostDeployment: ../docker/compute-post.yaml
OS::TripleO::NodeUserData: ../docker/firstboot/install_docker_agents.yaml
-parameters:
+parameter_defaults:
NovaImage: atomic-image
parameter_defaults:
# Defaults to 'tripleoupstream'. Specify a local docker registry
- # Example: 192.168.122.131:8787
+ # Example: 192.0.2.1:8787/tripleoupstream
DockerNamespace: tripleoupstream
# Enable local Docker registry
DockerNamespaceIsRegistry: false
# Compute Node Images
DockerComputeImage: centos-binary-nova-compute:latest
+ DockerAgentImage: heat-docker-agents:latest
DockerComputeDataImage: centos-binary-data:latest
DockerLibvirtImage: centos-binary-nova-libvirt:latest
- DockerNeutronAgentImage: centos-binary-neutron-agents:latest
DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:latest
DockerOvsVswitchdImage: centos-binary-openvswitch-vswitchd:latest
DockerOpenvswitchDBImage: centos-binary-openvswitch-db-server:latest
diff --git a/environments/enable-tls.yaml b/environments/enable-tls.yaml
index bc4d1bef..6986a0c8 100644
--- a/environments/enable-tls.yaml
+++ b/environments/enable-tls.yaml
@@ -5,6 +5,9 @@ parameter_defaults:
SSLKey: |
The contents of the private key go here
EndpointMap:
+ AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
@@ -14,9 +17,10 @@ parameter_defaults:
GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
- GlanceRegistryAdmin: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
- GlanceRegistryPublic: {protocol: 'https', port: '9191', host: 'IP_ADDRESS'} # Not set on the loadbalancer yet.
+ GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
@@ -33,6 +37,12 @@ parameter_defaults:
NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
NovaEC2Public: {protocol: 'https', port: '13773', host: 'CLOUDNAME'}
+ NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
diff --git a/environments/external-loadbalancer-vip-v6.yaml b/environments/external-loadbalancer-vip-v6.yaml
new file mode 100644
index 00000000..5a2ef505
--- /dev/null
+++ b/environments/external-loadbalancer-vip-v6.yaml
@@ -0,0 +1,38 @@
+resource_registry:
+ OS::TripleO::Network::Ports::NetVipMap: ../network/ports/net_vip_map_external_v6.yaml
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/noop.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/noop.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/noop.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/noop.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/from_service_v6.yaml
+ OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool_v6.yaml
+ OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool_v6.yaml
+ OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool_v6.yaml
+ OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool_v6.yaml
+ # OVS doesn't support IPv6 endpoints for tunneling yet, so this remains IPv4 for now.
+ OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+
+parameter_defaults:
+ # When using an external loadbalancer set the following in parameter_defaults
+ # to control your VIPs (currently one per network)
+ # NOTE: we will eventually move to one VIP per service
+ #
+ ControlPlaneIP: 192.0.2.251
+ ExternalNetworkVip: 2001:db8:fd00:1000:0000:0000:0000:0005
+ InternalApiNetworkVip: fd00:fd00:fd00:2000:0000:0000:0000:0005
+ StorageNetworkVip: fd00:fd00:fd00:3000:0000:0000:0000:0005
+ StorageMgmtNetworkVip: fd00:fd00:fd00:4000:0000:0000:0000:0005
+ ServiceVips:
+ redis: fd00:fd00:fd00:2000:0000:0000:0000:0006
+ ControllerIPs:
+ external:
+ - 2001:db8:fd00:1000:0000:0000:0000:0007
+ internal_api:
+ - fd00:fd00:fd00:2000:0000:0000:0000:0007
+ storage:
+ - fd00:fd00:fd00:3000:0000:0000:0000:0007
+ storage_mgmt:
+ - fd00:fd00:fd00:4000:0000:0000:0000:0007
+ tenant:
+ - 172.16.0.253
+ EnableLoadBalancer: false
diff --git a/environments/external-loadbalancer-vip.yaml b/environments/external-loadbalancer-vip.yaml
index 1cf59825..8656ba1a 100644
--- a/environments/external-loadbalancer-vip.yaml
+++ b/environments/external-loadbalancer-vip.yaml
@@ -10,6 +10,8 @@ resource_registry:
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+ # Management network is optional and disabled by default
+ #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management_from_pool.yaml
parameter_defaults:
# When using an external loadbalancer set the following in parameter_defaults
@@ -34,4 +36,6 @@ parameter_defaults:
- 172.16.3.253
tenant:
- 172.16.0.253
+ #management:
+ #- 172.16.4.253
EnableLoadBalancer: false
diff --git a/environments/ips-from-pool-all.yaml b/environments/ips-from-pool-all.yaml
new file mode 100644
index 00000000..f660d501
--- /dev/null
+++ b/environments/ips-from-pool-all.yaml
@@ -0,0 +1,75 @@
+# Environment file demonstrating how to pre-assign IPs to all node types
+resource_registry:
+ OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
+ OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+
+ OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+
+ OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
+
+ OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
+
+ OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
+
+parameter_defaults:
+ ControllerIPs:
+ # Each controller will get an IP from the lists below, first controller, first IP
+ external:
+ - 10.0.0.251
+ internal_api:
+ - 172.16.2.251
+ storage:
+ - 172.16.1.251
+ storage_mgmt:
+ - 172.16.3.251
+ tenant:
+ - 172.16.0.251
+ NovaComputeIPs:
+ # Each compute will get an IP from the lists below, first compute, first IP
+ internal_api:
+ - 172.16.2.252
+ storage:
+ - 172.16.1.252
+ tenant:
+ - 172.16.0.252
+ CephStorageIPs:
+ # Each ceph node will get an IP from the lists below, first node, first IP
+ storage:
+ - 172.16.1.253
+ storage_mgmt:
+ - 172.16.3.253
+ SwiftStorageIPs:
+ # Each swift node will get an IP from the lists below, first node, first IP
+ internal_api:
+ - 172.16.2.254
+ storage:
+ - 172.16.1.254
+ storage_mgmt:
+ - 172.16.3.254
+ BlockStorageIPs:
+ # Each cinder node will get an IP from the lists below, first node, first IP
+ internal_api:
+ - 172.16.2.250
+ storage:
+ - 172.16.1.250
+ storage_mgmt:
+ - 172.16.3.250
diff --git a/environments/major-upgrade-pacemaker-converge.yaml b/environments/major-upgrade-pacemaker-converge.yaml
new file mode 100644
index 00000000..dfcb9654
--- /dev/null
+++ b/environments/major-upgrade-pacemaker-converge.yaml
@@ -0,0 +1,6 @@
+parameter_defaults:
+ UpdateIdentifier: 'true'
+ UpgradeLevelNovaCompute: ''
+
+resource_registry:
+ OS::TripleO::Tasks::PackageUpdate: ../extraconfig/tasks/yum_update_noop.yaml
diff --git a/environments/major-upgrade-pacemaker-init.yaml b/environments/major-upgrade-pacemaker-init.yaml
new file mode 100644
index 00000000..d98a9cdd
--- /dev/null
+++ b/environments/major-upgrade-pacemaker-init.yaml
@@ -0,0 +1,11 @@
+parameter_defaults:
+ UpgradeLevelNovaCompute: liberty
+
+resource_registry:
+ OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker_init.yaml
+ OS::TripleO::Tasks::PackageUpdate: ../extraconfig/tasks/yum_update_noop.yaml
+ OS::TripleO::ControllerPostDeployment: OS::Heat::None
+ OS::TripleO::ComputePostDeployment: OS::Heat::None
+ OS::TripleO::ObjectStoragePostDeployment: OS::Heat::None
+ OS::TripleO::BlockStoragePostDeployment: OS::Heat::None
+ OS::TripleO::CephStoragePostDeployment: OS::Heat::None
diff --git a/environments/major-upgrade-pacemaker.yaml b/environments/major-upgrade-pacemaker.yaml
new file mode 100644
index 00000000..61186bb0
--- /dev/null
+++ b/environments/major-upgrade-pacemaker.yaml
@@ -0,0 +1,11 @@
+parameter_defaults:
+ UpgradeLevelNovaCompute: liberty
+
+resource_registry:
+ OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker.yaml
+ OS::TripleO::Tasks::PackageUpdate: ../extraconfig/tasks/yum_update_noop.yaml
+ OS::TripleO::ControllerPostDeployment: OS::Heat::None
+ OS::TripleO::ComputePostDeployment: OS::Heat::None
+ OS::TripleO::ObjectStoragePostDeployment: OS::Heat::None
+ OS::TripleO::BlockStoragePostDeployment: OS::Heat::None
+ OS::TripleO::CephStoragePostDeployment: OS::Heat::None
diff --git a/environments/manage-firewall.yaml b/environments/manage-firewall.yaml
index 071f4108..5d48698e 100644
--- a/environments/manage-firewall.yaml
+++ b/environments/manage-firewall.yaml
@@ -1,2 +1,2 @@
-parameters:
+parameter_defaults:
ManageFirewall: true
diff --git a/environments/mongodb-nojournal.yaml b/environments/mongodb-nojournal.yaml
index 1e13e452..92cef532 100644
--- a/environments/mongodb-nojournal.yaml
+++ b/environments/mongodb-nojournal.yaml
@@ -1,5 +1,5 @@
# A Heat environment file which can be used to disable journal in MongoDb.
# Since, when journaling is enabled, MongoDb will create big journal file
# it can take time. In a CI environment for example journaling is not necessary.
-parameters:
+parameter_defaults:
MongoDbNoJournal: true
diff --git a/environments/net-bond-with-vlans-no-external.yaml b/environments/net-bond-with-vlans-no-external.yaml
index 0da119d9..75959a0b 100644
--- a/environments/net-bond-with-vlans-no-external.yaml
+++ b/environments/net-bond-with-vlans-no-external.yaml
@@ -12,7 +12,7 @@
resource_registry:
# Set external ports to noop
- OS::TripleO::Network::External: ../network/noop.yaml
+ OS::TripleO::Network::External: OS::Heat::None
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/noop.yaml
OS::TripleO::BlockStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/cinder-storage.yaml
diff --git a/environments/net-bond-with-vlans-v6.yaml b/environments/net-bond-with-vlans-v6.yaml
new file mode 100644
index 00000000..73dda3d9
--- /dev/null
+++ b/environments/net-bond-with-vlans-v6.yaml
@@ -0,0 +1,20 @@
+# This template configures each role to use a pair of bonded nics (nic2 and
+# nic3) and configures an IP address on each relevant isolated network
+# for each role, with IPv6 on the External network. This template assumes
+# use of network-isolation-v6.yaml.
+#
+# FIXME: if/when we add functionality to heatclient to include heat
+# environment files we should think about using it here to automatically
+# include network-isolation-v6.yaml.
+resource_registry:
+ OS::TripleO::BlockStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/cinder-storage.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/bond-with-vlans/compute.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller-v6.yaml
+ OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml
+ OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml
+
+parameter_defaults:
+ # This sets 'external_network_bridge' in l3_agent.ini to an empty string
+ # so that external networks act like provider bridge networks (they
+ # will plug into br-int instead of br-ex)
+ NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-bond-with-vlans.yaml b/environments/net-bond-with-vlans.yaml
index 9600fc7e..de8f8f74 100644
--- a/environments/net-bond-with-vlans.yaml
+++ b/environments/net-bond-with-vlans.yaml
@@ -12,10 +12,6 @@ resource_registry:
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml
-# We use parameter_defaults instead of parameters here because Tuskar munges
-# the names of top level and role level parameters with the role name and a
-# version. Using parameter_defaults makes it such that if the parameter name is
-# not defined in the template, we don't get an error.
parameter_defaults:
# This sets 'external_network_bridge' in l3_agent.ini to an empty string
# so that external networks act like provider bridge networks (they
diff --git a/environments/net-multiple-nics-v6.yaml b/environments/net-multiple-nics-v6.yaml
new file mode 100644
index 00000000..a2bb0bba
--- /dev/null
+++ b/environments/net-multiple-nics-v6.yaml
@@ -0,0 +1,13 @@
+# This template configures each role to use a separate NIC for
+# each isolated network with IPv6 on the External network.
+# This template assumes use of network-isolation.yaml.
+#
+# FIXME: if/when we add functionality to heatclient to include heat
+# environment files we should think about using it here to automatically
+# include network-isolation-v6.yaml.
+resource_registry:
+ OS::TripleO::BlockStorage::Net::SoftwareConfig: ../network/config/multiple-nics/cinder-storage.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/multiple-nics/compute.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/multiple-nics/controller-v6.yaml
+ OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/multiple-nics/swift-storage.yaml
+ OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/multiple-nics/ceph-storage.yaml
diff --git a/environments/net-single-nic-linux-bridge-with-vlans.yaml b/environments/net-single-nic-linux-bridge-with-vlans.yaml
new file mode 100644
index 00000000..fd80bb9b
--- /dev/null
+++ b/environments/net-single-nic-linux-bridge-with-vlans.yaml
@@ -0,0 +1,19 @@
+# This template configures each role to use Vlans on a single nic for
+# each isolated network.
+# This template assumes use of network-isolation.yaml.
+#
+# FIXME: if/when we add functionality to heatclient to include heat
+# environment files we should think about using it here to automatically
+# include network-isolation.yaml.
+resource_registry:
+ OS::TripleO::BlockStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/compute.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/controller.yaml
+ OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
+ OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
+
+parameter_defaults:
+ # This sets 'external_network_bridge' in l3_agent.ini to an empty string
+ # so that external networks act like provider bridge networks (they
+ # will plug into br-int instead of br-ex)
+ NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-single-nic-with-vlans-no-external.yaml b/environments/net-single-nic-with-vlans-no-external.yaml
index a173df4e..c7594b32 100644
--- a/environments/net-single-nic-with-vlans-no-external.yaml
+++ b/environments/net-single-nic-with-vlans-no-external.yaml
@@ -10,7 +10,7 @@
resource_registry:
# Set external ports to noop
- OS::TripleO::Network::External: ../network/noop.yaml
+ OS::TripleO::Network::External: OS::Heat::None
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/noop.yaml
# Configure other ports as normal
diff --git a/environments/net-single-nic-with-vlans-v6.yaml b/environments/net-single-nic-with-vlans-v6.yaml
new file mode 100644
index 00000000..8210bad3
--- /dev/null
+++ b/environments/net-single-nic-with-vlans-v6.yaml
@@ -0,0 +1,19 @@
+# This template configures each role to use Vlans on a single nic for
+# each isolated network with IPv6 on the External network.
+# This template assumes use of network-isolation.yaml.
+#
+# FIXME: if/when we add functionality to heatclient to include heat
+# environment files we should think about using it here to automatically
+# include network-isolation-v6.yaml.
+resource_registry:
+ OS::TripleO::BlockStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/cinder-storage.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/single-nic-vlans/compute.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller-v6.yaml
+ OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml
+ OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml
+
+parameter_defaults:
+ # This sets 'external_network_bridge' in l3_agent.ini to an empty string
+ # so that external networks act like provider bridge networks (they
+ # will plug into br-int instead of br-ex)
+ NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-single-nic-with-vlans.yaml b/environments/net-single-nic-with-vlans.yaml
index bdfeadd3..a61bc6e1 100644
--- a/environments/net-single-nic-with-vlans.yaml
+++ b/environments/net-single-nic-with-vlans.yaml
@@ -12,10 +12,6 @@ resource_registry:
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml
-# We use parameter_defaults instead of parameters here because Tuskar munges
-# the names of top level and role level parameters with the role name and a
-# version. Using parameter_defaults makes it such that if the parameter name is
-# not defined in the template, we don't get an error.
parameter_defaults:
# This sets 'external_network_bridge' in l3_agent.ini to an empty string
# so that external networks act like provider bridge networks (they
diff --git a/environments/network-environment.yaml b/environments/network-environment.yaml
new file mode 100644
index 00000000..005310c7
--- /dev/null
+++ b/environments/network-environment.yaml
@@ -0,0 +1,50 @@
+#This file is an example of an environment file for defining the isolated
+#networks and related parameters.
+resource_registry:
+ # Network Interface templates to use (these files must exist)
+ OS::TripleO::BlockStorage::Net::SoftwareConfig:
+ ../network/config/single-nic-vlans/cinder-storage.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig:
+ ../network/config/single-nic-vlans/compute.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig:
+ ../network/config/single-nic-vlans/controller.yaml
+ OS::TripleO::ObjectStorage::Net::SoftwareConfig:
+ ../network/config/single-nic-vlans/swift-storage.yaml
+ OS::TripleO::CephStorage::Net::SoftwareConfig:
+ ../network/config/single-nic-vlans/ceph-storage.yaml
+
+parameter_defaults:
+ # This section is where deployment-specific configuration is done
+ # Customize the IP subnets to match the local environment
+ InternalApiNetCidr: 172.17.0.0/24
+ StorageNetCidr: 172.18.0.0/24
+ StorageMgmtNetCidr: 172.19.0.0/24
+ TenantNetCidr: 172.16.0.0/24
+ ExternalNetCidr: 10.0.0.0/24
+ # CIDR subnet mask length for provisioning network
+ ControlPlaneSubnetCidr: 24
+ # Customize the IP ranges on each network to use for static IPs and VIPs
+ InternalApiAllocationPools: [{'start': '172.17.0.10', 'end': '172.17.0.200'}]
+ StorageAllocationPools: [{'start': '172.18.0.10', 'end': '172.18.0.200'}]
+ StorageMgmtAllocationPools: [{'start': '172.19.0.10', 'end': '172.19.0.200'}]
+ TenantAllocationPools: [{'start': '172.16.0.10', 'end': '172.16.0.200'}]
+ # Leave room if the external network is also used for floating IPs
+ ExternalAllocationPools: [{'start': '10.0.0.10', 'end': '10.0.0.50'}]
+ # Gateway router for the external network
+ ExternalInterfaceDefaultRoute: 10.0.0.1
+ # Gateway router for the provisioning network (or Undercloud IP)
+ ControlPlaneDefaultRoute: 192.0.2.254
+ # Generally the IP of the Undercloud
+ EC2MetadataIp: 192.0.2.1
+ # Define the DNS servers (maximum 2) for the overcloud nodes
+ DnsServers: ["8.8.8.8","8.8.4.4"]
+ # Customize the VLAN IDs to match the local environment
+ InternalApiNetworkVlanID: 10
+ StorageNetworkVlanID: 20
+ StorageMgmtNetworkVlanID: 30
+ TenantNetworkVlanID: 40
+ ExternalNetworkVlanID: 50
+ # Set to empty string to enable multiple external networks or VLANs
+ NeutronExternalNetworkBridge: "''"
+ # Customize bonding options, e.g. "mode=4 lacp_rate=1 updelay=1000 miimon=100"
+ BondInterfaceOvsOptions: "mode=active-backup"
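
The new network-environment.yaml is an example file meant to be layered on top of the network isolation templates at deploy time. A hedged usage sketch follows; the install path and the pairing with network-isolation.yaml reflect the usual TripleO workflow rather than anything this diff mandates:

```
# Hypothetical invocation: include network isolation plus the example network
# environment when deploying the overcloud. The path assumes the default
# /usr/share/openstack-tripleo-heat-templates install location; adjust as needed.
openstack overcloud deploy --templates \
  -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \
  -e /usr/share/openstack-tripleo-heat-templates/environments/network-environment.yaml
```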
diff --git a/environments/network-isolation-v6.yaml b/environments/network-isolation-v6.yaml
new file mode 100644
index 00000000..11ca5b31
--- /dev/null
+++ b/environments/network-isolation-v6.yaml
@@ -0,0 +1,57 @@
+# Enable the creation of IPv6 Neutron networks for isolated Overcloud
+# traffic and configure each role to assign ports (related
+# to that role) on these networks.
+resource_registry:
+ OS::TripleO::Network::External: ../network/external_v6.yaml
+ OS::TripleO::Network::InternalApi: ../network/internal_api_v6.yaml
+ OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt_v6.yaml
+ OS::TripleO::Network::Storage: ../network/storage_v6.yaml
+ # IPv4 until OVS and Neutron support IPv6 tunnel endpoints
+ OS::TripleO::Network::Tenant: ../network/tenant.yaml
+
+ # Port assignments for the VIPs
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external_v6.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api_v6.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage_v6.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt_v6.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip_v6.yaml
+
+ # Port assignments for the controller role
+ OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_v6.yaml
+ OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_v6.yaml
+ OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_v6.yaml
+ OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml
+ OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml
+
+ # Port assignments for the compute role
+ OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api_v6.yaml
+ OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage_v6.yaml
+ OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml
+
+ # Port assignments for the ceph storage role
+ OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage_v6.yaml
+ OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml
+
+ # Port assignments for the swift storage role
+ OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api_v6.yaml
+ OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage_v6.yaml
+ OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml
+
+ # Port assignments for the block storage role
+ OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api_v6.yaml
+ OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage_v6.yaml
+ OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml
+
+parameter_defaults:
+ # Enable IPv6 for Ceph.
+ CephIPv6: True
+ # Enable IPv6 for Corosync. This is required when Corosync is using an IPv6 IP in the cluster.
+ CorosyncIPv6: True
+ # Enable IPv6 for MongoDB. This is required when MongoDB is using an IPv6 IP.
+ MongoDbIPv6: True
+ # Enable various IPv6 features in Nova.
+ NovaIPv6: True
+ # Enable IPv6 environment for RabbitMQ.
+ RabbitIPv6: true
+ # Enable IPv6 environment for Memcached.
+ MemcachedIPv6: true
diff --git a/environments/network-isolation.yaml b/environments/network-isolation.yaml
index 87fc22f5..c0420c5c 100644
--- a/environments/network-isolation.yaml
+++ b/environments/network-isolation.yaml
@@ -10,7 +10,7 @@ resource_registry:
OS::TripleO::Network::Storage: ../network/storage.yaml
OS::TripleO::Network::Tenant: ../network/tenant.yaml
# Management network is optional and disabled by default
- OS::TripleO::Network::Management: ../network/noop.yaml
+ OS::TripleO::Network::Management: OS::Heat::None
# Port assignments for the VIPs
OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
diff --git a/environments/neutron-ml2-bigswitch.yaml b/environments/neutron-ml2-bigswitch.yaml
index 69c91326..750d3c4e 100644
--- a/environments/neutron-ml2-bigswitch.yaml
+++ b/environments/neutron-ml2-bigswitch.yaml
@@ -2,11 +2,13 @@
# extensions, configured via puppet
resource_registry:
OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
+ OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
parameter_defaults:
# Required to fill in:
NeutronBigswitchRestproxyServers:
NeutronBigswitchRestproxyServerAuth:
+ NeutronMechanismDrivers: bsn_ml2
# Optional:
# NeutronBigswitchRestproxyAutoSyncOnFailure:
@@ -14,4 +16,6 @@ parameter_defaults:
# NeutronBigswitchRestproxyNeutronId:
# NeutronBigswitchRestproxyServerSsl:
# NeutronBigswitchRestproxySslCertDirectory:
+ # NeutronBigswitchAgentEnabled:
+ # NeutronBigswitchLLDPEnabled:
diff --git a/environments/neutron-ml2-cisco-nexus-ucsm.yaml b/environments/neutron-ml2-cisco-nexus-ucsm.yaml
index 5a1a32a3..ad111757 100644
--- a/environments/neutron-ml2-cisco-nexus-ucsm.yaml
+++ b/environments/neutron-ml2-cisco-nexus-ucsm.yaml
@@ -19,7 +19,7 @@ parameter_defaults:
NetworkNexusSwitchReplayCount: 3
NetworkNexusProviderVlanAutoCreate: 'true'
NetworkNexusProviderVlanAutoTrunk: 'true'
- NetworkNexusVxlanGlobalConfig: 'true'
+ NetworkNexusVxlanGlobalConfig: 'false'
NetworkNexusHostKeyChecks: 'false'
NetworkNexusVxlanVniRanges: '0:0'
NetworkNexusVxlanMcastRanges: '0.0.0.0:0.0.0.0'
diff --git a/environments/neutron-opencontrail.yaml b/environments/neutron-opencontrail.yaml
new file mode 100644
index 00000000..4704dbc8
--- /dev/null
+++ b/environments/neutron-opencontrail.yaml
@@ -0,0 +1,22 @@
+# A Heat environment file which can be used to enable OpenContrail
+# extensions, configured via puppet
+resource_registry:
+ OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml
+ OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
+
+parameter_defaults:
+ NeutronCorePlugin: neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2
+ NeutronServicePlugins: neutron_plugin_contrail.plugins.opencontrail.loadbalancer.plugin.LoadBalancerPlugin
+ NeutronEnableDHCPAgent: false
+ NeutronEnableL3Agent: false
+ NeutronEnableMetadataAgent: false
+ NeutronEnableOVSAgent: false
+ NeutronEnableTunnelling: false
+
+ # required params:
+ #ContrailApiServerIp:
+ #ContrailExtensions: ''
+
+ # optional params
+ # ContrailApiServerPort: 8082
+ # ContrailMultiTenancy: false
diff --git a/environments/neutron-plumgrid.yaml b/environments/neutron-plumgrid.yaml
new file mode 100755
index 00000000..b8d66015
--- /dev/null
+++ b/environments/neutron-plumgrid.yaml
@@ -0,0 +1,29 @@
+# A Heat environment file which can be used to enable PLUMgrid
+# extensions, configured via puppet
+resource_registry:
+ OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml
+
+parameter_defaults:
+ NeutronCorePlugin: networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2
+ PLUMgridDirectorServer: 127.0.0.1
+ PLUMgridDirectorServerPort: 443
+ PLUMgridUsername: username
+ PLUMgridPassword: password
+ PLUMgridServerTimeOut: 99
+ PLUMgridNovaMetadataIP: 169.254.169.254
+ PLUMgridNovaMetadataPort: 8775
+ PLUMgridL2GatewayVendor: vendor
+ PLUMgridL2GatewayUsername: username
+ PLUMgridL2GatewayPassword: password
+ PLUMgridIdentityVersion: v2.0
+ PLUMgridConnectorType: distributed
+
+ #Optional Parameters
+ #PLUMgridNeutronPluginVersion: present
+ #PLUMgridPlumlibVersion: present
+
+ # PLUMgrid doesn't require dhcp, l3, ovs and metadata agents
+ NeutronEnableDHCPAgent: false
+ NeutronEnableL3Agent: false
+ NeutronEnableMetadataAgent: false
+ NeutronEnableOVSAgent: false
diff --git a/environments/puppet-ceph-devel.yaml b/environments/puppet-ceph-devel.yaml
index d782e8d8..a2d1100f 100644
--- a/environments/puppet-ceph-devel.yaml
+++ b/environments/puppet-ceph-devel.yaml
@@ -1,8 +1,7 @@
# A Heat environment file which can be used to enable a Ceph
-# storage cluster using the controller and 2 ceph nodes.
-# Rbd backends are enabled for Cinder, Glance, and Nova.
-parameters:
- CephStorageCount: 2
+# storage cluster using the controller and ceph nodes.
+# Rbd backends are enabled for Cinder, Glance, Gnocchi and Nova.
+parameter_defaults:
#NOTE: These ID's and keys should be regenerated for
# a production deployment. What is here is suitable for
# developer and CI testing only.
@@ -12,5 +11,6 @@ parameters:
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
GlanceBackend: rbd
+ GnocchiBackend: rbd
CinderEnableIscsiBackend: false
ControllerEnableCephStorage: true
diff --git a/environments/puppet-ceph-external.yaml b/environments/puppet-ceph-external.yaml
index 7f5b5080..865e0b98 100644
--- a/environments/puppet-ceph-external.yaml
+++ b/environments/puppet-ceph-external.yaml
@@ -9,16 +9,18 @@ parameter_defaults:
#CephClientKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
#CephExternalMonHost: '172.16.1.7, 172.16.1.8'
- # the following parameters enable Ceph backends for Cinder, Glance, and Nova
+ # the following parameters enable Ceph backends for Cinder, Glance, Gnocchi and Nova
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
GlanceBackend: rbd
+ GnocchiBackend: rbd
# If the Ceph pools which host VMs, Volumes and Images do not match these
# names OR the client keyring to use is not named 'openstack', edit the
# following as needed.
NovaRbdPoolName: vms
CinderRbdPoolName: volumes
GlanceRbdPoolName: images
+ GnocchiRbdPoolName: metrics
CephClientUserName: openstack
# finally we disable the Cinder LVM backend
diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml
index 8986e35f..e3188090 100644
--- a/environments/puppet-pacemaker.yaml
+++ b/environments/puppet-pacemaker.yaml
@@ -4,3 +4,9 @@ resource_registry:
OS::TripleO::ControllerConfig: ../puppet/controller-config-pacemaker.yaml
OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+
+ # custom pacemaker services
+ # NOTE: For now we will need to specify overrides to all services
+ # which use pacemaker. In the future (with upcoming HA light work) this
+ # list will hopefully be much smaller however.
+ OS::TripleO::Services::Keystone: ../puppet/services/pacemaker/keystone.yaml
diff --git a/environments/puppet-tenant-vlan.yaml b/environments/puppet-tenant-vlan.yaml
index 0df63caf..ed948bc5 100644
--- a/environments/puppet-tenant-vlan.yaml
+++ b/environments/puppet-tenant-vlan.yaml
@@ -1,4 +1,4 @@
-parameters:
+parameter_defaults:
NeutronNetworkType: vlan
NeutronEnableTunnelling: false
- NeutronNetworkVLANRanges: datacentre:1:1000 \ No newline at end of file
+ NeutronNetworkVLANRanges: datacentre:1:1000
diff --git a/environments/storage-environment.yaml b/environments/storage-environment.yaml
index bd320bd9..e1cafd2b 100644
--- a/environments/storage-environment.yaml
+++ b/environments/storage-environment.yaml
@@ -15,6 +15,8 @@ parameter_defaults:
NovaEnableRbdBackend: true
## Glance backend can be either 'rbd' (Ceph), 'swift' or 'file'.
GlanceBackend: rbd
+ ## Gnocchi backend can be either 'rbd' (Ceph), 'swift' or 'file'.
+ GnocchiBackend: rbd
#### CINDER NFS SETTINGS ####
diff --git a/environments/updates/update-from-keystone-admin-internal-api.yaml b/environments/updates/update-from-keystone-admin-internal-api.yaml
index 3c71ef1b..a9fa2bea 100644
--- a/environments/updates/update-from-keystone-admin-internal-api.yaml
+++ b/environments/updates/update-from-keystone-admin-internal-api.yaml
@@ -1,7 +1,7 @@
# This environment file provides a default value for ServiceNetMap where
# Keystone Admin API service is running on the Internal API network
-parameters:
+parameter_defaults:
ServiceNetMap:
NeutronTenantNetwork: tenant
CeilometerApiNetwork: internal_api
diff --git a/extraconfig/all_nodes/swap-partition.yaml b/extraconfig/all_nodes/swap-partition.yaml
new file mode 100644
index 00000000..89a2adb0
--- /dev/null
+++ b/extraconfig/all_nodes/swap-partition.yaml
@@ -0,0 +1,90 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Extra config to add swap space to nodes.
+
+# Parameters passed from the parent template - note if you maintain
+# out-of-tree templates they may require additional parameters if the
+# in-tree templates add a new role.
+parameters:
+ controller_servers:
+ type: json
+ compute_servers:
+ type: json
+ blockstorage_servers:
+ type: json
+ objectstorage_servers:
+ type: json
+ cephstorage_servers:
+ type: json
+ swap_partition_label:
+ type: string
+ description: Swap partition label
+ default: 'swap1'
+
+
+resources:
+
+ SwapConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ set -eux
+ swap_partition=$(realpath /dev/disk/by-label/$swap_partition_label)
+ swapon $swap_partition
+ echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab
+ inputs:
+ - name: swap_partition_label
+ description: Swap partition label
+ default: 'swap1'
+
+ ControllerSwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: controller_servers}
+ input_values:
+ swap_partition_label: {get_param: swap_partition_label}
+ actions: ["CREATE"]
+
+ ComputeSwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: compute_servers}
+ input_values:
+ swap_partition_label: {get_param: swap_partition_label}
+ actions: ["CREATE"]
+
+ BlockStorageSwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: blockstorage_servers}
+ input_values:
+ swap_partition_label: {get_param: swap_partition_label}
+ actions: ["CREATE"]
+
+ ObjectStorageSwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: objectstorage_servers}
+ input_values:
+ swap_partition_label: {get_param: swap_partition_label}
+ actions: ["CREATE"]
+
+ CephStorageSwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: cephstorage_servers}
+ input_values:
+ swap_partition_label: {get_param: swap_partition_label}
+ actions: ["CREATE"]
+
+outputs:
+ config_identifier:
+ value: none
diff --git a/extraconfig/all_nodes/swap.yaml b/extraconfig/all_nodes/swap.yaml
new file mode 100644
index 00000000..374b1e5d
--- /dev/null
+++ b/extraconfig/all_nodes/swap.yaml
@@ -0,0 +1,108 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Extra config to add swap space to nodes.
+
+# Parameters passed from the parent template - note if you maintain
+# out-of-tree templates they may require additional parameters if the
+# in-tree templates add a new role.
+parameters:
+ controller_servers:
+ type: json
+ compute_servers:
+ type: json
+ blockstorage_servers:
+ type: json
+ objectstorage_servers:
+ type: json
+ cephstorage_servers:
+ type: json
+ swap_size_megabytes:
+ type: string
+ description: Amount of swap space to allocate in megabytes
+ default: '4096'
+ swap_path:
+ type: string
+ description: Full path to location of swap file
+ default: '/swap'
+
+
+resources:
+
+ SwapConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ set -eux
+ if [ ! -f $swap_path ]; then
+ dd if=/dev/zero of=$swap_path count=$swap_size_megabytes bs=1M
+ chmod 0600 $swap_path
+ mkswap $swap_path
+ swapon $swap_path
+ else
+ echo "$swap_path already exists"
+ fi
+ echo "$swap_path swap swap defaults 0 0" >> /etc/fstab
+ inputs:
+ - name: swap_size_megabytes
+ description: Amount of swap space to allocate in megabytes
+ default: '4096'
+ - name: swap_path
+ description: Full path to location of swap file
+ default: '/swap'
+
+ ControllerSwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: controller_servers}
+ input_values:
+ swap_size_megabytes: {get_param: swap_size_megabytes}
+ swap_path: {get_param: swap_path}
+ actions: ["CREATE"]
+
+ ComputeSwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: compute_servers}
+ input_values:
+ swap_size_megabytes: {get_param: swap_size_megabytes}
+ swap_path: {get_param: swap_path}
+ actions: ["CREATE"]
+
+ BlockStorageSwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: blockstorage_servers}
+ input_values:
+ swap_size_megabytes: {get_param: swap_size_megabytes}
+ swap_path: {get_param: swap_path}
+ actions: ["CREATE"]
+
+ ObjectStorageSwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: objectstorage_servers}
+ input_values:
+ swap_size_megabytes: {get_param: swap_size_megabytes}
+ swap_path: {get_param: swap_path}
+ actions: ["CREATE"]
+
+ CephStorageSwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: cephstorage_servers}
+ input_values:
+ swap_size_megabytes: {get_param: swap_size_megabytes}
+ swap_path: {get_param: swap_path}
+ actions: ["CREATE"]
+
+outputs:
+ config_identifier:
+ value: none
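
These swap templates are meant to be wired in through the resource registry of an
environment file rather than included directly. A minimal sketch for the swap-file
variant, assuming it is hooked in via OS::TripleO::AllNodesExtraConfig (the size and
path values are illustrative overrides of the defaults above):

    resource_registry:
      OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/swap.yaml

    parameter_defaults:
      # illustrative values; the template defaults are 4096 MB at /swap
      swap_size_megabytes: '2048'
      swap_path: '/swap'

The swap-partition.yaml variant above is registered the same way and takes
swap_partition_label instead of a size and path.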
diff --git a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
index 70437a8a..c388358a 100644
--- a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
@@ -20,3 +20,4 @@ parameter_defaults:
rhel_reg_user: ""
rhel_reg_type: ""
rhel_reg_method: ""
+ rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.1-rpms"
diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
index a884bdae..7c65bd8b 100644
--- a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
@@ -43,6 +43,8 @@ parameters:
type: string
rhel_reg_method:
type: string
+ rhel_reg_sat_repo:
+ type: string
resources:
@@ -68,6 +70,7 @@ resources:
- name: REG_USER
- name: REG_TYPE
- name: REG_METHOD
+ - name: REG_SAT_REPO
config: {get_file: scripts/rhel-registration}
RHELRegistrationDeployment:
@@ -95,6 +98,7 @@ resources:
REG_USER: {get_param: rhel_reg_user}
REG_TYPE: {get_param: rhel_reg_type}
REG_METHOD: {get_param: rhel_reg_method}
+ REG_SAT_REPO: {get_param: rhel_reg_sat_repo}
RHELUnregistration:
type: OS::Heat::SoftwareConfig
diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
index cbbd6a1d..1c9acd2b 100644
--- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
+++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
@@ -13,8 +13,9 @@ fi
opts=
attach_opts=
+sat5_opts=
repos="repos --enable rhel-7-server-rpms"
-satellite_repo="rhel-7-server-rh-common-rpms"
+satellite_repo=${REG_SAT_REPO}
if [ -n "${REG_AUTO_ATTACH:-}" ]; then
opts="$opts --auto-attach"
@@ -49,6 +50,7 @@ fi
if [ -n "${REG_FORCE:-}" ]; then
opts="$opts --force"
+ sat5_opts="$sat5_opts --force"
fi
if [ -n "${REG_SERVER_URL:-}" ]; then
@@ -57,6 +59,7 @@ fi
if [ -n "${REG_ACTIVATION_KEY:-}" ]; then
opts="$opts --activationkey=$REG_ACTIVATION_KEY"
+ sat5_opts="$sat5_opts --activationkey=$REG_ACTIVATION_KEY"
if [ -z "${REG_ORG:-}" ]; then
echo "WARNING: REG_ACTIVATION_KEY set without REG_ORG."
@@ -75,10 +78,12 @@ fi
if [ -n "${REG_MACHINE_NAME:-}" ]; then
opts="$opts --name $REG_MACHINE_NAME"
+ sat5_opts="$sat5_opts --profilename=$REG_MACHINE_NAME"
fi
if [ -n "${REG_ORG:-}" ]; then
opts="$opts --org=$REG_ORG"
+ sat5_opts="$sat5_opts --systemorgid=$REG_ORG"
fi
if [ -n "${REG_REPOS:-}" ]; then
@@ -91,6 +96,20 @@ if [ -n "${REG_TYPE:-}" ]; then
opts="$opts --type=$REG_TYPE"
fi
+function detect_satellite_version {
+ ping_api=$REG_SAT_URL/katello/api/ping
+ if curl -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
+ echo Satellite 6 detected at $REG_SAT_URL
+ satellite_version=6
+ elif curl -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+ echo Satellite 5 detected at $REG_SAT_URL
+ satellite_version=5
+ else
+ echo No Satellite detected at $REG_SAT_URL
+ exit 1
+ fi
+}
+
case "${REG_METHOD:-}" in
portal)
subscription-manager register $opts
@@ -100,13 +119,22 @@ case "${REG_METHOD:-}" in
subscription-manager $repos
;;
satellite)
- repos="$repos --enable ${satellite_repo}"
- rpm -Uvh "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm" || true
- subscription-manager register $opts
- subscription-manager $repos
- yum install -y katello-agent || true # needed for errata reporting to satellite6
- katello-package-upload
- subscription-manager repos --disable ${satellite_repo}
+ detect_satellite_version
+ if [ "$satellite_version" = "6" ]; then
+ repos="$repos --enable ${satellite_repo}"
+ curl -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+ rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
+ subscription-manager register $opts
+ subscription-manager $repos
+ yum install -y katello-agent || true # needed for errata reporting to satellite6
+ katello-package-upload
+ subscription-manager repos --disable ${satellite_repo}
+ else
+ pushd /usr/share/rhn/
+ curl -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+ popd
+ rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
+ fi
;;
disable)
echo "Disabling RHEL registration"
diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-unregistration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-unregistration
index 1e72e0a6..916f97e3 100644
--- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-unregistration
+++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-unregistration
@@ -7,6 +7,8 @@ case "${REG_METHOD:-}" in
portal|satellite)
# Allow unregistration to fail.
# We don't want to fail stack deletes if unregistration fails.
+ # Note that this will be a no-op on satellite 5, which doesn't support
+ # unregistering from the cli.
subscription-manager unregister || true
subscription-manager clean || true
;;
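
Taken together, the registration changes above let the same environment parameters
drive either a Satellite 5 or Satellite 6 flow, selected automatically by
detect_satellite_version. A sketch of the operator-side settings, following the
rhel_reg_* parameter naming used by the registration environment file (the URL,
org and key values are illustrative):

    parameter_defaults:
      rhel_reg_method: "satellite"
      rhel_reg_sat_url: "https://satellite.example.com"
      rhel_reg_org: "example-org"
      rhel_reg_activation_key: "overcloud-key"
      # new knob added above; defaults to the Satellite 6.1 tools repo
      rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.1-rpms"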
diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh
new file mode 100644
index 00000000..07666245
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_block_storage.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+#
+# This runs an upgrade of Cinder Block Storage nodes.
+#
+set -eu
+
+yum -y install python-zaqarclient # needed for os-collect-config
+yum -y -q update
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
new file mode 100644
index 00000000..de42b16d
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# This delivers the ceph-storage upgrade script to be invoked as part of the tripleo
+# major upgrade workflow.
+#
+set -eu
+
+UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
+
+cat > $UPGRADE_SCRIPT << ENDOFCAT
+### DO NOT MODIFY THIS FILE
+### This file is automatically delivered to the ceph-storage nodes as part of the
+### tripleo upgrades workflow
+
+
+function systemctl_ceph {
+ action=\$1
+ systemctl \$action ceph
+}
+
+# "so that mirrors aren't rebalanced as if the OSD died" - gfidente
+ceph osd set noout
+
+systemctl_ceph stop
+yum -y install python-zaqarclient # needed for os-collect-config
+yum -y update
+systemctl_ceph start
+
+ceph osd unset noout
+
+ENDOFCAT
+
+# ensure the permissions are OK
+chmod 0755 $UPGRADE_SCRIPT
+
diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh
new file mode 100644
index 00000000..78628c8c
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_compute.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# This delivers the compute upgrade script to be invoked as part of the tripleo
+# major upgrade workflow.
+#
+set -eu
+
+UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
+
+cat > $UPGRADE_SCRIPT << ENDOFCAT
+### DO NOT MODIFY THIS FILE
+### This file is automatically delivered to the compute nodes as part of the
+### tripleo upgrades workflow
+
+# pin nova to kilo (messaging +-1) for the nova-compute service
+
+crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute
+
+yum -y install python-zaqarclient # needed for os-collect-config
+yum -y update
+
+ENDOFCAT
+
+# ensure the permissions are OK
+chmod 0755 $UPGRADE_SCRIPT
+
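
The script above is only delivered to /root/tripleo_upgrade_node.sh; nothing in this
step executes it. A sketch of how an operator (or external tooling) might
subsequently run it on one compute node, with a hypothetical node address and the
usual heat-admin user:

    ssh heat-admin@192.0.2.10 sudo /root/tripleo_upgrade_node.sh

Splitting delivery from execution keeps the actual package update under operator
control, so compute nodes can be upgraded in batches.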
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
new file mode 100755
index 00000000..bf2ee330
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+set -eu
+
+cluster_sync_timeout=600
+
+if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
+ echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
+ exit 1
+fi
+
+if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ pcs resource disable httpd
+ check_resource httpd stopped 1800
+ if pcs status | grep openstack-keystone; then
+ pcs resource disable openstack-keystone
+ check_resource openstack-keystone stopped 1800
+ fi
+ pcs resource disable redis
+ check_resource redis stopped 600
+ pcs resource disable mongod
+ check_resource mongod stopped 600
+ pcs resource disable rabbitmq
+ check_resource rabbitmq stopped 600
+ pcs resource disable memcached
+ check_resource memcached stopped 600
+ pcs resource disable galera
+ check_resource galera stopped 600
+ pcs cluster stop --all
+fi
+
+# Swift isn't controlled by pacemaker
+systemctl_swift stop
+
+tstart=$(date +%s)
+while systemctl is-active pacemaker; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > cluster_sync_timeout )) ; then
+ echo_error "ERROR: cluster shutdown timed out"
+ exit 1
+ fi
+done
+
+yum -y install python-zaqarclient # needed for os-collect-config
+yum -y -q update
+
+# Pin messages sent to compute nodes to kilo, these will be upgraded later
+crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
+# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
+# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
+crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
+# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
+# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
+crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
new file mode 100755
index 00000000..10bea573
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+set -eu
+
+cluster_form_timeout=600
+cluster_settle_timeout=600
+galera_sync_timeout=600
+
+if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ pcs cluster start --all
+
+ tstart=$(date +%s)
+ while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > cluster_form_timeout )) ; then
+ echo_error "ERROR: timed out forming the cluster"
+ exit 1
+ fi
+ done
+
+ if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
+ echo_error "ERROR: timed out waiting for cluster to finish transition"
+ exit 1
+ fi
+
+ pcs resource enable galera
+ check_resource galera started 600
+ pcs resource enable mongod
+ check_resource mongod started 600
+
+ tstart=$(date +%s)
+ while ! clustercheck; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > galera_sync_timeout )) ; then
+ echo_error "ERROR galera sync timed out"
+ exit 1
+ fi
+ done
+
+ # Run all the db syncs
+ # TODO: check if this can be triggered in puppet and removed from here
+ ceilometer-dbsync --config-file=/etc/ceilometer/ceilometer.conf
+ cinder-manage db sync
+ glance-manage --config-file=/etc/glance/glance-registry.conf db_sync
+ heat-manage --config-file /etc/heat/heat.conf db_sync
+ keystone-manage db_sync
+ neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+ nova-manage db sync
+
+ pcs resource enable memcached
+ check_resource memcached started 600
+ pcs resource enable rabbitmq
+ check_resource rabbitmq started 600
+ pcs resource enable redis
+ check_resource redis started 600
+ if pcs status | grep openstack-keystone; then
+ pcs resource enable openstack-keystone
+ check_resource openstack-keystone started 1800
+ fi
+ pcs resource enable httpd
+ check_resource httpd started 1800
+fi
+
+# Swift isn't controlled by pacemaker
+systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh
new file mode 100644
index 00000000..931f4f42
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_object_storage.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+#
+# This delivers the swift-storage upgrade script to be invoked as part of the tripleo
+# major upgrade workflow.
+#
+set -eu
+
+UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
+
+cat > $UPGRADE_SCRIPT << ENDOFCAT
+### DO NOT MODIFY THIS FILE
+### This file is automatically delivered to the swift-storage nodes as part of the
+### tripleo upgrades workflow
+
+
+function systemctl_swift {
+ action=\$1
+ for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \
+ openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \
+ openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object; do
+ systemctl \$action \$S
+ done
+}
+
+
+systemctl_swift stop
+
+yum -y install python-zaqarclient # needed for os-collect-config
+yum -y update
+
+systemctl_swift start
+
+
+
+ENDOFCAT
+
+# ensure the permissions are OK
+chmod 0755 $UPGRADE_SCRIPT
+
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
new file mode 100644
index 00000000..4af3186c
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -0,0 +1,85 @@
+heat_template_version: 2014-10-16
+description: 'Upgrade for Pacemaker deployments'
+
+parameters:
+ controller_servers:
+ type: json
+ compute_servers:
+ type: json
+ blockstorage_servers:
+ type: json
+ objectstorage_servers:
+ type: json
+ cephstorage_servers:
+ type: json
+ input_values:
+ type: json
+ description: input values for the software deployments
+
+ UpgradeLevelNovaCompute:
+ type: string
+ description: Nova Compute upgrade level
+ default: ''
+
+resources:
+ # TODO(jistr): for Mitaka->Newton upgrades and further we can use
+ # map_merge with input_values instead of feeding params into scripts
+ # via str_replace on bash snippets
+
+ ControllerPacemakerUpgradeConfig_Step1:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - str_replace:
+ template: |
+ #!/bin/bash
+ upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
+ params:
+ UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
+ - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_pacemaker_migrations.sh
+ - get_file: major_upgrade_controller_pacemaker_1.sh
+
+ ControllerPacemakerUpgradeDeployment_Step1:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: controller_servers}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
+ input_values: {get_param: input_values}
+
+ BlockStorageUpgradeConfig:
+ type: OS::Heat::SoftwareConfig
+ depends_on: ControllerPacemakerUpgradeDeployment_Step1
+ properties:
+ group: script
+ config: {get_file: major_upgrade_block_storage.sh}
+
+ BlockStorageUpgradeDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: blockstorage_servers}
+ config: {get_resource: BlockStorageUpgradeConfig}
+ input_values: {get_param: input_values}
+
+ ControllerPacemakerUpgradeConfig_Step2:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_pacemaker_migrations.sh
+ - get_file: major_upgrade_controller_pacemaker_2.sh
+
+ ControllerPacemakerUpgradeDeployment_Step2:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: BlockStorageUpgradeDeployment
+ properties:
+ servers: {get_param: controller_servers}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
+ input_values: {get_param: input_values}
+
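This template is not applied directly; an upgrade environment file maps it into the
deployment. A minimal sketch of that wiring, assuming the
OS::TripleO::Tasks::UpdateWorkflow hook is used for the controller upgrade step and
the package-update task is replaced by the no-op template further below (the
resource names and relative paths here are assumptions, not taken from this diff):

    resource_registry:
      OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker.yaml
      OS::TripleO::Tasks::PackageUpdate: ../extraconfig/tasks/yum_update_noop.yaml

    parameter_defaults:
      # keep nova-compute RPC pinned to the older release during the upgrade
      UpgradeLevelNovaCompute: kilo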
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.yaml
new file mode 100644
index 00000000..623549a0
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_pacemaker_init.yaml
@@ -0,0 +1,131 @@
+heat_template_version: 2014-10-16
+description: 'Upgrade for Pacemaker deployments'
+
+parameters:
+
+ controller_servers:
+ type: json
+ compute_servers:
+ type: json
+ blockstorage_servers:
+ type: json
+ objectstorage_servers:
+ type: json
+ cephstorage_servers:
+ type: json
+ input_values:
+ type: json
+ description: input values for the software deployments
+
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
+ UpgradeLevelNovaCompute:
+ type: string
+ description: Nova Compute upgrade level
+ default: ''
+
+resources:
+
+  # For the UpgradeInit also remove /etc/resolv.conf.save for +bug/1567004
+
+ UpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ UpgradeInitControllerDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: controller_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ UpgradeInitComputeDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: compute_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ UpgradeInitBlockStorageDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: blockstorage_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ UpgradeInitObjectStorageDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: objectstorage_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ UpgradeInitCephStorageDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: cephstorage_servers}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ # TODO(jistr): for Mitaka->Newton upgrades and further we can use
+ # map_merge with input_values instead of feeding params into scripts
+ # via str_replace on bash snippets
+
+ ComputeDeliverUpgradeScriptConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - str_replace:
+ template: |
+ #!/bin/bash
+ upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
+ params:
+ UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
+ - get_file: major_upgrade_compute.sh
+
+ ComputeDeliverUpgradeScriptDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: compute_servers}
+ config: {get_resource: ComputeDeliverUpgradeScriptConfig}
+ input_values: {get_param: input_values}
+
+ ObjectStorageDeliverUpgradeScriptConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: major_upgrade_object_storage.sh}
+
+ ObjectStorageDeliverUpgradeScriptDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: objectstorage_servers}
+ config: {get_resource: ObjectStorageDeliverUpgradeScriptConfig}
+ input_values: {get_param: input_values}
+
+ CephStorageDeliverUpgradeScriptConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: major_upgrade_ceph_storage.sh}
+
+ CephStorageDeliverUpgradeScriptDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: cephstorage_servers}
+ config: {get_resource: CephStorageDeliverUpgradeScriptConfig}
+ input_values: {get_param: input_values}
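
UpgradeInitCommand is the hook for per-deployment preparation that must happen on
every node before any packages are touched, such as a repository switch. A sketch of
what an operator might pass (the repository names are purely illustrative):

    parameter_defaults:
      UpgradeInitCommand: |
        set -eu
        # illustrative repo switch; replace with your own repo setup
        yum-config-manager --disable 'openstack-kilo*'
        yum-config-manager --enable 'openstack-liberty*'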
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
new file mode 100644
index 00000000..b63198db
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+# Special pieces of upgrade migration logic go into this
+# file. E.g. Pacemaker cluster transitions for existing deployments,
+# matching changes to overcloud_controller_pacemaker.pp (Puppet
+# handles deployment, this file handles migrations).
+#
+# This file shouldn't execute any action on its own, all logic should
+# be wrapped into bash functions. Upgrade scripts will source this
+# file and call the functions defined in this file where appropriate.
+#
+# The migration functions should be idempotent. If the migration has
+# been already applied, it should be possible to call the function
+# again without damaging the deployment or failing the upgrade.
+
+function add_missing_openstack_core_constraints {
+ # The CIBs are saved under /root as they might contain sensitive data
+ CIB="/root/migration.cib"
+ CIB_BACKUP="/root/backup.cib"
+ CIB_PUSH_NEEDED=n
+
+ rm -f "$CIB" "$CIB_BACKUP" || /bin/true
+ pcs cluster cib "$CIB"
+ cp "$CIB" "$CIB_BACKUP"
+
+ if ! pcs -f "$CIB" constraint --full | grep 'start openstack-sahara-api-clone then start openstack-sahara-engine-clone'; then
+ pcs -f "$CIB" constraint order start openstack-sahara-api-clone then start openstack-sahara-engine-clone
+ CIB_PUSH_NEEDED=y
+ fi
+
+ if ! pcs -f "$CIB" constraint --full | grep 'start openstack-core-clone then start openstack-ceilometer-notification-clone'; then
+ pcs -f "$CIB" constraint order start openstack-core-clone then start openstack-ceilometer-notification-clone
+ CIB_PUSH_NEEDED=y
+ fi
+
+ if ! pcs -f "$CIB" constraint --full | grep 'start openstack-aodh-evaluator-clone then start openstack-aodh-listener-clone'; then
+ pcs -f "$CIB" constraint order start openstack-aodh-evaluator-clone then start openstack-aodh-listener-clone
+ CIB_PUSH_NEEDED=y
+ fi
+
+ if pcs -f "$CIB" constraint --full | grep 'start openstack-core-clone then start openstack-heat-api-clone'; then
+ CID=$(pcs -f "$CIB" constraint --full | grep 'start openstack-core-clone then start openstack-heat-api-clone' | sed -e 's/.*id\://g' -e 's/)//g')
+ pcs -f "$CIB" constraint remove $CID
+ CIB_PUSH_NEEDED=y
+ fi
+
+ if [ "$CIB_PUSH_NEEDED" = 'y' ]; then
+ pcs cluster cib-push "$CIB"
+ fi
+}
+
+function remove_ceilometer_alarm {
+ if pcs status | grep openstack-ceilometer-alarm; then
+ # Disable pacemaker resources for ceilometer-alarms
+ pcs resource disable openstack-ceilometer-alarm-evaluator
+ check_resource openstack-ceilometer-alarm-evaluator stopped 600
+ pcs resource delete openstack-ceilometer-alarm-evaluator
+ pcs resource disable openstack-ceilometer-alarm-notifier
+ check_resource openstack-ceilometer-alarm-notifier stopped 600
+ pcs resource delete openstack-ceilometer-alarm-notifier
+
+ # remove constraints
+ pcs constraint remove ceilometer-delay-then-ceilometer-alarm-evaluator-constraint
+ pcs constraint remove ceilometer-alarm-evaluator-with-ceilometer-delay-colocation
+ pcs constraint remove ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint
+ pcs constraint remove ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation
+ pcs constraint remove ceilometer-alarm-notifier-then-ceilometer-notification-constraint
+ pcs constraint remove ceilometer-notification-with-ceilometer-alarm-notifier-colocation
+
+ fi
+
+ # uninstall openstack-ceilometer-alarm package
+ yum -y remove openstack-ceilometer-alarm
+
+}
diff --git a/extraconfig/tasks/noop.yaml b/extraconfig/tasks/noop.yaml
deleted file mode 100644
index 0cff7469..00000000
--- a/extraconfig/tasks/noop.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-heat_template_version: 2014-10-16
-description: 'No-op task'
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- default: {}
- description: input values for the software deployments
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
new file mode 100755
index 00000000..7d794c97
--- /dev/null
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+set -eu
+
+function check_resource {
+
+ if [ "$#" -ne 3 ]; then
+ echo_error "ERROR: check_resource function expects 3 parameters, $# given"
+ exit 1
+ fi
+
+ service=$1
+ state=$2
+ timeout=$3
+
+ if [ "$state" = "stopped" ]; then
+ match_for_incomplete='Started'
+ else # started
+ match_for_incomplete='Stopped'
+ fi
+
+ nodes_local=$(pcs status | grep ^Online | sed 's/.*\[ \(.*\) \]/\1/g' | sed 's/ /\|/g')
+ if timeout -k 10 $timeout crm_resource --wait; then
+ node_states=$(pcs status --full | grep "$service" | grep -v Clone | { egrep "$nodes_local" || true; } )
+ if echo "$node_states" | grep -q "$match_for_incomplete"; then
+ echo_error "ERROR: cluster finished transition but $service was not in $state state, exiting."
+ exit 1
+ else
+ echo "$service has $state"
+ fi
+ else
+ echo_error "ERROR: cluster remained unstable for more than $timeout seconds, exiting."
+ exit 1
+ fi
+
+}
+
+function echo_error {
+    echo "$@" | tee /dev/fd/2
+}
+
+function systemctl_swift {
+ services=( openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \
+ openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \
+ openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy )
+ action=$1
+ case $action in
+ stop)
+ services=$(systemctl | grep swift | grep running | awk '{print $1}')
+ ;;
+ start)
+ enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml 'enable_swift_storage')
+ if [[ $enable_swift_storage != "true" ]]; then
+ services=( openstack-swift-proxy )
+ fi
+ ;;
+    *) services=() ;; # for safety, should never happen
+ esac
+ for S in ${services[@]}; do
+ systemctl $action $S
+ done
+}
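
check_resource takes a pacemaker resource name, a target state (started or stopped)
and a timeout in seconds, and aborts the calling script if the cluster has not
settled into that state in time. It is used throughout the upgrade scripts above,
for example:

    pcs resource disable rabbitmq
    check_resource rabbitmq stopped 600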
diff --git a/extraconfig/tasks/pacemaker_maintenance_mode.sh b/extraconfig/tasks/pacemaker_maintenance_mode.sh
new file mode 100755
index 00000000..ddc84ad2
--- /dev/null
+++ b/extraconfig/tasks/pacemaker_maintenance_mode.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+set -x
+
+# On initial deployment, the pacemaker service is disabled and is-active exits
+# 3 in that case, so allow this to fail gracefully.
+pacemaker_status=$(systemctl is-active pacemaker || :)
+
+if [ "$pacemaker_status" = "active" ]; then
+ pcs property set maintenance-mode=true
+fi
+
+# We need to reload haproxy in case the certificate changed because
+# puppet doesn't know the contents of the cert file. We shouldn't
+# reload it if it wasn't already active (such as if using external
+# loadbalancer or on initial deployment).
+haproxy_status=$(systemctl is-active haproxy || :)
+if [ "$haproxy_status" = "active" ]; then
+ systemctl reload haproxy
+fi
diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh
index 12201097..b2bdc55a 100755
--- a/extraconfig/tasks/pacemaker_resource_restart.sh
+++ b/extraconfig/tasks/pacemaker_resource_restart.sh
@@ -3,38 +3,6 @@
set -eux
pacemaker_status=$(systemctl is-active pacemaker)
-check_interval=3
-
-function check_resource {
-
- service=$1
- state=$2
- timeout=$3
- tstart=$(date +%s)
- tend=$(( $tstart + $timeout ))
-
- if [ "$state" = "stopped" ]; then
- match_for_incomplete='Started'
- else # started
- match_for_incomplete='Stopped'
- fi
-
- while (( $(date +%s) < $tend )); do
- node_states=$(pcs status --full | grep "$service" | grep -v Clone)
- if echo "$node_states" | grep -q "$match_for_incomplete"; then
- echo "$service not yet $state, sleeping $check_interval seconds."
- sleep $check_interval
- else
- echo "$service has $state"
- timeout -k 10 $timeout crm_resource --wait
- return
- fi
- done
-
- echo "$service never $state after $timeout seconds" | tee /dev/fd/2
- exit 1
-
-}
# Run if pacemaker is running, we're the bootstrap node,
# and we're updating the deployment (not creating).
diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.yaml
index 7de41d94..fbed9ce5 100644
--- a/extraconfig/tasks/post_puppet_pacemaker.yaml
+++ b/extraconfig/tasks/post_puppet_pacemaker.yaml
@@ -33,7 +33,11 @@ resources:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: {get_file: pacemaker_resource_restart.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: pacemaker_resource_restart.sh
ControllerPostPuppetRestartDeployment:
type: OS::Heat::SoftwareDeployments
diff --git a/extraconfig/tasks/pre_puppet_pacemaker.yaml b/extraconfig/tasks/pre_puppet_pacemaker.yaml
index 2cfe92a7..82546588 100644
--- a/extraconfig/tasks/pre_puppet_pacemaker.yaml
+++ b/extraconfig/tasks/pre_puppet_pacemaker.yaml
@@ -14,13 +14,8 @@ resources:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: |
- #!/bin/bash
- pacemaker_status=$(systemctl is-active pacemaker)
-
- if [ "$pacemaker_status" = "active" ]; then
- pcs property set maintenance-mode=true
- fi
+ config:
+ get_file: pacemaker_maintenance_mode.sh
ControllerPrePuppetMaintenanceModeDeployment:
type: OS::Heat::SoftwareDeployments
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index 364d7343..f3c3b4bf 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -53,12 +53,13 @@ neutron-l3-agent
neutron-metadata-agent
neutron-openvswitch-agent
neutron-server
-openstack-ceilometer-alarm-evaluator
-openstack-ceilometer-alarm-notifier
openstack-ceilometer-api
openstack-ceilometer-central
openstack-ceilometer-collector
openstack-ceilometer-notification
+openstack-aodh-evaluator
+openstack-aodh-notifier
+openstack-aodh-listener
openstack-cinder-api
openstack-cinder-scheduler
openstack-cinder-volume
@@ -107,6 +108,9 @@ openstack-nova-scheduler"
pcs -f $pacemaker_dumpfile constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false
fi
+ if ! pcs constraint order show | grep "promote redis-master then start openstack-aodh-evaluator-clone"; then
+ pcs -f $pacemaker_dumpfile constraint order promote redis-master then start openstack-aodh-evaluator-clone require-all=false
+ fi
# ensure neutron constraints https://review.openstack.org/#/c/229466
# remove ovs-cleanup after server and add openvswitch-agent instead
if pcs constraint order show | grep "start neutron-server-clone then start neutron-ovs-cleanup-clone"; then
@@ -128,6 +132,9 @@ openstack-nova-scheduler"
# mongod start timeout is higher, setting only stop timeout
pcs -f $pacemaker_dumpfile resource update mongod op start timeout=370s op stop timeout=200s
+ echo "Making sure rabbitmq has the notify=true meta parameter"
+ pcs -f $pacemaker_dumpfile resource update rabbitmq meta notify=true
+
echo "Applying new Pacemaker config"
if ! pcs cluster cib-push $pacemaker_dumpfile; then
echo "ERROR failed to apply new pacemaker config"
@@ -151,14 +158,14 @@ openstack-nova-scheduler"
kill $(ps ax | grep -e "radvd.*\.pid\.radvd" | awk '{print $1}') 2>/dev/null || :
else
echo "Upgrading openstack-puppet-modules"
- yum -y update openstack-puppet-modules
+ yum -q -y update openstack-puppet-modules
echo "Upgrading other packages is handled by config management tooling"
echo -n "true" > $heat_outputs_path.update_managed_packages
exit 0
fi
command=${command:-update}
-full_command="yum -y $command $command_arguments"
+full_command="yum -q -y $command $command_arguments"
echo "Running: $full_command"
result=$($full_command)
diff --git a/extraconfig/tasks/yum_update_noop.yaml b/extraconfig/tasks/yum_update_noop.yaml
new file mode 100644
index 00000000..b759d9c5
--- /dev/null
+++ b/extraconfig/tasks/yum_update_noop.yaml
@@ -0,0 +1,29 @@
+heat_template_version: 2014-10-16
+description: 'No-op yum update task'
+
+resources:
+
+ config:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ echo -n "false" > $heat_outputs_path.update_managed_packages
+ inputs:
+ - name: update_identifier
+ description: yum will only run for previously unused values of update_identifier
+ default: ''
+ - name: command
+ description: yum sub-command to run, defaults to "update"
+ default: update
+ - name: command_arguments
+ description: yum command arguments, defaults to ""
+ default: ''
+ outputs:
+ - name: update_managed_packages
+ description: boolean value indicating whether to upgrade managed packages
+
+outputs:
+ OS::stack_id:
+ value: {get_resource: config}
diff --git a/net-config-bond.yaml b/net-config-bond.yaml
index b624563f..0a162e77 100644
--- a/net-config-bond.yaml
+++ b/net-config-bond.yaml
@@ -4,6 +4,11 @@ description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge.
parameters:
+ BondInterfaceOvsOptions:
+ default: ''
+ description: The ovs_options string for the bond interface. Set things like
+ lacp=active and/or bond_mode=balance-slb using this option.
+ type: string
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
@@ -58,6 +63,7 @@ resources:
type: ovs_bond
name: bond1
use_dhcp: true
+ ovs_options: {get_param: BondInterfaceOvsOptions}
members:
# os-net-config translates nic1 => em1 (for example)
-
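
With the parameter in place, the bond options become tunable per deployment instead
of being hard-coded in the template. A minimal override, using one of the values
suggested in the parameter description:

    parameter_defaults:
      BondInterfaceOvsOptions: "bond_mode=balance-slb"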
diff --git a/network/config/bond-with-vlans/controller-v6.yaml b/network/config/bond-with-vlans/controller-v6.yaml
new file mode 100644
index 00000000..7869ebfc
--- /dev/null
+++ b/network/config/bond-with-vlans/controller-v6.yaml
@@ -0,0 +1,180 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to drive os-net-config with 2 bonded nics on a bridge
+ with VLANs attached for the controller role with IPv6 on the External
+ network. The IPv6 default route is on the External network, and the
+ IPv4 default route is on the Control Plane.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ BondInterfaceOvsOptions:
+ default: 'bond_mode=active-backup'
+ description: The ovs_options string for the bond interface. Set things like
+ lacp=active and/or bond_mode=balance-slb using this option.
+ type: string
+ ExternalNetworkVlanID:
+ default: 10
+ description: Vlan ID for the external network traffic.
+ type: number
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ TenantNetworkVlanID:
+ default: 50
+ description: Vlan ID for the tenant network traffic.
+ type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
+ ExternalInterfaceDefaultRoute:
+ default: '10.0.0.1'
+ description: default route for the external network
+ type: string
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: interface
+ name: nic1
+ use_dhcp: false
+ addresses:
+ -
+ ip_netmask:
+ list_join:
+ - '/'
+ - - {get_param: ControlPlaneIp}
+ - {get_param: ControlPlaneSubnetCidr}
+ routes:
+ -
+ ip_netmask: 169.254.169.254/32
+ next_hop: {get_param: EC2MetadataIp}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
+ -
+ type: ovs_bridge
+ name: {get_input: bridge_name}
+ dns_servers: {get_param: DnsServers}
+ members:
+ -
+ type: ovs_bond
+ name: bond1
+ ovs_options: {get_param: BondInterfaceOvsOptions}
+ members:
+ -
+ type: interface
+ name: nic2
+ primary: true
+ -
+ type: interface
+ name: nic3
+ -
+ type: vlan
+ device: bond1
+ vlan_id: {get_param: ExternalNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: ExternalIpSubnet}
+ routes:
+ -
+ default: true
+ next_hop: {get_param: ExternalInterfaceDefaultRoute}
+ -
+ type: vlan
+ device: bond1
+ vlan_id: {get_param: InternalApiNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: InternalApiIpSubnet}
+ -
+ type: vlan
+ device: bond1
+ vlan_id: {get_param: StorageNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ -
+ type: vlan
+ device: bond1
+ vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageMgmtIpSubnet}
+ -
+ type: vlan
+ device: bond1
+ vlan_id: {get_param: TenantNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: TenantIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: vlan
+ # device: bond1
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
diff --git a/network/config/multiple-nics/controller-v6.yaml b/network/config/multiple-nics/controller-v6.yaml
new file mode 100644
index 00000000..b69879fb
--- /dev/null
+++ b/network/config/multiple-nics/controller-v6.yaml
@@ -0,0 +1,174 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to drive os-net-config to configure multiple interfaces
+ for the controller role with IPv6 on the External network. The IPv6
+ default route is on the External network, and the IPv4 default route
+ is on the Control Plane.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ExternalNetworkVlanID:
+ default: 10
+ description: Vlan ID for the external network traffic.
+ type: number
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ TenantNetworkVlanID:
+ default: 50
+ description: Vlan ID for the tenant network traffic.
+ type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
+ ExternalInterfaceDefaultRoute:
+ default: '10.0.0.1'
+ description: default route for the external network
+ type: string
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers: {get_param: DnsServers}
+ addresses:
+ -
+ ip_netmask:
+ list_join:
+ - '/'
+ - - {get_param: ControlPlaneIp}
+ - {get_param: ControlPlaneSubnetCidr}
+ routes:
+ -
+ ip_netmask: 169.254.169.254/32
+ next_hop: {get_param: EC2MetadataIp}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
+ -
+ type: interface
+ name: nic2
+ use_dhcp: false
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ -
+ type: interface
+ name: nic3
+ use_dhcp: false
+ addresses:
+ -
+ ip_netmask: {get_param: StorageMgmtIpSubnet}
+ -
+ type: interface
+ name: nic4
+ use_dhcp: false
+ addresses:
+ -
+ ip_netmask: {get_param: InternalApiIpSubnet}
+ -
+ # Create a bridge which can also be used for VLAN-mode bridge mapping
+ type: ovs_bridge
+ name: br-tenant
+ use_dhcp: false
+ addresses:
+ -
+ ip_netmask: {get_param: TenantIpSubnet}
+ members:
+ -
+ type: interface
+ name: nic5
+ use_dhcp: false
+ # force the MAC address of the bridge to this interface
+ primary: true
+ -
+ type: ovs_bridge
+ name: {get_input: bridge_name}
+ dns_servers: {get_param: DnsServers}
+ use_dhcp: false
+ addresses:
+ -
+ ip_netmask: {get_param: ExternalIpSubnet}
+ routes:
+ -
+ default: true
+ next_hop: {get_param: ExternalInterfaceDefaultRoute}
+ members:
+ -
+ type: interface
+ name: nic6
+ # force the MAC address of the bridge to this interface
+ primary: true
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: interface
+ # name: nic7
+ # use_dhcp: false
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
diff --git a/network/config/single-nic-linux-bridge-vlans/README.md b/network/config/single-nic-linux-bridge-vlans/README.md
new file mode 100644
index 00000000..b7ded049
--- /dev/null
+++ b/network/config/single-nic-linux-bridge-vlans/README.md
@@ -0,0 +1,19 @@
+This directory contains Heat templates to help configure
+VLANs on a single NIC for each Overcloud role.
+
+Configuration
+-------------
+
+To make use of these templates, create a Heat environment that looks
+something like this:
+
+ resource\_registry:
+ OS::TripleO::BlockStorage::Net::SoftwareConfig: network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: network/config/single-nic-linux-bridge-vlans/compute.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: network/config/single-nic-linux-bridge-vlans/controller.yaml
+ OS::TripleO::ObjectStorage::Net::SoftwareConfig: network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
+ OS::TripleO::CephStorage::Net::SoftwareConfig: network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
+
+Or use this Heat environment file:
+
+ environments/net-single-nic-linux-bridge-with-vlans.yaml
diff --git a/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml b/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
new file mode 100644
index 00000000..a46d7e20
--- /dev/null
+++ b/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
@@ -0,0 +1,106 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to drive os-net-config to configure VLANs for the
+ ceph storage role.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: linux_bridge
+ name: br-storage
+ use_dhcp: false
+ dns_servers: {get_param: DnsServers}
+ addresses:
+ -
+ ip_netmask:
+ list_join:
+ - '/'
+ - - {get_param: ControlPlaneIp}
+ - {get_param: ControlPlaneSubnetCidr}
+ routes:
+ -
+ ip_netmask: 169.254.169.254/32
+ next_hop: {get_param: EC2MetadataIp}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
+ members:
+ -
+ type: interface
+ name: nic1
+ # force the MAC address of the bridge to this interface
+ primary: true
+ -
+ type: vlan
+ vlan_id: {get_param: StorageNetworkVlanID}
+ device: br-storage
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ device: br-storage
+ addresses:
+ -
+ ip_netmask: {get_param: StorageMgmtIpSubnet}
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
diff --git a/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml b/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml
new file mode 100644
index 00000000..08613a3b
--- /dev/null
+++ b/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml
@@ -0,0 +1,117 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to drive os-net-config to configure VLANs for the
+ cinder storage role.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: linux_bridge
+ name: br-storage
+ use_dhcp: false
+ dns_servers: {get_param: DnsServers}
+ addresses:
+ -
+ ip_netmask:
+ list_join:
+ - '/'
+ - - {get_param: ControlPlaneIp}
+ - {get_param: ControlPlaneSubnetCidr}
+ routes:
+ -
+ ip_netmask: 169.254.169.254/32
+ next_hop: {get_param: EC2MetadataIp}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
+ members:
+ -
+ type: interface
+ name: nic1
+ # force the MAC address of the bridge to this interface
+ primary: true
+ -
+ type: vlan
+ vlan_id: {get_param: InternalApiNetworkVlanID}
+ device: br-storage
+ addresses:
+ -
+ ip_netmask: {get_param: InternalApiIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageNetworkVlanID}
+ device: br-storage
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ device: br-storage
+ addresses:
+ -
+ ip_netmask: {get_param: StorageMgmtIpSubnet}
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
diff --git a/network/config/single-nic-linux-bridge-vlans/compute.yaml b/network/config/single-nic-linux-bridge-vlans/compute.yaml
new file mode 100644
index 00000000..b6522c67
--- /dev/null
+++ b/network/config/single-nic-linux-bridge-vlans/compute.yaml
@@ -0,0 +1,118 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to drive os-net-config to configure VLANs for the
+ compute role.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ TenantNetworkVlanID:
+ default: 50
+ description: Vlan ID for the tenant network traffic.
+ type: number
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: linux_bridge
+ name: {get_input: bridge_name}
+ use_dhcp: false
+ dns_servers: {get_param: DnsServers}
+ addresses:
+ -
+ ip_netmask:
+ list_join:
+ - '/'
+ - - {get_param: ControlPlaneIp}
+ - {get_param: ControlPlaneSubnetCidr}
+ routes:
+ -
+ ip_netmask: 169.254.169.254/32
+ next_hop: {get_param: EC2MetadataIp}
+ -
+ ip_netmask: 0.0.0.0/0
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
+ members:
+ -
+ type: interface
+ name: {get_input: interface_name}
+ # force the MAC address of the bridge to this interface
+ primary: true
+ -
+ type: vlan
+ vlan_id: {get_param: InternalApiNetworkVlanID}
+ device: {get_input: bridge_name}
+ addresses:
+ -
+ ip_netmask: {get_param: InternalApiIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageNetworkVlanID}
+ device: {get_input: bridge_name}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: TenantNetworkVlanID}
+ device: {get_input: bridge_name}
+ addresses:
+ -
+ ip_netmask: {get_param: TenantIpSubnet}
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
diff --git a/network/config/single-nic-linux-bridge-vlans/controller.yaml b/network/config/single-nic-linux-bridge-vlans/controller.yaml
new file mode 100644
index 00000000..72105481
--- /dev/null
+++ b/network/config/single-nic-linux-bridge-vlans/controller.yaml
@@ -0,0 +1,149 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to drive os-net-config to configure VLANs for the
+ controller role.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ExternalNetworkVlanID:
+ default: 10
+ description: Vlan ID for the external network traffic.
+ type: number
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ TenantNetworkVlanID:
+ default: 50
+ description: Vlan ID for the tenant network traffic.
+ type: number
+ ExternalInterfaceDefaultRoute:
+ default: '10.0.0.1'
+ description: default route for the external network
+ type: string
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: linux_bridge
+ name: {get_input: bridge_name}
+ use_dhcp: false
+ dns_servers: {get_param: DnsServers}
+ addresses:
+ -
+ ip_netmask:
+ list_join:
+ - '/'
+ - - {get_param: ControlPlaneIp}
+ - {get_param: ControlPlaneSubnetCidr}
+ routes:
+ -
+ ip_netmask: 169.254.169.254/32
+ next_hop: {get_param: EC2MetadataIp}
+ -
+ ip_netmask: 0.0.0.0/0
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
+ members:
+ -
+ type: interface
+ name: {get_input: interface_name}
+ # force the MAC address of the bridge to this interface
+ primary: true
+ -
+ type: vlan
+ vlan_id: {get_param: ExternalNetworkVlanID}
+ device: {get_input: bridge_name}
+ addresses:
+ -
+ ip_netmask: {get_param: ExternalIpSubnet}
+ routes:
+ -
+ ip_netmask: 0.0.0.0/0
+ default: true
+ next_hop: {get_param: ExternalInterfaceDefaultRoute}
+ -
+ type: vlan
+ vlan_id: {get_param: InternalApiNetworkVlanID}
+ device: {get_input: bridge_name}
+ addresses:
+ -
+ ip_netmask: {get_param: InternalApiIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageNetworkVlanID}
+ device: {get_input: bridge_name}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ device: {get_input: bridge_name}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageMgmtIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: TenantNetworkVlanID}
+ device: {get_input: bridge_name}
+ addresses:
+ -
+ ip_netmask: {get_param: TenantIpSubnet}
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
diff --git a/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml b/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
new file mode 100644
index 00000000..962b9890
--- /dev/null
+++ b/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
@@ -0,0 +1,117 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to drive os-net-config to configure VLANs for the
+ swift storage role.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: linux_bridge
+ name: br-storage
+ use_dhcp: false
+ dns_servers: {get_param: DnsServers}
+ addresses:
+ -
+ ip_netmask:
+ list_join:
+ - '/'
+ - - {get_param: ControlPlaneIp}
+ - {get_param: ControlPlaneSubnetCidr}
+ routes:
+ -
+ ip_netmask: 169.254.169.254/32
+ next_hop: {get_param: EC2MetadataIp}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
+ members:
+ -
+ type: interface
+ name: nic1
+ # force the MAC address of the bridge to this interface
+ primary: true
+ -
+ type: vlan
+ vlan_id: {get_param: InternalApiNetworkVlanID}
+ device: br-storage
+ addresses:
+ -
+ ip_netmask: {get_param: InternalApiIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageNetworkVlanID}
+ device: br-storage
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ device: br-storage
+ addresses:
+ -
+ ip_netmask: {get_param: StorageMgmtIpSubnet}
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
diff --git a/network/config/single-nic-vlans/controller-v6.yaml b/network/config/single-nic-vlans/controller-v6.yaml
new file mode 100644
index 00000000..472e539d
--- /dev/null
+++ b/network/config/single-nic-vlans/controller-v6.yaml
@@ -0,0 +1,158 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to drive os-net-config to configure VLANs for the
+ controller role with IPv6 on the External network. The IPv6 default
+ route is on the External network, and the IPv4 default route is on
+ the Control Plane.
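+# Illustrative note (not part of the committed template logic): the
+# 'default: true' route flag used below is presumably resolved by
+# os-net-config according to the next hop's address family, so the
+# effective default routes on a controller are expected to look roughly like:
+#   0.0.0.0/0 via ControlPlaneDefaultRoute        (IPv4, on the ctlplane bridge)
+#   ::/0      via ExternalInterfaceDefaultRoute   (IPv6, on the External VLAN)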
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ExternalNetworkVlanID:
+ default: 10
+ description: Vlan ID for the external network traffic.
+ type: number
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ TenantNetworkVlanID:
+ default: 50
+ description: Vlan ID for the tenant network traffic.
+ type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
+ ExternalInterfaceDefaultRoute:
+ default: '10.0.0.1'
+ description: default route for the external network
+ type: string
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: ovs_bridge
+ name: {get_input: bridge_name}
+ use_dhcp: false
+ dns_servers: {get_param: DnsServers}
+ addresses:
+ -
+ ip_netmask:
+ list_join:
+ - '/'
+ - - {get_param: ControlPlaneIp}
+ - {get_param: ControlPlaneSubnetCidr}
+ routes:
+ -
+ ip_netmask: 169.254.169.254/32
+ next_hop: {get_param: EC2MetadataIp}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
+ members:
+ -
+ type: interface
+ name: nic1
+ # force the MAC address of the bridge to this interface
+ primary: true
+ -
+ type: vlan
+ vlan_id: {get_param: ExternalNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: ExternalIpSubnet}
+ routes:
+ -
+ default: true
+ next_hop: {get_param: ExternalInterfaceDefaultRoute}
+ -
+ type: vlan
+ vlan_id: {get_param: InternalApiNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: InternalApiIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageMgmtIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: TenantNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: TenantIpSubnet}
+ #- # Uncomment when including environments/network-management.yaml
+ # type: vlan
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
diff --git a/network/config/single-nic-vlans/controller.yaml b/network/config/single-nic-vlans/controller.yaml
index 3b22b36b..a5a0745d 100644
--- a/network/config/single-nic-vlans/controller.yaml
+++ b/network/config/single-nic-vlans/controller.yaml
@@ -111,7 +111,7 @@ resources:
ip_netmask: {get_param: ExternalIpSubnet}
routes:
-
- ip_netmask: 0.0.0.0/0
+ default: true
next_hop: {get_param: ExternalInterfaceDefaultRoute}
-
type: vlan
diff --git a/network/endpoints/build_endpoint_map.py b/network/endpoints/build_endpoint_map.py
new file mode 100755
index 00000000..d8cdee3d
--- /dev/null
+++ b/network/endpoints/build_endpoint_map.py
@@ -0,0 +1,277 @@
+#!/usr/bin/env python
+
+"""
+Generate the endpoint_map.yaml template from data in the endpoint_data.yaml
+file.
+
+By default the script operates on the files in its own directory, but
+different files can optionally be specified on the command line.
+
+The --check option verifies that the current output file is up-to-date with the
+latest data in the input file. The script exits with status code 2 if a
+mismatch is detected.
+"""
+
+from __future__ import print_function
+
+
+__all__ = ['load_endpoint_data', 'generate_endpoint_map_template',
+ 'write_template', 'build_endpoint_map', 'check_up_to_date']
+
+
+import collections
+import copy
+import itertools
+import os
+import sys
+import yaml
+
+
+(IN_FILE, OUT_FILE) = ('endpoint_data.yaml', 'endpoint_map.yaml')
+
+SUBST = (SUBST_IP_ADDRESS, SUBST_CLOUDNAME) = ('IP_ADDRESS', 'CLOUDNAME')
+PARAMS = (PARAM_CLOUDNAME, PARAM_ENDPOINTMAP) = ('CloudName', 'EndpointMap')
+FIELDS = (F_PORT, F_PROTOCOL, F_HOST) = ('port', 'protocol', 'host')
+
+ENDPOINT_TYPES = frozenset(['Internal', 'Public', 'Admin'])
+
+
+def get_file(default_fn, override=None, writable=False):
+ if override == '-':
+ if writable:
+ return sys.stdout
+ else:
+ return sys.stdin
+
+ if override is not None:
+ filename = override
+ else:
+ filename = os.path.join(os.path.dirname(__file__), default_fn)
+
+ return open(filename, 'w' if writable else 'r')
+
+
+def load_endpoint_data(infile=None):
+ with get_file(IN_FILE, infile) as f:
+ return yaml.safe_load(f)
+
+
+def vip_param_name(endpoint_type_defn):
+ return endpoint_type_defn['vip_param'] + 'VirtualIP'
+
+
+def vip_param_names(config):
+ def ep_types(svc):
+ return (v for k, v in svc.items() if k in ENDPOINT_TYPES or not k)
+
+ return set(vip_param_name(defn)
+ for svc in config.values() for defn in ep_types(svc))
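+# Illustrative example: the Keystone definitions in endpoint_data.yaml
+# contribute parameter names such as 'KeystonePublicApiVirtualIP',
+# 'KeystoneAdminApiVirtualIP' and 'PublicVirtualIP'.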
+
+
+def endpoint_map_default(config):
+ def map_item(ep_name, ep_type, svc):
+ values = collections.OrderedDict([
+ (F_PROTOCOL, svc.get(F_PROTOCOL, 'http')),
+ (F_PORT, str(svc[ep_type].get(F_PORT, svc[F_PORT]))),
+ (F_HOST, SUBST_IP_ADDRESS),
+ ])
+ return ep_name + ep_type, values
+
+ return collections.OrderedDict(map_item(ep_name, ep_type, svc)
+ for ep_name, svc in sorted(config.items())
+ for ep_type in sorted(set(svc) &
+ ENDPOINT_TYPES))
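+# Illustrative example: the Glance entry in endpoint_data.yaml (port 9292,
+# no protocol override) expands to items like
+#   GlanceAdmin: {protocol: http, port: '9292', host: IP_ADDRESS}
+# which together form the default value of the EndpointMap parameter.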
+
+
+def make_parameter(ptype, default, description=None):
+ param = collections.OrderedDict([('type', ptype), ('default', default)])
+ if description is not None:
+ param['description'] = description
+ return param
+
+
+def template_parameters(config):
+ params = collections.OrderedDict((n, make_parameter('string', ''))
+ for n in sorted(vip_param_names(config)))
+
+ params[PARAM_ENDPOINTMAP] = make_parameter('json',
+ endpoint_map_default(config),
+ 'Mapping of service endpoint '
+ '-> protocol. Typically set '
+ 'via parameter_defaults in the '
+ 'resource registry.')
+
+ params[PARAM_CLOUDNAME] = make_parameter('string',
+ 'overcloud',
+ 'The DNS name of this cloud. '
+ 'e.g. ci-overcloud.tripleo.org')
+ return params
+
+
+def template_output_definition(endpoint_name,
+ endpoint_variant,
+ endpoint_type,
+ vip_param,
+ uri_suffix=None,
+ name_override=None):
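+ # A sketch of the mapping value returned for one endpoint (cf. the
+ # generated endpoint_map.yaml): host/port/protocol are looked up in the
+ # EndpointMap parameter, and 'uri'/'uri_no_suffix' are list_join'ed from
+ # protocol + '://' + host + ':' + port (+ optional suffix), with
+ # IP_ADDRESS/CLOUDNAME substituted into the host template.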
+ def extract_field(field):
+ assert field in FIELDS
+ return {'get_param': ['EndpointMap',
+ endpoint_name + endpoint_type,
+ copy.copy(field)]}
+
+ port = extract_field(F_PORT)
+ protocol = extract_field(F_PROTOCOL)
+ host = {
+ 'str_replace': collections.OrderedDict([
+ ('template', extract_field(F_HOST)),
+ ('params', {
+ SUBST_IP_ADDRESS: {'get_param': vip_param},
+ SUBST_CLOUDNAME: {'get_param': PARAM_CLOUDNAME},
+ })
+ ])
+ }
+ uri_fields = [protocol, '://', copy.deepcopy(host), ':', port]
+ uri_fields_suffix = (copy.deepcopy(uri_fields) +
+ ([uri_suffix] if uri_suffix is not None else []))
+
+ name = name_override if name_override is not None else (endpoint_name +
+ endpoint_variant +
+ endpoint_type)
+
+ return name, {
+ 'host': host,
+ 'port': extract_field('port'),
+ 'protocol': extract_field('protocol'),
+ 'uri': {
+ 'list_join': ['', uri_fields_suffix]
+ },
+ 'uri_no_suffix': {
+ 'list_join': ['', uri_fields]
+ },
+ }
+
+
+def template_endpoint_items(config):
+ def get_svc_endpoints(ep_name, svc):
+ for ep_type in set(svc) & ENDPOINT_TYPES:
+ defn = svc[ep_type]
+ for variant, suffix in defn.get('uri_suffixes',
+ {'': None}).items():
+ name_override = defn.get('names', {}).get(variant)
+ yield template_output_definition(ep_name, variant, ep_type,
+ vip_param_name(defn),
+ suffix,
+ name_override)
+
+ return itertools.chain.from_iterable(sorted(get_svc_endpoints(ep_name,
+ svc))
+ for (ep_name,
+ svc) in sorted(config.items()))
+
+
+def generate_endpoint_map_template(config):
+ return collections.OrderedDict([
+ ('heat_template_version', '2015-04-30'),
+ ('description', 'A map of OpenStack endpoints. Since the endpoints '
+ 'are URLs, we need to have brackets around IPv6 IP addresses. The '
+ 'inputs to these parameters come from net_ip_uri_map, which will '
+ 'include these brackets in IPv6 addresses.'),
+ ('parameters', template_parameters(config)),
+ ('outputs', {
+ 'endpoint_map': {
+ 'value':
+ collections.OrderedDict(template_endpoint_items(config))
+ }
+ }),
+ ])
+
+
+autogen_warning = """### DO NOT MODIFY THIS FILE
+### This file is automatically generated from endpoint_data.yaml
+### by the script build_endpoint_map.py
+
+"""
+
+
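+# Dump OrderedDicts as ordinary YAML mappings, so the generated file keeps
+# the key order chosen by the generator functions above.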
+class TemplateDumper(yaml.SafeDumper):
+ def represent_ordered_dict(self, data):
+ return self.represent_dict(data.items())
+
+
+TemplateDumper.add_representer(collections.OrderedDict,
+ TemplateDumper.represent_ordered_dict)
+
+
+def write_template(template, filename=None):
+ with get_file(OUT_FILE, filename, writable=True) as f:
+ f.write(autogen_warning)
+ yaml.dump(template, f, TemplateDumper, width=68)
+
+
+def read_template(filename=None):
+ with get_file(OUT_FILE, filename) as f:
+ return yaml.safe_load(f)
+
+
+def build_endpoint_map(output_filename=None, input_filename=None):
+ if output_filename is not None and output_filename == input_filename:
+ raise Exception('Cannot read from and write to the same file')
+ config = load_endpoint_data(input_filename)
+ template = generate_endpoint_map_template(config)
+ write_template(template, output_filename)
+
+
+def check_up_to_date(output_filename=None, input_filename=None):
+ if output_filename is not None and output_filename == input_filename:
+ raise Exception('Input and output filenames must be different')
+ config = load_endpoint_data(input_filename)
+ template = generate_endpoint_map_template(config)
+ existing_template = read_template(output_filename)
+ return existing_template == template
+
+
+def get_options():
+ from optparse import OptionParser
+
+ parser = OptionParser('usage: %prog'
+ ' [-i INPUT_FILE] [-o OUTPUT_FILE] [--check]',
+ description=__doc__)
+ parser.add_option('-i', '--input', dest='input_file', action='store',
+ default=None,
+ help='Specify a different endpoint data file')
+ parser.add_option('-o', '--output', dest='output_file', action='store',
+ default=None,
+ help='Specify a different endpoint map template file')
+ parser.add_option('-c', '--check', dest='check', action='store_true',
+ default=False, help='Check that the output file is '
+ 'up to date with the data')
+ parser.add_option('-d', '--debug', dest='debug', action='store_true',
+ default=False, help='Print stack traces on error')
+
+ return parser.parse_args()
+
+
+def main():
+ options, args = get_options()
+ if args:
+ print('Warning: ignoring positional args: %s' % ' '.join(args),
+ file=sys.stderr)
+
+ try:
+ if options.check:
+ if not check_up_to_date(options.output_file, options.input_file):
+ print('EndpointMap template does not match input data',
+ file=sys.stderr)
+ sys.exit(2)
+ else:
+ build_endpoint_map(options.output_file, options.input_file)
+ except Exception as exc:
+ if options.debug:
+ raise
+ print('%s: %s' % (type(exc).__name__, str(exc)), file=sys.stderr)
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/network/endpoints/endpoint.yaml b/network/endpoints/endpoint.yaml
deleted file mode 100644
index 6246cfdd..00000000
--- a/network/endpoints/endpoint.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
- OpenStack Endpoint
-
-parameters:
- EndpointName:
- type: string
- description: The name of the Endpoint being evaluated
- EndpointMap:
- type: json
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- IP:
- type: string
- description: The IP address of the Neutron Port that the endpoint is attached to
- UriSuffix:
- type: string
- default: ''
- description: A suffix attached to the URL
- CloudName:
- type: string
- default: ''
- description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
-
-outputs:
- endpoint:
- description: >
- A Hash containing a mapping of service endpoints to ports, protocols, uris
- assigned IPs, and hostnames for a specific endpoint
- value:
- port: {get_param: [EndpointMap, {get_param: EndpointName }, port] }
- protocol: {get_param: [EndpointMap, {get_param: EndpointName }, protocol] }
- ip: {get_param: IP}
- host:
- str_replace:
- template: {get_param: [EndpointMap, {get_param: EndpointName }, host]}
- params: {IP_ADDRESS: {get_param: IP}, CLOUDNAME: {get_param: CloudName}}
- uri:
- list_join:
- - ''
- - - {get_param: [EndpointMap, {get_param: EndpointName }, protocol] }
- - '://'
- - str_replace:
- template: {get_param: [EndpointMap, {get_param: EndpointName }, host]}
- params: {IP_ADDRESS: {get_param: IP}, CLOUDNAME: {get_param: CloudName }}
- - ':'
- - {get_param: [EndpointMap, {get_param: EndpointName }, port] }
- - {get_param: UriSuffix }
- uri_no_suffix:
- list_join:
- - ''
- - - {get_param: [EndpointMap, {get_param: EndpointName }, protocol] }
- - '://'
- - str_replace:
- template: {get_param: [EndpointMap, {get_param: EndpointName }, host]}
- params: {IP_ADDRESS: {get_param: IP}, CLOUDNAME: {get_param: CloudName} }
- - ':'
- - {get_param: [EndpointMap, {get_param: EndpointName }, port] }
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
new file mode 100644
index 00000000..bd4e2281
--- /dev/null
+++ b/network/endpoints/endpoint_data.yaml
@@ -0,0 +1,192 @@
+# Data in this file is used to generate the endpoint_map.yaml template.
+# Run the script build_endpoint_map.py to regenerate the file.
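+#
+# A rough summary of the format, as consumed by build_endpoint_map.py: each
+# top-level key is a service, and its sub-keys are endpoint types (Internal,
+# Public, Admin) or '' for a typeless entry such as Mysql. For every type,
+# 'vip_param' + 'VirtualIP' names the Heat parameter expected to carry the
+# corresponding VIP; 'port' may be set per service or overridden per type;
+# 'uri_suffixes' maps a variant ('', V2, EC2, ...) to a URI suffix; and
+# 'names' can override the generated endpoint name for a variant.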
+
+Aodh:
+ Internal:
+ vip_param: AodhApi
+ Public:
+ vip_param: Public
+ Admin:
+ vip_param: AodhApi
+ port: 8042
+
+Ceilometer:
+ Internal:
+ vip_param: CeilometerApi
+ Public:
+ vip_param: Public
+ Admin:
+ vip_param: CeilometerApi
+ port: 8777
+
+Gnocchi:
+ Internal:
+ vip_param: GnocchiApi
+ Public:
+ vip_param: Public
+ Admin:
+ vip_param: GnocchiApi
+ port: 8041
+
+Cinder:
+ Internal:
+ vip_param: CinderApi
+ uri_suffixes:
+ '': /v1/%(tenant_id)s
+ V2: /v2/%(tenant_id)s
+ Public:
+ vip_param: Public
+ uri_suffixes:
+ '': /v1/%(tenant_id)s
+ V2: /v2/%(tenant_id)s
+ Admin:
+ vip_param: CinderApi
+ uri_suffixes:
+ '': /v1/%(tenant_id)s
+ V2: /v2/%(tenant_id)s
+ port: 8776
+
+Glance:
+ Internal:
+ vip_param: GlanceApi
+ Public:
+ vip_param: Public
+ Admin:
+ vip_param: GlanceApi
+ port: 9292
+
+GlanceRegistry:
+ Internal:
+ vip_param: GlanceRegistry
+ port: 9191
+
+Mysql:
+ '':
+ vip_param: Mysql
+
+Heat:
+ Internal:
+ vip_param: HeatApi
+ uri_suffixes:
+ '': /v1/%(tenant_id)s
+ Public:
+ vip_param: Public
+ uri_suffixes:
+ '': /v1/%(tenant_id)s
+ Admin:
+ vip_param: HeatApi
+ uri_suffixes:
+ '': /v1/%(tenant_id)s
+ port: 8004
+
+Horizon:
+ Public:
+ vip_param: Public
+ uri_suffixes:
+ '': /dashboard
+ port: 80
+
+# TODO(ayoung): V3 is a temporary fix. Endpoints should be versionless.
+# Required for https://bugs.launchpad.net/puppet-nova/+bug/1542486
+Keystone:
+ Internal:
+ vip_param: KeystonePublicApi
+ uri_suffixes:
+ '': /v2.0
+ EC2: /v2.0/ec2tokens
+ V3: /v3
+ names:
+ EC2: KeystoneEC2
+ Public:
+ vip_param: Public
+ uri_suffixes:
+ '': /v2.0
+ V3: /v3
+ Admin:
+ vip_param: KeystoneAdminApi
+ uri_suffixes:
+ '': /v2.0
+ V3: /v3
+ port: 35357
+ port: 5000
+
+Neutron:
+ Internal:
+ vip_param: NeutronApi
+ Public:
+ vip_param: Public
+ Admin:
+ vip_param: NeutronApi
+ port: 9696
+
+Nova:
+ Internal:
+ vip_param: NovaApi
+ uri_suffixes:
+ '': /v2.1/%(tenant_id)s
+ Public:
+ vip_param: Public
+ uri_suffixes:
+ '': /v2.1/%(tenant_id)s
+ Admin:
+ vip_param: NovaApi
+ uri_suffixes:
+ '': /v2.1/%(tenant_id)s
+ port: 8774
+
+NovaEC2:
+ Internal:
+ vip_param: NovaApi
+ uri_suffixes:
+ '': /services/Cloud
+ Public:
+ vip_param: Public
+ uri_suffixes:
+ '': /services/Cloud
+ Admin:
+ vip_param: NovaApi
+ uri_suffixes:
+ '': /services/Admin
+ port: 8773
+
+NovaVNCProxy:
+ Internal:
+ vip_param: NovaApi
+ Public:
+ vip_param: Public
+ Admin:
+ vip_param: NovaApi
+ port: 6080
+
+Swift:
+ Internal:
+ vip_param: SwiftProxy
+ uri_suffixes:
+ '': /v1/AUTH_%(tenant_id)s
+ S3:
+ Public:
+ vip_param: Public
+ uri_suffixes:
+ '': /v1/AUTH_%(tenant_id)s
+ S3:
+ Admin:
+ vip_param: SwiftProxy
+ uri_suffixes:
+ '':
+ S3:
+ port: 8080
+
+Sahara:
+ Internal:
+ vip_param: SaharaApi
+ uri_suffixes:
+ '': /v1.1/%(tenant_id)s
+ Public:
+ vip_param: SaharaApi
+ uri_suffixes:
+ '': /v1.1/%(tenant_id)s
+ Admin:
+ vip_param: SaharaApi
+ uri_suffixes:
+ '': /v1.1/%(tenant_id)s
+ port: 8386
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index f6063c0e..f9a8b83c 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -1,484 +1,2042 @@
-heat_template_version: 2015-04-30
-
-description: >
- A Map of OpenStack Endpoints
+### DO NOT MODIFY THIS FILE
+### This file is automatically generated from endpoint_data.yaml
+### by the script build_endpoint_map.py
+heat_template_version: '2015-04-30'
+description: A map of OpenStack endpoints. Since the endpoints are URLs,
+ we need to have brackets around IPv6 IP addresses. The inputs to these
+ parameters come from net_ip_uri_map, which will include these brackets
+ in IPv6 addresses.
parameters:
- CeilometerApiVirtualIP:
- type: string
- default: ''
- CinderApiVirtualIP:
- type: string
- default: ''
- GlanceApiVirtualIP:
- type: string
- default: ''
- GlanceRegistryVirtualIP:
- type: string
- default: ''
- HeatApiVirtualIP:
- type: string
- default: ''
- KeystoneAdminApiVirtualIP:
- type: string
- default: ''
- KeystonePublicApiVirtualIP:
- type: string
- default: ''
- MysqlVirtualIP:
- type: string
- default: ''
- NeutronApiVirtualIP:
- type: string
- default: ''
- NovaApiVirtualIP:
- type: string
- default: ''
- PublicVirtualIP:
- type: string
- default: ''
- SwiftProxyVirtualIP:
- type: string
- default: ''
- SaharaApiVirtualIP:
- type: string
- default: ''
+ AodhApiVirtualIP: {type: string, default: ''}
+ CeilometerApiVirtualIP: {type: string, default: ''}
+ CinderApiVirtualIP: {type: string, default: ''}
+ GlanceApiVirtualIP: {type: string, default: ''}
+ GlanceRegistryVirtualIP: {type: string, default: ''}
+ GnocchiApiVirtualIP: {type: string, default: ''}
+ HeatApiVirtualIP: {type: string, default: ''}
+ KeystoneAdminApiVirtualIP: {type: string, default: ''}
+ KeystonePublicApiVirtualIP: {type: string, default: ''}
+ MysqlVirtualIP: {type: string, default: ''}
+ NeutronApiVirtualIP: {type: string, default: ''}
+ NovaApiVirtualIP: {type: string, default: ''}
+ PublicVirtualIP: {type: string, default: ''}
+ SaharaApiVirtualIP: {type: string, default: ''}
+ SwiftProxyVirtualIP: {type: string, default: ''}
EndpointMap:
type: json
default:
- CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
- CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
- CeilometerPublic: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
- CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
- CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
- CinderPublic: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
- GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
- GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
- GlancePublic: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
- GlanceRegistryAdmin: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
- GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
- GlanceRegistryPublic: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
- HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
- HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
- HeatPublic: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
- HorizonPublic: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
- KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
- KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
- KeystonePublic: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
- NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
- NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
- NeutronPublic: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
- NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
- NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
- NovaPublic: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
- NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
- NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
- NovaEC2Public: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
- SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
- SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
- SwiftPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
- SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
- SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
- SaharaPublic: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ AodhAdmin: {protocol: http, port: '8042', host: IP_ADDRESS}
+ AodhInternal: {protocol: http, port: '8042', host: IP_ADDRESS}
+ AodhPublic: {protocol: http, port: '8042', host: IP_ADDRESS}
+ CeilometerAdmin: {protocol: http, port: '8777', host: IP_ADDRESS}
+ CeilometerInternal: {protocol: http, port: '8777', host: IP_ADDRESS}
+ CeilometerPublic: {protocol: http, port: '8777', host: IP_ADDRESS}
+ CinderAdmin: {protocol: http, port: '8776', host: IP_ADDRESS}
+ CinderInternal: {protocol: http, port: '8776', host: IP_ADDRESS}
+ CinderPublic: {protocol: http, port: '8776', host: IP_ADDRESS}
+ GlanceAdmin: {protocol: http, port: '9292', host: IP_ADDRESS}
+ GlanceInternal: {protocol: http, port: '9292', host: IP_ADDRESS}
+ GlancePublic: {protocol: http, port: '9292', host: IP_ADDRESS}
+ GlanceRegistryInternal: {protocol: http, port: '9191', host: IP_ADDRESS}
+ GnocchiAdmin: {protocol: http, port: '8041', host: IP_ADDRESS}
+ GnocchiInternal: {protocol: http, port: '8041', host: IP_ADDRESS}
+ GnocchiPublic: {protocol: http, port: '8041', host: IP_ADDRESS}
+ HeatAdmin: {protocol: http, port: '8004', host: IP_ADDRESS}
+ HeatInternal: {protocol: http, port: '8004', host: IP_ADDRESS}
+ HeatPublic: {protocol: http, port: '8004', host: IP_ADDRESS}
+ HorizonPublic: {protocol: http, port: '80', host: IP_ADDRESS}
+ KeystoneAdmin: {protocol: http, port: '35357', host: IP_ADDRESS}
+ KeystoneInternal: {protocol: http, port: '5000', host: IP_ADDRESS}
+ KeystonePublic: {protocol: http, port: '5000', host: IP_ADDRESS}
+ NeutronAdmin: {protocol: http, port: '9696', host: IP_ADDRESS}
+ NeutronInternal: {protocol: http, port: '9696', host: IP_ADDRESS}
+ NeutronPublic: {protocol: http, port: '9696', host: IP_ADDRESS}
+ NovaAdmin: {protocol: http, port: '8774', host: IP_ADDRESS}
+ NovaInternal: {protocol: http, port: '8774', host: IP_ADDRESS}
+ NovaPublic: {protocol: http, port: '8774', host: IP_ADDRESS}
+ NovaEC2Admin: {protocol: http, port: '8773', host: IP_ADDRESS}
+ NovaEC2Internal: {protocol: http, port: '8773', host: IP_ADDRESS}
+ NovaEC2Public: {protocol: http, port: '8773', host: IP_ADDRESS}
+ NovaVNCProxyAdmin: {protocol: http, port: '6080', host: IP_ADDRESS}
+ NovaVNCProxyInternal: {protocol: http, port: '6080', host: IP_ADDRESS}
+ NovaVNCProxyPublic: {protocol: http, port: '6080', host: IP_ADDRESS}
+ SaharaAdmin: {protocol: http, port: '8386', host: IP_ADDRESS}
+ SaharaInternal: {protocol: http, port: '8386', host: IP_ADDRESS}
+ SaharaPublic: {protocol: http, port: '8386', host: IP_ADDRESS}
+ SwiftAdmin: {protocol: http, port: '8080', host: IP_ADDRESS}
+ SwiftInternal: {protocol: http, port: '8080', host: IP_ADDRESS}
+ SwiftPublic: {protocol: http, port: '8080', host: IP_ADDRESS}
description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- CloudName:
- type: string
- default: overcloud
- description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
-
-resources:
-
- CeilometerInternal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: CeilometerInternal
- EndpointMap: { get_param: EndpointMap }
- CloudName: {get_param: CloudName}
- IP: {get_param: CeilometerApiVirtualIP}
- CeilometerPublic:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: CeilometerPublic
- EndpointMap: { get_param: EndpointMap }
- CloudName: {get_param: CloudName}
- IP: {get_param: PublicVirtualIP}
- CeilometerAdmin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: CeilometerAdmin
- EndpointMap: { get_param: EndpointMap }
- CloudName: {get_param: CloudName}
- IP: {get_param: CeilometerApiVirtualIP}
-
- CinderInternal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: CinderInternal
- EndpointMap: { get_param: EndpointMap }
- CloudName: {get_param: CloudName}
- IP: {get_param: CinderApiVirtualIP}
- UriSuffix: '/v1/%(tenant_id)s'
- CinderPublic:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: CinderPublic
- EndpointMap: { get_param: EndpointMap }
- CloudName: {get_param: CloudName}
- IP: {get_param: PublicVirtualIP}
- UriSuffix: '/v1/%(tenant_id)s'
- CinderAdmin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: CinderAdmin
- EndpointMap: { get_param: EndpointMap }
- CloudName: {get_param: CloudName}
- IP: {get_param: CinderApiVirtualIP}
- UriSuffix: '/v1/%(tenant_id)s'
-
- CinderV2Internal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: CinderInternal
- EndpointMap: { get_param: EndpointMap }
- CloudName: {get_param: CloudName}
- IP: {get_param: CinderApiVirtualIP}
- UriSuffix: '/v2/%(tenant_id)s'
- CinderV2Public:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: CinderPublic
- EndpointMap: { get_param: EndpointMap }
- CloudName: {get_param: CloudName}
- IP: {get_param: PublicVirtualIP}
- UriSuffix: '/v2/%(tenant_id)s'
- CinderV2Admin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: CinderAdmin
- EndpointMap: { get_param: EndpointMap }
- CloudName: {get_param: CloudName}
- IP: {get_param: CinderApiVirtualIP}
- UriSuffix: '/v2/%(tenant_id)s'
-
- GlanceInternal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: GlanceInternal
- EndpointMap: { get_param: EndpointMap }
- CloudName: {get_param: CloudName}
- IP: {get_param: GlanceApiVirtualIP}
- GlancePublic:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: GlancePublic
- EndpointMap: { get_param: EndpointMap }
- CloudName: {get_param: CloudName}
- IP: {get_param: PublicVirtualIP}
- GlanceAdmin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: GlanceAdmin
- EndpointMap: { get_param: EndpointMap }
- CloudName: {get_param: CloudName}
- IP: {get_param: GlanceApiVirtualIP}
- GlanceRegistryInternal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: GlanceInternal
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: GlanceRegistryVirtualIP}
- GlanceRegistryPublic:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: GlancePublic
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: PublicVirtualIP}
- GlanceRegistryAdmin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: GlanceAdmin
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: GlanceRegistryVirtualIP}
-
- HeatInternal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: HeatInternal
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: HeatApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v1/%(tenant_id)s'
- HeatPublic:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: HeatPublic
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: PublicVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v1/%(tenant_id)s'
- HeatAdmin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: HeatAdmin
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: HeatApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v1/%(tenant_id)s'
-
- HorizonPublic:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: HeatPublic
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: PublicVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/dashboard'
-
- KeystoneInternal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: KeystoneInternal
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: KeystonePublicApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v2.0'
- KeystonePublic:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: KeystonePublic
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: PublicVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v2.0'
- KeystoneAdmin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: KeystoneAdmin
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: KeystoneAdminApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v2.0'
- KeystoneEC2:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: KeystoneInternal
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: KeystonePublicApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v2.0/ec2tokens'
-
- NeutronInternal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: NeutronInternal
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: NeutronApiVirtualIP}
- CloudName: {get_param: CloudName}
- NeutronPublic:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: NeutronPublic
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: PublicVirtualIP}
- CloudName: {get_param: CloudName}
- NeutronAdmin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: NeutronAdmin
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: NeutronApiVirtualIP}
- CloudName: {get_param: CloudName}
-
- NovaInternal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: NovaInternal
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: NovaApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v2/%(tenant_id)s'
- NovaPublic:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: NovaPublic
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: PublicVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v2/%(tenant_id)s'
- NovaAdmin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: NovaAdmin
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: NovaApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v2/%(tenant_id)s'
- NovaV3Internal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: NovaInternal
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: NovaApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v3'
- NovaV3Public:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: NovaPublic
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: PublicVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v3'
- NovaV3Admin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: NovaAdmin
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: NovaApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v3'
-
- NovaEC2Internal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: NovaEC2Internal
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: NovaApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/services/Cloud'
- NovaEC2Public:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: NovaEC2Public
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: PublicVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/services/Cloud'
- NovaEC2Admin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: NovaEC2Admin
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: NovaApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/services/Admin'
-
- SwiftInternal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: SwiftInternal
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: SwiftProxyVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v1/AUTH_%(tenant_id)s'
- SwiftPublic:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: SwiftPublic
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: PublicVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v1/AUTH_%(tenant_id)s'
- SwiftAdmin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: SwiftAdmin
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: SwiftProxyVirtualIP}
- CloudName: {get_param: CloudName}
- # No Suffix for the Admin interface
- SwiftS3Internal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: SwiftInternal
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: SwiftProxyVirtualIP}
- CloudName: {get_param: CloudName}
- SwiftS3Public:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: SwiftPublic
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: PublicVirtualIP}
- CloudName: {get_param: CloudName}
- SwiftS3Admin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: SwiftAdmin
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: SwiftProxyVirtualIP}
- CloudName: {get_param: CloudName}
-
- SaharaInternal:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: SaharaInternal
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: SaharaApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v1.1/%(tenant_id)s'
- SaharaPublic:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: SaharaPublic
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: SaharaApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v1.1/%(tenant_id)s'
- SaharaAdmin:
- type: OS::TripleO::Endpoint
- properties:
- EndpointName: SaharaAdmin
- EndpointMap: { get_param: EndpointMap }
- IP: {get_param: SaharaApiVirtualIP}
- CloudName: {get_param: CloudName}
- UriSuffix: '/v1.1/%(tenant_id)s'
-
+ via parameter_defaults in the resource registry.
+ CloudName: {type: string, default: overcloud, description: The DNS name
+ of this cloud. e.g. ci-overcloud.tripleo.org}
outputs:
endpoint_map:
value:
- CeilometerInternal: {get_attr: [ CeilometerInternal, endpoint] }
- CeilometerPublic: {get_attr: [ CeilometerPublic, endpoint] }
- CeilometerAdmin: {get_attr: [ CeilometerAdmin, endpoint] }
- CinderInternal: {get_attr: [ CinderInternal, endpoint] }
- CinderPublic: {get_attr: [ CinderPublic, endpoint] }
- CinderAdmin: {get_attr: [ CinderAdmin, endpoint] }
- CinderV2Internal: {get_attr: [ CinderV2Internal, endpoint] }
- CinderV2Public: {get_attr: [ CinderV2Public, endpoint] }
- CinderV2Admin: {get_attr: [ CinderV2Admin, endpoint] }
- GlanceInternal: {get_attr: [ GlanceInternal, endpoint] }
- GlancePublic: {get_attr: [ GlancePublic, endpoint] }
- GlanceAdmin: {get_attr: [ GlanceAdmin, endpoint] }
- GlanceRegistryInternal: {get_attr: [ GlanceRegistryInternal, endpoint] }
- GlanceRegistryPublic: {get_attr: [ GlanceRegistryPublic, endpoint] }
- GlanceRegistryAdmin: {get_attr: [ GlanceRegistryAdmin, endpoint] }
- HeatInternal: {get_attr: [ HeatInternal, endpoint] }
- HeatPublic: {get_attr: [ HeatPublic, endpoint] }
- HeatAdmin: {get_attr: [ HeatAdmin, endpoint] }
- HorizonPublic: {get_attr: [ HorizonPublic, endpoint] }
- KeystoneInternal: {get_attr: [ KeystoneInternal, endpoint] }
- KeystonePublic: {get_attr: [ KeystonePublic, endpoint] }
- KeystoneAdmin: {get_attr: [ KeystoneAdmin, endpoint] }
- KeystoneEC2: {get_attr: [ KeystoneEC2, endpoint] }
- NeutronInternal: {get_attr: [ NeutronInternal, endpoint] }
- NeutronPublic: {get_attr: [ NeutronPublic, endpoint] }
- NeutronAdmin: {get_attr: [ NeutronAdmin, endpoint] }
- NovaInternal: {get_attr: [ NovaInternal, endpoint] }
- NovaPublic: {get_attr: [ NovaPublic, endpoint] }
- NovaAdmin: {get_attr: [ NovaAdmin, endpoint] }
- NovaV3Internal: {get_attr: [ NovaV3Internal, endpoint] }
- NovaV3Public: {get_attr: [ NovaV3Public, endpoint] }
- NovaV3Admin: {get_attr: [ NovaV3Admin, endpoint] }
- NovaEC2Internal: {get_attr: [ NovaEC2Internal, endpoint] }
- NovaEC2Public: {get_attr: [ NovaEC2Public, endpoint] }
- NovaEC2Admin: {get_attr: [ NovaEC2Admin, endpoint] }
- SwiftInternal: {get_attr: [ SwiftInternal, endpoint] }
- SwiftPublic: {get_attr: [ SwiftPublic, endpoint] }
- SwiftAdmin: {get_attr: [ SwiftAdmin, endpoint] }
- SwiftS3Internal: {get_attr: [ SwiftS3Internal, endpoint] }
- SwiftS3Public: {get_attr: [ SwiftS3Public, endpoint] }
- SwiftS3Admin: {get_attr: [ SwiftS3Admin, endpoint] }
- SaharaInternal: {get_attr: [ SaharaInternal, endpoint] }
- SaharaPublic: {get_attr: [ SaharaPublic, endpoint] }
- SaharaAdmin: {get_attr: [ SaharaAdmin, endpoint] }
\ No newline at end of file
+ AodhAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, AodhAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: AodhApiVirtualIP}
+ port:
+ get_param: [EndpointMap, AodhAdmin, port]
+ protocol:
+ get_param: [EndpointMap, AodhAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, AodhAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, AodhAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: AodhApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, AodhAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, AodhAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, AodhAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: AodhApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, AodhAdmin, port]
+ AodhInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, AodhInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: AodhApiVirtualIP}
+ port:
+ get_param: [EndpointMap, AodhInternal, port]
+ protocol:
+ get_param: [EndpointMap, AodhInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, AodhInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, AodhInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: AodhApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, AodhInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, AodhInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, AodhInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: AodhApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, AodhInternal, port]
+ AodhPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, AodhPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, AodhPublic, port]
+ protocol:
+ get_param: [EndpointMap, AodhPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, AodhPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, AodhPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, AodhPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, AodhPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, AodhPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, AodhPublic, port]
+ CeilometerAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CeilometerAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CeilometerApiVirtualIP}
+ port:
+ get_param: [EndpointMap, CeilometerAdmin, port]
+ protocol:
+ get_param: [EndpointMap, CeilometerAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CeilometerAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CeilometerAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CeilometerApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CeilometerAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CeilometerAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CeilometerAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CeilometerApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CeilometerAdmin, port]
+ CeilometerInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CeilometerInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CeilometerApiVirtualIP}
+ port:
+ get_param: [EndpointMap, CeilometerInternal, port]
+ protocol:
+ get_param: [EndpointMap, CeilometerInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CeilometerInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CeilometerInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CeilometerApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CeilometerInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CeilometerInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CeilometerInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CeilometerApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CeilometerInternal, port]
+ CeilometerPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CeilometerPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, CeilometerPublic, port]
+ protocol:
+ get_param: [EndpointMap, CeilometerPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CeilometerPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CeilometerPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CeilometerPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CeilometerPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CeilometerPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CeilometerPublic, port]
+ CinderAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CinderAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CinderApiVirtualIP}
+ port:
+ get_param: [EndpointMap, CinderAdmin, port]
+ protocol:
+ get_param: [EndpointMap, CinderAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CinderAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CinderAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CinderApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CinderAdmin, port]
+ - /v1/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CinderAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CinderAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CinderApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CinderAdmin, port]
+ CinderInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CinderInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CinderApiVirtualIP}
+ port:
+ get_param: [EndpointMap, CinderInternal, port]
+ protocol:
+ get_param: [EndpointMap, CinderInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CinderInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CinderInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CinderApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CinderInternal, port]
+ - /v1/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CinderInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CinderInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CinderApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CinderInternal, port]
+ CinderPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CinderPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, CinderPublic, port]
+ protocol:
+ get_param: [EndpointMap, CinderPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CinderPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CinderPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CinderPublic, port]
+ - /v1/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CinderPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CinderPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CinderPublic, port]
+ CinderV2Admin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CinderAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CinderApiVirtualIP}
+ port:
+ get_param: [EndpointMap, CinderAdmin, port]
+ protocol:
+ get_param: [EndpointMap, CinderAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CinderAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CinderAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CinderApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CinderAdmin, port]
+ - /v2/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CinderAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CinderAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CinderApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CinderAdmin, port]
+ CinderV2Internal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CinderInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CinderApiVirtualIP}
+ port:
+ get_param: [EndpointMap, CinderInternal, port]
+ protocol:
+ get_param: [EndpointMap, CinderInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CinderInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CinderInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CinderApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CinderInternal, port]
+ - /v2/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CinderInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CinderInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: CinderApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CinderInternal, port]
+ CinderV2Public:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CinderPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, CinderPublic, port]
+ protocol:
+ get_param: [EndpointMap, CinderPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CinderPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CinderPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CinderPublic, port]
+ - /v2/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CinderPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CinderPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, CinderPublic, port]
+ GlanceAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GlanceAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GlanceApiVirtualIP}
+ port:
+ get_param: [EndpointMap, GlanceAdmin, port]
+ protocol:
+ get_param: [EndpointMap, GlanceAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlanceAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlanceAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GlanceApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GlanceAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlanceAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlanceAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GlanceApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GlanceAdmin, port]
+ GlanceInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GlanceInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GlanceApiVirtualIP}
+ port:
+ get_param: [EndpointMap, GlanceInternal, port]
+ protocol:
+ get_param: [EndpointMap, GlanceInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlanceInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlanceInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GlanceApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GlanceInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlanceInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlanceInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GlanceApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GlanceInternal, port]
+ GlancePublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GlancePublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, GlancePublic, port]
+ protocol:
+ get_param: [EndpointMap, GlancePublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlancePublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlancePublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GlancePublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlancePublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlancePublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GlancePublic, port]
+ GlanceRegistryInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GlanceRegistryInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GlanceRegistryVirtualIP}
+ port:
+ get_param: [EndpointMap, GlanceRegistryInternal, port]
+ protocol:
+ get_param: [EndpointMap, GlanceRegistryInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlanceRegistryInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlanceRegistryInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GlanceRegistryVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GlanceRegistryInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlanceRegistryInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlanceRegistryInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GlanceRegistryVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GlanceRegistryInternal, port]
+ GnocchiAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GnocchiAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GnocchiApiVirtualIP}
+ port:
+ get_param: [EndpointMap, GnocchiAdmin, port]
+ protocol:
+ get_param: [EndpointMap, GnocchiAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GnocchiAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GnocchiAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GnocchiApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GnocchiAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GnocchiAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GnocchiAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GnocchiApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GnocchiAdmin, port]
+ GnocchiInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GnocchiInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GnocchiApiVirtualIP}
+ port:
+ get_param: [EndpointMap, GnocchiInternal, port]
+ protocol:
+ get_param: [EndpointMap, GnocchiInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GnocchiInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GnocchiInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GnocchiApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GnocchiInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GnocchiInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GnocchiInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: GnocchiApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GnocchiInternal, port]
+ GnocchiPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GnocchiPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, GnocchiPublic, port]
+ protocol:
+ get_param: [EndpointMap, GnocchiPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GnocchiPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GnocchiPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GnocchiPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GnocchiPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GnocchiPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, GnocchiPublic, port]
+ HeatAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, HeatAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: HeatApiVirtualIP}
+ port:
+ get_param: [EndpointMap, HeatAdmin, port]
+ protocol:
+ get_param: [EndpointMap, HeatAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HeatAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HeatAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: HeatApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HeatAdmin, port]
+ - /v1/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HeatAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HeatAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: HeatApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HeatAdmin, port]
+ HeatInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, HeatInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: HeatApiVirtualIP}
+ port:
+ get_param: [EndpointMap, HeatInternal, port]
+ protocol:
+ get_param: [EndpointMap, HeatInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HeatInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HeatInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: HeatApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HeatInternal, port]
+ - /v1/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HeatInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HeatInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: HeatApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HeatInternal, port]
+ HeatPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, HeatPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, HeatPublic, port]
+ protocol:
+ get_param: [EndpointMap, HeatPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HeatPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HeatPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HeatPublic, port]
+ - /v1/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HeatPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HeatPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HeatPublic, port]
+ HorizonPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, HorizonPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, HorizonPublic, port]
+ protocol:
+ get_param: [EndpointMap, HorizonPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HorizonPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HorizonPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HorizonPublic, port]
+ - /dashboard
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, HorizonPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, HorizonPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, HorizonPublic, port]
+ KeystoneAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystoneAdminApiVirtualIP}
+ port:
+ get_param: [EndpointMap, KeystoneAdmin, port]
+ protocol:
+ get_param: [EndpointMap, KeystoneAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystoneAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystoneAdminApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystoneAdmin, port]
+ - /v2.0
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystoneAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystoneAdminApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystoneAdmin, port]
+ KeystoneEC2:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP}
+ port:
+ get_param: [EndpointMap, KeystoneInternal, port]
+ protocol:
+ get_param: [EndpointMap, KeystoneInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystoneInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystoneInternal, port]
+ - /v2.0/ec2tokens
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystoneInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystoneInternal, port]
+ KeystoneInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP}
+ port:
+ get_param: [EndpointMap, KeystoneInternal, port]
+ protocol:
+ get_param: [EndpointMap, KeystoneInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystoneInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystoneInternal, port]
+ - /v2.0
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystoneInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystoneInternal, port]
+ KeystonePublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, KeystonePublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, KeystonePublic, port]
+ protocol:
+ get_param: [EndpointMap, KeystonePublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystonePublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystonePublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystonePublic, port]
+ - /v2.0
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystonePublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystonePublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystonePublic, port]
+ KeystoneV3Admin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystoneAdminApiVirtualIP}
+ port:
+ get_param: [EndpointMap, KeystoneAdmin, port]
+ protocol:
+ get_param: [EndpointMap, KeystoneAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystoneAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystoneAdminApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystoneAdmin, port]
+ - /v3
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystoneAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystoneAdminApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystoneAdmin, port]
+ KeystoneV3Internal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP}
+ port:
+ get_param: [EndpointMap, KeystoneInternal, port]
+ protocol:
+ get_param: [EndpointMap, KeystoneInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystoneInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystoneInternal, port]
+ - /v3
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystoneInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystoneInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystoneInternal, port]
+ KeystoneV3Public:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, KeystonePublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, KeystonePublic, port]
+ protocol:
+ get_param: [EndpointMap, KeystonePublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystonePublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystonePublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystonePublic, port]
+ - /v3
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, KeystonePublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, KeystonePublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, KeystonePublic, port]
+ NeutronAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NeutronAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NeutronApiVirtualIP}
+ port:
+ get_param: [EndpointMap, NeutronAdmin, port]
+ protocol:
+ get_param: [EndpointMap, NeutronAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NeutronAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NeutronAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NeutronApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NeutronAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NeutronAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NeutronAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NeutronApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NeutronAdmin, port]
+ NeutronInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NeutronInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NeutronApiVirtualIP}
+ port:
+ get_param: [EndpointMap, NeutronInternal, port]
+ protocol:
+ get_param: [EndpointMap, NeutronInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NeutronInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NeutronInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NeutronApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NeutronInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NeutronInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NeutronInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NeutronApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NeutronInternal, port]
+ NeutronPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NeutronPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, NeutronPublic, port]
+ protocol:
+ get_param: [EndpointMap, NeutronPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NeutronPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NeutronPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NeutronPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NeutronPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NeutronPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NeutronPublic, port]
+ NovaAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ port:
+ get_param: [EndpointMap, NovaAdmin, port]
+ protocol:
+ get_param: [EndpointMap, NovaAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaAdmin, port]
+ - /v2.1/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaAdmin, port]
+ NovaInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ port:
+ get_param: [EndpointMap, NovaInternal, port]
+ protocol:
+ get_param: [EndpointMap, NovaInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaInternal, port]
+ - /v2.1/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaInternal, port]
+ NovaPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, NovaPublic, port]
+ protocol:
+ get_param: [EndpointMap, NovaPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaPublic, port]
+ - /v2.1/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaPublic, port]
+ NovaEC2Admin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaEC2Admin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ port:
+ get_param: [EndpointMap, NovaEC2Admin, port]
+ protocol:
+ get_param: [EndpointMap, NovaEC2Admin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaEC2Admin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaEC2Admin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaEC2Admin, port]
+ - /services/Admin
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaEC2Admin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaEC2Admin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaEC2Admin, port]
+ NovaEC2Internal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaEC2Internal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ port:
+ get_param: [EndpointMap, NovaEC2Internal, port]
+ protocol:
+ get_param: [EndpointMap, NovaEC2Internal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaEC2Internal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaEC2Internal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaEC2Internal, port]
+ - /services/Cloud
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaEC2Internal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaEC2Internal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaEC2Internal, port]
+ NovaEC2Public:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaEC2Public, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, NovaEC2Public, port]
+ protocol:
+ get_param: [EndpointMap, NovaEC2Public, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaEC2Public, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaEC2Public, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaEC2Public, port]
+ - /services/Cloud
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaEC2Public, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaEC2Public, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaEC2Public, port]
+ NovaVNCProxyAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaVNCProxyAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ port:
+ get_param: [EndpointMap, NovaVNCProxyAdmin, port]
+ protocol:
+ get_param: [EndpointMap, NovaVNCProxyAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaVNCProxyAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaVNCProxyAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaVNCProxyAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaVNCProxyAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaVNCProxyAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaVNCProxyAdmin, port]
+ NovaVNCProxyInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaVNCProxyInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ port:
+ get_param: [EndpointMap, NovaVNCProxyInternal, port]
+ protocol:
+ get_param: [EndpointMap, NovaVNCProxyInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaVNCProxyInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaVNCProxyInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaVNCProxyInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaVNCProxyInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaVNCProxyInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: NovaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaVNCProxyInternal, port]
+ NovaVNCProxyPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaVNCProxyPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, NovaVNCProxyPublic, port]
+ protocol:
+ get_param: [EndpointMap, NovaVNCProxyPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaVNCProxyPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaVNCProxyPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaVNCProxyPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaVNCProxyPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaVNCProxyPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, NovaVNCProxyPublic, port]
+ SaharaAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, SaharaAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SaharaApiVirtualIP}
+ port:
+ get_param: [EndpointMap, SaharaAdmin, port]
+ protocol:
+ get_param: [EndpointMap, SaharaAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SaharaAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SaharaAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SaharaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SaharaAdmin, port]
+ - /v1.1/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SaharaAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SaharaAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SaharaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SaharaAdmin, port]
+ SaharaInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, SaharaInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SaharaApiVirtualIP}
+ port:
+ get_param: [EndpointMap, SaharaInternal, port]
+ protocol:
+ get_param: [EndpointMap, SaharaInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SaharaInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SaharaInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SaharaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SaharaInternal, port]
+ - /v1.1/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SaharaInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SaharaInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SaharaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SaharaInternal, port]
+ SaharaPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, SaharaPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SaharaApiVirtualIP}
+ port:
+ get_param: [EndpointMap, SaharaPublic, port]
+ protocol:
+ get_param: [EndpointMap, SaharaPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SaharaPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SaharaPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SaharaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SaharaPublic, port]
+ - /v1.1/%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SaharaPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SaharaPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SaharaApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SaharaPublic, port]
+ SwiftAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, SwiftAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SwiftProxyVirtualIP}
+ port:
+ get_param: [EndpointMap, SwiftAdmin, port]
+ protocol:
+ get_param: [EndpointMap, SwiftAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SwiftAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SwiftAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SwiftProxyVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SwiftAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SwiftAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SwiftAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SwiftProxyVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SwiftAdmin, port]
+ SwiftInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, SwiftInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SwiftProxyVirtualIP}
+ port:
+ get_param: [EndpointMap, SwiftInternal, port]
+ protocol:
+ get_param: [EndpointMap, SwiftInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SwiftInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SwiftInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SwiftProxyVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SwiftInternal, port]
+ - /v1/AUTH_%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SwiftInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SwiftInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SwiftProxyVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SwiftInternal, port]
+ SwiftPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, SwiftPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, SwiftPublic, port]
+ protocol:
+ get_param: [EndpointMap, SwiftPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SwiftPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SwiftPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SwiftPublic, port]
+ - /v1/AUTH_%(tenant_id)s
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SwiftPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SwiftPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SwiftPublic, port]
+ SwiftS3Admin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, SwiftAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SwiftProxyVirtualIP}
+ port:
+ get_param: [EndpointMap, SwiftAdmin, port]
+ protocol:
+ get_param: [EndpointMap, SwiftAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SwiftAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SwiftAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SwiftProxyVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SwiftAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SwiftAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SwiftAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SwiftProxyVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SwiftAdmin, port]
+ SwiftS3Internal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, SwiftInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SwiftProxyVirtualIP}
+ port:
+ get_param: [EndpointMap, SwiftInternal, port]
+ protocol:
+ get_param: [EndpointMap, SwiftInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SwiftInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SwiftInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SwiftProxyVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SwiftInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SwiftInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SwiftInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: SwiftProxyVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SwiftInternal, port]
+ SwiftS3Public:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, SwiftPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, SwiftPublic, port]
+ protocol:
+ get_param: [EndpointMap, SwiftPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SwiftPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SwiftPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SwiftPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, SwiftPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, SwiftPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, SwiftPublic, port]
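Every entry in the endpoint map above follows the same two intrinsics: str_replace substitutes CLOUDNAME or IP_ADDRESS into the host template taken from EndpointMap, and list_join glues protocol, host, port and an optional version suffix into uri/uri_no_suffix. As a rough illustration only (this mimics the composition, it is not the Heat engine, and the parameter values are hypothetical), the pattern reduces to:

    # Illustration: how str_replace + list_join compose a single endpoint entry.
    # cloud_name and vip are made-up example values, not template defaults.
    def build_endpoint(protocol, host_template, port, suffix='',
                       cloud_name='overcloud', vip='192.0.2.10'):
        # str_replace: substitute CLOUDNAME / IP_ADDRESS in the host template
        host = host_template.replace('CLOUDNAME', cloud_name).replace('IP_ADDRESS', vip)
        # list_join with '' separator: protocol + '://' + host + ':' + port
        uri_no_suffix = '%s://%s:%s' % (protocol, host, port)
        return {'host': host, 'port': port, 'protocol': protocol,
                'uri': uri_no_suffix + suffix, 'uri_no_suffix': uri_no_suffix}

    # e.g. a Cinder v2-style entry served straight off a VIP:
    print(build_endpoint('http', 'IP_ADDRESS', '8776', '/v2/%(tenant_id)s')['uri'])
    # -> http://192.0.2.10:8776/v2/%(tenant_id)s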
diff --git a/network/external_v6.yaml b/network/external_v6.yaml
new file mode 100644
index 00000000..3e120f24
--- /dev/null
+++ b/network/external_v6.yaml
@@ -0,0 +1,69 @@
+heat_template_version: 2015-04-30
+
+description: >
+ External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc.
+
+parameters:
+ # the defaults here work for static IP assignment (IPAM) only
+ ExternalNetCidr:
+ # OpenStack uses the EUI-64 address format, which requires a /64 prefix
+ default: '2001:db8:fd00:1000::/64'
+ description: Cidr for the external network.
+ type: string
+ ExternalNetValueSpecs:
+ default: {'provider:physical_network': 'external', 'provider:network_type': 'flat'}
+ description: Value specs for the external network.
+ type: json
+ ExternalNetAdminStateUp:
+ default: false
+    description: The admin state of the network.
+ type: boolean
+ ExternalNetShared:
+ default: false
+ description: Whether this network is shared across all tenants.
+ type: boolean
+ ExternalNetName:
+ default: external
+ description: The name of the external network.
+ type: string
+ ExternalSubnetName:
+ default: external_subnet
+ description: The name of the external subnet in Neutron.
+ type: string
+ ExternalAllocationPools:
+ default: [{'start': '2001:db8:fd00:1000::10', 'end': '2001:db8:fd00:1000:ffff:ffff:ffff:fffe'}]
+    description: IP allocation pool range for the external network.
+ type: json
+ IPv6AddressMode:
+ default: dhcpv6-stateful
+ description: Neutron subnet IPv6 address mode
+ type: string
+ IPv6RAMode:
+ default: dhcpv6-stateful
+ description: Neutron subnet IPv6 router advertisement mode
+ type: string
+
+resources:
+ ExternalNetwork:
+ type: OS::Neutron::Net
+ properties:
+ admin_state_up: {get_param: ExternalNetAdminStateUp}
+ name: {get_param: ExternalNetName}
+ shared: {get_param: ExternalNetShared}
+ value_specs: {get_param: ExternalNetValueSpecs}
+
+ ExternalSubnet:
+ type: OS::Neutron::Subnet
+ properties:
+ ip_version: 6
+ ipv6_address_mode: {get_param: IPv6AddressMode}
+ ipv6_ra_mode: {get_param: IPv6RAMode}
+ cidr: {get_param: ExternalNetCidr}
+ name: {get_param: ExternalSubnetName}
+ network: {get_resource: ExternalNetwork}
+ allocation_pools: {get_param: ExternalAllocationPools}
+
+outputs:
+ OS::stack_id:
+ description: Neutron external network
+ value: {get_resource: ExternalNetwork}
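The comments in this template (and in internal_api_v6.yaml below) note that OpenStack's EUI-64 addressing requires a /64 prefix, and the default allocation pool spans almost the entire /64. A minimal pre-flight check along those lines, using Python's standard ipaddress module with the template defaults (the check itself is a sketch, not part of the templates):

    import ipaddress

    # Sanity-check a candidate external CIDR and allocation pool before deploying.
    cidr = ipaddress.ip_network('2001:db8:fd00:1000::/64')
    pool_start = ipaddress.ip_address('2001:db8:fd00:1000::10')
    pool_end = ipaddress.ip_address('2001:db8:fd00:1000:ffff:ffff:ffff:fffe')

    assert cidr.prefixlen == 64, 'EUI-64 addressing expects a /64 prefix'
    assert pool_start in cidr and pool_end in cidr, 'allocation pool must lie inside the CIDR'
    print('external CIDR and allocation pool look consistent')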
diff --git a/network/internal_api_v6.yaml b/network/internal_api_v6.yaml
new file mode 100644
index 00000000..68c14fbe
--- /dev/null
+++ b/network/internal_api_v6.yaml
@@ -0,0 +1,69 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Internal API network. Used for most APIs, Database, RPC.
+
+parameters:
+ # the defaults here work for static IP assignment (IPAM) only
+ InternalApiNetCidr:
+ # OpenStack uses the EUI-64 address format, which requires a /64 prefix
+ default: 'fd00:fd00:fd00:2000::/64'
+ description: Cidr for the internal API network.
+ type: string
+ InternalApiNetValueSpecs:
+ default: {'provider:physical_network': 'internal_api', 'provider:network_type': 'flat'}
+ description: Value specs for the internal API network.
+ type: json
+ InternalApiNetAdminStateUp:
+ default: false
+    description: The admin state of the network.
+ type: boolean
+ InternalApiNetShared:
+ default: false
+ description: Whether this network is shared across all tenants.
+ type: boolean
+ InternalApiNetName:
+ default: internal_api
+ description: The name of the internal API network.
+ type: string
+ InternalApiSubnetName:
+ default: internal_api_subnet
+ description: The name of the internal API subnet in Neutron.
+ type: string
+ InternalApiAllocationPools:
+ default: [{'start': 'fd00:fd00:fd00:2000::10', 'end': 'fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe'}]
+    description: IP allocation pool range for the internal API network.
+ type: json
+ IPv6AddressMode:
+ default: dhcpv6-stateful
+ description: Neutron subnet IPv6 address mode
+ type: string
+ IPv6RAMode:
+ default: dhcpv6-stateful
+ description: Neutron subnet IPv6 router advertisement mode
+ type: string
+
+resources:
+ InternalApiNetwork:
+ type: OS::Neutron::Net
+ properties:
+ admin_state_up: {get_param: InternalApiNetAdminStateUp}
+ name: {get_param: InternalApiNetName}
+ shared: {get_param: InternalApiNetShared}
+ value_specs: {get_param: InternalApiNetValueSpecs}
+
+ InternalApiSubnet:
+ type: OS::Neutron::Subnet
+ properties:
+ ip_version: 6
+ ipv6_address_mode: {get_param: IPv6AddressMode}
+ ipv6_ra_mode: {get_param: IPv6RAMode}
+ cidr: {get_param: InternalApiNetCidr}
+ name: {get_param: InternalApiSubnetName}
+ network: {get_resource: InternalApiNetwork}
+ allocation_pools: {get_param: InternalApiAllocationPools}
+
+outputs:
+ OS::stack_id:
+ description: Neutron internal network
+ value: {get_resource: InternalApiNetwork}
diff --git a/network/management.yaml b/network/management.yaml
index 9bfaafa2..1800b57a 100644
--- a/network/management.yaml
+++ b/network/management.yaml
@@ -16,7 +16,7 @@ parameters:
type: string
ManagementNetAdminStateUp:
default: false
- description: This admin state of of the network.
+ description: The admin state of the network.
type: boolean
ManagementNetEnableDHCP:
default: false
diff --git a/network/noop.yaml b/network/noop.yaml
deleted file mode 100644
index 0963d2ce..00000000
--- a/network/noop.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: A stack which creates no network(s).
diff --git a/network/ports/ctlplane_vip.yaml b/network/ports/ctlplane_vip.yaml
index 7a7043bd..5ac7d344 100644
--- a/network/ports/ctlplane_vip.yaml
+++ b/network/ports/ctlplane_vip.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Creates a port for a VIP on the undercloud ctlplane network.
@@ -45,13 +45,14 @@ outputs:
ip_address:
description: Virtual IP network IP
value: {get_attr: [VipPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: Virtual IP network IP (for compatibility with vip_v6.yaml)
+ value: {get_attr: [VipPort, fixed_ips, 0, ip_address]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the ctlplane network.
value:
list_join:
- ''
- - {get_attr: [VipPort, fixed_ips, 0, ip_address]}
- '/'
- - {get_attr: [VipPort, subnets, 0, cidr, -2]}
- - {get_attr: [VipPort, subnets, 0, cidr, -1]}
+ - {str_split: ['/', {get_attr: [VipPort, subnets, 0, cidr]}, 1]}
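The ip_subnet output above previously rebuilt the prefix length by indexing the last two characters of the subnet CIDR, which only works for two-digit prefixes; bumping heat_template_version to 2015-10-15 makes str_split available, and str_split: ['/', <cidr>, 1] returns the prefix length at any width. The port templates below receive the same treatment. In plain Python the new expression amounts to:

    # Equivalent of the new ip_subnet expression (illustration only).
    def ip_subnet(ip_address, subnet_cidr):
        prefix_len = subnet_cidr.split('/')[1]   # str_split: ['/', <cidr>, 1]
        return '%s/%s' % (ip_address, prefix_len)

    print(ip_subnet('192.0.2.5', '192.0.2.0/24'))     # 192.0.2.5/24
    print(ip_subnet('10.0.0.5', '10.0.0.0/8'))        # 10.0.0.5/8 (the old -2/-1 indexing broke on one-digit prefixes)
    print(ip_subnet('2001:db8::5', '2001:db8::/64'))  # 2001:db8::5/64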
diff --git a/network/ports/external.yaml b/network/ports/external.yaml
index 7624eb9f..c4f815fb 100644
--- a/network/ports/external.yaml
+++ b/network/ports/external.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Creates a port on the external network. The IP address will be chosen
@@ -48,13 +48,14 @@ outputs:
ip_address:
description: external network IP
value: {get_attr: [ExternalPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: external network IP (for compatibility with external_v6.yaml)
+ value: {get_attr: [ExternalPort, fixed_ips, 0, ip_address]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the external network IP
value:
list_join:
- ''
- - {get_attr: [ExternalPort, fixed_ips, 0, ip_address]}
- '/'
- - {get_attr: [ExternalPort, subnets, 0, cidr, -2]}
- - {get_attr: [ExternalPort, subnets, 0, cidr, -1]}
+ - {str_split: ['/', {get_attr: [ExternalPort, subnets, 0, cidr]}, 1]}
diff --git a/network/ports/external_from_pool.yaml b/network/ports/external_from_pool.yaml
index 8e9dc7c2..867176e3 100644
--- a/network/ports/external_from_pool.yaml
+++ b/network/ports/external_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Returns an IP from a network mapped list of IPs
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
@@ -33,13 +33,14 @@ outputs:
ip_address:
description: external network IP
value: {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
+ ip_address_uri:
+ description: external network IP (for compatibility with IPv6)
+ value: {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the external network IP
value:
list_join:
- ''
- - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
- '/'
- - {get_param: [ExternalNetCidr, -2]}
- - {get_param: [ExternalNetCidr, -1]}
+ - {str_split: ['/', {get_param: ExternalNetCidr}, 1]}
diff --git a/network/ports/external_from_pool_v6.yaml b/network/ports/external_from_pool_v6.yaml
new file mode 100644
index 00000000..bf0c036d
--- /dev/null
+++ b/network/ports/external_from_pool_v6.yaml
@@ -0,0 +1,54 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a network mapped list of IPs. This version is for IPv6
+ addresses. The ip_address_uri output will have brackets for use in URLs.
+
+parameters:
+ ExternalNetName:
+ description: Name of the external network
+ default: external
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+  ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ IPPool:
+ default: {}
+ description: A network mapped list of IPs
+ type: json
+ NodeIndex:
+ default: 0
+ description: Index of the IP to get from Pool
+ type: number
+ ExternalNetCidr:
+ default: '2001:db8:fd00:1000::/64'
+ description: Cidr for the external network.
+ type: string
+
+outputs:
+ ip_address:
+ description: external network IP
+ value: {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
+ ip_address_uri:
+    description: external network IP with brackets suitable for a URL
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
+ - ']'
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the external network IP
+ value:
+ list_join:
+ - ''
+ - - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
+ - '/'
+ - {get_param: [ExternalNetCidr, -2]}
+ - {get_param: [ExternalNetCidr, -1]}
diff --git a/network/ports/external_v6.yaml b/network/ports/external_v6.yaml
new file mode 100644
index 00000000..522caaa0
--- /dev/null
+++ b/network/ports/external_v6.yaml
@@ -0,0 +1,68 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Creates a port on the external network. The IP address will be chosen
+ automatically if FixedIPs is empty.
+
+parameters:
+ ExternalNetName:
+ description: Name of the external neutron network
+ default: external
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+  ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ ControlPlaneNetwork: # Here for compatibility with ctlplane_vip.yaml
+ description: The name of the undercloud Neutron control plane
+ default: ctlplane
+ type: string
+ FixedIPs:
+ description: >
+ Control the IP allocation for the VIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ default: []
+ type: json
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ type: number
+
+resources:
+
+ ExternalPort:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: ExternalNetName}
+ name: {get_param: PortName}
+ fixed_ips: {get_param: FixedIPs}
+ replacement_policy: AUTO
+
+outputs:
+ ip_address:
+ description: external network IP
+ value: {get_attr: [ExternalPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: external network IP with brackets suitable for a URL
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_attr: [ExternalPort, fixed_ips, 0, ip_address]}
+ - ']'
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the external network IP
+ value:
+ list_join:
+ - ''
+ - - {get_attr: [ExternalPort, fixed_ips, 0, ip_address]}
+ - '/'
+ - {get_attr: [ExternalPort, subnets, 0, cidr, -2]}
+ - {get_attr: [ExternalPort, subnets, 0, cidr, -1]}
diff --git a/network/ports/from_service.yaml b/network/ports/from_service.yaml
index 6b669f41..3d61910e 100644
--- a/network/ports/from_service.yaml
+++ b/network/ports/from_service.yaml
@@ -8,19 +8,19 @@ parameters:
description: Name of the service to lookup
default: ''
type: string
- NetworkName: # Here for compatability with ctlplane_vip.yaml
+ NetworkName: # Here for compatibility with ctlplane_vip.yaml
description: Name of the network where the VIP will be created
default: ctlplane
type: string
- PortName: # Here for compatability with ctlplane_vip.yaml
+ PortName: # Here for compatibility with ctlplane_vip.yaml
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with ctlplane_vip.yaml
+ ControlPlaneIP: # Here for compatibility with ctlplane_vip.yaml
description: IP address on the control plane
default: ''
type: string
- ControlPlaneNetwork: # Here for compatability with ctlplane_vip.yaml
+ ControlPlaneNetwork: # Here for compatibility with ctlplane_vip.yaml
description: The name of the undercloud Neutron control plane
default: ctlplane
type: string
@@ -32,3 +32,6 @@ outputs:
ip_address:
description: network IP
value: {get_param: [ServiceVips, {get_param: ServiceName}]}
+ ip_address_uri:
+ description: network IP (for compatibility with IPv6)
+ value: {get_param: [ServiceVips, {get_param: ServiceName}]}
diff --git a/network/ports/from_service_v6.yaml b/network/ports/from_service_v6.yaml
new file mode 100644
index 00000000..2dd0a0ee
--- /dev/null
+++ b/network/ports/from_service_v6.yaml
@@ -0,0 +1,42 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a service mapped list of IPv6 IPs
+
+parameters:
+ ServiceName:
+ description: Name of the service to lookup
+ default: ''
+ type: string
+  NetworkName: # Here for compatibility with ctlplane_vip.yaml
+ description: Name of the network where the VIP will be created
+ default: ctlplane
+ type: string
+  PortName: # Here for compatibility with ctlplane_vip.yaml
+ description: Name of the port
+ default: ''
+ type: string
+  ControlPlaneIP: # Here for compatibility with ctlplane_vip.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+  ControlPlaneNetwork: # Here for compatibility with ctlplane_vip.yaml
+ description: The name of the undercloud Neutron control plane
+ default: ctlplane
+ type: string
+ ServiceVips:
+ default: {}
+ type: json
+
+outputs:
+ ip_address:
+ description: network IP
+ value: {get_param: [ServiceVips, {get_param: ServiceName}]}
+ ip_address_uri:
+ description: network IP (with brackets for use in URLs)
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_param: [ServiceVips, {get_param: ServiceName}]}
+ - ']'
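All of the *_v6 port templates expose an ip_address_uri output that wraps the address in square brackets, because a bare IPv6 literal cannot have ':port' appended to it in a URL, so anything that builds a URL from it (like the endpoint map earlier in this change) needs the bracketed form. A small sketch of the idea (the templates themselves always bracket, since a v6 port only ever hands out IPv6 addresses):

    # Why the bracketed ip_address_uri output exists (illustration only).
    def ip_address_uri(addr):
        return '[%s]' % addr if ':' in addr else addr

    for addr in ('192.0.2.10', 'fd00:fd00:fd00:2000::10'):
        print('http://%s:%d/v2.0' % (ip_address_uri(addr), 5000))
    # http://192.0.2.10:5000/v2.0
    # http://[fd00:fd00:fd00:2000::10]:5000/v2.0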
diff --git a/network/ports/internal_api.yaml b/network/ports/internal_api.yaml
index f84e8f71..1d521a8d 100644
--- a/network/ports/internal_api.yaml
+++ b/network/ports/internal_api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Creates a port on the internal_api network.
@@ -43,13 +43,15 @@ outputs:
ip_address:
description: internal API network IP
value: {get_attr: [InternalApiPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: |
+ internal API network IP (for compatibility with internal_api_v6.yaml)
+ value: {get_attr: [InternalApiPort, fixed_ips, 0, ip_address]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the internal API network IP
value:
list_join:
- ''
- - {get_attr: [InternalApiPort, fixed_ips, 0, ip_address]}
- '/'
- - {get_attr: [InternalApiPort, subnets, 0, cidr, -2]}
- - {get_attr: [InternalApiPort, subnets, 0, cidr, -1]}
+ - {str_split: ['/', {get_attr: [InternalApiPort, subnets, 0, cidr]}, 1]}
diff --git a/network/ports/internal_api_from_pool.yaml b/network/ports/internal_api_from_pool.yaml
index b98e1fb1..d7b67e26 100644
--- a/network/ports/internal_api_from_pool.yaml
+++ b/network/ports/internal_api_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Returns an IP from a network mapped list of IPs
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
@@ -33,13 +33,14 @@ outputs:
ip_address:
description: internal API network IP
value: {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
+ ip_address_uri:
+ description: internal API network IP (for compatibility with internal_api_v6.yaml)
+ value: {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the internal API network IP
value:
list_join:
- ''
- - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
- '/'
- - {get_param: [InternalApiNetCidr, -2]}
- - {get_param: [InternalApiNetCidr, -1]}
+ - {str_split: ['/', {get_param: InternalApiNetCidr}, 1]}
diff --git a/network/ports/internal_api_from_pool_v6.yaml b/network/ports/internal_api_from_pool_v6.yaml
new file mode 100644
index 00000000..34c17ab2
--- /dev/null
+++ b/network/ports/internal_api_from_pool_v6.yaml
@@ -0,0 +1,54 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a network mapped list of IPs. This version is for IPv6
+ addresses. The ip_address_uri output will have brackets for use in URLs.
+
+parameters:
+ InternalApiNetName:
+ description: Name of the internal API network
+ default: internal_api
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ IPPool:
+ default: {}
+ description: A network mapped list of IPs
+ type: json
+ NodeIndex:
+ default: 0
+ description: Index of the IP to get from Pool
+ type: number
+ InternalApiNetCidr:
+ default: 'fd00:fd00:fd00:2000::/64'
+ description: Cidr for the internal API network.
+ type: string
+
+outputs:
+ ip_address:
+ description: internal API network IP
+ value: {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
+ ip_address_uri:
+ description: internal API network IP (for compatibility with internal_api_v6.yaml)
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
+ - ']'
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the internal API network IP
+ value:
+ list_join:
+ - ''
+ - - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
+ - '/'
+ - {get_param: [InternalApiNetCidr, -2]}
+ - {get_param: [InternalApiNetCidr, -1]}
diff --git a/network/ports/internal_api_v6.yaml b/network/ports/internal_api_v6.yaml
new file mode 100644
index 00000000..279e6bd0
--- /dev/null
+++ b/network/ports/internal_api_v6.yaml
@@ -0,0 +1,63 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Creates a port on the internal_api network.
+
+parameters:
+ InternalApiNetName:
+ description: Name of the internal API neutron network
+ default: internal_api
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ FixedIPs:
+ description: >
+ Control the IP allocation for the VIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ default: []
+ type: json
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ type: number
+
+resources:
+
+ InternalApiPort:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: InternalApiNetName}
+ name: {get_param: PortName}
+ fixed_ips: {get_param: FixedIPs}
+ replacement_policy: AUTO
+
+outputs:
+ ip_address:
+ description: internal API network IP
+ value: {get_attr: [InternalApiPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: internal API network IP with brackets suitable for a URL
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_attr: [InternalApiPort, fixed_ips, 0, ip_address]}
+ - ']'
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the internal API network IP
+ value:
+ list_join:
+ - ''
+ - - {get_attr: [InternalApiPort, fixed_ips, 0, ip_address]}
+ - '/'
+ - {get_attr: [InternalApiPort, subnets, 0, cidr, -2]}
+ - {get_attr: [InternalApiPort, subnets, 0, cidr, -1]}
diff --git a/network/ports/management.yaml b/network/ports/management.yaml
index 1d15ca60..967b66e1 100644
--- a/network/ports/management.yaml
+++ b/network/ports/management.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Creates a port on the management network. The IP address will be chosen
@@ -16,6 +16,12 @@ parameters:
ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
type: string
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ type: number
resources:
@@ -30,13 +36,14 @@ outputs:
ip_address:
description: management network IP
value: {get_attr: [ManagementPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: management network IP (for compatibility with management_v6.yaml)
+ value: {get_attr: [ManagementPort, fixed_ips, 0, ip_address]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the management network IP
value:
list_join:
- ''
- - {get_attr: [ManagementPort, fixed_ips, 0, ip_address]}
- '/'
- - {get_attr: [ManagementPort, subnets, 0, cidr, -2]}
- - {get_attr: [ManagementPort, subnets, 0, cidr, -1]}
+ - {str_split: ['/', {get_attr: [ManagementPort, subnets, 0, cidr]}, 1]}
diff --git a/network/ports/management_from_pool.yaml b/network/ports/management_from_pool.yaml
new file mode 100644
index 00000000..fc87e39a
--- /dev/null
+++ b/network/ports/management_from_pool.yaml
@@ -0,0 +1,46 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a network mapped list of IPs
+
+parameters:
+ ManagementNetName:
+ description: Name of the management network
+ default: management
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ IPPool:
+ default: {}
+ description: A network mapped list of IPs
+ type: json
+ NodeIndex:
+ default: 0
+ description: Index of the IP to get from Pool
+ type: number
+ ManagementNetCidr:
+ default: '172.16.4.0/24'
+ description: Cidr for the management network.
+ type: string
+
+outputs:
+ ip_address:
+ description: management network IP
+ value: {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]}
+ ip_address_uri:
+ description: management network IP (for compatibility with management_v6.yaml)
+ value: {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]}
+ ip_subnet:
+ description: IP/Subnet CIDR for the management network IP
+ value:
+ list_join:
+ - ''
+ - - {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]}
+ - '/'
+ - {str_split: ['/', {get_param: ManagementNetCidr}, 1]}
diff --git a/network/ports/management_v6.yaml b/network/ports/management_v6.yaml
new file mode 100644
index 00000000..a94ebc7b
--- /dev/null
+++ b/network/ports/management_v6.yaml
@@ -0,0 +1,54 @@
+heat_template_version: 2015-10-15
+
+description: >
+ Creates a port on the management network. The IP address will be chosen
+ automatically if FixedIPs is empty.
+
+parameters:
+ ManagementNetName:
+ description: Name of the management neutron network
+ default: management
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ type: string
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ type: number
+
+resources:
+
+ ManagementPort:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: ManagementNetName}
+ name: {get_param: PortName}
+ replacement_policy: AUTO
+
+outputs:
+ ip_address:
+ description: management network IP
+ value: {get_attr: [ManagementPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: management network IP with brackets suitable for a URL
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_attr: [ManagementPort, fixed_ips, 0, ip_address]}
+ - ']'
+ ip_subnet:
+ description: IP/Subnet CIDR for the management network IP
+ value:
+ list_join:
+ - ''
+ - - {get_attr: [ManagementPort, fixed_ips, 0, ip_address]}
+ - '/'
+ - {str_split: ['/', {get_attr: [ManagementPort, subnets, 0, cidr]}, 1]}
diff --git a/network/ports/net_ip_map.yaml b/network/ports/net_ip_map.yaml
index c6386025..6bb4557b 100644
--- a/network/ports/net_ip_map.yaml
+++ b/network/ports/net_ip_map.yaml
@@ -7,21 +7,45 @@ parameters:
ExternalIp:
default: ''
type: string
+ ExternalIpUri:
+ default: ''
+ type: string
+ description: IP address with brackets in case of IPv6
InternalApiIp:
default: ''
type: string
+ InternalApiIpUri:
+ default: ''
+ type: string
+ description: IP address with brackets in case of IPv6
StorageIp:
default: ''
type: string
+ StorageIpUri:
+ default: ''
+ type: string
+ description: IP address with brackets in case of IPv6
StorageMgmtIp:
default: ''
type: string
+ StorageMgmtIpUri:
+ default: ''
+ type: string
+ description: IP address with brackets in case of IPv6
TenantIp:
default: ''
type: string
+ TenantIpUri:
+ default: ''
+ type: string
+ description: IP address with brackets in case of IPv6
ManagementIp:
default: ''
type: string
+ ManagementIpUri:
+ default: ''
+ type: string
+ description: IP address with brackets in case of IPv6
outputs:
net_ip_map:
@@ -36,3 +60,15 @@ outputs:
storage_mgmt: {get_param: StorageMgmtIp}
tenant: {get_param: TenantIp}
management: {get_param: ManagementIp}
+ net_ip_uri_map:
+ description: >
+ A Hash containing a mapping of network names to assigned IPs for a
+ specific machine with brackets around IPv6 addresses for use in URLs.
+ value:
+ ctlplane: {get_param: ControlPlaneIp}
+ external: {get_param: ExternalIpUri}
+ internal_api: {get_param: InternalApiIpUri}
+ storage: {get_param: StorageIpUri}
+ storage_mgmt: {get_param: StorageMgmtIpUri}
+ tenant: {get_param: TenantIpUri}
+ management: {get_param: ManagementIpUri}
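
net_ip_uri_map mirrors net_ip_map but carries bracket-wrapped addresses, so endpoint URLs remain valid when the address is IPv6. A minimal sketch, not part of the patch, of the bracket wrapping in isolation:

heat_template_version: 2015-04-30

parameters:
  ExampleIp:
    type: string
    default: 'fd00:fd00:fd00:2000::10'

outputs:
  ip_uri:
    # Produces '[fd00:fd00:fd00:2000::10]', suitable for embedding in a URL
    # such as http://[fd00:fd00:fd00:2000::10]:5000/
    value:
      list_join:
        - ''
        - - '['
          - {get_param: ExampleIp}
          - ']'
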
diff --git a/network/ports/net_vip_map_external.yaml b/network/ports/net_vip_map_external.yaml
index 23e1f992..a40a0bfc 100644
--- a/network/ports/net_vip_map_external.yaml
+++ b/network/ports/net_vip_map_external.yaml
@@ -24,18 +24,33 @@ parameters:
ExternalIp:
default: ''
type: string
+ ExternalIpUri:
+ default: ''
+ type: string
InternalApiIp:
default: ''
type: string
+ InternalApiIpUri:
+ default: ''
+ type: string
StorageIp:
default: ''
type: string
+ StorageIpUri:
+ default: ''
+ type: string
StorageMgmtIp:
default: ''
type: string
+ StorageMgmtIpUri:
+ default: ''
+ type: string
TenantIp:
default: ''
type: string
+ TenantIpUri:
+ default: ''
+ type: string
outputs:
net_ip_map:
@@ -48,3 +63,13 @@ outputs:
internal_api: {get_param: InternalApiNetworkVip}
storage: {get_param: StorageNetworkVip}
storage_mgmt: {get_param: StorageMgmtNetworkVip}
+ net_ip_uri_map:
+ description: >
+ A Hash containing a mapping of network names to assigned IPs for a
+ specific machine with brackets around IPv6 addresses for use in URLs.
+ value:
+ ctlplane: {get_param: ControlPlaneIP}
+ external: {get_param: ExternalNetworkVip}
+ internal_api: {get_param: InternalApiNetworkVip}
+ storage: {get_param: StorageNetworkVip}
+ storage_mgmt: {get_param: StorageMgmtNetworkVip}
diff --git a/network/ports/net_vip_map_external_v6.yaml b/network/ports/net_vip_map_external_v6.yaml
new file mode 100644
index 00000000..f6d67fe8
--- /dev/null
+++ b/network/ports/net_vip_map_external_v6.yaml
@@ -0,0 +1,95 @@
+heat_template_version: 2015-04-30
+
+parameters:
+ # Set these via parameter defaults to configure external VIPs
+ ControlPlaneIP:
+ default: ''
+ type: string
+ ExternalNetworkVip:
+ default: ''
+ type: string
+ InternalApiNetworkVip:
+ default: ''
+ type: string
+ StorageNetworkVip:
+ default: ''
+ type: string
+ StorageMgmtNetworkVip:
+ default: ''
+ type: string
+ # The following are unused in this template
+ ControlPlaneIp:
+ default: ''
+ type: string
+ ExternalIp:
+ default: ''
+ type: string
+ ExternalIpUri:
+ default: ''
+ type: string
+ InternalApiIp:
+ default: ''
+ type: string
+ InternalApiIpUri:
+ default: ''
+ type: string
+ StorageIp:
+ default: ''
+ type: string
+ StorageIpUri:
+ default: ''
+ type: string
+ StorageMgmtIp:
+ default: ''
+ type: string
+ StorageMgmtIpUri:
+ default: ''
+ type: string
+ TenantIp:
+ default: ''
+ type: string
+ TenantIpUri:
+ default: ''
+ type: string
+
+outputs:
+ net_ip_map:
+ description: >
+ A Hash containing a mapping of network names to assigned IPs
+ for a specific machine.
+ value:
+ ctlplane: {get_param: ControlPlaneIP}
+ external: {get_param: ExternalNetworkVip}
+ internal_api: {get_param: InternalApiNetworkVip}
+ storage: {get_param: StorageNetworkVip}
+ storage_mgmt: {get_param: StorageMgmtNetworkVip}
+ net_ip_uri_map:
+ description: >
+ A Hash containing a mapping of network names to assigned IPs for a
+ specific machine with brackets around IPv6 addresses for use in URLs.
+ value:
+ ctlplane: {get_param: ControlPlaneIP}
+ external:
+ list_join:
+ - ''
+ - - '['
+ - {get_param: ExternalNetworkVip}
+ - ']'
+ internal_api:
+ list_join:
+ - ''
+ - - '['
+ - {get_param: InternalApiNetworkVip}
+ - ']'
+ storage:
+ list_join:
+ - ''
+ - - '['
+ - {get_param: StorageNetworkVip}
+ - ']'
+ storage_mgmt:
+ list_join:
+ - ''
+ - - '['
+ - {get_param: StorageMgmtNetworkVip}
+ - ']'
diff --git a/network/ports/noop.yaml b/network/ports/noop.yaml
index ac946cd9..96c461e0 100644
--- a/network/ports/noop.yaml
+++ b/network/ports/noop.yaml
@@ -44,6 +44,9 @@ outputs:
ip_address:
description: pass thru network IP
value: {get_param: ControlPlaneIP}
+ ip_address_uri:
+ description: pass thru network IP (for compatibility with vip_v6.yaml)
+ value: {get_param: ControlPlaneIP}
ip_subnet:
description: IP/Subnet CIDR for the pass thru network IP
value:
diff --git a/network/ports/storage.yaml b/network/ports/storage.yaml
index a07e5a4f..1ed5cca1 100644
--- a/network/ports/storage.yaml
+++ b/network/ports/storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Creates a port on the storage network.
@@ -43,13 +43,14 @@ outputs:
ip_address:
description: storage network IP
value: {get_attr: [StoragePort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: storage network IP (for compatibility with storage_v6.yaml)
+ value: {get_attr: [StoragePort, fixed_ips, 0, ip_address]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the storage network IP
value:
list_join:
- ''
- - {get_attr: [StoragePort, fixed_ips, 0, ip_address]}
- '/'
- - {get_attr: [StoragePort, subnets, 0, cidr, -2]}
- - {get_attr: [StoragePort, subnets, 0, cidr, -1]}
+ - {str_split: ['/', {get_attr: [StoragePort, subnets, 0, cidr]}, 1]}
diff --git a/network/ports/storage_from_pool.yaml b/network/ports/storage_from_pool.yaml
index 668bc6f6..0a3d394c 100644
--- a/network/ports/storage_from_pool.yaml
+++ b/network/ports/storage_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Returns an IP from a network mapped list of IPs
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
@@ -33,13 +33,14 @@ outputs:
ip_address:
description: storage network IP
value: {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
+ ip_address_uri:
+ description: storage network IP (for compatibility with storage_v6.yaml)
+ value: {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the storage network IP
value:
list_join:
- ''
- - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
- '/'
- - {get_param: [StorageNetCidr, -2]}
- - {get_param: [StorageNetCidr, -1]}
+ - {str_split: ['/', {get_param: StorageNetCidr}, 1]}
diff --git a/network/ports/storage_from_pool_v6.yaml b/network/ports/storage_from_pool_v6.yaml
new file mode 100644
index 00000000..966d96ae
--- /dev/null
+++ b/network/ports/storage_from_pool_v6.yaml
@@ -0,0 +1,54 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a network mapped list of IPs. This version is for IPv6
+ addresses. The ip_address_uri output will have brackets for use in URLs.
+
+parameters:
+ StorageNetName:
+ description: Name of the storage network
+ default: storage
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ IPPool:
+ default: {}
+ description: A network mapped list of IPs
+ type: json
+ NodeIndex:
+ default: 0
+ description: Index of the IP to get from Pool
+ type: number
+ StorageNetCidr:
+ default: 'fd00:fd00:fd00:3000::/64'
+ description: Cidr for the storage network.
+ type: string
+
+outputs:
+ ip_address:
+ description: storage network IP
+ value: {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
+ ip_address_uri:
+ description: storage network IP (for compatibility with storage_v6.yaml)
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
+ - ']'
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the storage network IP
+ value:
+ list_join:
+ - ''
+ - - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
+ - '/'
+ - {get_param: [StorageNetCidr, -2]}
+ - {get_param: [StorageNetCidr, -1]}
diff --git a/network/ports/storage_mgmt.yaml b/network/ports/storage_mgmt.yaml
index 4890bf5a..548d226a 100644
--- a/network/ports/storage_mgmt.yaml
+++ b/network/ports/storage_mgmt.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Creates a port on the storage_mgmt API network.
@@ -43,13 +43,15 @@ outputs:
ip_address:
description: storage_mgmt network IP
value: {get_attr: [StorageMgmtPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: |
+ storage_mgmt network IP (for compatibility with storage_mgmt_v6.yaml)
+ value: {get_attr: [StorageMgmtPort, fixed_ips, 0, ip_address]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the storage_mgmt network IP
value:
list_join:
- ''
- - {get_attr: [StorageMgmtPort, fixed_ips, 0, ip_address]}
- '/'
- - {get_attr: [StorageMgmtPort, subnets, 0, cidr, -2]}
- - {get_attr: [StorageMgmtPort, subnets, 0, cidr, -1]}
+ - {str_split: ['/', {get_attr: [StorageMgmtPort, subnets, 0, cidr]}, 1]}
diff --git a/network/ports/storage_mgmt_from_pool.yaml b/network/ports/storage_mgmt_from_pool.yaml
index bea87105..c3f0f4e2 100644
--- a/network/ports/storage_mgmt_from_pool.yaml
+++ b/network/ports/storage_mgmt_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Returns an IP from a network mapped list of IPs
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
@@ -33,13 +33,14 @@ outputs:
ip_address:
description: storage MGMT network IP
value: {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
+ ip_address_uri:
+ description: storage MGMT network IP (for compatibility with storage_mgmt_v6.yaml)
+ value: {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the storage MGMT network IP
value:
list_join:
- ''
- - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
- '/'
- - {get_param: [StorageMgmtNetCidr, -2]}
- - {get_param: [StorageMgmtNetCidr, -1]}
+ - {str_split: ['/', {get_param: StorageMgmtNetCidr}, 1]}
diff --git a/network/ports/storage_mgmt_from_pool_v6.yaml b/network/ports/storage_mgmt_from_pool_v6.yaml
new file mode 100644
index 00000000..890da75c
--- /dev/null
+++ b/network/ports/storage_mgmt_from_pool_v6.yaml
@@ -0,0 +1,54 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a network mapped list of IPs. This version is for IPv6
+ addresses. The ip_address_uri output will have brackets for use in URLs.
+
+parameters:
+ StorageMgmtNetName:
+ description: Name of the storage MGMT network
+ default: storage_mgmt
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ IPPool:
+ default: {}
+ description: A network mapped list of IPs
+ type: json
+ NodeIndex:
+ default: 0
+ description: Index of the IP to get from Pool
+ type: number
+ StorageMgmtNetCidr:
+ default: 'fd00:fd00:fd00:4000::/64'
+ description: Cidr for the storage MGMT network.
+ type: string
+
+outputs:
+ ip_address:
+ description: storage MGMT network IP
+ value: {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
+ ip_address_uri:
+ description: storage MGMT network IP (for compatibility with storage_mgmt_v6.yaml)
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
+ - ']'
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the storage MGMT network IP
+ value:
+ list_join:
+ - ''
+ - - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
+ - '/'
+ - {get_param: [StorageMgmtNetCidr, -2]}
+ - {get_param: [StorageMgmtNetCidr, -1]}
diff --git a/network/ports/storage_mgmt_v6.yaml b/network/ports/storage_mgmt_v6.yaml
new file mode 100644
index 00000000..61956be2
--- /dev/null
+++ b/network/ports/storage_mgmt_v6.yaml
@@ -0,0 +1,63 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Creates a port on the storage_mgmt API network.
+
+parameters:
+ StorageMgmtNetName:
+ description: Name of the storage_mgmt API neutron network
+ default: storage_mgmt
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ FixedIPs:
+ description: >
+ Control the IP allocation for the VIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ default: []
+ type: json
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ type: number
+
+resources:
+
+ StorageMgmtPort:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: StorageMgmtNetName}
+ name: {get_param: PortName}
+ fixed_ips: {get_param: FixedIPs}
+ replacement_policy: AUTO
+
+outputs:
+ ip_address:
+ description: storage_mgmt network IP
+ value: {get_attr: [StorageMgmtPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: storage_mgmt network IP with brackets suitable for a URI
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_attr: [StorageMgmtPort, fixed_ips, 0, ip_address]}
+ - ']'
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the storage_mgmt network IP
+ value:
+ list_join:
+ - ''
+ - - {get_attr: [StorageMgmtPort, fixed_ips, 0, ip_address]}
+ - '/'
+ - {get_attr: [StorageMgmtPort, subnets, 0, cidr, -2]}
+ - {get_attr: [StorageMgmtPort, subnets, 0, cidr, -1]}
diff --git a/network/ports/storage_v6.yaml b/network/ports/storage_v6.yaml
new file mode 100644
index 00000000..13b62276
--- /dev/null
+++ b/network/ports/storage_v6.yaml
@@ -0,0 +1,63 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Creates a port on the storage network.
+
+parameters:
+ StorageNetName:
+ description: Name of the storage neutron network
+ default: storage
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ FixedIPs:
+ description: >
+ Control the IP allocation for the VIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ default: []
+ type: json
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ type: number
+
+resources:
+
+ StoragePort:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: StorageNetName}
+ name: {get_param: PortName}
+ fixed_ips: {get_param: FixedIPs}
+ replacement_policy: AUTO
+
+outputs:
+ ip_address:
+ description: storage network IP
+ value: {get_attr: [StoragePort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: storage network IP with brackets suitable for a URL
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_attr: [StoragePort, fixed_ips, 0, ip_address]}
+ - ']'
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the storage network IP
+ value:
+ list_join:
+ - ''
+ - - {get_attr: [StoragePort, fixed_ips, 0, ip_address]}
+ - '/'
+ - {get_attr: [StoragePort, subnets, 0, cidr, -2]}
+ - {get_attr: [StoragePort, subnets, 0, cidr, -1]}
diff --git a/network/ports/tenant.yaml b/network/ports/tenant.yaml
index 86c58f2f..d8f78c49 100644
--- a/network/ports/tenant.yaml
+++ b/network/ports/tenant.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Creates a port on the tenant network.
@@ -43,13 +43,14 @@ outputs:
ip_address:
description: tenant network IP
value: {get_attr: [TenantPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: tenant network IP (for compatibility with tenant_v6.yaml)
+ value: {get_attr: [TenantPort, fixed_ips, 0, ip_address]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the tenant network IP
value:
list_join:
- ''
- - {get_attr: [TenantPort, fixed_ips, 0, ip_address]}
- '/'
- - {get_attr: [TenantPort, subnets, 0, cidr, -2]}
- - {get_attr: [TenantPort, subnets, 0, cidr, -1]}
+ - {str_split: ['/', {get_attr: [TenantPort, subnets, 0, cidr]}, 1]}
diff --git a/network/ports/tenant_from_pool.yaml b/network/ports/tenant_from_pool.yaml
index 29303bb6..d5fd7080 100644
--- a/network/ports/tenant_from_pool.yaml
+++ b/network/ports/tenant_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Returns an IP from a network mapped list of IPs
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
@@ -33,13 +33,14 @@ outputs:
ip_address:
description: tenant network IP
value: {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
+ ip_address_uri:
+ description: tenant network IP (for compatibility with tenant_v6.yaml)
+ value: {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the tenant network IP
value:
list_join:
- ''
- - {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
- '/'
- - {get_param: [TenantNetCidr, -2]}
- - {get_param: [TenantNetCidr, -1]}
+ - {str_split: ['/', {get_param: TenantNetCidr}, 1]}
diff --git a/network/ports/tenant_from_pool_v6.yaml b/network/ports/tenant_from_pool_v6.yaml
new file mode 100644
index 00000000..b2bcd426
--- /dev/null
+++ b/network/ports/tenant_from_pool_v6.yaml
@@ -0,0 +1,53 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a network mapped list of IPs
+
+parameters:
+ TenantNetName:
+ description: Name of the tenant network
+ default: tenant
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ IPPool:
+ default: {}
+ description: A network mapped list of IPs
+ type: json
+ NodeIndex:
+ default: 0
+ description: Index of the IP to get from Pool
+ type: number
+ TenantNetCidr:
+ default: 'fd00:fd00:fd00:5000::/64'
+ description: Cidr for the tenant network.
+ type: string
+
+outputs:
+ ip_address:
+ description: tenant network IP
+ value: {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
+ ip_address_uri:
+ description: tenant network IP (for compatibility with tenant_v6.yaml)
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
+ - ']'
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the tenant network IP
+ value:
+ list_join:
+ - ''
+ - - {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
+ - '/'
+ - {get_param: [TenantNetCidr, -2]}
+ - {get_param: [TenantNetCidr, -1]}
diff --git a/network/ports/tenant_v6.yaml b/network/ports/tenant_v6.yaml
new file mode 100644
index 00000000..6ca37549
--- /dev/null
+++ b/network/ports/tenant_v6.yaml
@@ -0,0 +1,63 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Creates a port on the tenant network.
+
+parameters:
+ TenantNetName:
+ description: Name of the tenant neutron network
+ default: tenant
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ FixedIPs:
+ description: >
+ Control the IP allocation for the VIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ default: []
+ type: json
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ type: number
+
+resources:
+
+ TenantPort:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: TenantNetName}
+ name: {get_param: PortName}
+ fixed_ips: {get_param: FixedIPs}
+ replacement_policy: AUTO
+
+outputs:
+ ip_address:
+ description: tenant network IP
+ value: {get_attr: [TenantPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: tenant network IP with brackets suitable for a URL
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_attr: [TenantPort, fixed_ips, 0, ip_address]}
+ - ']'
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the tenant network IP
+ value:
+ list_join:
+ - ''
+ - - {get_attr: [TenantPort, fixed_ips, 0, ip_address]}
+ - '/'
+ - {get_attr: [TenantPort, subnets, 0, cidr, -2]}
+ - {get_attr: [TenantPort, subnets, 0, cidr, -1]}
diff --git a/network/ports/vip.yaml b/network/ports/vip.yaml
index 9bb6cde2..38322907 100644
--- a/network/ports/vip.yaml
+++ b/network/ports/vip.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
Creates a port for a VIP on the isolated network NetworkName.
@@ -45,13 +45,14 @@ outputs:
ip_address:
description: Virtual IP network IP
value: {get_attr: [VipPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: Virtual IP network IP (for compatibility with vip_v6.yaml)
+ value: {get_attr: [VipPort, fixed_ips, 0, ip_address]}
ip_subnet:
- # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
description: IP/Subnet CIDR for the network associated with this IP
value:
list_join:
- ''
- - {get_attr: [VipPort, fixed_ips, 0, ip_address]}
- '/'
- - {get_attr: [VipPort, subnets, 0, cidr, -2]}
- - {get_attr: [VipPort, subnets, 0, cidr, -1]}
+ - {str_split: ['/', {get_attr: [VipPort, subnets, 0, cidr]}, 1]}
diff --git a/network/ports/vip_v6.yaml b/network/ports/vip_v6.yaml
new file mode 100644
index 00000000..de927094
--- /dev/null
+++ b/network/ports/vip_v6.yaml
@@ -0,0 +1,65 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Creates a port for a VIP on the isolated network NetworkName.
+ The IP address will be chosen automatically if FixedIPs is empty.
+
+parameters:
+ ServiceName: # Here for compatibility with from_service.yaml
+ description: Name of the service to lookup
+ default: ''
+ type: string
+ NetworkName:
+ description: Name of the network where the VIP will be created
+ default: internal_api
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ ControlPlaneNetwork:
+ description: The name of the undercloud Neutron control plane
+ default: ctlplane
+ type: string
+ FixedIPs:
+ description: >
+ Control the IP allocation for the VIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ default: []
+ type: json
+
+resources:
+ VipPort:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: NetworkName}
+ name: {get_param: PortName}
+ fixed_ips: {get_param: FixedIPs}
+ replacement_policy: AUTO
+
+outputs:
+ ip_address:
+ description: Virtual IP network IP
+ value: {get_attr: [VipPort, fixed_ips, 0, ip_address]}
+ ip_address_uri:
+ description: Virtual IP with brackets suitable for a URL
+ value:
+ list_join:
+ - ''
+ - - '['
+ - {get_attr: [VipPort, fixed_ips, 0, ip_address]}
+ - ']'
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the network associated with this IP
+ value:
+ list_join:
+ - ''
+ - - {get_attr: [VipPort, fixed_ips, 0, ip_address]}
+ - '/'
+ - {get_attr: [VipPort, subnets, 0, cidr, -2]}
+ - {get_attr: [VipPort, subnets, 0, cidr, -1]}
diff --git a/network/storage_mgmt_v6.yaml b/network/storage_mgmt_v6.yaml
new file mode 100644
index 00000000..f05644ef
--- /dev/null
+++ b/network/storage_mgmt_v6.yaml
@@ -0,0 +1,69 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Storage management network. Storage replication, etc.
+
+parameters:
+ # the defaults here work for static IP assignment (IPAM) only
+ StorageMgmtNetCidr:
+ # OpenStack uses the EUI-64 address format, which requires a /64 prefix
+ default: 'fd00:fd00:fd00:4000::/64'
+ description: Cidr for the storage management network.
+ type: string
+ StorageMgmtNetValueSpecs:
+ default: {'provider:physical_network': 'storage_mgmt', 'provider:network_type': 'flat'}
+ description: Value specs for the storage_mgmt network.
+ type: json
+ StorageMgmtNetAdminStateUp:
+ default: false
+ description: The admin state of the network.
+ type: boolean
+ StorageMgmtNetShared:
+ default: false
+ description: Whether this network is shared across all tenants.
+ type: boolean
+ StorageMgmtNetName:
+ default: storage_mgmt
+ description: The name of the Storage management network.
+ type: string
+ StorageMgmtSubnetName:
+ default: storage_mgmt_subnet
+ description: The name of the Storage management subnet in Neutron.
+ type: string
+ StorageMgmtAllocationPools:
+ default: [{'start': 'fd00:fd00:fd00:4000::10', 'end': 'fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe'}]
+ description: IP allocation pool range for the storage mgmt network.
+ type: json
+ IPv6AddressMode:
+ default: dhcpv6-stateful
+ description: Neutron subnet IPv6 address mode
+ type: string
+ IPv6RAMode:
+ default: dhcpv6-stateful
+ description: Neutron subnet IPv6 router advertisement mode
+ type: string
+
+resources:
+ StorageMgmtNetwork:
+ type: OS::Neutron::Net
+ properties:
+ admin_state_up: {get_param: StorageMgmtNetAdminStateUp}
+ name: {get_param: StorageMgmtNetName}
+ shared: {get_param: StorageMgmtNetShared}
+ value_specs: {get_param: StorageMgmtNetValueSpecs}
+
+ StorageMgmtSubnet:
+ type: OS::Neutron::Subnet
+ properties:
+ ip_version: 6
+ ipv6_address_mode: {get_param: IPv6AddressMode}
+ ipv6_ra_mode: {get_param: IPv6RAMode}
+ cidr: {get_param: StorageMgmtNetCidr}
+ name: {get_param: StorageMgmtSubnetName}
+ network: {get_resource: StorageMgmtNetwork}
+ allocation_pools: {get_param: StorageMgmtAllocationPools}
+
+outputs:
+ OS::stack_id:
+ description: Neutron storage management network
+ value: {get_resource: StorageMgmtNetwork}
diff --git a/network/storage_v6.yaml b/network/storage_v6.yaml
new file mode 100644
index 00000000..36a6fae8
--- /dev/null
+++ b/network/storage_v6.yaml
@@ -0,0 +1,69 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Storage network.
+
+parameters:
+ # the defaults here work for static IP assignment (IPAM) only
+ StorageNetCidr:
+ # OpenStack uses the EUI-64 address format, which requires a /64 prefix
+ default: 'fd00:fd00:fd00:3000::/64'
+ description: Cidr for the storage network.
+ type: string
+ StorageNetValueSpecs:
+ default: {'provider:physical_network': 'storage', 'provider:network_type': 'flat'}
+ description: Value specs for the storage network.
+ type: json
+ StorageNetAdminStateUp:
+ default: false
+ description: The admin state of the network.
+ type: boolean
+ StorageNetShared:
+ default: false
+ description: Whether this network is shared across all tenants.
+ type: boolean
+ StorageNetName:
+ default: storage
+ description: The name of the storage network.
+ type: string
+ StorageSubnetName:
+ default: storage_subnet
+ description: The name of the storage subnet in Neutron.
+ type: string
+ StorageAllocationPools:
+ default: [{'start': 'fd00:fd00:fd00:3000::10', 'end': 'fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe'}]
+ description: IP allocation pool range for the storage network.
+ type: json
+ IPv6AddressMode:
+ default: dhcpv6-stateful
+ description: Neutron subnet IPv6 address mode
+ type: string
+ IPv6RAMode:
+ default: dhcpv6-stateful
+ description: Neutron subnet IPv6 router advertisement mode
+ type: string
+
+resources:
+ StorageNetwork:
+ type: OS::Neutron::Net
+ properties:
+ admin_state_up: {get_param: StorageNetAdminStateUp}
+ name: {get_param: StorageNetName}
+ shared: {get_param: StorageNetShared}
+ value_specs: {get_param: StorageNetValueSpecs}
+
+ StorageSubnet:
+ type: OS::Neutron::Subnet
+ properties:
+ ip_version: 6
+ ipv6_address_mode: {get_param: IPv6AddressMode}
+ ipv6_ra_mode: {get_param: IPv6RAMode}
+ cidr: {get_param: StorageNetCidr}
+ name: {get_param: StorageSubnetName}
+ network: {get_resource: StorageNetwork}
+ allocation_pools: {get_param: StorageAllocationPools}
+
+outputs:
+ OS::stack_id:
+ description: Neutron storage network
+ value: {get_resource: StorageNetwork}
diff --git a/network/tenant_v6.yaml b/network/tenant_v6.yaml
new file mode 100644
index 00000000..b653eaf7
--- /dev/null
+++ b/network/tenant_v6.yaml
@@ -0,0 +1,69 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Tenant IPv6 network.
+
+parameters:
+ # the defaults here work for static IP assignment (IPAM) only
+ TenantNetCidr:
+ # OpenStack uses the EUI-64 address format, which requires a /64 prefix
+ default: 'fd00:fd00:fd00:5000::/64'
+ description: Cidr for the tenant network.
+ type: string
+ TenantNetValueSpecs:
+ default: {'provider:physical_network': 'tenant', 'provider:network_type': 'flat'}
+ description: Value specs for the tenant network.
+ type: json
+ TenantNetAdminStateUp:
+ default: false
+ description: The admin state of the network.
+ type: boolean
+ TenantNetShared:
+ default: false
+ description: Whether this network is shared across all tenants.
+ type: boolean
+ TenantNetName:
+ default: tenant
+ description: The name of the tenant network.
+ type: string
+ TenantSubnetName:
+ default: tenant_subnet
+ description: The name of the tenant subnet in Neutron.
+ type: string
+ TenantAllocationPools:
+ default: [{'start': 'fd00:fd00:fd00:5000::10', 'end': 'fd00:fd00:fd00:5000:ffff:ffff:ffff:fffe'}]
+ description: IP allocation pool range for the tenant network.
+ type: json
+ IPv6AddressMode:
+ default: dhcpv6-stateful
+ description: Neutron subnet IPv6 address mode
+ type: string
+ IPv6RAMode:
+ default: dhcpv6-stateful
+ description: Neutron subnet IPv6 router advertisement mode
+ type: string
+
+resources:
+ TenantNetwork:
+ type: OS::Neutron::Net
+ properties:
+ admin_state_up: {get_param: TenantNetAdminStateUp}
+ name: {get_param: TenantNetName}
+ shared: {get_param: TenantNetShared}
+ value_specs: {get_param: TenantNetValueSpecs}
+
+ TenantSubnet:
+ type: OS::Neutron::Subnet
+ properties:
+ ip_version: 6
+ ipv6_address_mode: {get_param: IPv6AddressMode}
+ ipv6_ra_mode: {get_param: IPv6RAMode}
+ cidr: {get_param: TenantNetCidr}
+ name: {get_param: TenantSubnetName}
+ network: {get_resource: TenantNetwork}
+ allocation_pools: {get_param: TenantAllocationPools}
+
+outputs:
+ OS::stack_id:
+ description: Neutron tenant network
+ value: {get_resource: TenantNetwork}
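
The *_v6 network and port templates above are not referenced directly; they are selected through the resource registry by an environment such as environments/network-isolation-v6.yaml. An abbreviated, illustrative sketch of such a mapping (registry key names are assumptions and the exact set of entries depends on the roles deployed):

resource_registry:
  OS::TripleO::Network::Storage: ../network/storage_v6.yaml
  OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt_v6.yaml
  OS::TripleO::Network::Tenant: ../network/tenant_v6.yaml
  OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_v6.yaml
  OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml
  OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_v6.yaml
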
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index 888a3c89..55cefc76 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.yaml
@@ -23,9 +23,10 @@ resource_registry:
OS::TripleO::BootstrapNode::SoftwareConfig: puppet/bootstrap-config.yaml
# Tasks (for internal TripleO usage)
+ OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
- OS::TripleO::Tasks::ControllerPrePuppet: extraconfig/tasks/noop.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: extraconfig/tasks/noop.yaml
+ OS::TripleO::Tasks::ControllerPrePuppet: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPostPuppet: OS::Heat::None
# This creates the "heat-admin" user for all OS images by default
# To disable, replace with firstboot/userdata_default.yaml
@@ -55,13 +56,12 @@ resource_registry:
OS::TripleO::Network: network/networks.yaml
OS::TripleO::VipConfig: puppet/vip-config.yaml
-
- OS::TripleO::Network::External: network/noop.yaml
- OS::TripleO::Network::InternalApi: network/noop.yaml
- OS::TripleO::Network::StorageMgmt: network/noop.yaml
- OS::TripleO::Network::Storage: network/noop.yaml
- OS::TripleO::Network::Tenant: network/noop.yaml
- OS::TripleO::Network::Management: network/noop.yaml
+ OS::TripleO::Network::External: OS::Heat::None
+ OS::TripleO::Network::InternalApi: OS::Heat::None
+ OS::TripleO::Network::StorageMgmt: OS::Heat::None
+ OS::TripleO::Network::Storage: OS::Heat::None
+ OS::TripleO::Network::Tenant: OS::Heat::None
+ OS::TripleO::Network::Management: OS::Heat::None
OS::TripleO::Network::Ports::NetVipMap: network/ports/net_ip_map.yaml
OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml
@@ -116,11 +116,15 @@ resource_registry:
OS::TripleO::BlockStorage::Ports::ManagementPort: network/ports/noop.yaml
# Service Endpoint Mappings
- OS::TripleO::Endpoint: network/endpoints/endpoint.yaml
OS::TripleO::EndpointMap: network/endpoints/endpoint_map.yaml
# validation resources
OS::TripleO::AllNodes::Validation: all-nodes-validation.yaml
+ # services
+ OS::TripleO::Services: puppet/services/services.yaml
+ OS::TripleO::Services::Keystone: puppet/services/keystone.yaml
+
parameter_defaults:
EnablePackageInstall: false
+ SoftwareConfigTransport: POLL_TEMP_URL
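
The registry entries switched to OS::Heat::None above rely on Heat's built-in no-op resource type, so optional hooks can be disabled without shipping stub templates like noop.yaml. A minimal sketch, not part of the patch:

heat_template_version: 2015-10-15

resources:
  DisabledHook:
    # OS::Heat::None accepts and ignores arbitrary properties and creates
    # nothing; references to it resolve to placeholder values.
    type: OS::Heat::None
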
diff --git a/overcloud.yaml b/overcloud.yaml
index 673bf506..20c853cd 100644
--- a/overcloud.yaml
+++ b/overcloud.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2016-04-08
description: >
Deploy an OpenStack environment, consisting of several node types (roles),
@@ -16,6 +16,10 @@ parameters:
description: The password for the keystone admin account, used for monitoring, querying neutron etc.
type: string
hidden: true
+ AodhPassword:
+ description: The password for the aodh services.
+ type: string
+ hidden: true
CeilometerBackend:
default: 'mongodb'
description: The ceilometer backend type.
@@ -28,6 +32,12 @@ parameters:
description: The password for the ceilometer service account.
type: string
hidden: true
+ CeilometerMeterDispatcher:
+ default: 'database'
+ description: Dispatcher to process meter data
+ type: string
+ constraints:
+ - allowed_values: ['gnocchi', 'database']
# This has to be an UUID so for now we generate it outside the template
CephClusterFSID:
default: ''
@@ -72,6 +82,10 @@ parameters:
default: []
description: Should be used for arbitrary ips.
type: json
+ CorosyncIPv6:
+ default: false
+ description: Enable IPv6 in Corosync
+ type: boolean
Debug:
default: ''
description: Set to True to enable debugging on all services.
@@ -88,12 +102,22 @@ parameters:
default: 'REBUILD_PRESERVE_EPHEMERAL'
description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
type: string
+ InternalApiVirtualFixedIPs:
+ default: []
+ description: >
+ Control the IP allocation for the InternalApiVirtualInterface port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ type: json
KeyName:
default: default
description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
constraints:
- custom_constraint: nova.keypair
+ MemcachedIPv6:
+ default: false
+ description: Enable IPv6 features in Memcached.
+ type: boolean
NeutronExternalNetworkBridge:
description: Name of bridge used for external network traffic.
type: string
@@ -168,6 +192,15 @@ parameters:
description: Shared secret to prevent spoofing
type: string
hidden: true
+ NeutronTenantMtu:
+ description: >
+ The default MTU for tenant networks. For VXLAN/GRE tunneling, this should
+ be at least 50 bytes smaller than the MTU on the physical network. This
+ value will be used to set the MTU on the virtual Ethernet device.
+ This value will be used to construct the NeutronDnsmasqOptions, since that
+ will determine the MTU that is assigned to the VM host through DHCP.
+ default: "1400"
+ type: string
NeutronTunnelTypes:
default: 'vxlan'
description: |
@@ -208,7 +241,7 @@ parameters:
The mechanism drivers for the Neutron tenant network.
type: comma_delimited_list
NeutronPluginExtensions:
- default: "qos"
+ default: "qos,port_security"
description: |
Comma-separated list of extensions enabled for the Neutron plugin.
type: comma_delimited_list
@@ -229,6 +262,10 @@ parameters:
type: number
default: 1
description: The number of neutron dhcp agents to schedule per network
+ NovaIPv6:
+ default: false
+ description: Enable IPv6 features in Nova
+ type: boolean
NovaPassword:
description: The password for the nova service account, used by nova-api.
type: string
@@ -241,6 +278,10 @@ parameters:
default: false
description: Should MongoDb journaling be disabled
type: boolean
+ MongoDbIPv6:
+ default: false
+ description: Enable IPv6 if MongoDB VIP is IPv6
+ type: boolean
PublicVirtualFixedIPs:
default: []
description: >
@@ -251,13 +292,11 @@ parameters:
type: string
default: unset
description: Salt for the rabbit cookie, change this to force the randomly generated rabbit cookie to change.
- # FIXME: 'guest' is provisioned in RabbitMQ by default, we should create a user if these are changed
RabbitUserName:
default: guest
description: The username for RabbitMQ
type: string
RabbitPassword:
- default: guest
description: The password for RabbitMQ
type: string
hidden: true
@@ -276,6 +315,14 @@ parameters:
default: 16384
description: Configures RabbitMQ FD limit
type: string
+ RabbitIPv6:
+ default: false
+ description: Enable IPv6 in RabbitMQ
+ type: boolean
+ RedisPassword:
+ description: The password for Redis
+ type: string
+ hidden: true
SnmpdReadonlyUserName:
default: ro_snmp_user
description: The user name for SNMPd with readonly rights running on all Overcloud nodes
@@ -284,6 +331,18 @@ parameters:
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
+ StorageVirtualFixedIPs:
+ default: []
+ description: >
+ Control the IP allocation for the StorageVirtualInterface port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ type: json
+ StorageMgmtVirtualFixedIPs:
+ default: []
+ description: >
+ Control the IP allocation for the StorageMgmtVirtualInterface port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ type: json
TimeZone:
default: 'UTC'
description: The timezone to be set on nodes.
@@ -302,12 +361,8 @@ parameters:
type: json
# Controller-specific params
- AdminToken:
- description: The keystone auth secret.
- type: string
- hidden: true
CinderLVMLoopDeviceSize:
- default: 5000
+ default: 10280
description: The size of the loopback file used by the cinder LVM driver.
type: number
CinderNfsMountOptions:
@@ -327,7 +382,7 @@ parameters:
type: string
hidden: true
CinderISCSIHelper:
- default: tgtadm
+ default: lioadm
description: The iSCSI helper to use with cinder.
type: string
ControllerCount:
@@ -434,50 +489,33 @@ parameters:
type: string
constraints:
- allowed_values: ['swift', 'file', 'rbd']
+ GnocchiBackend:
+ default: file
+ description: The short name of the Gnocchi backend to use. Should be one
+ of swift, rbd or file
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd']
+ GnocchiIndexerBackend:
+ default: 'mysql'
+ description: The short name of the Gnocchi indexer backend to use.
+ type: string
+ GnocchiPassword:
+ description: The password for the gnocchi service account.
+ type: string
+ hidden: true
HeatPassword:
description: The password for the Heat service account, used by the Heat services.
type: string
hidden: true
HeatStackDomainAdminPassword:
- description: Password for heat_domain_admin user.
+ description: Password for heat_stack_domain_admin user.
type: string
hidden: true
InstanceNameTemplate:
default: 'instance-%08x'
description: Template string to be used to generate instance names
type: string
- KeystoneCACertificate:
- default: ''
- description: Keystone self-signed certificate authority certificate.
- type: string
- KeystoneSigningCertificate:
- default: ''
- description: Keystone certificate for verifying token validity.
- type: string
- KeystoneSigningKey:
- default: ''
- description: Keystone key for signing tokens.
- type: string
- hidden: true
- KeystoneSSLCertificate:
- default: ''
- description: Keystone certificate for verifying token validity.
- type: string
- KeystoneSSLCertificateKey:
- default: ''
- description: Keystone key for signing tokens.
- type: string
- hidden: true
- KeystoneNotificationDriver:
- description: Comma-separated list of Oslo notification drivers used by Keystone
- default: ['messaging']
- type: comma_delimited_list
- KeystoneNotificationFormat:
- description: The Keystone notification format
- default: 'basic'
- type: string
- constraints:
- - allowed_values: [ 'basic', 'cadf' ]
ManageFirewall:
default: false
description: Whether to manage IPtables rules.
@@ -498,8 +536,11 @@ parameters:
type: number
default: 4096
NeutronDnsmasqOptions:
- default: 'dhcp-option-force=26,1400'
- description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the tunnel overhead.
+ default: 'dhcp-option-force=26,%MTU%'
+ description: >
+ Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU
+ to be set to the value of NeutronTenantMtu, which should be set to account
+ for tunnel overhead.
type: string
NeutronPublicInterfaceDefaultRoute:
default: ''
@@ -623,6 +664,8 @@ parameters:
default:
NeutronTenantNetwork: tenant
CeilometerApiNetwork: internal_api
+ AodhApiNetwork: internal_api
+ GnocchiApiNetwork: internal_api
MongoDbNetwork: internal_api
CinderApiNetwork: internal_api
CinderIscsiNetwork: storage
@@ -654,6 +697,14 @@ parameters:
via parameter_defaults in the resource registry.
type: json
+ ControllerServices:
+ default:
+ - OS::TripleO::Services::Keystone
+ description: A list of service resources (configured in the Heat
+ resource_registry) which represent nested stacks
+ for each service that should get installed on the Controllers.
+ type: comma_delimited_list
+
# Block storage specific parameters
BlockStorageCount:
type: number
@@ -821,19 +872,28 @@ resources:
type: OS::TripleO::EndpointMap
properties:
CloudName: {get_param: CloudName}
- CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
- CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
- GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
- GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
- HeatApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
- KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
- KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
- MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
- NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
- NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
- SaharaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
- SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
- PublicVirtualIP: {get_attr: [VipMap, net_ip_map, external]}
+ CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ AodhApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
+ CinderApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
+ GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
+ GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
+ GnocchiApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, GnocchiApiNetwork]}]}
+ HeatApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
+ KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
+ KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
+ MysqlVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
+ NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
+ NovaApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
+ SaharaApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
+ SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
+ PublicVirtualIP: {get_attr: [VipMap, net_ip_uri_map, external]}
+
+ ControllerServiceChain:
+ type: OS::TripleO::Services
+ properties:
+ Services: {get_param: ControllerServices}
+ EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+ MysqlVirtualIPUri: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
Controller:
type: OS::Heat::ResourceGroup
@@ -845,10 +905,11 @@ resources:
type: OS::TripleO::Controller
properties:
AdminPassword: {get_param: AdminPassword}
- AdminToken: {get_param: AdminToken}
+ AodhPassword: {get_param: AodhPassword}
CeilometerBackend: {get_param: CeilometerBackend}
CeilometerMeteringSecret: {get_param: CeilometerMeteringSecret}
CeilometerPassword: {get_param: CeilometerPassword}
+ CeilometerMeterDispatcher: {get_param: CeilometerMeterDispatcher}
CinderLVMLoopDeviceSize: {get_param: CinderLVMLoopDeviceSize}
CinderNfsMountOptions: {get_param: CinderNfsMountOptions}
CinderNfsServers: {get_param: CinderNfsServers}
@@ -860,6 +921,7 @@ resources:
CloudDomain: {get_param: CloudDomain}
ControlVirtualInterface: {get_param: ControlVirtualInterface}
ControllerExtraConfig: {get_param: controllerExtraConfig}
+ CorosyncIPv6: {get_param: CorosyncIPv6}
Debug: {get_param: Debug}
EnableFencing: {get_param: EnableFencing}
ManageFirewall: {get_param: ManageFirewall}
@@ -874,6 +936,9 @@ resources:
GlanceBackend: {get_param: GlanceBackend}
GlanceNotifierStrategy: {get_param: GlanceNotifierStrategy}
GlanceLogFile: {get_param: GlanceLogFile}
+ GnocchiPassword: {get_param: GnocchiPassword}
+ GnocchiBackend: {get_param: GnocchiBackend}
+ GnocchiIndexerBackend: {get_param: GnocchiIndexerBackend}
HAProxySyslogAddress: {get_param: HAProxySyslogAddress}
HeatPassword: {get_param: HeatPassword}
HeatStackDomainAdminPassword: {get_param: HeatStackDomainAdminPassword}
@@ -884,13 +949,7 @@ resources:
ImageUpdatePolicy: {get_param: ImageUpdatePolicy}
InstanceNameTemplate: {get_param: InstanceNameTemplate}
KeyName: {get_param: KeyName}
- KeystoneCACertificate: {get_param: KeystoneCACertificate}
- KeystoneSigningCertificate: {get_param: KeystoneSigningCertificate}
- KeystoneSigningKey: {get_param: KeystoneSigningKey}
- KeystoneSSLCertificate: {get_param: KeystoneSSLCertificate}
- KeystoneSSLCertificateKey: {get_param: KeystoneSSLCertificateKey}
- KeystoneNotificationDriver: {get_param: KeystoneNotificationDriver}
- KeystoneNotificationFormat: {get_param: KeystoneNotificationFormat}
+ MemcachedIPv6: {get_param: MemcachedIPv6}
MysqlClusterUniquePart: {get_attr: [MysqlClusterUniquePart, value]}
MysqlInnodbBufferPoolSize: {get_param: MysqlInnodbBufferPoolSize}
MysqlMaxConnections: {get_param: MysqlMaxConnections}
@@ -898,6 +957,7 @@ resources:
NeutronPublicInterfaceIP: {get_param: NeutronPublicInterfaceIP}
NeutronFlatNetworks: {get_param: NeutronFlatNetworks}
NeutronBridgeMappings: {get_param: NeutronBridgeMappings}
+ NeutronTenantMtu: {get_param: NeutronTenantMtu}
NeutronExternalNetworkBridge: {get_param: NeutronExternalNetworkBridge}
NeutronEnableIsolatedMetadata: {get_param: NeutronEnableIsolatedMetadata}
NeutronEnableTunnelling: {get_param: NeutronEnableTunnelling}
@@ -907,7 +967,11 @@ resources:
NeutronPublicInterfaceDefaultRoute: {get_param: NeutronPublicInterfaceDefaultRoute}
NeutronPublicInterfaceRawDevice: {get_param: NeutronPublicInterfaceRawDevice}
NeutronPassword: {get_param: NeutronPassword}
- NeutronDnsmasqOptions: {get_param: NeutronDnsmasqOptions}
+ NeutronDnsmasqOptions:
+ str_replace:
+ template: {get_param: NeutronDnsmasqOptions}
+ params:
+ '%MTU%': {get_param: NeutronTenantMtu}
NeutronDVR: {get_param: NeutronDVR}
NeutronMetadataProxySharedSecret: {get_param: NeutronMetadataProxySharedSecret}
NeutronAgentMode: {get_param: NeutronAgentMode}
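
NeutronDnsmasqOptions is now rendered through str_replace so the %MTU% placeholder picks up NeutronTenantMtu, keeping the DHCP-advertised MTU in line with the MTU set on the virtual Ethernet devices. A sketch of the substitution with example values; the option string itself is an assumption, not taken from this change:

    # dhcp-option-force=26 is dnsmasq's "interface MTU" option.
    NeutronDnsmasqOptions:
      str_replace:
        template: 'dhcp-option-force=26,%MTU%'
        params:
          '%MTU%': 1400   # in the template above this is {get_param: NeutronTenantMtu}
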
@@ -922,9 +986,11 @@ resources:
NeutronDhcpAgentsPerNetwork: {get_param: NeutronDhcpAgentsPerNetwork}
NeutronNetworkType: {get_param: NeutronNetworkType}
NeutronTunnelTypes: {get_param: NeutronTunnelTypes}
+ NovaIPv6: {get_param: NovaIPv6}
NovaPassword: {get_param: NovaPassword}
NtpServer: {get_param: NtpServer}
MongoDbNoJournal: {get_param: MongoDbNoJournal}
+ MongoDbIPv6: {get_param: MongoDbIPv6}
PcsdPassword: {get_resource: PcsdPassword}
PublicVirtualInterface: {get_param: PublicVirtualInterface}
RabbitPassword: {get_param: RabbitPassword}
@@ -933,10 +999,13 @@ resources:
RabbitClientUseSSL: {get_param: RabbitClientUseSSL}
RabbitClientPort: {get_param: RabbitClientPort}
RabbitFDLimit: {get_param: RabbitFDLimit}
+ RabbitIPv6: {get_param: RabbitIPv6}
+ RedisPassword: {get_param: RedisPassword}
SaharaPassword: {get_param: SaharaPassword}
SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName}
SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword}
RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]}
+ RedisVirtualIPUri: {get_attr: [RedisVirtualIP, ip_address_uri]}
SwiftHashSuffix: {get_param: SwiftHashSuffix}
SwiftMountCheck: {get_param: SwiftMountCheck}
SwiftMinPartHours: {get_param: SwiftMinPartHours}
@@ -949,15 +1018,17 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ AodhApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
+ GnocchiApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GnocchiApiNetwork]}]}
CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
HeatApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
+ HeatApiVirtualIPUri: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
- KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
- KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
+ MysqlVirtualIPUri: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
SaharaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
@@ -970,6 +1041,7 @@ resources:
NodeIndex: '%index%'
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: ControllerSchedulerHints}
+ ServiceConfigSettings: {get_attr: [ControllerServiceChain, config_settings]}
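
ControllerServiceChain brings in the composable-services pattern: OS::TripleO::Services resolves the ControllerServices list into per-service templates and aggregates their outputs, which this change reads back as config_settings (here) and step_config (in the post-deployment wiring further down). A hypothetical minimal service template such a chain could aggregate; the key and values are placeholders, and the real contract is defined by the OS::TripleO::Services implementation, which is not part of this diff:

    heat_template_version: 2015-04-30
    parameters:
      EndpointMap:
        type: json
        default: {}
    outputs:
      config_settings:
        description: Hiera settings merged into the role configuration.
        value:
          example_service::enabled: true   # placeholder key
      step_config:
        description: Puppet snippet appended to the role manifest.
        value: ''                          # left empty in this sketch
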
Compute:
type: OS::Heat::ResourceGroup
@@ -996,6 +1068,7 @@ resources:
KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
NeutronBridgeMappings: {get_param: NeutronBridgeMappings}
+ NeutronTenantMtu: {get_param: NeutronTenantMtu}
NeutronEnableTunnelling: {get_param: NeutronEnableTunnelling}
NeutronEnableL2Pop : {get_param: NeutronEnableL2Pop}
NeutronFlatNetworks: {get_param: NeutronFlatNetworks}
@@ -1024,6 +1097,7 @@ resources:
NovaComputeLibvirtType: {get_param: NovaComputeLibvirtType}
NovaComputeLibvirtVifDriver: {get_param: NovaComputeLibvirtVifDriver}
NovaEnableRbdBackend: {get_param: NovaEnableRbdBackend}
+ NovaIPv6: {get_param: NovaIPv6}
NovaPublicIP: {get_attr: [VipMap, net_ip_map, external]}
NovaPassword: {get_param: NovaPassword}
NovaOVSBridge: {get_param: NovaOVSBridge}
@@ -1048,6 +1122,7 @@ resources:
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: NovaComputeSchedulerHints}
+ NodeIndex: '%index%'
BlockStorage:
type: OS::Heat::ResourceGroup
@@ -1083,12 +1158,13 @@ resources:
'%stackname%': {get_param: 'OS::stack_name'}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
- MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
+ MysqlVirtualIPUri: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
ExtraConfig: {get_param: ExtraConfig}
BlockStorageExtraConfig: {get_param: BlockStorageExtraConfig}
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: BlockStorageSchedulerHints}
+ NodeIndex: '%index%'
ObjectStorage:
type: OS::Heat::ResourceGroup
@@ -1121,6 +1197,7 @@ resources:
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: ObjectStorageSchedulerHints}
+ NodeIndex: '%index%'
CephStorage:
type: OS::Heat::ResourceGroup
@@ -1148,6 +1225,7 @@ resources:
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
SchedulerHints: {get_param: CephStorageSchedulerHints}
+ NodeIndex: '%index%'
ControllerIpListMap:
type: OS::TripleO::Network::Ports::NetIpListMap
@@ -1179,6 +1257,8 @@ resources:
heat_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
swift_proxy_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
ceilometer_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ aodh_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
+ gnocchi_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, GnocchiApiNetwork]}]}
nova_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
nova_metadata_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
glance_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
@@ -1246,6 +1326,7 @@ resources:
properties:
ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
PortName: internal_api_virtual_ip
+ FixedIPs: {get_param: InternalApiVirtualFixedIPs}
StorageVirtualIP:
depends_on: Networks
@@ -1253,6 +1334,7 @@ resources:
properties:
ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
PortName: storage_virtual_ip
+ FixedIPs: {get_param: StorageVirtualFixedIPs}
StorageMgmtVirtualIP:
depends_on: Networks
@@ -1260,15 +1342,20 @@ resources:
properties:
ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
PortName: storage_management_virtual_ip
+ FixedIPs: {get_param: StorageMgmtVirtualFixedIPs}
VipMap:
type: OS::TripleO::Network::Ports::NetVipMap
properties:
ControlPlaneIp: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
ExternalIp: {get_attr: [PublicVirtualIP, ip_address]}
+ ExternalIpUri: {get_attr: [PublicVirtualIP, ip_address_uri]}
InternalApiIp: {get_attr: [InternalApiVirtualIP, ip_address]}
+ InternalApiIpUri: {get_attr: [InternalApiVirtualIP, ip_address_uri]}
StorageIp: {get_attr: [StorageVirtualIP, ip_address]}
+ StorageIpUri: {get_attr: [StorageVirtualIP, ip_address_uri]}
StorageMgmtIp: {get_attr: [StorageMgmtVirtualIP, ip_address]}
+ StorageMgmtIpUri: {get_attr: [StorageMgmtVirtualIP, ip_address_uri]}
# No tenant or management VIP required
VipConfig:
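
The FixedIPs properties added above let the InternalApi, Storage and StorageMgmt VIP ports be pinned to predictable addresses instead of taking whatever Neutron allocates. Assuming the VIP port templates forward FixedIPs to the Neutron port's fixed_ips property (the list-of-maps form below is the usual shape for it), an environment file could pin them like this; the addresses are examples and must sit inside the matching subnets:

    parameter_defaults:
      InternalApiVirtualFixedIPs: [{'ip_address': '172.16.2.9'}]
      StorageVirtualFixedIPs: [{'ip_address': '172.16.1.9'}]
      StorageMgmtVirtualFixedIPs: [{'ip_address': '172.16.3.9'}]
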
@@ -1292,6 +1379,8 @@ resources:
nova_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
nova_metadata_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
ceilometer_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ aodh_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
+ gnocchi_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GnocchiApiNetwork]}]}
heat_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
horizon_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
redis_vip: {get_attr: [RedisVirtualIP, ip_address]}
@@ -1483,10 +1572,29 @@ resources:
config: {get_resource: AllNodesValidationConfig}
servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
+ UpdateWorkflow:
+ type: OS::TripleO::Tasks::UpdateWorkflow
+ properties:
+ controller_servers: {get_attr: [Controller, attributes, nova_server_resource]}
+ compute_servers: {get_attr: [Compute, attributes, nova_server_resource]}
+ blockstorage_servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
+ objectstorage_servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
+ cephstorage_servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
+ input_values:
+ deploy_identifier: {get_param: DeployIdentifier}
+ update_identifier: {get_param: UpdateIdentifier}
+
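
UpdateWorkflow gives the stack one hook that touches every role's servers and, via the depends_on list added to AllNodesExtraConfig below, is guaranteed to run before any extra configuration. If no package-update or migration workflow is wanted, the hook can be pointed at a no-op template from an environment file; the path here is an assumption used for illustration only:

    resource_registry:
      OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/noop.yaml   # hypothetical no-op mapping
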
# Optional ExtraConfig for all nodes - all roles are passed in here, but
# the nested template may configure each role differently (or not at all)
AllNodesExtraConfig:
type: OS::TripleO::AllNodesExtraConfig
+ depends_on:
+ - UpdateWorkflow
+ - ComputeAllNodesValidationDeployment
+ - BlockStorageAllNodesValidationDeployment
+ - ObjectStorageAllNodesValidationDeployment
+ - CephStorageAllNodesValidationDeployment
+ - ControllerAllNodesValidationDeployment
properties:
controller_servers: {get_attr: [Controller, attributes, nova_server_resource]}
compute_servers: {get_attr: [Compute, attributes, nova_server_resource]}
@@ -1504,6 +1612,7 @@ resources:
allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
controller_config: {get_attr: [Controller, attributes, config_identifier]}
deployment_identifier: {get_param: DeployIdentifier}
+ StepConfig: {get_attr: [ControllerServiceChain, step_config]}
ComputeNodesPostDeployment:
type: OS::TripleO::ComputePostDeployment
@@ -1555,6 +1664,9 @@ outputs:
PublicVip:
description: Controller VIP for public API endpoints
value: {get_attr: [VipMap, net_ip_map, external]}
+ AodhInternalVip:
+ description: VIP for Aodh API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
CeilometerInternalVip:
description: VIP for Ceilometer API internal endpoint
value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
@@ -1564,6 +1676,9 @@ outputs:
GlanceInternalVip:
description: VIP for Glance API internal endpoint
value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
+ GnocchiInternalVip:
+ description: VIP for Gnocchi API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GnocchiApiNetwork]}]}
HeatInternalVip:
description: VIP for Heat API internal endpoint
value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
@@ -1582,6 +1697,12 @@ outputs:
SwiftInternalVip:
description: VIP for Swift Proxy internal endpoint
value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
+ EndpointMap:
+ description: |
+ Mapping of the resources with the needed info for their endpoints.
+ This includes the protocol used, the IP, port and also a full
+ representation of the URI.
+ value: {get_attr: [EndpointMap, endpoint_map]}
HostsEntry:
description: |
The content that should be appended to your /etc/hosts if you want to get
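
The new EndpointMap output exposes the same endpoint data the templates consume internally, so external tooling can read protocols, hosts, ports and full URIs after a deploy. An illustrative shape for a single entry, with the keys taken from how the map is read elsewhere in this change (protocol, host, port, uri); the values are examples:

    NovaVNCProxyPublic:
      protocol: http
      host: 192.0.2.10
      port: '6080'
      uri: http://192.0.2.10:6080
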
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 3dd3d5c9..90eb1b09 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -34,6 +34,8 @@ parameters:
type: comma_delimited_list
ceilometer_api_node_ips:
type: comma_delimited_list
+ aodh_api_node_ips:
+ type: comma_delimited_list
nova_api_node_ips:
type: comma_delimited_list
nova_metadata_node_ips:
@@ -42,6 +44,8 @@ parameters:
type: comma_delimited_list
glance_registry_node_ips:
type: comma_delimited_list
+ gnocchi_api_node_ips:
+ type: comma_delimited_list
cinder_api_node_ips:
type: comma_delimited_list
neutron_api_node_ips:
@@ -69,8 +73,8 @@ resources:
allNodesConfigImpl:
type: OS::Heat::StructuredConfig
properties:
+ group: os-apply-config
config:
- completion-signal: {get_input: deploy_signal_id}
hosts:
list_join:
- "\n"
@@ -139,6 +143,14 @@ resources:
list_join:
- "','"
- {get_param: memcache_node_ips}
+ memcache_node_ips_v6:
+ str_replace:
+ template: "['inet6:[SERVERS_LIST]']"
+ params:
+ SERVERS_LIST:
+ list_join:
+ - "]','inet6:["
+ - {get_param: memcache_node_ips}
mysql_node_ips:
str_replace:
template: "['SERVERS_LIST']"
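
memcache_node_ips_v6 renders the same node list in the inet6:[...] form that memcached clients expect for IPv6 servers, alongside the unchanged plain list. With example addresses, the str_replace above produces:

    # Input (example): memcache_node_ips: ['fd00:2000::10', 'fd00:2000::11']
    memcache_node_ips_v6: "['inet6:[fd00:2000::10]','inet6:[fd00:2000::11]']"
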
@@ -179,6 +191,22 @@ resources:
list_join:
- "','"
- {get_param: ceilometer_api_node_ips}
+ aodh_api_node_ips:
+ str_replace:
+ template: "['SERVERS_LIST']"
+ params:
+ SERVERS_LIST:
+ list_join:
+ - "','"
+ - {get_param: aodh_api_node_ips}
+ gnocchi_api_node_ips:
+ str_replace:
+ template: "['SERVERS_LIST']"
+ params:
+ SERVERS_LIST:
+ list_join:
+ - "','"
+ - {get_param: gnocchi_api_node_ips}
nova_api_node_ips:
str_replace:
template: "['SERVERS_LIST']"
@@ -227,6 +255,15 @@ resources:
list_join:
- "','"
- {get_param: neutron_api_node_ips}
+ # TODO: pass a `midonet_api_node_ips` var
+ midonet_api_node_ips:
+ str_replace:
+ template: "['SERVERS_LIST']"
+ params:
+ SERVERS_LIST:
+ list_join:
+ - "','"
+ - {get_param: neutron_api_node_ips}
keystone_public_api_node_ips:
str_replace:
template: "['SERVERS_LIST']"
@@ -255,7 +292,9 @@ resources:
# NOTE(gfidente): interpolation with %{} in the
         # hieradata file can't be used as it returns a string
ceilometer::rabbit_hosts: *rabbit_nodes_array
+ aodh::rabbit_hosts: *rabbit_nodes_array
cinder::rabbit_hosts: *rabbit_nodes_array
+ glance::notify::rabbitmq::rabbit_hosts: *rabbit_nodes_array
heat::rabbit_hosts: *rabbit_nodes_array
neutron::rabbit_hosts: *rabbit_nodes_array
nova::rabbit_hosts: *rabbit_nodes_array
diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml
index 96198c3f..fd161886 100644
--- a/puppet/ceph-cluster-config.yaml
+++ b/puppet/ceph-cluster-config.yaml
@@ -36,9 +36,15 @@ parameters:
GlanceRbdPoolName:
default: images
type: string
+ GnocchiRbdPoolName:
+ default: metrics
+ type: string
CephClientUserName:
default: openstack
type: string
+ CephIPv6:
+ default: False
+ type: boolean
resources:
CephClusterConfigImpl:
@@ -50,15 +56,25 @@ resources:
datafiles:
ceph_cluster:
mapped_data:
+ ceph_ipv6: {get_param: CephIPv6}
ceph_storage_count: {get_param: ceph_storage_count}
ceph_mon_initial_members:
list_join:
- ','
- {get_param: ceph_mon_names}
- ceph::profile::params::mon_host:
+ ceph_mon_host:
list_join:
- ','
- {get_param: ceph_mon_ips}
+ ceph_mon_host_v6:
+ str_replace:
+ template: "'[IPS_LIST]'"
+ params:
+ IPS_LIST:
+ list_join:
+ - '],['
+ - {get_param: ceph_mon_ips}
+ ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
ceph::profile::params::fsid: {get_param: ceph_fsid}
ceph::profile::params::mon_key: {get_param: ceph_mon_key}
       # We should use a separate key for the non-admin clients
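
ceph_mon_host_v6 pre-renders the monitor list with each address wrapped in brackets, the form an IPv6 mon host list needs, while ceph_mon_host keeps the plain comma-joined form (and replaces the old ceph::profile::params::mon_host key). With example addresses the str_replace above yields:

    # Input (example): ceph_mon_ips: ['fd00:3000::20', 'fd00:3000::21']
    ceph_mon_host_v6: "'[fd00:3000::20],[fd00:3000::21]'"
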
@@ -78,21 +94,25 @@ resources:
cap_mon: 'allow profile bootstrap-osd'
},
client.CLIENT_USER: {
- secret: 'ADMIN_KEY',
+ secret: 'CLIENT_KEY',
mode: '0644',
cap_mon: 'allow r',
- cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL'
+ cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
}
}"
params:
CLIENT_USER: {get_param: CephClientUserName}
+ CLIENT_KEY: {get_param: ceph_client_key}
ADMIN_KEY: {get_param: ceph_admin_key}
NOVA_POOL: {get_param: NovaRbdPoolName}
CINDER_POOL: {get_param: CinderRbdPoolName}
GLANCE_POOL: {get_param: GlanceRbdPoolName}
+ GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+ gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName}
+ gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName}
nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
nova::compute::rbd::rbd_keyring:
@@ -100,11 +120,17 @@ resources:
- '.'
- - 'client'
- {get_param: CephClientUserName}
+ gnocchi::storage::ceph::ceph_keyring:
+ list_join:
+ - '.'
+ - - 'client'
+ - {get_param: CephClientUserName}
ceph_client_user_name: {get_param: CephClientUserName}
ceph_pools:
- {get_param: CinderRbdPoolName}
- {get_param: NovaRbdPoolName}
- {get_param: GlanceRbdPoolName}
+ - {get_param: GnocchiRbdPoolName}
outputs:
config_id:
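
With the defaults introduced above (pool "metrics", client user "openstack"), the Gnocchi-related keys resolve to the hieradata below, and the metrics pool is appended to ceph_pools so it is created alongside the Cinder, Nova and Glance pools:

    gnocchi::storage::ceph::ceph_pool: metrics
    gnocchi::storage::ceph::ceph_username: openstack
    gnocchi::storage::ceph::ceph_keyring: client.openstack
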
diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml
index f9c53465..e90710c7 100644
--- a/puppet/ceph-storage-post.yaml
+++ b/puppet/ceph-storage-post.yaml
@@ -14,8 +14,19 @@ parameters:
type: json
description: Value which changes if the node configuration may need to be re-applied
-
resources:
+
+ CephStorageArtifactsConfig:
+ type: deploy-artifacts.yaml
+
+ CephStorageArtifactsDeploy:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: CephStorageArtifactsConfig}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
+
CephStoragePuppetConfig:
type: OS::Heat::SoftwareConfig
properties:
@@ -29,6 +40,7 @@ resources:
CephStorageDeployment_Step1:
type: OS::Heat::StructuredDeployments
+ depends_on: CephStorageArtifactsDeploy
properties:
name: CephStorageDeployment_Step1
servers: {get_param: servers}
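
This is the same artifacts-before-Puppet gating the other *-post templates adopt in this change: a deploy-artifacts config is pushed to the role's servers first, and the initial Puppet deployment depends on it so any delivered artifacts are in place before configuration runs. The generic shape of the pattern, with placeholder resource names:

    RoleArtifactsConfig:
      type: deploy-artifacts.yaml
    RoleArtifactsDeploy:
      type: OS::Heat::StructuredDeployments
      properties:
        servers: {get_param: servers}
        config: {get_resource: RoleArtifactsConfig}
    RolePuppetDeployment_Step1:
      type: OS::Heat::StructuredDeployments
      depends_on: RoleArtifactsDeploy   # artifacts land before Puppet runs
      properties:
        servers: {get_param: servers}
        config: {get_resource: RolePuppetConfig}
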
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index e310e1f5..f26d07f7 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -47,6 +47,10 @@ parameters:
Hostname:
type: string
default: '' # Defaults to Heat created hostname
+ HostnameMap:
+ type: json
+ default: {}
+ description: Optional mapping to override hostnames
ExtraConfig:
default: {}
description: |
@@ -58,6 +62,9 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
+ CephStorageIPs:
+ default: {}
+ type: json
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -86,6 +93,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ NodeIndex:
+ type: number
+ default: 0
resources:
CephStorage:
@@ -99,7 +109,10 @@ resources:
- network: ctlplane
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
- name: {get_param: Hostname}
+ name:
+ str_replace:
+ template: {get_param: Hostname}
+ params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
metadata: {get_param: ServerMetadata}
scheduler_hints: {get_param: SchedulerHints}
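
The server name is now passed through str_replace with HostnameMap as the substitution table, so operators can swap the Heat-generated names for site-specific ones without editing the templates. A sketch of the override; both keys and values are example hostnames:

    parameter_defaults:
      HostnameMap:
        overcloud-cephstorage-0: ceph-rack1-node0
        overcloud-cephstorage-1: ceph-rack1-node1
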
@@ -128,31 +141,43 @@ resources:
type: OS::TripleO::CephStorage::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::CephStorage::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::CephStorage::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::CephStorage::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::CephStorage::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::CephStorage::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: CephStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
NetworkConfig:
type: OS::TripleO::CephStorage::Net::SoftwareConfig
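
Each port on this role now receives the whole CephStorageIPs pool plus the node's NodeIndex, so "from pool" port implementations can hand out predictable per-node addresses. A sketch of what such a pool might look like in an environment file; the network keys and addresses are assumptions chosen to mirror the port names above:

    parameter_defaults:
      CephStorageIPs:
        storage: ['172.16.1.100', '172.16.1.101']          # picked by NodeIndex 0, 1, ...
        storage_mgmt: ['172.16.3.100', '172.16.3.101']
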
@@ -170,11 +195,17 @@ resources:
properties:
ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+ InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
ManagementIp: {get_attr: [ManagementPort, ip_address]}
+ ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
NetIpSubnetMap:
type: OS::TripleO::Network::Ports::NetIpSubnetMap
@@ -280,11 +311,54 @@ outputs:
hosts_entry:
value:
str_replace:
- template: "IP HOST.DOMAIN HOST"
+ template: |
+ PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST
+ EXTERNALIP EXTERNALHOST.DOMAIN EXTERNALHOST
+ INTERNAL_APIIP INTERNAL_APIHOST.DOMAIN INTERNAL_APIHOST
+ STORAGEIP STORAGEHOST.DOMAIN STORAGEHOST
+ STORAGE_MGMTIP STORAGE_MGMTHOST.DOMAIN STORAGE_MGMTHOST
+ TENANTIP TENANTHOST.DOMAIN TENANTHOST
+ MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
params:
- IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]}
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]}
DOMAIN: {get_param: CloudDomain}
- HOST: {get_attr: [CephStorage, name]}
+ PRIMARYHOST: {get_attr: [CephStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - external
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - internalapi
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - storage
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - storagemgmt
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - tenant
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - management
nova_server_resource:
description: Heat resource handle for the ceph storage server
value:
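
The single-line hosts_entry is expanded to one line per network, built from the primary hostname plus a per-network suffix. Rendered with example names and addresses it would look like:

    hosts_entry: |
      172.16.1.5  overcloud-cephstorage-0.example.com              overcloud-cephstorage-0
      10.0.0.5    overcloud-cephstorage-0.external.example.com     overcloud-cephstorage-0.external
      172.16.2.5  overcloud-cephstorage-0.internalapi.example.com  overcloud-cephstorage-0.internalapi
      # storage, storagemgmt, tenant and management entries follow the same pattern
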
diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml
index 9b7c752a..f470203f 100644
--- a/puppet/cinder-storage-post.yaml
+++ b/puppet/cinder-storage-post.yaml
@@ -14,8 +14,20 @@ parameters:
resources:
+ VolumeArtifactsConfig:
+ type: deploy-artifacts.yaml
+
+ VolumeArtifactsDeploy:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: VolumeArtifactsConfig}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
+
VolumePuppetConfig:
type: OS::Heat::SoftwareConfig
+ depends_on: VolumeArtifactsDeploy
properties:
group: puppet
options:
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index f7e8f907..5b61e0b6 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -9,11 +9,11 @@ parameters:
description: Whether to enable or not the Iscsi backend for Cinder
type: boolean
CinderISCSIHelper:
- default: tgtadm
+ default: lioadm
description: The iSCSI helper to use with cinder.
type: string
CinderLVMLoopDeviceSize:
- default: 5000
+ default: 10280
description: The size of the loopback file used by the cinder LVM driver.
type: number
CinderPassword:
@@ -38,6 +38,9 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
+ BlockStorageIPs:
+ default: {}
+ type: json
Flavor:
description: Flavor for block storage nodes to request when deploying.
type: string
@@ -48,7 +51,6 @@ parameters:
description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
RabbitPassword:
- default: 'guest'
type: string
hidden: true
RabbitUserName:
@@ -89,6 +91,10 @@ parameters:
Hostname:
type: string
default: '' # Defaults to Heat created hostname
+ HostnameMap:
+ type: json
+ default: {}
+ description: Optional mapping to override hostnames
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -106,7 +112,7 @@ parameters:
GlanceApiVirtualIP:
type: string
default: ''
- MysqlVirtualIP:
+ MysqlVirtualIPUri:
type: string
default: ''
NetworkDeploymentActions:
@@ -137,6 +143,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ NodeIndex:
+ type: number
+ default: 0
resources:
@@ -151,7 +160,10 @@ resources:
- network: ctlplane
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
- name: {get_param: Hostname}
+ name:
+ str_replace:
+ template: {get_param: Hostname}
+ params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
metadata: {get_param: ServerMetadata}
scheduler_hints: {get_param: SchedulerHints}
@@ -180,31 +192,43 @@ resources:
type: OS::TripleO::BlockStorage::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::BlockStorage::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::BlockStorage::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::BlockStorage::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::BlockStorage::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::BlockStorage::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: BlockStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
NetworkConfig:
type: OS::TripleO::BlockStorage::Net::SoftwareConfig
@@ -222,11 +246,17 @@ resources:
properties:
ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+ InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
ManagementIp: {get_attr: [ManagementPort, ip_address]}
+ ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
@@ -245,7 +275,7 @@ resources:
config: {get_resource: BlockStorageConfig}
input_values:
debug: {get_param: Debug}
- cinder_dsn: {list_join: ['', ['mysql+pymysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIP} , '/cinder']]}
+ cinder_dsn: {list_join: ['', ['mysql+pymysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIPUri} , '/cinder']]}
snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
cinder_lvm_loop_device_size:
@@ -255,7 +285,11 @@ resources:
size: {get_param: CinderLVMLoopDeviceSize}
cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
cinder_iscsi_helper: {get_param: CinderISCSIHelper}
- cinder_iscsi_ip_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]}
+ cinder_iscsi_ip_address:
+ str_replace:
+ template: "'IP'"
+ params:
+ IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]}
glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
rabbit_username: {get_param: RabbitUserName}
rabbit_password: {get_param: RabbitPassword}
@@ -343,11 +377,54 @@ outputs:
hosts_entry:
value:
str_replace:
- template: "IP HOST.DOMAIN HOST"
+ template: |
+ PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST
+ EXTERNALIP EXTERNALHOST.DOMAIN EXTERNALHOST
+ INTERNAL_APIIP INTERNAL_APIHOST.DOMAIN INTERNAL_APIHOST
+ STORAGEIP STORAGEHOST.DOMAIN STORAGEHOST
+ STORAGE_MGMTIP STORAGE_MGMTHOST.DOMAIN STORAGE_MGMTHOST
+ TENANTIP TENANTHOST.DOMAIN TENANTHOST
+ MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
params:
- IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]}
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]}
DOMAIN: {get_param: CloudDomain}
- HOST: {get_attr: [BlockStorage, name]}
+ PRIMARYHOST: {get_attr: [BlockStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - external
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - internalapi
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - storage
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - storagemgmt
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - tenant
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - management
nova_server_resource:
description: Heat resource handle for the block storage server
value:
diff --git a/puppet/compute-post.yaml b/puppet/compute-post.yaml
index 3861e50c..a122df0e 100644
--- a/puppet/compute-post.yaml
+++ b/puppet/compute-post.yaml
@@ -17,6 +17,17 @@ parameters:
resources:
+ ComputeArtifactsConfig:
+ type: deploy-artifacts.yaml
+
+ ComputeArtifactsDeploy:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: ComputeArtifactsConfig}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
+
ComputePuppetConfig:
type: OS::Heat::SoftwareConfig
properties:
@@ -30,6 +41,7 @@ resources:
ComputePuppetDeployment:
type: OS::Heat::StructuredDeployments
+ depends_on: ComputeArtifactsDeploy
properties:
name: ComputePuppetDeployment
servers: {get_param: servers}
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 8b2bcd33..6759d3b9 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -118,6 +118,15 @@ parameters:
default: nic1
description: A port to add to the NeutronPhysicalBridge.
type: string
+ NeutronTenantMtu:
+ description: >
+ The default MTU for tenant networks. For VXLAN/GRE tunneling, this should
+ be at least 50 bytes smaller than the MTU on the physical network. This
+ value will be used to set the MTU on the virtual Ethernet device.
+ This number is related to the value of NeutronDnsmasqOptions, since that
+ will determine the MTU that is assigned to the VM host through DHCP.
+ default: 1400
+ type: number
NeutronTunnelTypes:
type: comma_delimited_list
description: |
@@ -186,6 +195,9 @@ parameters:
default: 'dvr_snat'
description: Agent mode for the neutron-l3-agent on the controller hosts
type: string
+ NodeIndex:
+ type: number
+ default: 0
NovaApiHost:
type: string
default: '' # Has to be here because of the ignored empty value bug
@@ -198,6 +210,9 @@ parameters:
NovaCompute specific configuration to inject into the cluster. Same
structure as ExtraConfig.
type: json
+ NovaComputeIPs:
+ default: {}
+ type: json
NovaComputeLibvirtType:
type: string
default: kvm
@@ -209,6 +224,10 @@ parameters:
default: false
description: Whether to enable or not the Rbd backend for Nova
type: boolean
+ NovaIPv6:
+ default: false
+ description: Enable IPv6 features in Nova
+ type: boolean
NovaPassword:
description: The password for the nova service account, used by nova-api.
type: string
@@ -232,7 +251,6 @@ parameters:
type: string
default: '' # Has to be here because of the ignored empty value bug
RabbitPassword:
- default: guest
description: The password for RabbitMQ
type: string
hidden: true
@@ -258,6 +276,10 @@ parameters:
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
+ UpgradeLevelNovaCompute:
+ type: string
+ description: Nova Compute upgrade level
+ default: ''
EnablePackageInstall:
default: 'false'
description: Set to true to enable package installation via Puppet
@@ -285,6 +307,10 @@ parameters:
Hostname:
type: string
default: '' # Defaults to Heat created hostname
+ HostnameMap:
+ type: json
+ default: {}
+ description: Optional mapping to override hostnames
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -329,7 +355,10 @@ resources:
- network: ctlplane
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
- name: {get_param: Hostname}
+ name:
+ str_replace:
+ template: {get_param: Hostname}
+ params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
metadata: {get_param: ServerMetadata}
scheduler_hints: {get_param: SchedulerHints}
@@ -358,42 +387,60 @@ resources:
type: OS::TripleO::Compute::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::Compute::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::Compute::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::Compute::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::Compute::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::Compute::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ IPPool: {get_param: NovaComputeIPs}
+ NodeIndex: {get_param: NodeIndex}
NetIpMap:
type: OS::TripleO::Network::Ports::NetIpMap
properties:
ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+ InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
ManagementIp: {get_attr: [ManagementPort, ip_address]}
+ ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
NetworkConfig:
type: OS::TripleO::Compute::Net::SoftwareConfig
@@ -434,9 +481,11 @@ resources:
- all_nodes # provided by allNodesConfig
- '"%{::osfamily}"'
- common
+ - neutron_bigswitch_data # Optionally provided by ComputeExtraConfigPre
- cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre
- nova_nuage_data # Optionally provided by ComputeExtraConfigPre
- midonet_data # Optionally provided by AllNodesExtraConfig
+ - neutron_opencontrail_data # Optionally provided by ComputeExtraConfigPre
datafiles:
compute_extraconfig:
mapped_data: {get_param: NovaComputeExtraConfig}
@@ -450,11 +499,13 @@ resources:
raw_data: {get_file: hieradata/compute.yaml}
mapped_data:
cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend}
+ nova::use_ipv6: {get_input: nova_ipv6}
nova::debug: {get_input: debug}
nova::rabbit_userid: {get_input: rabbit_username}
nova::rabbit_password: {get_input: rabbit_password}
nova::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
nova::rabbit_port: {get_input: rabbit_client_port}
+ nova::upgrade_level_compute: {get_input: upgrade_level_nova_compute}
nova_compute_driver: {get_input: nova_compute_driver}
nova::compute::libvirt::libvirt_virt_type: {get_input: nova_compute_libvirt_type}
nova::compute::neutron::libvirt_vif_driver: {get_input: nova_compute_libvirt_vif_driver}
@@ -463,7 +514,11 @@ resources:
nova::compute::rbd::ephemeral_storage: {get_input: nova_enable_rbd_backend}
rbd_persistent_storage: {get_input: cinder_enable_rbd_backend}
nova_password: {get_input: nova_password}
+ nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu}
nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address}
+ nova::vncproxy::common::vncproxy_protocol: {get_input: nova_vncproxy_protocol}
+ nova::vncproxy::common::vncproxy_host: {get_input: nova_vncproxy_host}
+ nova::vncproxy::common::vncproxy_port: {get_input: nova_vncproxy_port}
nova::network::neutron::neutron_ovs_bridge: {get_input: nova_ovs_bridge}
nova::network::neutron::security_group_api: {get_input: nova_security_group_api}
ceilometer::debug: {get_input: debug}
@@ -487,6 +542,7 @@ resources:
neutron_host: {get_input: neutron_host}
neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
+ neutron::network_device_mtu: {get_input: neutron_tenant_mtu}
neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types}
neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions}
@@ -498,9 +554,9 @@ resources:
neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
neutron_physical_bridge: {get_input: neutron_physical_bridge}
neutron_public_interface: {get_input: neutron_public_interface}
- nova::network::neutron::neutron_admin_password: {get_input: neutron_password}
+ nova::network::neutron::neutron_password: {get_input: neutron_password}
nova::network::neutron::neutron_url: {get_input: neutron_internal_url}
- nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url}
+ nova::network::neutron::neutron_auth_url: {get_input: neutron_auth_url}
neutron_router_distributed: {get_input: neutron_router_distributed}
neutron_agent_mode: {get_input: neutron_agent_mode}
neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
@@ -533,10 +589,22 @@ resources:
nova_api_host: {get_param: NovaApiHost}
nova_password: {get_param: NovaPassword}
nova_enable_rbd_backend: {get_param: NovaEnableRbdBackend}
+ nova_ipv6: {get_param: NovaIPv6}
cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
nova_vnc_proxyclient_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaVncProxyNetwork]}]}
+ nova_vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
+          # Remove the brackets that may surround the address when it is IPv6.
+          # For DNS names and IPv4 this simply passes the NovaVNCProxyPublic value through unchanged.
+ nova_vncproxy_host:
+ str_replace:
+ template: {get_param: [EndpointMap, NovaVNCProxyPublic, host]}
+ params:
+ '[': ''
+ ']': ''
+ nova_vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
nova_ovs_bridge: {get_param: NovaOVSBridge}
nova_security_group_api: {get_param: NovaSecurityGroupAPI}
+ upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute}
ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
ceilometer_password: {get_param: CeilometerPassword}
ceilometer_compute_agent: {get_param: CeilometerComputeAgent}
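
nova_vncproxy_host must be a bare hostname or address, so the str_replace above strips the brackets that an IPv6 EndpointMap host carries; IPv4 addresses and DNS names pass through untouched. For example (address is illustrative):

    # EndpointMap host "[2001:db8::7]"  ->  vncproxy_host "2001:db8::7"
    nova_vncproxy_host: '2001:db8::7'
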
@@ -581,6 +649,7 @@ resources:
template: MAPPINGS
params:
MAPPINGS: {get_param: NeutronBridgeMappings}
+ neutron_tenant_mtu: {get_param: NeutronTenantMtu}
neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
neutron_physical_bridge: {get_param: NeutronPhysicalBridge}
@@ -612,7 +681,7 @@ resources:
AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions}
neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]}
- neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri]}
+ neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]}
keystone_vip: {get_param: KeystonePublicApiVirtualIP}
admin_password: {get_param: AdminPassword}
rabbit_username: {get_param: RabbitUserName}
@@ -689,11 +758,54 @@ outputs:
Server's IP address and hostname in the /etc/hosts format
value:
str_replace:
- template: "IP HOST.DOMAIN HOST"
+ template: |
+ PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST
+ EXTERNALIP EXTERNALHOST.DOMAIN EXTERNALHOST
+ INTERNAL_APIIP INTERNAL_APIHOST.DOMAIN INTERNAL_APIHOST
+ STORAGEIP STORAGEHOST.DOMAIN STORAGEHOST
+ STORAGE_MGMTIP STORAGE_MGMTHOST.DOMAIN STORAGE_MGMTHOST
+ TENANTIP TENANTHOST.DOMAIN TENANTHOST
+ MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
params:
- IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]}
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]}
DOMAIN: {get_param: CloudDomain}
- HOST: {get_attr: [NovaCompute, name]}
+ PRIMARYHOST: {get_attr: [NovaCompute, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - external
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - internalapi
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - storage
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - storagemgmt
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - tenant
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - management
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
diff --git a/puppet/controller-config-pacemaker.yaml b/puppet/controller-config-pacemaker.yaml
index dc81498a..dfebcf82 100644
--- a/puppet/controller-config-pacemaker.yaml
+++ b/puppet/controller-config-pacemaker.yaml
@@ -8,6 +8,10 @@ parameters:
default: false
description: Whether to run config management (e.g. Puppet) in debug mode.
type: boolean
+ StepConfig:
+ type: string
+ description: Config manifests that will be used to step through the deployment.
+ default: ''
resources:
@@ -22,7 +26,11 @@ resources:
outputs:
- name: result
config:
- get_file: manifests/overcloud_controller_pacemaker.pp
+ list_join:
+ - ''
+ - - get_file: manifests/overcloud_controller_pacemaker.pp
+ - get_file: manifests/ringbuilder.pp
+ - {get_param: StepConfig}
outputs:
OS::stack_id:
diff --git a/puppet/controller-config.yaml b/puppet/controller-config.yaml
index f85e1a9e..458aff32 100644
--- a/puppet/controller-config.yaml
+++ b/puppet/controller-config.yaml
@@ -8,6 +8,10 @@ parameters:
default: false
description: Whether to run config management (e.g. Puppet) in debug mode.
type: boolean
+ StepConfig:
+ type: string
+ description: Config manifests that will be used to step through the deployment.
+ default: ''
resources:
@@ -22,7 +26,11 @@ resources:
outputs:
- name: result
config:
- get_file: manifests/overcloud_controller.pp
+ list_join:
+ - ''
+ - - get_file: manifests/overcloud_controller.pp
+ - get_file: manifests/ringbuilder.pp
+ - {get_param: StepConfig}
outputs:
OS::stack_id:
diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml
index d250dd70..80b08a06 100644
--- a/puppet/controller-post.yaml
+++ b/puppet/controller-post.yaml
@@ -13,10 +13,22 @@ parameters:
NodeConfigIdentifiers:
type: json
description: Value which changes if the node configuration may need to be re-applied
-
+ StepConfig:
+ type: string
+ description: Config manifests that will be used to step through the deployment.
+ default: ''
resources:
+ ControllerArtifactsConfig:
+ type: deploy-artifacts.yaml
+
+ ControllerArtifactsDeploy:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: ControllerArtifactsConfig}
+
ControllerPrePuppet:
type: OS::TripleO::Tasks::ControllerPrePuppet
properties:
@@ -26,6 +38,8 @@ resources:
ControllerPuppetConfig:
type: OS::TripleO::ControllerConfig
+ properties:
+ StepConfig: {get_param: StepConfig}
# Step through a series of Puppet runs using the same manifest.
# NOTE: To enable stepping through the deployments via heat hooks,
@@ -33,7 +47,7 @@ resources:
# e.g all Deployment resources should have a *Deployment_StepN suffix
ControllerLoadBalancerDeployment_Step1:
type: OS::Heat::StructuredDeployments
- depends_on: ControllerPrePuppet
+ depends_on: [ControllerPrePuppet, ControllerArtifactsDeploy]
properties:
name: ControllerLoadBalancerDeployment_Step1
servers: {get_param: servers}
@@ -55,39 +69,26 @@ resources:
update_identifier: {get_param: NodeConfigIdentifiers}
actions: ['CREATE'] # no need for two passes on an UPDATE
- ControllerRingbuilderPuppetConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- options:
- enable_debug: {get_param: ConfigDebug}
- enable_hiera: True
- enable_facter: False
- inputs:
- outputs:
- - name: result
- config:
- get_file: manifests/ringbuilder.pp
-
- ControllerRingbuilderDeployment_Step3:
+ ControllerOvercloudServicesDeployment_Step3:
type: OS::Heat::StructuredDeployments
depends_on: ControllerServicesBaseDeployment_Step2
properties:
- name: ControllerRingbuilderDeployment_Step3
+ name: ControllerOvercloudServicesDeployment_Step3
servers: {get_param: servers}
- config: {get_resource: ControllerRingbuilderPuppetConfig}
+ config: {get_resource: ControllerPuppetConfig}
input_values:
+ step: 3
update_identifier: {get_param: NodeConfigIdentifiers}
ControllerOvercloudServicesDeployment_Step4:
type: OS::Heat::StructuredDeployments
- depends_on: ControllerRingbuilderDeployment_Step3
+ depends_on: ControllerOvercloudServicesDeployment_Step3
properties:
name: ControllerOvercloudServicesDeployment_Step4
servers: {get_param: servers}
config: {get_resource: ControllerPuppetConfig}
input_values:
- step: 3
+ step: 4
update_identifier: {get_param: NodeConfigIdentifiers}
ControllerOvercloudServicesDeployment_Step5:
@@ -98,7 +99,7 @@ resources:
servers: {get_param: servers}
config: {get_resource: ControllerPuppetConfig}
input_values:
- step: 4
+ step: 5
update_identifier: {get_param: NodeConfigIdentifiers}
ControllerOvercloudServicesDeployment_Step6:
@@ -109,12 +110,23 @@ resources:
servers: {get_param: servers}
config: {get_resource: ControllerPuppetConfig}
input_values:
- step: 5
+ step: 6
+ update_identifier: {get_param: NodeConfigIdentifiers}
+
+ ControllerOvercloudServicesDeployment_Step7:
+ type: OS::Heat::StructuredDeployments
+ depends_on: ControllerOvercloudServicesDeployment_Step6
+ properties:
+ name: ControllerOvercloudServicesDeployment_Step7
+ servers: {get_param: servers}
+ config: {get_resource: ControllerPuppetConfig}
+ input_values:
+ step: 7
update_identifier: {get_param: NodeConfigIdentifiers}
ControllerPostPuppet:
type: OS::TripleO::Tasks::ControllerPostPuppet
- depends_on: ControllerOvercloudServicesDeployment_Step6
+ depends_on: ControllerOvercloudServicesDeployment_Step7
properties:
servers: {get_param: servers}
input_values:
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index ea19c711..8ad0ef28 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -4,17 +4,15 @@ description: >
OpenStack controller node configured by Puppet.
parameters:
- AdminEmail:
- default: 'admin@example.com'
- description: The email for the keystone admin account.
- type: string
- hidden: true
AdminPassword:
description: The password for the keystone admin account, used for monitoring, querying neutron etc.
type: string
hidden: true
- AdminToken:
- description: The keystone auth secret and db password.
+ AodhApiVirtualIP:
+ type: string
+ default: ''
+ AodhPassword:
+ description: The password for the aodh services.
type: string
hidden: true
CeilometerApiVirtualIP:
@@ -32,6 +30,16 @@ parameters:
description: The password for the ceilometer service and db account.
type: string
hidden: true
+ CeilometerStoreEvents:
+ default: false
+ description: Whether to store events in ceilometer.
+ type: boolean
+ CeilometerMeterDispatcher:
+ default: 'database'
+ description: Dispatcher to process meter data
+ type: string
+ constraints:
+ - allowed_values: ['gnocchi', 'database']
CinderApiVirtualIP:
type: string
default: ''
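
CeilometerMeterDispatcher selects where meter samples are sent: the classic database dispatcher or the new Gnocchi one. A minimal environment sketch that switches to Gnocchi together with the Gnocchi backend parameters introduced further down; the values are examples:

    parameter_defaults:
      CeilometerMeterDispatcher: gnocchi
      GnocchiBackend: rbd               # one of swift, file, rbd
      GnocchiIndexerBackend: mysql
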
@@ -57,11 +65,11 @@ parameters:
description: Whether to enable or not the Rbd backend for Cinder
type: boolean
CinderISCSIHelper:
- default: tgtadm
+ default: lioadm
description: The iSCSI helper to use with cinder.
type: string
CinderLVMLoopDeviceSize:
- default: 5000
+ default: 10280
description: The size of the loopback file used by the cinder LVM driver.
type: number
CinderNfsMountOptions:
@@ -107,6 +115,10 @@ parameters:
default: 'br-ex'
description: Interface where virtual ip will be assigned.
type: string
+ CorosyncIPv6:
+ default: false
+ description: Enable IPv6 in Corosync
+ type: boolean
Debug:
default: ''
description: Set to True to enable debugging on all services.
@@ -217,6 +229,31 @@ parameters:
Mount options for Pacemaker mount used as Glance storage.
Effective when GlanceFilePcmkManage is true.
type: string
+ GnocchiBackend:
+ default: file
+ description: The short name of the Gnocchi backend to use. Should be one
+ of swift, rbd, or file
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd']
+ GnocchiIndexerBackend:
+ default: 'mysql'
+ description: The short name of the Gnocchi indexer backend to use.
+ type: string
+ GnocchiApiVirtualIP:
+ type: string
+ default: ''
+ GnocchiPassword:
+ description: The password for the gnocchi service and db account.
+ type: string
+ hidden: true
+ HAProxyStatsPassword:
+ description: Password for HAProxy stats endpoint
+ type: string
+ HAProxyStatsUser:
+ description: User for HAProxy stats endpoint
+ default: admin
+ type: string
HAProxySyslogAddress:
default: /dev/log
     description: Syslog address where HAProxy will send its logs
@@ -230,7 +267,7 @@ parameters:
type: string
hidden: true
HeatStackDomainAdminPassword:
- description: Password for heat_domain_admin user.
+ description: Password for heat_stack_domain_admin user.
type: string
hidden: true
HeatAuthEncryptionKey:
@@ -245,6 +282,11 @@ parameters:
default: 0
description: Number of workers for Heat service.
type: number
+ HeatEnableDBPurge:
+ type: boolean
+ default: true
+ description: |
+ Whether to create cron job for purging soft deleted rows in the Heat database.
HorizonSecret:
description: Secret key for Django
type: string
@@ -268,43 +310,6 @@ parameters:
type: string
constraints:
- custom_constraint: nova.keypair
- KeystoneCACertificate:
- default: ''
- description: Keystone self-signed certificate authority certificate.
- type: string
- KeystoneEnableDBPurge:
- default: true
- description: |
- Whether to create cron job for purging soft deleted rows in Keystone database.
- type: boolean
- KeystoneSigningCertificate:
- default: ''
- description: Keystone certificate for verifying token validity.
- type: string
- KeystoneSigningKey:
- default: ''
- description: Keystone key for signing tokens.
- type: string
- hidden: true
- KeystoneSSLCertificate:
- default: ''
- description: Keystone certificate for verifying token validity.
- type: string
- KeystoneSSLCertificateKey:
- default: ''
- description: Keystone key for signing tokens.
- type: string
- hidden: true
- KeystoneNotificationDriver:
- description: Comma-separated list of Oslo notification drivers used by Keystone
- default: ['messaging']
- type: comma_delimited_list
- KeystoneNotificationFormat:
- description: The Keystone notification format
- default: 'basic'
- type: string
- constraints:
- - allowed_values: [ 'basic', 'cadf' ]
KeystoneRegion:
type: string
default: 'regionOne'
@@ -313,14 +318,14 @@ parameters:
default: false
description: Whether to manage IPtables rules.
type: boolean
+ MemcachedIPv6:
+ default: false
+ description: Enable IPv6 features in Memcached.
+ type: boolean
PurgeFirewallRules:
default: false
description: Whether IPtables rules should be purged before setting up the new ones.
type: boolean
- KeystoneWorkers:
- default: 0
- description: Number of workers for Keystone service.
- type: number
SaharaApiVirtualIP:
type: string
default: ''
@@ -492,6 +497,15 @@ parameters:
default: ''
description: If set, the public interface is a vlan with this device as the raw device.
type: string
+ NeutronTenantMtu:
+ description: >
+ The default MTU for tenant networks. For VXLAN/GRE tunneling this should
+ be at least 50 bytes smaller than the MTU of the physical network. This
+ value is also used to set the MTU on the virtual Ethernet devices.
+ It is related to the value of NeutronDnsmasqOptions, since that option
+ determines the MTU advertised to guests through DHCP.
+ default: 1400
+ type: number
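+ # A hypothetical example: with the default NeutronTenantMtu of 1400,
+ # NeutronDnsmasqOptions would typically advertise the same MTU to guests
+ # via DHCP option 26, e.g. 'dhcp-option-force=26,1400'.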
NeutronTunnelTypes:
default: 'vxlan'
description: |
@@ -510,7 +524,7 @@ parameters:
default: ["1:4094", ]
type: comma_delimited_list
NeutronPluginExtensions:
- default: "qos"
+ default: "qos,port_security"
description: |
Comma-separated list of extensions enabled for the Neutron plugin.
type: comma_delimited_list
@@ -531,6 +545,10 @@ parameters:
description: |
Whether to create cron job for purging soft deleted rows in Nova database.
type: boolean
+ NovaIPv6:
+ default: false
+ description: Enable IPv6 features in Nova
+ type: boolean
NovaPassword:
description: The password for the nova service and db account, used by nova-api.
type: string
@@ -543,6 +561,10 @@ parameters:
default: false
description: Should MongoDb journaling be disabled
type: boolean
+ MongoDbIPv6:
+ default: false
+ description: Enable IPv6 if the MongoDB VIP is an IPv6 address
+ type: boolean
NtpServer:
default: ''
description: Comma-separated list of ntp servers
@@ -565,7 +587,6 @@ parameters:
default: '' # Has to be here because of the ignored empty value bug
hidden: true
RabbitPassword:
- default: guest
description: The password for RabbitMQ
type: string
hidden: true
@@ -587,9 +608,21 @@ parameters:
default: 16384
description: Configures RabbitMQ FD limit
type: string
+ RabbitIPv6:
+ default: false
+ description: Enable IPv6 in RabbitMQ
+ type: boolean
+ RedisPassword:
+ type: string
+ description: The password to access the Redis service
+ hidden: true
RedisVirtualIP:
type: string
default: '' # Has to be here because of the ignored empty value bug
+ RedisVirtualIPUri:
+ type: string
+ default: '' # Has to be here because of the ignored empty value bug
+ description: An IP address, wrapped in brackets when it is IPv6
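+ # For example (hypothetical addresses): '192.0.2.10' for IPv4,
+ # or '[2001:db8::10]' for IPv6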
SnmpdReadonlyUserName:
default: ro_snmp_user
description: The user name for SNMPd with readonly rights running on all Overcloud nodes
@@ -615,6 +648,10 @@ parameters:
default: 10
description: Partition Power to use when building Swift rings
type: number
+ SwiftRingBuild:
+ default: true
+ description: Whether to manage Swift rings or not
+ type: boolean
SwiftPassword:
description: The password for the swift service account, used by the swift proxy
services.
@@ -635,12 +672,19 @@ parameters:
default: 'UTC'
description: The timezone to be set on controller nodes.
type: string
+ UpgradeLevelNovaCompute:
+ type: string
+ description: Nova Compute upgrade level
+ default: ''
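+ # A hypothetical example: during a rolling upgrade this might be pinned to
+ # the previous release (e.g. 'liberty') or set to 'auto', and cleared again
+ # once all compute nodes have been upgraded.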
VirtualIP: # DEPRECATED: use per service settings instead
type: string
default: '' # Has to be here because of the ignored empty value bug
HeatApiVirtualIP:
type: string
default: ''
+ HeatApiVirtualIPUri:
+ type: string
+ default: ''
GlanceApiVirtualIP:
type: string
default: ''
@@ -650,10 +694,7 @@ parameters:
MysqlVirtualIP:
type: string
default: ''
- KeystoneAdminApiVirtualIP:
- type: string
- default: ''
- KeystonePublicApiVirtualIP:
+ MysqlVirtualIPUri:
type: string
default: ''
NeutronApiVirtualIP:
@@ -682,6 +723,10 @@ parameters:
Hostname:
type: string
default: '' # Defaults to Heat created hostname
+ HostnameMap:
+ type: json
+ default: {}
+ description: Optional mapping to override hostnames
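+ # A hypothetical example, overriding the Heat-generated names:
+ #   HostnameMap:
+ #     overcloud-controller-0: controller-0.example.com
+ #     overcloud-controller-1: controller-1.example.com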
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -713,6 +758,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ ServiceConfigSettings:
+ type: json
+ default: {}
resources:
@@ -727,7 +775,10 @@ resources:
- network: ctlplane
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
- name: {get_param: Hostname}
+ name:
+ str_replace:
+ template: {get_param: Hostname}
+ params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
metadata: {get_param: ServerMetadata}
scheduler_hints: {get_param: SchedulerHints}
@@ -790,6 +841,8 @@ resources:
ManagementPort:
type: OS::TripleO::Controller::Ports::ManagementPort
properties:
+ IPPool: {get_param: ControllerIPs}
+ NodeIndex: {get_param: NodeIndex}
ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
NetIpMap:
@@ -797,11 +850,17 @@ resources:
properties:
ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]}
ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+ InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
ManagementIp: {get_attr: [ManagementPort, ip_address]}
+ ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
NetIpSubnetMap:
type: OS::TripleO::Network::Ports::NetIpSubnetMap
@@ -865,7 +924,6 @@ resources:
cinder_workers: {get_param: CinderWorkers}
glance_workers: {get_param: GlanceWorkers}
heat_workers: {get_param: HeatWorkers}
- keystone_workers: {get_param: KeystoneWorkers}
nova_workers: {get_param: NovaWorkers}
neutron_workers: {get_param: NeutronWorkers}
swift_workers: {get_param: SwiftWorkers}
@@ -873,30 +931,31 @@ resources:
neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
neutron_enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
haproxy_log_address: {get_param: HAProxySyslogAddress}
+ haproxy_stats_password: {get_param: HAProxyStatsPassword}
+ haproxy_stats_user: {get_param: HAProxyStatsUser}
heat.watch_server_url:
list_join:
- ''
- - 'http://'
- - {get_param: HeatApiVirtualIP}
+ - {get_param: HeatApiVirtualIPUri}
- ':8003'
heat.metadata_server_url:
list_join:
- ''
- - 'http://'
- - {get_param: HeatApiVirtualIP}
+ - {get_param: HeatApiVirtualIPUri}
- ':8000'
heat.waitcondition_server_url:
list_join:
- ''
- - 'http://'
- - {get_param: HeatApiVirtualIP}
+ - {get_param: HeatApiVirtualIPUri}
- ':8000/v1/waitcondition'
heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey}
+ heat_enable_db_purge: {get_param: HeatEnableDBPurge}
horizon_allowed_hosts: {get_param: HorizonAllowedHosts}
horizon_secret: {get_param: HorizonSecret}
- admin_email: {get_param: AdminEmail}
admin_password: {get_param: AdminPassword}
- admin_token: {get_param: AdminToken}
neutron_public_interface_ip: {get_param: NeutronPublicInterfaceIP}
debug: {get_param: Debug}
cinder_enable_db_purge: {get_param: CinderEnableDBPurge}
@@ -919,7 +978,7 @@ resources:
- - 'mysql+pymysql://cinder:'
- {get_param: CinderPassword}
- '@'
- - {get_param: MysqlVirtualIP}
+ - {get_param: MysqlVirtualIPUri}
- '/cinder'
glance_port: {get_param: [EndpointMap, GlanceInternal, port]}
glance_password: {get_param: GlancePassword}
@@ -936,7 +995,7 @@ resources:
- - 'mysql+pymysql://glance:'
- {get_param: GlancePassword}
- '@'
- - {get_param: MysqlVirtualIP}
+ - {get_param: MysqlVirtualIPUri}
- '/glance'
heat_password: {get_param: HeatPassword}
heat_stack_domain_admin_password: {get_param: HeatStackDomainAdminPassword}
@@ -946,28 +1005,10 @@ resources:
- - 'mysql+pymysql://heat:'
- {get_param: HeatPassword}
- '@'
- - {get_param: MysqlVirtualIP}
+ - {get_param: MysqlVirtualIPUri}
- '/heat'
- keystone_ca_certificate: {get_param: KeystoneCACertificate}
- keystone_signing_key: {get_param: KeystoneSigningKey}
- keystone_signing_certificate: {get_param: KeystoneSigningCertificate}
- keystone_ssl_certificate: {get_param: KeystoneSSLCertificate}
- keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
- keystone_notification_driver: {get_param: KeystoneNotificationDriver}
- keystone_notification_format: {get_param: KeystoneNotificationFormat}
- keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
- keystone_dsn:
- list_join:
- - ''
- - - 'mysql+pymysql://keystone:'
- - {get_param: AdminToken}
- - '@'
- - {get_param: MysqlVirtualIP}
- - '/keystone'
keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- keystone_public_url: { get_param: [EndpointMap, KeystonePublic, uri_no_suffix] }
- keystone_internal_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
keystone_ec2_uri: { get_param: [EndpointMap, KeystoneEC2, uri] }
enable_fencing: {get_param: EnableFencing}
enable_galera: {get_param: EnableGalera}
@@ -1061,6 +1102,7 @@ resources:
params:
AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions}
neutron_password: {get_param: NeutronPassword}
+ neutron_tenant_mtu: {get_param: NeutronTenantMtu}
neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions}
neutron_dsn:
list_join:
@@ -1068,33 +1110,53 @@ resources:
- - 'mysql+pymysql://neutron:'
- {get_param: NeutronPassword}
- '@'
- - {get_param: MysqlVirtualIP}
+ - {get_param: MysqlVirtualIPUri}
- '/ovs_neutron?charset=utf8'
neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] }
neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] }
neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] }
- neutron_admin_auth_url: { get_param: [ EndpointMap, KeystoneAdmin, uri_no_suffix ] }
+ neutron_auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] }
nova_internal_url: { get_param: [ EndpointMap, NovaInternal, uri ] }
ceilometer_backend: {get_param: CeilometerBackend}
ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
ceilometer_password: {get_param: CeilometerPassword}
+ ceilometer_store_events: {get_param: CeilometerStoreEvents}
+ aodh_password: {get_param: AodhPassword}
+ ceilometer_meter_dispatcher: {get_param: CeilometerMeterDispatcher}
+ gnocchi_password: {get_param: GnocchiPassword}
+ gnocchi_backend: {get_param: GnocchiBackend}
+ gnocchi_indexer_backend: {get_param: GnocchiIndexerBackend}
ceilometer_coordination_url:
list_join:
- ''
- - - 'redis://'
- - {get_param: RedisVirtualIP}
- - ':6379'
+ - - 'redis://:'
+ - {get_param: RedisPassword}
+ - '@'
+ - {get_param: RedisVirtualIPUri}
+ - ':6379/'
ceilometer_dsn:
list_join:
- ''
- - 'mysql+pymysql://ceilometer:'
- {get_param: CeilometerPassword}
- '@'
- - {get_param: MysqlVirtualIP}
+ - {get_param: MysqlVirtualIPUri}
- '/ceilometer'
+ gnocchi_dsn:
+ list_join:
+ - ''
+ - - 'mysql+pymysql://gnocchi:'
+ - {get_param: GnocchiPassword}
+ - '@'
+ - {get_param: MysqlVirtualIPUri}
+ - '/gnocchi'
+ gnocchi_internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]}
snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
nova_enable_db_purge: {get_param: NovaEnableDBPurge}
+ nova_ipv6: {get_param: NovaIPv6}
+ corosync_ipv6: {get_param: CorosyncIPv6}
+ memcached_ipv6: {get_param: MemcachedIPv6}
nova_password: {get_param: NovaPassword}
nova_dsn:
list_join:
@@ -1102,8 +1164,17 @@ resources:
- - 'mysql+pymysql://nova:'
- {get_param: NovaPassword}
- '@'
- - {get_param: MysqlVirtualIP}
+ - {get_param: MysqlVirtualIPUri}
- '/nova'
+ nova_api_dsn:
+ list_join:
+ - ''
+ - - 'mysql+pymysql://nova_api:'
+ - {get_param: NovaPassword}
+ - '@'
+ - {get_param: MysqlVirtualIPUri}
+ - '/nova_api'
+ upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute}
instance_name_template: {get_param: InstanceNameTemplate}
fencing_config: {get_param: FencingConfig}
pcsd_password: {get_param: PcsdPassword}
@@ -1112,15 +1183,10 @@ resources:
rabbit_cookie: {get_param: RabbitCookie}
rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
rabbit_client_port: {get_param: RabbitClientPort}
+ rabbit_ipv6: {get_param: RabbitIPv6}
+ rabbit_fd_limit: {get_param: RabbitFDLimit}
mongodb_no_journal: {get_param: MongoDbNoJournal}
- # We need to force this into quotes or hiera will return integer causing
- # the puppet module validation regexp to fail.
- # Remove when: https://github.com/puppetlabs/puppetlabs-rabbitmq/pull/401
- rabbit_fd_limit:
- str_replace:
- template: "'LIMIT'"
- params:
- LIMIT: {get_param: RabbitFDLimit}
+ mongodb_ipv6: {get_param: MongoDbIPv6}
ntp_servers: {get_param: NtpServer}
timezone: {get_param: TimeZone}
control_virtual_interface: {get_param: ControlVirtualInterface}
@@ -1128,6 +1194,7 @@ resources:
swift_hash_suffix: {get_param: SwiftHashSuffix}
swift_password: {get_param: SwiftPassword}
swift_part_power: {get_param: SwiftPartPower}
+ swift_ring_build: {get_param: SwiftRingBuild}
swift_replicas: {get_param: SwiftReplicas}
swift_min_part_hours: {get_param: SwiftMinPartHours}
swift_mount_check: {get_param: SwiftMountCheck}
@@ -1140,11 +1207,15 @@ resources:
- - 'mysql://sahara:'
- {get_param: SaharaPassword}
- '@'
- - {get_param: MysqlVirtualIP}
+ - {get_param: MysqlVirtualIPUri}
- '/sahara'
swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
- cinder_iscsi_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]}
+ cinder_iscsi_network:
+ str_replace:
+ template: "'IP'"
+ params:
+ IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]}
cinder_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
glance_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
glance_registry_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
@@ -1158,11 +1229,14 @@ resources:
neutron_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]}
ceilometer_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ aodh_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
+ gnocchi_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GnocchiApiNetwork]}]}
nova_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
nova_metadata_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
horizon_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
rabbitmq_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
redis_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RedisNetwork]}]}
+ redis_password: {get_param: RedisPassword}
redis_vip: {get_param: RedisVirtualIP}
sahara_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
memcached_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
@@ -1184,6 +1258,7 @@ resources:
- heat_config_%{::deploy_config_name}
- controller_extraconfig
- extraconfig
+ - service_configs
- controller
- database
- object
@@ -1203,7 +1278,11 @@ resources:
- cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
- neutron_nuage_data # Optionally provided by ControllerExtraConfigPre
- midonet_data #Optionally provided by AllNodesExtraConfig
+ - neutron_opencontrail_data # Optionally provided by ControllerExtraConfigPre
+ - neutron_plumgrid_data # Optionally provided by ControllerExtraConfigPre
datafiles:
+ service_configs:
+ mapped_data: {get_param: ServiceConfigSettings}
controller_extraconfig:
mapped_data: {get_param: ControllerExtraConfig}
extraconfig:
@@ -1215,7 +1294,7 @@ resources:
mapped_data:
ceph::profile::params::cluster_network: {get_input: ceph_cluster_network}
ceph::profile::params::public_network: {get_input: ceph_public_network}
- ceph::mon::public_addr: {get_input: ceph_public_ip}
+ ceph::profile::params::public_addr: {get_input: ceph_public_ip}
database:
raw_data: {get_file: hieradata/database.yaml}
object:
@@ -1229,6 +1308,7 @@ resources:
enable_fencing: {get_input: enable_fencing}
enable_load_balancer: {get_input: enable_load_balancer}
hacluster_pwd: {get_input: pcsd_password}
+ corosync_ipv6: {get_input: corosync_ipv6}
tripleo::fencing::config: {get_input: fencing_config}
# Swift
@@ -1239,15 +1319,12 @@ resources:
swift::swift_hash_suffix: {get_input: swift_hash_suffix}
swift::proxy::authtoken::admin_password: {get_input: swift_password}
swift::proxy::workers: {get_input: swift_workers}
+ tripleo::ringbuilder::build_ring: { get_input: swift_ring_build }
tripleo::ringbuilder::part_power: {get_input: swift_part_power}
tripleo::ringbuilder::replicas: {get_input: swift_replicas}
tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours}
swift_mount_check: {get_input: swift_mount_check}
- # NOTE(dprince): build_ring support is currently not wired in.
- # See: https://review.openstack.org/#/c/109225/
- tripleo::ringbuilder::build_ring: True
-
# Cinder
cinder_enable_db_purge: {get_input: cinder_enable_db_purge}
cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend}
@@ -1301,6 +1378,9 @@ resources:
glance_file_pcmk_fstype: {get_input: glance_file_pcmk_fstype}
glance_file_pcmk_manage: {get_input: glance_file_pcmk_manage}
glance_file_pcmk_options: {get_input: glance_file_pcmk_options}
+ glance::notify::rabbitmq::rabbit_userid: {get_input: rabbit_username}
+ glance::notify::rabbitmq::rabbit_password: {get_input: rabbit_password}
+ glance::notify::rabbitmq::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
# Heat
heat_stack_domain_admin_password: {get_input: heat_stack_domain_admin_password}
@@ -1322,41 +1402,21 @@ resources:
heat::api_cloudwatch::workers: {get_input: heat_workers}
heat::api_cfn::bind_host: {get_input: heat_api_network}
heat::api_cfn::workers: {get_input: heat_workers}
+ heat::engine::num_engine_workers: {get_input: heat_workers}
heat::database_connection: {get_input: heat_dsn}
heat::debug: {get_input: debug}
heat::db::mysql::password: {get_input: heat_password}
-
+ heat_enable_db_purge: {get_input: heat_enable_db_purge}
+ heat::keystone::domain::domain_password: {get_input: heat_stack_domain_admin_password}
# Keystone
- keystone::admin_token: {get_input: admin_token}
- keystone_ca_certificate: {get_input: keystone_ca_certificate}
- keystone_signing_key: {get_input: keystone_signing_key}
- keystone_signing_certificate: {get_input: keystone_signing_certificate}
- keystone_ssl_certificate: {get_input: keystone_ssl_certificate}
- keystone_ssl_certificate_key: {get_input: keystone_ssl_certificate_key}
- keystone::database_connection: {get_input: keystone_dsn}
- keystone::public_bind_host: {get_input: keystone_public_api_network}
keystone::admin_bind_host: {get_input: keystone_admin_api_network}
- keystone::debug: {get_input: debug}
- keystone::db::mysql::password: {get_input: admin_token}
- keystone::rabbit_userid: {get_input: rabbit_username}
- keystone::rabbit_password: {get_input: rabbit_password}
- keystone::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
- keystone::rabbit_port: {get_input: rabbit_client_port}
- keystone::notification_driver: {get_input: keystone_notification_driver}
- keystone::notification_format: {get_input: keystone_notification_format}
- keystone::roles::admin::email: {get_input: admin_email}
- keystone::roles::admin::password: {get_input: admin_password}
- keystone::endpoint::public_url: {get_input: keystone_public_url}
- keystone::endpoint::internal_url: {get_input: keystone_internal_url}
- keystone::endpoint::admin_url: {get_input: keystone_identity_uri}
- keystone::endpoint::region: {get_input: keystone_region}
- keystone::admin_workers: {get_input: keystone_workers}
- keystone::public_workers: {get_input: keystone_workers}
- keystone_enable_db_purge: {get_input: keystone_enable_db_purge}
-
+ keystone::public_bind_host: {get_input: keystone_public_api_network}
+ keystone::wsgi::apache::bind_host: {get_input: keystone_public_api_network}
+ keystone::wsgi::apache::admin_bind_host: {get_input: keystone_admin_api_network}
# MongoDB
mongodb::server::bind_ip: {get_input: mongo_db_network}
mongodb::server::nojournal: {get_input: mongodb_no_journal}
+ mongodb::server::ipv6: {get_input: mongodb_ipv6}
# MySQL
admin_password: {get_input: admin_password}
enable_galera: {get_input: enable_galera}
@@ -1381,6 +1441,7 @@ resources:
neutron::server::database_connection: {get_input: neutron_dsn}
neutron::server::api_workers: {get_input: neutron_workers}
neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge}
+ neutron::network_device_mtu: {get_input: neutron_tenant_mtu}
neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling}
neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata}
@@ -1406,7 +1467,7 @@ resources:
neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
- neutron::agents::ml2::ovs:bridge_mappings: {get_input: neutron_bridge_mappings}
+ neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings}
neutron_public_interface: {get_input: neutron_public_interface}
neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device}
neutron_public_interface_default_route: {get_input: neutron_public_interface_default_route}
@@ -1426,7 +1487,7 @@ resources:
neutron::keystone::auth::password: {get_input: neutron_password }
neutron::keystone::auth::region: {get_input: keystone_region}
neutron::server::notifications::nova_url: {get_input: nova_internal_url}
- neutron::server::notifications::auth_url: {get_input: neutron_admin_auth_url}
+ neutron::server::notifications::auth_url: {get_input: neutron_auth_url}
neutron::server::notifications::tenant_name: 'service'
neutron::server::notifications::project_name: 'service'
neutron::server::notifications::password: {get_input: nova_password}
@@ -1447,16 +1508,58 @@ resources:
ceilometer::agent::auth::auth_password: {get_input: ceilometer_password}
ceilometer::agent::auth::auth_url: {get_input: keystone_auth_uri}
ceilometer::agent::central::coordination_url: {get_input: ceilometer_coordination_url}
+ ceilometer::agent::notification::store_events: {get_input: ceilometer_store_events}
ceilometer::db::mysql::password: {get_input: ceilometer_password}
+ ceilometer::collector::meter_dispatcher: {get_input: ceilometer_meter_dispatcher}
+ ceilometer::dispatcher::gnocchi::url: {get_input: gnocchi_internal_url }
+ ceilometer::dispatcher::gnocchi::filter_project: 'service'
+ ceilometer::dispatcher::gnocchi::archive_policy: 'low'
+ ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml'
snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
+ # Aodh
+ aodh::rabbit_userid: {get_input: rabbit_username}
+ aodh::rabbit_password: {get_input: rabbit_password}
+ aodh::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
+ aodh::rabbit_port: {get_input: rabbit_client_port}
+ aodh::debug: {get_input: debug}
+ aodh::wsgi::apache::ssl: false
+ aodh::wsgi::apache::bind_host: {get_input: aodh_api_network}
+ aodh::api::service_name: 'httpd'
+ aodh::api::host: {get_input: aodh_api_network}
+ aodh::api::keystone_password: {get_input: aodh_password}
+ aodh::api::keystone_auth_uri: {get_input: keystone_auth_uri}
+ aodh::api::keystone_identity_uri: {get_input: keystone_identity_uri}
+ aodh::auth::auth_password: {get_input: aodh_password}
+ aodh::db::mysql::password: {get_input: aodh_password}
+      # For the migration path from ceilometer-alarm to aodh, we use the same database & coordination URL
+ aodh::evaluator::coordination_url: {get_input: ceilometer_coordination_url}
+
+ # Gnocchi
+ gnocchi_backend: {get_input: gnocchi_backend}
+ gnocchi_indexer_backend: {get_input: gnocchi_indexer_backend}
+ gnocchi_mysql_conn_string: {get_input: gnocchi_dsn}
+ gnocchi::debug: {get_input: debug}
+ gnocchi::wsgi::apache::ssl: false
+ gnocchi::wsgi::apache::bind_host: {get_input: gnocchi_api_network}
+ gnocchi::api::service_name: 'httpd'
+ gnocchi::api::host: {get_input: gnocchi_api_network}
+ gnocchi::api::keystone_password: {get_input: gnocchi_password}
+ gnocchi::api::keystone_auth_uri: {get_input: keystone_auth_uri}
+ gnocchi::api::keystone_identity_uri: {get_input: keystone_identity_uri}
+ gnocchi::db::mysql::password: {get_input: gnocchi_password}
+ gnocchi::storage::swift::swift_authurl: {get_input: keystone_auth_uri}
+ gnocchi::storage::swift::swift_key: {get_input: gnocchi_password}
+
# Nova
nova::rabbit_userid: {get_input: rabbit_username}
nova::rabbit_password: {get_input: rabbit_password}
nova::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
nova::rabbit_port: {get_input: rabbit_client_port}
+ nova::upgrade_level_compute: {get_input: upgrade_level_nova_compute}
nova::debug: {get_input: debug}
+ nova::use_ipv6: {get_input: nova_ipv6}
nova::api::auth_uri: {get_input: keystone_auth_uri}
nova::api::identity_uri: {get_input: keystone_identity_uri}
nova::api::api_bind_address: {get_input: nova_api_network}
@@ -1465,15 +1568,18 @@ resources:
nova::api::osapi_compute_workers: {get_input: nova_workers}
nova::api::ec2_workers: {get_input: nova_workers}
nova::api::metadata_workers: {get_input: nova_workers}
+ nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu}
nova::database_connection: {get_input: nova_dsn}
+ nova::api_database_connection: {get_input: nova_api_dsn}
nova::glance_api_servers: {get_input: glance_api_servers}
nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
nova::api::instance_name_template: {get_input: instance_name_template}
- nova::network::neutron::neutron_admin_password: {get_input: neutron_password}
+ nova::network::neutron::neutron_password: {get_input: neutron_password}
nova::network::neutron::neutron_url: {get_input: neutron_internal_url}
- nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url}
+ nova::network::neutron::neutron_auth_url: {get_input: neutron_auth_url}
nova::vncproxy::host: {get_input: nova_api_network}
nova::db::mysql::password: {get_input: nova_password}
+ nova::db::mysql_api::password: {get_input: nova_password}
nova_enable_db_purge: {get_input: nova_enable_db_purge}
# Horizon
@@ -1513,13 +1619,18 @@ resources:
rabbitmq::file_limit: {get_input: rabbit_fd_limit}
rabbitmq::default_user: {get_input: rabbit_username}
rabbitmq::default_pass: {get_input: rabbit_password}
+ rabbit_ipv6: {get_input: rabbit_ipv6}
# Redis
redis::bind: {get_input: redis_network}
+ redis::requirepass: {get_input: redis_password}
+ redis::masterauth: {get_input: redis_password}
+ redis::sentinel_auth_pass: {get_input: redis_password}
redis_vip: {get_input: redis_vip}
# Firewall
tripleo::firewall::manage_firewall: {get_input: manage_firewall}
tripleo::firewall::purge_firewall_rules: {get_input: purge_firewall_rules}
# Misc
+ memcached_ipv6: {get_input: memcached_ipv6}
memcached::listen_ip: {get_input: memcached_network}
neutron_public_interface_ip: {get_input: neutron_public_interface_ip}
ntp::servers: {get_input: ntp_servers}
@@ -1530,6 +1641,9 @@ resources:
tripleo::loadbalancer::public_virtual_interface: {get_input: public_virtual_interface}
tripleo::loadbalancer::haproxy_log_address: {get_input: haproxy_log_address}
tripleo::loadbalancer::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]}
+ tripleo::loadbalancer::haproxy_stats_user: {get_input: haproxy_stats_user}
+ tripleo::loadbalancer::haproxy_stats_password: {get_input: haproxy_stats_password}
+ tripleo::loadbalancer::redis_password: {get_input: redis_password}
tripleo::packages::enable_install: {get_input: enable_package_install}
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
@@ -1598,11 +1712,54 @@ outputs:
Server's IP address and hostname in the /etc/hosts format
value:
str_replace:
- template: IP HOST.DOMAIN HOST
+ template: |
+ PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST
+ EXTERNALIP EXTERNALHOST.DOMAIN EXTERNALHOST
+ INTERNAL_APIIP INTERNAL_APIHOST.DOMAIN INTERNAL_APIHOST
+ STORAGEIP STORAGEHOST.DOMAIN STORAGEHOST
+ STORAGE_MGMTIP STORAGE_MGMTHOST.DOMAIN STORAGE_MGMTHOST
+ TENANTIP TENANTHOST.DOMAIN TENANTHOST
+ MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
params:
- IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]}
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]}
DOMAIN: {get_param: CloudDomain}
- HOST: {get_attr: [Controller, name]}
+ PRIMARYHOST: {get_attr: [Controller, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - external
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - internalapi
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - storage
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - storagemgmt
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - tenant
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - management
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
@@ -1613,14 +1770,14 @@ outputs:
str_replace:
template: 'r1z1-IP:%PORT%/d1'
params:
- IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
+ IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
swift_proxy_memcache:
description: Swift proxy-memcache value
value:
str_replace:
template: "IP:11211"
params:
- IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
+ IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
config_identifier:
description: identifier which changes if the controller configuration may need re-applying
value:
diff --git a/puppet/deploy-artifacts.sh b/puppet/deploy-artifacts.sh
new file mode 100644
index 00000000..22fde9a7
--- /dev/null
+++ b/puppet/deploy-artifacts.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+TMP_DATA=$(mktemp -d)
+function cleanup {
+ rm -Rf "$TMP_DATA"
+}
+trap cleanup EXIT
+
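+# Download each artifact URL into a temporary file and install it based on
+# its type: RPMs are installed with yum, gzip-compressed tarballs are
+# extracted under /.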
+if [ -n "$artifact_urls" ]; then
+ for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
+ curl -o $TMP_DATA/file_data "$URL"
+ if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
+ yum install -y $TMP_DATA/file_data
+ elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
+ pushd /
+ tar xvzf $TMP_DATA/file_data
+ popd
+ else
+ echo "ERROR: Unsupported file format."
+ exit 1
+ fi
+ rm $TMP_DATA/file_data
+ done
+else
+ echo "No artifact_urls were set. Skipping..."
+fi
diff --git a/puppet/deploy-artifacts.yaml b/puppet/deploy-artifacts.yaml
new file mode 100644
index 00000000..17f84163
--- /dev/null
+++ b/puppet/deploy-artifacts.yaml
@@ -0,0 +1,32 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to install deployment artifacts (tarballs and/or
+ distribution packages) via HTTP URLs. The contents of the URLs can
+ be tarballs or distribution packages (RPMs). If a tarball URL is supplied,
+ it is extracted onto the target node during deployment. If a package URL
+ is supplied, the package is installed from it. Note that the
+ heat-config-script element must be built into your images, since this
+ config uses the script group below.
+
+parameters:
+ DeployArtifactURLs:
+ default: []
+ description: A list of HTTP URLs containing deployment artifacts.
+ Currently supports tarballs and RPM packages.
+ type: comma_delimited_list
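+    # A minimal usage sketch (hypothetical URL), typically supplied through
+    # an environment file:
+    #   parameter_defaults:
+    #     DeployArtifactURLs:
+    #       - "http://example.com/puppet-modules.tar.gz"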
+
+resources:
+ DeployArtifacts:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: artifact_urls
+ default: {list_join: [' ', {get_param: DeployArtifactURLs}]}
+ config: {get_file: ./deploy-artifacts.sh}
+
+outputs:
+ OS::stack_id:
+ description: The ID of the DeployArtifacts resource.
+ value: {get_resource: DeployArtifacts}
diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
index 655fd0f2..71445800 100644
--- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
@@ -65,11 +65,18 @@ parameters:
default: false
NetworkNexusSwitchHeartbeatTime:
type: number
- description: Time interval to check the state of the Nexus device
+ description: >
+ Time interval, in seconds, at which to check the state of the Nexus
+ device. Setting this to 0 disables the replay feature.
default: 0
NetworkNexusSwitchReplayCount:
type: number
- description: Number of times to attempt config replay
+ description: >
+ This configuration item is OBSOLETE. The Nexus driver now keeps
+ attempting to reconnect to a down Nexus device at an interval equal to
+ the heartbeat time. Previously this was the number of times to attempt
+ config replay.
default: 3
NetworkNexusProviderVlanAutoCreate:
type: boolean
diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml
index 7cefc24b..5942088c 100644
--- a/puppet/extraconfig/ceph/ceph-external-config.yaml
+++ b/puppet/extraconfig/ceph/ceph-external-config.yaml
@@ -38,9 +38,15 @@ parameters:
GlanceRbdPoolName:
default: images
type: string
+ GnocchiRbdPoolName:
+ default: metrics
+ type: string
CephClientUserName:
default: openstack
type: string
+ CephIPv6:
+ default: False
+ type: boolean
resources:
CephClusterConfigImpl:
@@ -54,7 +60,9 @@ resources:
mapped_data:
ceph_storage_count: {get_param: ceph_storage_count}
enable_external_ceph: true
- ceph::profile::params::mon_host: {get_param: ceph_external_mon_ips}
+ ceph_ipv6: {get_param: CephIPv6}
+ ceph_mon_host: {get_param: ceph_external_mon_ips}
+ ceph_mon_host_v6: {get_param: ceph_external_mon_ips}
ceph::profile::params::fsid: {get_param: ceph_fsid}
ceph::profile::params::client_keys:
str_replace:
@@ -63,7 +71,7 @@ resources:
secret: 'CLIENT_KEY',
mode: '0644',
cap_mon: 'allow r',
- cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL'
+ cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
}
}"
params:
@@ -72,21 +80,31 @@ resources:
NOVA_POOL: {get_param: NovaRbdPoolName}
CINDER_POOL: {get_param: CinderRbdPoolName}
GLANCE_POOL: {get_param: GlanceRbdPoolName}
+ GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
+ ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+ gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName}
+ gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName}
nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
- glance::backend::rbd::rbd_store_pool: {get_param: CephClientUserName}
+ glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
nova::compute::rbd::rbd_keyring:
list_join:
- '.'
- - 'client'
- {get_param: CephClientUserName}
+ gnocchi::storage::ceph::ceph_keyring:
+ list_join:
+ - '.'
+ - - 'client'
+ - {get_param: CephClientUserName}
ceph_client_user_name: {get_param: CephClientUserName}
ceph_pools:
- {get_param: CinderRbdPoolName}
- {get_param: NovaRbdPoolName}
- {get_param: GlanceRbdPoolName}
+ - {get_param: GnocchiRbdPoolName}
outputs:
config_id:
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
new file mode 100644
index 00000000..49c77190
--- /dev/null
+++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
@@ -0,0 +1,45 @@
+heat_template_version: 2015-04-30
+
+description: Configure hieradata for Big Switch agents on compute node
+
+parameters:
+ server:
+ description: ID of the compute node to apply this config to
+ type: string
+ NeutronBigswitchAgentEnabled:
+ description: The state of the neutron-bsn-agent service.
+ type: boolean
+ default: false
+ NeutronBigswitchLLDPEnabled:
+ description: The state of the neutron-bsn-lldp service.
+ type: boolean
+ default: true
+
+
+resources:
+ NeutronBigswitchConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ hiera:
+ datafiles:
+ neutron_bigswitch_data:
+ mapped_data:
+ neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+ neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+
+ NeutronBigswitchDeployment:
+ type: OS::Heat::StructuredDeployment
+ properties:
+ name: NeutronBigswitchDeployment
+ config: {get_resource: NeutronBigswitchConfig}
+ server: {get_param: server}
+ input_values:
+ neutron_enable_bigswitch_agent: {get_param: NeutronBigswitchAgentEnabled}
+ neutron_enable_bigswitch_lldp: {get_param: NeutronBigswitchLLDPEnabled}
+
+outputs:
+ deploy_stdout:
+ description: Deployment reference, used to trigger puppet apply on changes
+ value: {get_attr: [NeutronBigswitchDeployment, deploy_stdout]}
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
new file mode 100644
index 00000000..e496553a
--- /dev/null
+++ b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
@@ -0,0 +1,47 @@
+heat_template_version: 2015-04-30
+
+description: Compute node hieradata for Neutron OpenContrail configuration
+
+parameters:
+ server:
+ description: ID of the compute node to apply this config to
+ type: string
+ ContrailApiServerIp:
+ description: IP address of the OpenContrail API server
+ type: string
+ ContrailApiServerPort:
+ description: Port of the OpenContrail API
+ type: string
+ default: 8082
+
+resources:
+ ComputeContrailConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ hiera:
+ datafiles:
+ neutron_opencontrail_data:
+ mapped_data:
+ nova::network::neutron::network_api_class: nova.network.neutronv2.api.API
+
+ contrail::vrouter::provision_vrouter::api_address: {get_input: contrail_api_server_ip}
+ contrail::vrouter::provision_vrouter::api_port: {get_input: contrail_api_server_port}
+ contrail::vrouter::provision_vrouter::keystone_admin_user: admin
+ contrail::vrouter::provision_vrouter::keystone_admin_tenant_name: admin
+ contrail::vrouter::provision_vrouter::keystone_admin_password: '"%{::admin_password}"'
+
+ ComputeContrailDeployment:
+ type: OS::Heat::StructuredDeployment
+ properties:
+ config: {get_resource: ComputeContrailConfig}
+ server: {get_param: server}
+ input_values:
+ contrail_api_server_ip: {get_param: ContrailApiServerIp}
+ contrail_api_server_port: {get_param: ContrailApiServerPort}
+
+outputs:
+ deploy_stdout:
+ description: Output of the extra hiera data deployment
+ value: {get_attr: [ComputeContrailDeployment, deploy_stdout]}
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
index 905f196d..9b6981bb 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-11-12
+heat_template_version: 2015-10-15
description: Configure hieradata for Cinder Dell Storage Center configuration
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
index c73608f1..36db334e 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-11-06
+heat_template_version: 2015-10-15
description: Configure hieradata for Cinder Eqlx configuration
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
index 1e652960..467f57cc 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
@@ -44,7 +44,6 @@ resources:
datafiles:
neutron_bigswitch_data:
mapped_data:
- neutron_enable_bigswitch_ml2: true
neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml
new file mode 100644
index 00000000..5c686fe7
--- /dev/null
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml
@@ -0,0 +1,62 @@
+heat_template_version: 2015-04-30
+
+description: Controller hieradata for Neutron OpenContrail configuration
+
+parameters:
+ server:
+ description: ID of the controller node to apply this config to
+ type: string
+ ContrailApiServerIp:
+ description: IP address of the OpenContrail API server
+ type: string
+ ContrailApiServerPort:
+ description: Port of the OpenContrail API
+ type: string
+ default: 8082
+ ContrailMultiTenancy:
+ description: Whether to enable multi-tenancy
+ type: boolean
+ default: false
+ ContrailExtensions:
+ description: List of OpenContrail extensions to be enabled
+ type: comma_delimited_list
+ default: ''
+
+resources:
+ ControllerContrailConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ hiera:
+ datafiles:
+ neutron_opencontrail_data:
+ mapped_data:
+ neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions
+
+ neutron::plugins::opencontrail::api_server_ip: {get_input: contrail_api_server_ip}
+ neutron::plugins::opencontrail::api_server_port: {get_input: contrail_api_server_port}
+ neutron::plugins::opencontrail::multi_tenancy: {get_input: contrail_multi_tenancy}
+ neutron::plugins::opencontrail::contrail_extensions: {get_input: contrail_extensions}
+ neutron::plugins::opencontrail::keystone_auth_url: '"%{hiera(''keystone_auth_uri'')}"'
+ neutron::plugins::opencontrail::keystone_admin_user: admin
+ neutron::plugins::opencontrail::keystone_admin_tenant_name: admin
+ neutron::plugins::opencontrail::keystone_admin_password: '"%{hiera(''admin_password'')}"'
+ neutron::plugins::opencontrail::keystone_admin_token: '"%{hiera(''keystone::admin_token'')}"'
+
+ ControllerContrailDeployment:
+ type: OS::Heat::StructuredDeployment
+ properties:
+ config: {get_resource: ControllerContrailConfig}
+ server: {get_param: server}
+ input_values:
+ contrail_api_server_ip: {get_param: ContrailApiServerIp}
+ contrail_api_server_port: {get_param: ContrailApiServerPort}
+ contrail_multi_tenancy: {get_param: ContrailMultiTenancy}
+ contrail_extensions: {get_param: ContrailExtensions}
+
+
+outputs:
+ deploy_stdout:
+ description: Output of the extra hiera data deployment
+ value: {get_attr: [ControllerContrailDeployment, deploy_stdout]}
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml
new file mode 100755
index 00000000..7c0a7ad2
--- /dev/null
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml
@@ -0,0 +1,113 @@
+heat_template_version: 2015-04-30
+
+description: Controller hieradata for Neutron PLUMgrid configuration
+
+parameters:
+ server:
+ description: ID of the controller node to apply this config to
+ type: string
+ PLUMgridDirectorServer:
+ description: IP address of the PLUMgrid Director Server
+ type: string
+ default: 127.0.0.1
+ PLUMgridDirectorServerPort:
+ description: Port of the PLUMgrid Director Server
+ type: string
+ default: 443
+ PLUMgridUsername:
+ description: Username for PLUMgrid platform
+ type: string
+ PLUMgridPassword:
+ description: Password for PLUMgrid platform
+ type: string
+ hidden: true
+ PLUMgridServerTimeOut:
+ description: Request timeout duration (seconds) to PLUMgrid platform
+ type: string
+ default: 99
+ PLUMgridNovaMetadataIP:
+ description: IP address of Nova Metadata
+ type: string
+ default: 169.254.169.254
+ PLUMgridNovaMetadataPort:
+ description: Port of Nova Metadata
+ type: string
+ default: 8775
+ PLUMgridL2GatewayVendor:
+ description: Vendor for L2 Gateway Switch
+ type: string
+ default: vendor
+ PLUMgridL2GatewayUsername:
+ description: Username for L2 Gateway Switch
+ type: string
+ default: username
+ PLUMgridL2GatewayPassword:
+ description: Password for L2 Gateway Switch
+ type: string
+ hidden: true
+ PLUMgridIdentityVersion:
+ description: Keystone Identity version
+ type: string
+ default: v2.0
+ PLUMgridConnectorType:
+ description: Neutron Network Connector Type
+ type: string
+ default: distributed
+ PLUMgridNeutronPluginVersion:
+ description: PLUMgrid Neutron Plugin version
+ type: string
+ default: present
+ PLUMgridPlumlibVersion:
+ description: PLUMgrid Plumlib version
+ type: string
+ default: present
+
+
+resources:
+ ControllerPLUMgridConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ hiera:
+ datafiles:
+ neutron_plumgrid_data:
+ mapped_data:
+ neutron::plugins::plumgrid::director_server: {get_input: plumgrid_director_server}
+ neutron::plugins::plumgrid::director_server_port: {get_input: plumgrid_director_server_port}
+ neutron::plugins::plumgrid::username: {get_input: plumgrid_username}
+ neutron::plugins::plumgrid::password: {get_input: plumgrid_password}
+ neutron::plugins::plumgrid::nova_metadata_ip: {get_input: plumgrid_nova_metadata_ip}
+ neutron::plugins::plumgrid::nova_metadata_port: {get_input: plumgrid_nova_metadata_port}
+ neutron::plugins::plumgrid::l2gateway_vendor: {get_input: plumgrid_l2gateway_vendor}
+ neutron::plugins::plumgrid::l2gateway_sw_username: {get_input: plumgrid_l2gateway_sw_username}
+ neutron::plugins::plumgrid::l2gateway_sw_password: {get_input: plumgrid_l2gateway_sw_password}
+ neutron::plugins::plumgrid::connector_type: {get_input: plumgrid_connector_type}
+ neutron::plugins::plumgrid::identity_version: {get_input: plumgrid_identity_version}
+ neutron::plugins::plumgrid::package_ensure: {get_input: plumgrid_neutron_plugin_version}
+ neutron::plugins::plumgrid::plumlib_package_ensure: {get_input: plumgrid_plumlib_version}
+
+ ControllerPLUMgridDeployment:
+ type: OS::Heat::StructuredDeployment
+ properties:
+ config: {get_resource: ControllerPLUMgridConfig}
+ server: {get_param: server}
+ input_values:
+ plumgrid_director_server: {get_param: PLUMgridDirectorServer}
+ plumgrid_director_server_port: {get_param: PLUMgridDirectorServerPort}
+ plumgrid_username: {get_param: PLUMgridUsername}
+ plumgrid_password: {get_param: PLUMgridPassword}
+ plumgrid_nova_metadata_ip: {get_param: PLUMgridNovaMetadataIP}
+ plumgrid_nova_metadata_port: {get_param: PLUMgridNovaMetadataPort}
+ plumgrid_l2gateway_vendor: {get_param: PLUMgridL2GatewayVendor}
+ plumgrid_l2gateway_sw_username: {get_param: PLUMgridL2GatewayUsername}
+ plumgrid_l2gateway_sw_password: {get_param: PLUMgridL2GatewayPassword}
+ plumgrid_identity_version: {get_param: PLUMgridIdentityVersion}
+ plumgrid_connector_type: {get_param: PLUMgridConnectorType}
+ plumgrid_neutron_plugin_version: {get_param: PLUMgridNeutronPluginVersion}
+ plumgrid_plumlib_version: {get_param: PLUMgridPlumlibVersion}
+
+outputs:
+ deploy_stdout:
+ description: Deployment reference, used to trigger puppet apply on changes
+ value: {get_attr: [ControllerPLUMgridDeployment, deploy_stdout]}
diff --git a/puppet/extraconfig/tls/ca-inject.yaml b/puppet/extraconfig/tls/ca-inject.yaml
index 5a36e951..f955034d 100644
--- a/puppet/extraconfig/tls/ca-inject.yaml
+++ b/puppet/extraconfig/tls/ca-inject.yaml
@@ -4,7 +4,7 @@ description: >
This is a template which will inject the trusted anchor.
parameters:
- # Can be overriden via parameter_defaults in the environment
+ # Can be overridden via parameter_defaults in the environment
SSLRootCertificate:
description: >
The content of a CA's SSL certificate file in PEM format.
@@ -45,7 +45,7 @@ resources:
cat > ${cacert_path} << EOF
${cacert_content}
EOF
- chmod 0440 ${cacert_path}
+ chmod 0444 ${cacert_path}
chown root:root ${cacert_path}
${update_anchor_command}
md5sum ${cacert_path} > ${heat_outputs_path}.root_cert_md5sum
diff --git a/puppet/extraconfig/tls/tls-cert-inject.yaml b/puppet/extraconfig/tls/tls-cert-inject.yaml
index 20bb3737..e281ef51 100644
--- a/puppet/extraconfig/tls/tls-cert-inject.yaml
+++ b/puppet/extraconfig/tls/tls-cert-inject.yaml
@@ -5,7 +5,7 @@ description: >
for the load balancer using the given parameters.
parameters:
- # Can be overriden via parameter_defaults in the environment
+ # Can be overridden via parameter_defaults in the environment
SSLCertificate:
description: >
The content of the SSL certificate (without Key) in PEM format.
@@ -21,7 +21,7 @@ parameters:
type: string
hidden: true
- # Can be overriden by parameter_defaults if the user wants to try deploying
+ # Can be overridden by parameter_defaults if the user wants to try deploying
# this in a distro that doesn't support this path.
DeployedSSLCertificatePath:
default: '/etc/pki/tls/private/overcloud_endpoint.pem'
@@ -63,6 +63,14 @@ resources:
openssl rsa -noout -modulus -in ${cert_path} \
| openssl md5 | cut -c 10- \
> ${heat_outputs_path}.key_modulus
+ # We need to reload haproxy in case the certificate changed because
+ # puppet doesn't know the contents of the cert file. The pacemaker
+ # case is handled separately in a pacemaker-specific resource.
+ pacemaker_status=$(systemctl is-active pacemaker)
+ haproxy_status=$(systemctl is-active haproxy)
+      if [ "$pacemaker_status" != "active" -a "$haproxy_status" = "active" ]; then
+ systemctl reload haproxy
+ fi
ControllerTLSDeployment:
type: OS::Heat::SoftwareDeployment
diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml
index b4b51abf..34965959 100644
--- a/puppet/hieradata/common.yaml
+++ b/puppet/hieradata/common.yaml
@@ -3,17 +3,24 @@ ssh::server::storeconfigs_enabled: false
# ceilometer settings used by compute and controller ceilo auth settings
ceilometer::agent::auth::auth_region: 'regionOne'
-# FIXME: Might be better to use 'service' tenant here but this requires
-# changes in the tripleo-incubator keystone role setup
-ceilometer::agent::auth::auth_tenant_name: 'admin'
+ceilometer::agent::auth::auth_tenant_name: 'service'
+
+aodh::auth::auth_region: 'regionOne'
+aodh::auth::auth_tenant_name: 'service'
+
+gnocchi::auth::auth_region: 'regionOne'
+gnocchi::auth::auth_tenant_name: 'service'
nova::api::admin_tenant_name: 'service'
-nova::network::neutron::neutron_admin_tenant_name: 'service'
-nova::network::neutron::neutron_admin_username: 'neutron'
+nova::network::neutron::neutron_project_name: 'service'
+nova::network::neutron::neutron_username: 'neutron'
nova::network::neutron::dhcp_domain: ''
neutron::allow_overlapping_ips: true
+kernel_modules:
+ nf_conntrack: {}
+
sysctl_settings:
net.ipv4.tcp_keepalive_intvl:
value: 1
@@ -21,6 +28,17 @@ sysctl_settings:
value: 5
net.ipv4.tcp_keepalive_time:
value: 5
+ net.nf_conntrack_max:
+ value: 500000
+ net.netfilter.nf_conntrack_max:
+ value: 500000
+  # Prevent Neutron bridges from autoconfiguring IPv6 addresses
+ net.ipv6.conf.default.accept_ra:
+ value: 0
+ net.ipv6.conf.default.autoconf:
+ value: 0
+ net.core.netdev_max_backlog:
+ value: 10000
nova::rabbit_heartbeat_timeout_threshold: 60
neutron::rabbit_heartbeat_timeout_threshold: 60
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index fa8dcc81..865210c9 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -1,13 +1,12 @@
# Hiera data here applies to all compute nodes
nova::notify_on_state_change: 'vm_and_task_state'
-
+nova::notification_driver: messagingv2
nova::compute::enabled: true
nova::compute::instance_usage_audit: true
nova::compute::instance_usage_audit_period: 'hour'
nova::compute::vnc_enabled: true
-nova::compute::libvirt::vncserver_listen: '0.0.0.0'
nova::compute::libvirt::migration_support: true
nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index f8ef6408..79db9418 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -1,10 +1,21 @@
# Hiera data here applies to all controller nodes
+
nova::api::enabled: true
nova::conductor::enabled: true
nova::consoleauth::enabled: true
nova::vncproxy::enabled: true
nova::scheduler::enabled: true
+# gnocchi
+gnocchi::db::sync::extra_opts: '--skip-storage'
+gnocchi::storage::swift::swift_user: 'service:gnocchi'
+gnocchi::storage::swift::swift_auth_version: 2
+gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26'
+gnocchi::statsd::user_id: '27c0d3f8-e7ee-42f0-8317-72237d1c5ae3'
+gnocchi::statsd::project_id: '6c38cd8d-099a-4cb2-aecf-17be688e8616'
+gnocchi::statsd::flush_delay: 10
+gnocchi::statsd::archive_policy_name: 'low'
+
# rabbitmq
rabbitmq::delete_guest_user: false
rabbitmq::wipe_db_on_cookie_change: true
@@ -29,14 +40,21 @@ redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
+# keystone
+keystone::roles::admin::email: 'root@localhost'
+
# service tenant
glance::api::keystone_tenant: 'service'
+aodh::api::keystone_tenant: 'service'
glance::registry::keystone_tenant: 'service'
neutron::server::auth_tenant: 'service'
neutron::agents::metadata::auth_tenant: 'service'
+neutron::agents::l3::router_delete_namespaces: True
+neutron::agents::dhcp::dhcp_delete_namespaces: True
cinder::api::keystone_tenant: 'service'
swift::proxy::authtoken::admin_tenant_name: 'service'
ceilometer::api::keystone_tenant: 'service'
+gnocchi::api::keystone_tenant: 'service'
heat::keystone_tenant: 'service'
sahara::admin_tenant_name: 'service'
@@ -50,6 +68,8 @@ keystone::config::keystone_config:
value: 'HTTP_X_FORWARDED_PROTO'
ec2/driver:
value: 'keystone.contrib.ec2.backends.sql.Ec2'
+keystone::service_name: 'httpd'
+keystone::wsgi::apache::ssl: false
#swift
swift::proxy::pipeline:
@@ -72,7 +92,6 @@ glance::api::pipeline: 'keystone'
glance::api::show_image_direct_url: true
glance::registry::pipeline: 'keystone'
glance::backend::swift::swift_store_create_container_on_put: true
-glance::backend::rbd::rbd_store_user: 'openstack'
glance_file_pcmk_directory: '/var/lib/glance/images'
# neutron
@@ -82,10 +101,11 @@ neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf
# nova
nova::notify_on_state_change: 'vm_and_task_state'
nova::api::default_floating_pool: 'public'
-nova::api::osapi_v3: true
+nova::api::sync_db_api: true
nova::scheduler::filter::ram_allocation_ratio: '1.0'
nova::cron::archive_deleted_rows::hour: '*/12'
nova::cron::archive_deleted_rows::destination: '/dev/null'
+nova::notification_driver: messaging
# ceilometer
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
@@ -93,17 +113,27 @@ ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
# cinder
cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler
cinder::cron::db_purge::destination: '/dev/null'
+cinder::host: hostgroup
+cinder_user_enabled_backends: []
# heat
heat::engine::configure_delegated_roles: false
heat::engine::trusts_delegated_roles: []
heat::instance_user: ''
+heat::cron::purge_deleted::age: 30
+heat::cron::purge_deleted::age_type: 'days'
+heat::cron::purge_deleted::maxdelay: 3600
+heat::cron::purge_deleted::destination: '/dev/null'
+heat::keystone::domain::domain_name: 'heat_stack'
+heat::keystone::domain::domain_admin: 'heat_stack_domain_admin'
+heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost'
# pacemaker
pacemaker::corosync::cluster_name: 'tripleo_cluster'
pacemaker::corosync::manage_fw: false
pacemaker::resource_defaults::defaults:
resource-stickiness: { value: INFINITY }
+corosync_token_timeout: 10000
# horizon
horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
@@ -131,6 +161,8 @@ tripleo::loadbalancer::redis: true
tripleo::loadbalancer::sahara: true
tripleo::loadbalancer::swift_proxy_server: true
tripleo::loadbalancer::ceilometer: true
+tripleo::loadbalancer::aodh: true
+tripleo::loadbalancer::gnocchi: true
tripleo::loadbalancer::heat_api: true
tripleo::loadbalancer::heat_cloudwatch: true
tripleo::loadbalancer::heat_cfn: true
@@ -243,3 +275,11 @@ tripleo::firewall::firewall_rules:
'127 snmp':
port: 161
proto: 'udp'
+ '128 aodh':
+ port:
+ - 8042
+ - 13042
+ '129 gnocchi-api':
+ port:
+ - 8041
+ - 13041
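
The two firewall entries above open the Aodh API (8042) and Gnocchi API (8041) ports together with their TLS-proxied 13xxx counterparts. tripleo::firewall::firewall_rules is a plain hash of rule name to parameters, so an operator can append further rules of the same shape; a minimal hieradata sketch follows, with a hypothetical rule name and port (proto appears to default to tcp, which is presumably why the aodh/gnocchi entries omit it):

    tripleo::firewall::firewall_rules:
      '130 example-api':      # hypothetical rule, not part of this change
        port:
          - 8777
        proto: 'tcp'
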
diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml
index 89577505..4eb199c8 100644
--- a/puppet/hieradata/database.yaml
+++ b/puppet/hieradata/database.yaml
@@ -6,6 +6,13 @@ nova::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+nova::db::mysql_api::user: nova_api
+nova::db::mysql_api::host: "%{hiera('mysql_virtual_ip')}"
+nova::db::mysql_api::dbname: nova_api
+nova::db::mysql_api::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+
# Glance
glance::db::mysql::user: glance
glance::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
@@ -54,6 +61,14 @@ ceilometer::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+# Gnocchi
+gnocchi::db::mysql::user: gnocchi
+gnocchi::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
+gnocchi::db::mysql::dbname: gnocchi
+gnocchi::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+
sahara::db::mysql::user: sahara
sahara::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
sahara::db::mysql::dbname: sahara
diff --git a/puppet/hieradata/volume.yaml b/puppet/hieradata/volume.yaml
index f4cd78a9..8640c0a7 100644
--- a/puppet/hieradata/volume.yaml
+++ b/puppet/hieradata/volume.yaml
@@ -9,4 +9,6 @@ cinder::config::cinder_config:
DEFAULT/swift_catalog_info:
value: 'object-store:swift:internalURL'
+cinder_user_enabled_backends: []
+
volume_classes: [] \ No newline at end of file
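
The cinder_user_enabled_backends key added here (and in controller.yaml) is an operator hook: the controller manifests later merge it into the computed backend list with union(), so a backend configured purely through the cinder::config::cinder_config hash can still be enabled. A hypothetical override, with an illustrative backend name:

    cinder_user_enabled_backends:
      - 'my_custom_backend'   # hypothetical; its driver options would come from cinder::config::cinder_config
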
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index 1d801adc..fd7faff1 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -16,7 +16,9 @@
include ::tripleo::packages
include ::tripleo::firewall
+create_resources(kmod::load, hiera('kernel_modules'), {})
create_resources(sysctl::value, hiera('sysctl_settings'), {})
+Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
if count(hiera('ntp::servers')) > 0 {
include ::ntp
@@ -38,6 +40,14 @@ if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
} -> Class['ceph::profile::osd']
}
+if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+} else {
+ $mon_host = hiera('ceph_mon_host')
+}
+class { '::ceph::profile::params':
+ mon_host => $mon_host,
+}
include ::ceph::conf
include ::ceph::profile::client
include ::ceph::profile::osd
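
The kmod::load/sysctl pairing added to each node manifest reads a kernel_modules hash from hiera (one entry per module) and uses a resource collector to order module loading before any sysctl value is applied; this works because Puppet tags resources with the name of the class or define that declares them, so the execs inside kmod::load carry the 'kmod::load' tag. A sketch of the expected hieradata shape, with hypothetical entries:

    kernel_modules:
      nf_conntrack: {}                 # hypothetical module
    sysctl_settings:
      net.nf_conntrack_max:            # hypothetical sysctl that needs the module loaded first
        value: 500000
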
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index 593cc500..cc58cb14 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -16,7 +16,9 @@
include ::tripleo::packages
include ::tripleo::firewall
+create_resources(kmod::load, hiera('kernel_modules'), {})
create_resources(sysctl::value, hiera('sysctl_settings'), {})
+Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
if count(hiera('ntp::servers')) > 0 {
include ::ntp
@@ -37,18 +39,31 @@ exec { 'libvirt-default-net-destroy':
before => Service['libvirt'],
}
+# When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique
+exec { 'reset-iscsi-initiator-name':
+ command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi',
+ onlyif => '/usr/bin/test ! -f /etc/iscsi/.initiator_reset',
+}->
+
+file { '/etc/iscsi/.initiator_reset':
+ ensure => present,
+}
+
include ::nova
include ::nova::config
include ::nova::compute
-nova_config {
- 'DEFAULT/my_ip': value => $ipaddress;
- 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
-}
-
$rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false)
$rbd_persistent_storage = hiera('rbd_persistent_storage', false)
if $rbd_ephemeral_storage or $rbd_persistent_storage {
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
include ::ceph::conf
include ::ceph::profile::client
@@ -70,7 +85,27 @@ if hiera('cinder_enable_nfs_backend', false) {
package {'nfs-utils': } -> Service['nova-compute']
}
-include ::nova::compute::libvirt
+if str2bool(hiera('nova::use_ipv6', false)) {
+ $vncserver_listen = '::0'
+} else {
+ $vncserver_listen = '0.0.0.0'
+}
+class { '::nova::compute::libvirt' :
+ vncserver_listen => $vncserver_listen,
+}
+
+nova_config {
+ 'DEFAULT/my_ip': value => $ipaddress;
+ 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
+ 'DEFAULT/host': value => $fqdn;
+  # TUNNELLED mode provides a security enhancement when using shared storage, but it is
+  # not supported without shared storage.
+  # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
+  # In future QEMU versions (2.6, mostly), Dan's native encryption work
+  # will obsolete the need for the TUNNELLED transport mode.
+ 'libvirt/live_migration_tunnelled': value => $rbd_ephemeral_storage;
+}
+
if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
file {'/etc/libvirt/qemu.conf':
ensure => present,
@@ -108,8 +143,41 @@ elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV
cassandra_seeds => $cassandra_node_ips
}
}
+elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
+
+ include ::contrail::vrouter
+ # NOTE: it's not possible to use this class without a functional
+ # contrail controller up and running
+ #class {'::contrail::vrouter::provision_vrouter':
+ # require => Class['contrail::vrouter'],
+ #}
+}
+elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
+ # forward all ipv4 traffic
+ # this is required for the vms to pass through the gateways public interface
+ sysctl::value { 'net.ipv4.ip_forward': value => '1' }
+
+ # ifc_ctl_pp needs to be invoked by root as part of the vif.py when a VM is powered on
+ file { '/etc/sudoers.d/ifc_ctl_sudoers':
+ ensure => file,
+ owner => root,
+ group => root,
+ mode => '0440',
+ content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n",
+ }
+}
else {
+  # NOTE: this code won't live in puppet-neutron until the Neutron OVS agent
+  # can be gracefully restarted. See https://review.openstack.org/#/c/297211
+  # In the meantime, it's safe to restart the agent on each change in neutron.conf,
+  # because Puppet changes are supposed to be done during bootstrap and upgrades.
+  # Some resources managed by Neutron_config (like messaging and logging options)
+  # require a restart of the OVS agent. This code does that.
+  # In Newton, the OVS agent will support graceful restart, so we'll drop this
+  # code from here and fix it in puppet-neutron.
+ Neutron_config<||> ~> Service['neutron-ovs-agent-service']
+
include ::neutron::plugins::ml2
include ::neutron::agents::ml2::ovs
@@ -119,8 +187,15 @@ else {
n1kv_version => hiera('n1kv_vem_version', undef),
}
}
+
+ if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+ include ::neutron::agents::bigswitch
+ }
}
+neutron_config {
+ 'DEFAULT/host': value => $fqdn;
+}
include ::ceilometer
include ::ceilometer::config
@@ -134,7 +209,7 @@ snmp::snmpv3_user { $snmpd_user:
}
class { '::snmp':
agentaddress => ['udp:161','udp6:[::1]:161'],
- snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
+ snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
hiera_include('compute_classes')
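
The snmpd_config change (repeated in both controller manifests) adds a createUser directive for the read-only SNMPv3 user alongside the existing rouser entry, pulling the password from hiera. With hypothetical values snmpd_readonly_user_name: ro_snmp_user and snmpd_readonly_user_password: secrete, the two join() calls render snmpd.conf lines roughly like:

    createUser ro_snmp_user MD5 "secrete"
    rouser ro_snmp_user
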
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 70aebddd..d7bb025a 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -20,7 +20,9 @@ $enable_load_balancer = hiera('enable_load_balancer', true)
if hiera('step') >= 1 {
+ create_resources(kmod::load, hiera('kernel_modules'), {})
create_resources(sysctl::value, hiera('sysctl_settings'), {})
+ Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
$controller_node_ips = split(hiera('controller_node_ips'), ',')
@@ -44,16 +46,26 @@ if hiera('step') >= 2 {
# MongoDB
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
include ::mongodb::globals
-
+ include ::mongodb::client
include ::mongodb::server
- $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+    # NOTE(gfidente): We need to pass the list of IPv6 addresses *with* the port
+    # and without brackets as the 'members' argument for the 'mongodb_replset'
+    # resource.
+ if str2bool(hiera('mongodb::server::ipv6', false)) {
+ $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
+ $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ } else {
+ $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ }
$mongo_node_string = join($mongo_node_ips_with_port, ',')
$mongodb_replset = hiera('mongodb::server::replset')
$ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
if downcase(hiera('bootstrap_nodeid')) == $::hostname {
mongodb_replset { $mongodb_replset :
- members => $mongo_node_ips_with_port,
+ members => $mongo_node_ips_with_port_nobr,
}
}
}
@@ -83,11 +95,15 @@ if hiera('step') >= 2 {
$mysql_config_file = '/etc/my.cnf.d/server.cnf'
}
  # TODO Galera
+  # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
+  # set bind-address to a hostname instead of an ip address; to move MySQL
+  # off the internal_api network onto another one we'll have to customize both
+  # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
class { '::mysql::server':
config_file => $mysql_config_file,
override_options => {
'mysqld' => {
- 'bind-address' => hiera('mysql_bind_host'),
+ 'bind-address' => $::hostname,
'max_connections' => hiera('mysql_max_connections'),
'open_files_limit' => '-1',
},
@@ -100,23 +116,38 @@ if hiera('step') >= 2 {
include ::keystone::db::mysql
include ::glance::db::mysql
include ::nova::db::mysql
+ include ::nova::db::mysql_api
include ::neutron::db::mysql
include ::cinder::db::mysql
include ::heat::db::mysql
include ::sahara::db::mysql
+ if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
+ include ::gnocchi::db::mysql
+ }
if downcase(hiera('ceilometer_backend')) == 'mysql' {
include ::ceilometer::db::mysql
+ include ::aodh::db::mysql
}
$rabbit_nodes = hiera('rabbit_node_ips')
if count($rabbit_nodes) > 1 {
+
+ $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
+ if $rabbit_ipv6 {
+ $rabbit_env = merge(hiera('rabbitmq_environment'), {
+ 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
+ })
+ } else {
+ $rabbit_env = hiera('rabbitmq_environment')
+ }
+
class { '::rabbitmq':
config_cluster => true,
cluster_nodes => $rabbit_nodes,
tcp_keepalive => false,
config_kernel_variables => hiera('rabbitmq_kernel_variables'),
config_variables => hiera('rabbitmq_config_variables'),
- environment_variables => hiera('rabbitmq_environment'),
+ environment_variables => $rabbit_env,
}
rabbitmq_policy { 'ha-all@/':
pattern => '^(?!amq\.).*',
@@ -134,8 +165,15 @@ if hiera('step') >= 2 {
$enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
if $enable_ceph {
+ $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
class { '::ceph::profile::params':
- mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
+ mon_initial_members => $mon_initial_members,
+ mon_host => $mon_host,
}
include ::ceph::conf
include ::ceph::profile::mon
@@ -161,48 +199,21 @@ if hiera('step') >= 2 {
}
if str2bool(hiera('enable_external_ceph', false)) {
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
include ::ceph::conf
include ::ceph::profile::client
}
} #END STEP 2
-if hiera('step') >= 3 {
-
- include ::keystone
- include ::keystone::config
- include ::keystone::roles::admin
- include ::keystone::endpoint
-
- #TODO: need a cleanup-keystone-tokens.sh solution here
-
- file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
- ensure => 'directory',
- owner => 'keystone',
- group => 'keystone',
- require => Package['keystone'],
- }
- file { '/etc/keystone/ssl/certs/signing_cert.pem':
- content => hiera('keystone_signing_certificate'),
- owner => 'keystone',
- group => 'keystone',
- notify => Service['keystone'],
- require => File['/etc/keystone/ssl/certs'],
- }
- file { '/etc/keystone/ssl/private/signing_key.pem':
- content => hiera('keystone_signing_key'),
- owner => 'keystone',
- group => 'keystone',
- notify => Service['keystone'],
- require => File['/etc/keystone/ssl/private'],
- }
- file { '/etc/keystone/ssl/certs/ca.pem':
- content => hiera('keystone_ca_certificate'),
- owner => 'keystone',
- group => 'keystone',
- notify => Service['keystone'],
- require => File['/etc/keystone/ssl/certs'],
- }
+if hiera('step') >= 4 {
$glance_backend = downcase(hiera('glance_backend', 'swift'))
case $glance_backend {
@@ -214,17 +225,25 @@ if hiera('step') >= 3 {
$http_store = ['glance.store.http.Store']
$glance_store = concat($http_store, $backend_store)
- # TODO: notifications, scrubber, etc.
+  # TODO: scrubber and other optional features
include ::glance
include ::glance::config
class { '::glance::api':
known_stores => $glance_store,
}
include ::glance::registry
+ include ::glance::notify::rabbitmq
include join(['::glance::backend::', $glance_backend])
+ $nova_ipv6 = hiera('nova::use_ipv6', false)
+ if $nova_ipv6 {
+ $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
+ } else {
+ $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
+ }
+
class { '::nova' :
- memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'),
+ memcached_servers => $memcached_servers
}
include ::nova::config
include ::nova::api
@@ -246,7 +265,8 @@ if hiera('step') >= 3 {
if hiera('enable_zookeeper_on_controller') {
class {'::tripleo::cluster::zookeeper':
zookeeper_server_ips => $zookeeper_node_ips,
- zookeeper_client_ip => $ipaddress,
+ # TODO: create a 'bind' hiera key for zookeeper
+ zookeeper_client_ip => hiera('neutron::bind_host'),
zookeeper_hostnames => hiera('controller_node_names')
}
}
@@ -255,7 +275,8 @@ if hiera('step') >= 3 {
if hiera('enable_cassandra_on_controller') {
class {'::tripleo::cluster::cassandra':
cassandra_servers => $cassandra_node_ips,
- cassandra_ip => $ipaddress
+ # TODO: create a 'bind' hiera key for cassandra
+ cassandra_ip => hiera('neutron::bind_host'),
}
}
@@ -266,10 +287,11 @@ if hiera('step') >= 3 {
class {'::tripleo::network::midonet::api':
zookeeper_servers => $zookeeper_node_ips,
- vip => $ipaddress,
- keystone_ip => $ipaddress,
+ vip => hiera('tripleo::loadbalancer::public_virtual_ip'),
+ keystone_ip => hiera('tripleo::loadbalancer::public_virtual_ip'),
keystone_admin_token => hiera('keystone::admin_token'),
- bind_address => $ipaddress,
+ # TODO: create a 'bind' hiera key for api
+ bind_address => hiera('neutron::bind_host'),
admin_password => hiera('admin_password')
}
@@ -289,11 +311,21 @@ if hiera('step') >= 3 {
include ::neutron::server
include ::neutron::server::notifications
- # If the value of core plugin is set to 'nuage',
- # include nuage core plugin, and it does not
- # need the l3, dhcp and metadata agents
+  # If the core plugin is set to 'nuage', 'opencontrail' or 'plumgrid',
+  # include the matching core plugin class instead of the default
+  # 'ml2' setup with the l3, dhcp and metadata agents.
if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
include ::neutron::plugins::nuage
+ } elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
+ include ::neutron::plugins::opencontrail
+ }
+ elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
+ class { '::neutron::plugins::plumgrid' :
+ connection => hiera('neutron::server::database_connection'),
+ controller_priv_host => hiera('keystone_admin_api_vip'),
+ admin_password => hiera('admin_password'),
+ metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'),
+ }
} else {
include ::neutron::agents::l3
include ::neutron::agents::dhcp
@@ -312,7 +344,7 @@ if hiera('step') >= 3 {
if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
class {'::neutron::plugins::midonet':
- midonet_api_ip => $ipaddress,
+ midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'),
keystone_tenant => hiera('neutron::server::auth_tenant'),
keystone_password => hiera('neutron::server::auth_password')
}
@@ -344,8 +376,9 @@ if hiera('step') >= 3 {
include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
}
- if hiera('neutron_enable_bigswitch_ml2', false) {
+ if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
include ::neutron::plugins::ml2::bigswitch::restproxy
+ include ::neutron::agents::bigswitch
}
neutron_l3_agent_config {
'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
@@ -363,10 +396,12 @@ if hiera('step') >= 3 {
include ::cinder
include ::cinder::config
+ include ::tripleo::ssl::cinder_config
include ::cinder::api
include ::cinder::glance
include ::cinder::scheduler
include ::cinder::volume
+ include ::cinder::ceilometer
class { '::cinder::setup_test_volume':
size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
}
@@ -400,6 +435,7 @@ if hiera('step') >= 3 {
$cinder_rbd_backend = 'tripleo_ceph'
cinder::backend::rbd { $cinder_rbd_backend :
+ backend_host => hiera('cinder::host'),
rbd_pool => hiera('cinder_rbd_pool_name'),
rbd_user => hiera('ceph_client_user_name'),
rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
@@ -410,10 +446,6 @@ if hiera('step') >= 3 {
if hiera('cinder_enable_eqlx_backend', false) {
$cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
- cinder_config {
- "${cinder_eqlx_backend}/host": value => 'hostgroup';
- }
-
cinder::backend::eqlx { $cinder_eqlx_backend :
volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
san_ip => hiera('cinder::backend::eqlx::san_ip', undef),
@@ -421,7 +453,7 @@ if hiera('step') >= 3 {
san_password => hiera('cinder::backend::eqlx::san_password', undef),
san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef),
eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
- eqlx_pool => hiera('cinder::backend::eqlx::eqlx_lpool', undef),
+ eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef),
eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_san_password', undef),
@@ -431,10 +463,6 @@ if hiera('step') >= 3 {
if hiera('cinder_enable_dellsc_backend', false) {
$cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
- cinder_config {
- "${cinder_dellsc_backend}/host": value => 'hostgroup';
- }
-
cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
@@ -443,7 +471,7 @@ if hiera('step') >= 3 {
dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
- dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef),
+ dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef),
dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
}
@@ -452,10 +480,6 @@ if hiera('step') >= 3 {
if hiera('cinder_enable_netapp_backend', false) {
$cinder_netapp_backend = hiera('cinder::backend::netapp::title')
- cinder_config {
- "${cinder_netapp_backend}/host": value => 'hostgroup';
- }
-
if hiera('cinder::backend::netapp::nfs_shares', undef) {
$cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
}
@@ -504,7 +528,7 @@ if hiera('step') >= 3 {
$cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
class { '::cinder::backends' :
- enabled_backends => $cinder_enabled_backends,
+ enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
}
# swift proxy
@@ -557,14 +581,32 @@ if hiera('step') >= 3 {
include ::ceilometer::expirer
include ::ceilometer::collector
include ::ceilometer::agent::auth
+ include ::ceilometer::dispatcher::gnocchi
class { '::ceilometer::db' :
database_connection => $ceilometer_database_connection,
}
Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
+ # Aodh
+ class { '::aodh' :
+ database_connection => $ceilometer_database_connection,
+ }
+ include ::aodh::db::sync
+  # To manage the upgrade, run the Ceilometer DB sync before the Aodh one:
+ Exec['ceilometer-dbsync'] -> Exec['aodh-db-sync']
+ include ::aodh::auth
+ include ::aodh::api
+ include ::aodh::wsgi::apache
+ include ::aodh::evaluator
+ include ::aodh::notifier
+ include ::aodh::listener
+ include ::aodh::client
+
# Heat
- include ::heat
+ class { '::heat' :
+ notification_driver => 'messaging',
+ }
include ::heat::config
include ::heat::api
include ::heat::api_cfn
@@ -584,11 +626,38 @@ if hiera('step') >= 3 {
}
$neutron_options = {'profile_support' => $_profile_support }
+ $memcached_ipv6 = hiera('memcached_ipv6', false)
+ if $memcached_ipv6 {
+ $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
+ } else {
+ $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
+ }
+
class { '::horizon':
- cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
+ cache_server_ip => $horizon_memcached_servers,
neutron_options => $neutron_options,
}
+ # Gnocchi
+ $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
+ class { '::gnocchi':
+ database_connection => $gnocchi_database_connection,
+ }
+ include ::gnocchi::api
+ include ::gnocchi::wsgi::apache
+ include ::gnocchi::client
+ include ::gnocchi::db::sync
+ include ::gnocchi::storage
+ include ::gnocchi::metricd
+ include ::gnocchi::statsd
+ $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift'))
+ case $gnocchi_backend {
+ 'swift': { include ::gnocchi::storage::swift }
+ 'file': { include ::gnocchi::storage::file }
+ 'rbd': { include ::gnocchi::storage::ceph }
+ default: { fail('Unrecognized gnocchi_backend parameter.') }
+ }
+
$snmpd_user = hiera('snmpd_readonly_user_name')
snmp::snmpv3_user { $snmpd_user:
authtype => 'MD5',
@@ -596,28 +665,44 @@ if hiera('step') >= 3 {
}
class { '::snmp':
agentaddress => ['udp:161','udp6:[::1]:161'],
- snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
+ snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
hiera_include('controller_classes')
-} #END STEP 3
+} #END STEP 4
-if hiera('step') >= 4 {
- $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
+if hiera('step') >= 5 {
$nova_enable_db_purge = hiera('nova_enable_db_purge', true)
$cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
+ $heat_enable_db_purge = hiera('heat_enable_db_purge', true)
- if $keystone_enable_db_purge {
- include ::keystone::cron::token_flush
- }
if $nova_enable_db_purge {
include ::nova::cron::archive_deleted_rows
}
if $cinder_enable_db_purge {
include ::cinder::cron::db_purge
}
-} #END STEP 4
+ if $heat_enable_db_purge {
+ include ::heat::cron::purge_deleted
+ }
+
+ if downcase(hiera('bootstrap_nodeid')) == $::hostname {
+    # Class ::heat::keystone::domain has to run on the bootstrap node
+    # because it creates DB entities via API calls.
+ include ::heat::keystone::domain
+
+ Class['::keystone::roles::admin'] -> Class['::heat::keystone::domain']
+ } else {
+    # On non-bootstrap nodes we don't need to create Keystone resources again
+ class { '::heat::keystone::domain':
+ manage_domain => false,
+ manage_user => false,
+ manage_role => false,
+ }
+ }
+
+} #END STEP 5
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')])
package_manifest{$package_manifest_name: ensure => present}
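
The MongoDB address handling introduced above (and mirrored in the pacemaker manifest) derives two lists from the same mongo_node_ips because the Ceilometer connection string wants bracketed IPv6 addresses while the mongodb_replset resource wants them bare. With a hypothetical node list, the stdlib prefix()/suffix() calls behave roughly as follows:

    # hypothetical input
    $mongo_node_ips = ['fd00:fd00:fd00:3000::10', 'fd00:fd00:fd00:3000::11']

    # with mongodb::server::ipv6 set to true:
    #   $mongo_node_ips_with_port      = ['[fd00:fd00:fd00:3000::10]:27017', '[fd00:fd00:fd00:3000::11]:27017']
    #     -> joined into the mongodb:// connection string for Ceilometer
    #   $mongo_node_ips_with_port_nobr = ['fd00:fd00:fd00:3000::10:27017', 'fd00:fd00:fd00:3000::11:27017']
    #     -> passed as the 'members' argument to mongodb_replset
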
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index a9adbd5d..3a6dbc06 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -18,6 +18,25 @@ Pcmk_resource <| |> {
try_sleep => 3,
}
+# TODO(jistr): use pcs resource provider instead of just no-ops
+Service <|
+ tag == 'aodh-service' or
+ tag == 'cinder-service' or
+ tag == 'ceilometer-service' or
+ tag == 'glance-service' or
+ tag == 'gnocchi-service' or
+ tag == 'heat-service' or
+ tag == 'keystone-service' or
+ tag == 'neutron-service' or
+ tag == 'nova-service' or
+ tag == 'sahara-service'
+|> {
+ hasrestart => true,
+ restart => '/bin/true',
+ start => '/bin/true',
+ stop => '/bin/true',
+}
+
include ::tripleo::packages
include ::tripleo::firewall
@@ -29,17 +48,19 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) {
$sync_db = false
}
-$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
+$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 6
$enable_load_balancer = hiera('enable_load_balancer', true)
# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
-# (occurences of this variable will be gradually replaced with false)
-$non_pcmk_start = hiera('step') >= 4
+# (occurrences of this variable will be gradually replaced with false)
+$non_pcmk_start = hiera('step') >= 5
if hiera('step') >= 1 {
+ create_resources(kmod::load, hiera('kernel_modules'), {})
create_resources(sysctl::value, hiera('sysctl_settings'), {})
+ Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
include ::timezone
@@ -60,15 +81,19 @@ if hiera('step') >= 1 {
}
$pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
- user { 'hacluster':
- ensure => present,
- } ->
+ $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
+ if $corosync_ipv6 {
+ $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000), '--ipv6' => '' }
+ } else {
+ $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000) }
+ }
class { '::pacemaker':
hacluster_pwd => hiera('hacluster_pwd'),
} ->
class { '::pacemaker::corosync':
- cluster_members => $pacemaker_cluster_members,
- setup_cluster => $pacemaker_master,
+ cluster_members => $pacemaker_cluster_members,
+ setup_cluster => $pacemaker_master,
+ cluster_setup_extras => $cluster_setup_extras,
}
class { '::pacemaker::stonith':
disable => !$enable_fencing,
@@ -91,12 +116,21 @@ if hiera('step') >= 1 {
# avoid races where non-master nodes attempt to start without
# config (eg. binding on 0.0.0.0)
# The module ignores erlang_cookie if cluster_config is false
+ $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
+ if $rabbit_ipv6 {
+ $rabbit_env = merge(hiera('rabbitmq_environment'), {
+ 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
+ })
+ } else {
+ $rabbit_env = hiera('rabbitmq_environment')
+ }
+
class { '::rabbitmq':
service_manage => false,
tcp_keepalive => false,
config_kernel_variables => hiera('rabbitmq_kernel_variables'),
config_variables => hiera('rabbitmq_config_variables'),
- environment_variables => hiera('rabbitmq_environment'),
+ environment_variables => $rabbit_env,
} ->
file { '/var/lib/rabbitmq/.erlang.cookie':
ensure => file,
@@ -109,6 +143,7 @@ if hiera('step') >= 1 {
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
include ::mongodb::globals
+ include ::mongodb::client
class { '::mongodb::server' :
service_manage => false,
}
@@ -134,6 +169,11 @@ if hiera('step') >= 1 {
$galera_nodes = downcase(hiera('galera_node_names', $::hostname))
$galera_nodes_count = count(split($galera_nodes, ','))
+  # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
+  # set bind-address to a hostname instead of an ip address; to move MySQL
+  # off the internal_api network onto another one we'll have to customize both
+  # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
+ $mysql_bind_host = hiera('mysql_bind_host')
$mysqld_options = {
'mysqld' => {
'skip-name-resolve' => '1',
@@ -143,11 +183,13 @@ if hiera('step') >= 1 {
'innodb_locks_unsafe_for_binlog'=> '1',
'query_cache_size' => '0',
'query_cache_type' => '0',
- 'bind-address' => hiera('mysql_bind_host'),
+ 'bind-address' => $::hostname,
'max_connections' => hiera('mysql_max_connections'),
'open_files_limit' => '-1',
+ 'wsrep_on' => 'ON',
'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so',
'wsrep_cluster_name' => 'galera_cluster',
+ 'wsrep_cluster_address' => "gcomm://${galera_nodes}",
'wsrep_slave_threads' => '1',
'wsrep_certify_nonPK' => '1',
'wsrep_max_ws_rows' => '131072',
@@ -158,8 +200,8 @@ if hiera('step') >= 1 {
'wsrep_auto_increment_control' => '1',
'wsrep_drupal_282555_workaround'=> '0',
'wsrep_causal_reads' => '0',
- 'wsrep_notify_cmd' => '',
'wsrep_sst_method' => 'rsync',
+ 'wsrep_provider_options' => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
},
}
@@ -178,8 +220,19 @@ if hiera('step') >= 1 {
if hiera('step') >= 2 {
# NOTE(gfidente): the following vars are needed on all nodes so they
- # need to stay out of pacemaker_master conditional
- $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ # need to stay out of pacemaker_master conditional.
+  # The address mangling will hopefully go away once we are able to
+  # configure the connection string via hostnames; until then, we need to pass
+  # the list of IPv6 addresses *with* the port and without brackets as the
+  # 'members' argument for the 'mongodb_replset' resource.
+ if str2bool(hiera('mongodb::server::ipv6', false)) {
+ $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
+ $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ } else {
+ $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+ $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+ }
$mongodb_replset = hiera('mongodb::server::replset')
if $pacemaker_master {
@@ -188,6 +241,11 @@ if hiera('step') >= 2 {
include ::pacemaker::resource_defaults
+ # Create an openstack-core dummy resource. See RHBZ 1290121
+ pacemaker::resource::ocf { 'openstack-core':
+ ocf_agent_name => 'heartbeat:Dummy',
+ clone_params => true,
+ }
# FIXME: we should not have to access tripleo::loadbalancer class
# parameters here to configure pacemaker VIPs. The configuration
# of pacemaker VIPs could move into puppet-tripleo or we should
@@ -197,147 +255,46 @@ if hiera('step') >= 2 {
}
$control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
- pacemaker::resource::ip { 'control_vip':
+ tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_control_vip':
+ vip_name => 'control',
ip_address => $control_vip,
}
- pacemaker::constraint::base { 'control_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${control_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['control_vip']],
- }
- pacemaker::constraint::colocation { 'control_vip-with-haproxy':
- source => "ip-${control_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['control_vip']],
- }
$public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
- if $public_vip and $public_vip != $control_vip {
- pacemaker::resource::ip { 'public_vip':
- ip_address => $public_vip,
- }
- pacemaker::constraint::base { 'public_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${public_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['public_vip']],
- }
- pacemaker::constraint::colocation { 'public_vip-with-haproxy':
- source => "ip-${public_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['public_vip']],
- }
+ tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_public_vip':
+ ensure => $public_vip and $public_vip != $control_vip,
+ vip_name => 'public',
+ ip_address => $public_vip,
}
$redis_vip = hiera('redis_vip')
- if $redis_vip and $redis_vip != $control_vip {
- pacemaker::resource::ip { 'redis_vip':
- ip_address => $redis_vip,
- }
- pacemaker::constraint::base { 'redis_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${redis_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['redis_vip']],
- }
- pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
- source => "ip-${redis_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['redis_vip']],
- }
+ tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_redis_vip':
+ ensure => $redis_vip and $redis_vip != $control_vip,
+ vip_name => 'redis',
+ ip_address => $redis_vip,
}
+
$internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
- if $internal_api_vip and $internal_api_vip != $control_vip {
- pacemaker::resource::ip { 'internal_api_vip':
- ip_address => $internal_api_vip,
- }
- pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${internal_api_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['internal_api_vip']],
- }
- pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
- source => "ip-${internal_api_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['internal_api_vip']],
- }
+ tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_internal_api_vip':
+ ensure => $internal_api_vip and $internal_api_vip != $control_vip,
+ vip_name => 'internal_api',
+ ip_address => $internal_api_vip,
}
$storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
- if $storage_vip and $storage_vip != $control_vip {
- pacemaker::resource::ip { 'storage_vip':
- ip_address => $storage_vip,
- }
- pacemaker::constraint::base { 'storage_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${storage_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_vip']],
- }
- pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
- source => "ip-${storage_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_vip']],
- }
+ tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_vip':
+ ensure => $storage_vip and $storage_vip != $control_vip,
+ vip_name => 'storage',
+ ip_address => $storage_vip,
}
$storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
- if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
- pacemaker::resource::ip { 'storage_mgmt_vip':
- ip_address => $storage_mgmt_vip,
- }
- pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${storage_mgmt_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_mgmt_vip']],
- }
- pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
- source => "ip-${storage_mgmt_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_mgmt_vip']],
- }
+ tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_mgmt_vip':
+ ensure => $storage_mgmt_vip and $storage_mgmt_vip != $control_vip,
+ vip_name => 'storage_mgmt',
+ ip_address => $storage_mgmt_vip,
}
-
}
pacemaker::resource::service { $::memcached::params::service_name :
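
The per-VIP blocks removed above are folded into the new tripleo::pacemaker::haproxy_with_vip defined type, which keeps the VIP resource, its start ordering against haproxy and its colocation with haproxy together, and skips everything when ensure is false. A rough sketch of what the define wraps, reconstructed from the removed resources (not the actual body in puppet-tripleo):

    define tripleo::pacemaker::haproxy_with_vip (
      $vip_name,
      $ip_address,
      $ensure = true,
    ) {
      if $ensure {
        pacemaker::resource::ip { "${vip_name}_vip":
          ip_address => $ip_address,
        }
        pacemaker::constraint::base { "${vip_name}_vip-then-haproxy":
          constraint_type   => 'order',
          first_resource    => "ip-${ip_address}",
          second_resource   => 'haproxy-clone',
          first_action      => 'start',
          second_action     => 'start',
          constraint_params => 'kind=Optional',
          require           => [Pacemaker::Resource::Service['haproxy'],
                                Pacemaker::Resource::Ip["${vip_name}_vip"]],
        }
        pacemaker::constraint::colocation { "${vip_name}_vip-with-haproxy":
          source  => "ip-${ip_address}",
          target  => 'haproxy-clone',
          score   => 'INFINITY',
          require => [Pacemaker::Resource::Service['haproxy'],
                      Pacemaker::Resource::Ip["${vip_name}_vip"]],
        }
      }
    }
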
@@ -349,6 +306,7 @@ if hiera('step') >= 2 {
ocf_agent_name => 'heartbeat:rabbitmq-cluster',
resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
clone_params => 'ordered=true interleave=true',
+ meta_params => 'notify=true',
require => Class['::rabbitmq'],
}
@@ -366,7 +324,7 @@ if hiera('step') >= 2 {
before => Mongodb_replset[$mongodb_replset],
}
mongodb_replset { $mongodb_replset :
- members => $mongo_node_ips_with_port,
+ members => $mongo_node_ips_with_port_nobr,
}
}
@@ -430,6 +388,9 @@ MYSQL_HOST=localhost\n",
class { '::nova::db::mysql':
require => Exec['galera-ready'],
}
+ class { '::nova::db::mysql_api':
+ require => Exec['galera-ready'],
+ }
class { '::neutron::db::mysql':
require => Exec['galera-ready'],
}
@@ -446,6 +407,11 @@ MYSQL_HOST=localhost\n",
}
}
+ if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
+ class { '::gnocchi::db::mysql':
+ require => Exec['galera-ready'],
+ }
+ }
class { '::sahara::db::mysql':
require => Exec['galera-ready'],
}
@@ -458,8 +424,15 @@ MYSQL_HOST=localhost\n",
$enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
if $enable_ceph {
+ $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
class { '::ceph::profile::params':
- mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
+ mon_initial_members => $mon_initial_members,
+ mon_host => $mon_host,
}
include ::ceph::conf
include ::ceph::profile::mon
@@ -485,6 +458,14 @@ MYSQL_HOST=localhost\n",
}
if str2bool(hiera('enable_external_ceph', false)) {
+ if str2bool(hiera('ceph_ipv6', false)) {
+ $mon_host = hiera('ceph_mon_host_v6')
+ } else {
+ $mon_host = hiera('ceph_mon_host')
+ }
+ class { '::ceph::profile::params':
+ mon_host => $mon_host,
+ }
include ::ceph::conf
include ::ceph::profile::client
}
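
The ceph_ipv6 switch, repeated in the ceph-storage, compute and both controller manifests, simply picks between two precomputed hiera values that end up as the mon_host setting in ceph.conf via ceph::profile::params. Illustratively (hypothetical values, exact format approximate):

    ceph_mon_host: '172.16.1.10,172.16.1.11,172.16.1.12'
    ceph_mon_host_v6: '[fd00:fd00:fd00:3000::10],[fd00:fd00:fd00:3000::11],[fd00:fd00:fd00:3000::12]'
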
@@ -492,44 +473,7 @@ MYSQL_HOST=localhost\n",
} #END STEP 2
-if hiera('step') >= 3 {
-
- class { '::keystone':
- sync_db => $sync_db,
- manage_service => false,
- enabled => false,
- }
- include ::keystone::config
-
- #TODO: need a cleanup-keystone-tokens.sh solution here
-
- file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
- ensure => 'directory',
- owner => 'keystone',
- group => 'keystone',
- require => Package['keystone'],
- }
- file { '/etc/keystone/ssl/certs/signing_cert.pem':
- content => hiera('keystone_signing_certificate'),
- owner => 'keystone',
- group => 'keystone',
- notify => Service['keystone'],
- require => File['/etc/keystone/ssl/certs'],
- }
- file { '/etc/keystone/ssl/private/signing_key.pem':
- content => hiera('keystone_signing_key'),
- owner => 'keystone',
- group => 'keystone',
- notify => Service['keystone'],
- require => File['/etc/keystone/ssl/private'],
- }
- file { '/etc/keystone/ssl/certs/ca.pem':
- content => hiera('keystone_ca_certificate'),
- owner => 'keystone',
- group => 'keystone',
- notify => Service['keystone'],
- require => File['/etc/keystone/ssl/certs'],
- }
+if hiera('step') >= 4 {
$glance_backend = downcase(hiera('glance_backend', 'swift'))
case $glance_backend {
@@ -541,17 +485,6 @@ if hiera('step') >= 3 {
$http_store = ['glance.store.http.Store']
$glance_store = concat($http_store, $backend_store)
- if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) {
- $secontext = 'context="system_u:object_r:glance_var_lib_t:s0"'
- pacemaker::resource::filesystem { 'glance-fs':
- device => hiera('glance_file_pcmk_device'),
- directory => hiera('glance_file_pcmk_directory'),
- fstype => hiera('glance_file_pcmk_fstype'),
- fsoptions => join([$secontext, hiera('glance_file_pcmk_options', '')],','),
- clone_params => '',
- }
- }
-
# TODO: notifications, scrubber, etc.
include ::glance
include ::glance::config
@@ -565,16 +498,25 @@ if hiera('step') >= 3 {
manage_service => false,
enabled => false,
}
+ include ::glance::notify::rabbitmq
include join(['::glance::backend::', $glance_backend])
+ $nova_ipv6 = hiera('nova::use_ipv6', false)
+ if $nova_ipv6 {
+ $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
+ } else {
+ $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
+ }
+
class { '::nova' :
- memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'),
+ memcached_servers => $memcached_servers
}
include ::nova::config
class { '::nova::api' :
sync_db => $sync_db,
+ sync_db_api => $sync_db,
manage_service => false,
enabled => false,
}
@@ -611,8 +553,9 @@ if hiera('step') >= 3 {
if hiera('enable_zookeeper_on_controller') {
class {'::tripleo::cluster::zookeeper':
zookeeper_server_ips => $zookeeper_node_ips,
- zookeeper_client_ip => $ipaddress,
- zookeeper_hostnames => hiera('controller_node_names')
+ # TODO: create a 'bind' hiera key for zookeeper
+ zookeeper_client_ip => hiera('neutron::bind_host'),
+ zookeeper_hostnames => split(hiera('controller_node_names'), ',')
}
}
@@ -620,7 +563,8 @@ if hiera('step') >= 3 {
if hiera('enable_cassandra_on_controller') {
class {'::tripleo::cluster::cassandra':
cassandra_servers => $cassandra_node_ips,
- cassandra_ip => $ipaddress
+ # TODO: create a 'bind' hiera key for cassandra
+ cassandra_ip => hiera('neutron::bind_host'),
}
}
@@ -630,11 +574,12 @@ if hiera('step') >= 3 {
}
class {'::tripleo::network::midonet::api':
- zookeeper_servers => hiera('neutron_api_node_ips'),
- vip => $public_vip,
- keystone_ip => $public_vip,
+ zookeeper_servers => $zookeeper_node_ips,
+ vip => hiera('tripleo::loadbalancer::public_virtual_ip'),
+ keystone_ip => hiera('tripleo::loadbalancer::public_virtual_ip'),
keystone_admin_token => hiera('keystone::admin_token'),
- bind_address => $ipaddress,
+ # TODO: create a 'bind' hiera key for api
+ bind_address => hiera('neutron::bind_host'),
admin_password => hiera('admin_password')
}
@@ -659,13 +604,24 @@ if hiera('step') >= 3 {
if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
include ::neutron::plugins::nuage
}
+ if hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
+ include ::neutron::plugins::opencontrail
+ }
if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
class {'::neutron::plugins::midonet':
- midonet_api_ip => $public_vip,
+ midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'),
keystone_tenant => hiera('neutron::server::auth_tenant'),
keystone_password => hiera('neutron::server::auth_password')
}
}
+ if hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
+ class { '::neutron::plugins::plumgrid' :
+ connection => hiera('neutron::server::database_connection'),
+ controller_priv_host => hiera('keystone_admin_api_vip'),
+ admin_password => hiera('admin_password'),
+ metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'),
+ }
+ }
if hiera('neutron::enable_dhcp_agent',true) {
class { '::neutron::agents::dhcp' :
manage_service => false,
@@ -718,8 +674,9 @@ if hiera('step') >= 3 {
}
}
- if hiera('neutron_enable_bigswitch_ml2', false) {
+ if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
include ::neutron::plugins::ml2::bigswitch::restproxy
+ include ::neutron::agents::bigswitch
}
neutron_l3_agent_config {
'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
@@ -727,9 +684,13 @@ if hiera('step') >= 3 {
neutron_dhcp_agent_config {
'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
}
+ neutron_config {
+ 'DEFAULT/notification_driver': value => 'messaging';
+ }
include ::cinder
include ::cinder::config
+ include ::tripleo::ssl::cinder_config
class { '::cinder::api':
sync_db => $sync_db,
manage_service => false,
@@ -744,6 +705,7 @@ if hiera('step') >= 3 {
enabled => false,
}
include ::cinder::glance
+ include ::cinder::ceilometer
class { '::cinder::setup_test_volume':
size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
}
@@ -777,6 +739,7 @@ if hiera('step') >= 3 {
$cinder_rbd_backend = 'tripleo_ceph'
cinder::backend::rbd { $cinder_rbd_backend :
+ backend_host => hiera('cinder::host'),
rbd_pool => hiera('cinder_rbd_pool_name'),
rbd_user => hiera('ceph_client_user_name'),
rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
@@ -787,10 +750,6 @@ if hiera('step') >= 3 {
if hiera('cinder_enable_eqlx_backend', false) {
$cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
- cinder_config {
- "${cinder_eqlx_backend}/host": value => 'hostgroup';
- }
-
cinder::backend::eqlx { $cinder_eqlx_backend :
volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
san_ip => hiera('cinder::backend::eqlx::san_ip', undef),
@@ -798,7 +757,7 @@ if hiera('step') >= 3 {
san_password => hiera('cinder::backend::eqlx::san_password', undef),
san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef),
eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
- eqlx_pool => hiera('cinder::backend::eqlx::eqlx_lpool', undef),
+ eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef),
eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_san_password', undef),
@@ -808,10 +767,6 @@ if hiera('step') >= 3 {
if hiera('cinder_enable_dellsc_backend', false) {
$cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
- cinder_config {
- "${cinder_dellsc_backend}/host": value => 'hostgroup';
- }
-
cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
@@ -820,7 +775,7 @@ if hiera('step') >= 3 {
dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
- dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef),
+ dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef),
dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
}
@@ -829,10 +784,6 @@ if hiera('step') >= 3 {
if hiera('cinder_enable_netapp_backend', false) {
$cinder_netapp_backend = hiera('cinder::backend::netapp::title')
- cinder_config {
- "${cinder_netapp_backend}/host": value => 'hostgroup';
- }
-
if hiera('cinder::backend::netapp::nfs_shares', undef) {
$cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
}
@@ -881,7 +832,7 @@ if hiera('step') >= 3 {
$cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
class { '::cinder::backends' :
- enabled_backends => $cinder_enabled_backends,
+ enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
}
class { '::sahara':
@@ -976,13 +927,15 @@ if hiera('step') >= 3 {
sync_db => $sync_db,
}
include ::ceilometer::agent::auth
+ include ::ceilometer::dispatcher::gnocchi
Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
# Heat
include ::heat::config
class { '::heat' :
- sync_db => $sync_db,
+ sync_db => $sync_db,
+ notification_driver => 'messaging',
}
class { '::heat::api' :
manage_service => false,
@@ -1014,11 +967,79 @@ if hiera('step') >= 3 {
$_profile_support = 'None'
}
$neutron_options = {'profile_support' => $_profile_support }
+
+ $memcached_ipv6 = hiera('memcached_ipv6', false)
+ if $memcached_ipv6 {
+ $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
+ } else {
+ $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
+ }
+
class { '::horizon':
- cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
+ cache_server_ip => $horizon_memcached_servers,
neutron_options => $neutron_options,
}
+ # Aodh
+ class { '::aodh' :
+ database_connection => $ceilometer_database_connection,
+ }
+ include ::aodh::config
+ include ::aodh::auth
+ include ::aodh::client
+ include ::aodh::wsgi::apache
+ class { '::aodh::api':
+ manage_service => false,
+ enabled => false,
+ service_name => 'httpd',
+ }
+ class { '::aodh::evaluator':
+ manage_service => false,
+ enabled => false,
+ }
+ class { '::aodh::notifier':
+ manage_service => false,
+ enabled => false,
+ }
+ class { '::aodh::listener':
+ manage_service => false,
+ enabled => false,
+ }
+
+ # Gnocchi
+ $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
+ include ::gnocchi::client
+ if $sync_db {
+ include ::gnocchi::db::sync
+ }
+ include ::gnocchi::storage
+ $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift'))
+ case $gnocchi_backend {
+ 'swift': { include ::gnocchi::storage::swift }
+ 'file': { include ::gnocchi::storage::file }
+ 'rbd': { include ::gnocchi::storage::ceph }
+ default: { fail('Unrecognized gnocchi_backend parameter.') }
+ }
+ class { '::gnocchi':
+ database_connection => $gnocchi_database_connection,
+ }
+ class { '::gnocchi::api' :
+ manage_service => false,
+ enabled => false,
+ service_name => 'httpd',
+ }
+ class { '::gnocchi::wsgi::apache' :
+ ssl => false,
+ }
+ class { '::gnocchi::metricd' :
+ manage_service => false,
+ enabled => false,
+ }
+ class { '::gnocchi::statsd' :
+ manage_service => false,
+ enabled => false,
+ }
+
$snmpd_user = hiera('snmpd_readonly_user_name')
snmp::snmpv3_user { $snmpd_user:
authtype => 'MD5',
@@ -1026,81 +1047,62 @@ if hiera('step') >= 3 {
}
class { '::snmp':
agentaddress => ['udp:161','udp6:[::1]:161'],
- snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
+ snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
hiera_include('controller_classes')
-} #END STEP 3
+} #END STEP 4
-if hiera('step') >= 4 {
- $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
+if hiera('step') >= 5 {
$nova_enable_db_purge = hiera('nova_enable_db_purge', true)
$cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
+ $heat_enable_db_purge = hiera('heat_enable_db_purge', true)
- if $keystone_enable_db_purge {
- include ::keystone::cron::token_flush
- }
if $nova_enable_db_purge {
include ::nova::cron::archive_deleted_rows
}
if $cinder_enable_db_purge {
include ::cinder::cron::db_purge
}
+ if $heat_enable_db_purge {
+ include ::heat::cron::purge_deleted
+ }
if $pacemaker_master {
- # Keystone
- pacemaker::resource::service { $::keystone::params::service_name :
- clone_params => 'interleave=true',
- verify_on_create => true,
- require => [File['/etc/keystone/ssl/certs/ca.pem'],
- File['/etc/keystone/ssl/private/signing_key.pem'],
- File['/etc/keystone/ssl/certs/signing_cert.pem']],
- }
- if $enable_load_balancer {
- pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
- constraint_type => 'order',
- first_resource => 'haproxy-clone',
- second_resource => "${::keystone::params::service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
- }
- }
- pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint':
+ pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
constraint_type => 'order',
- first_resource => 'rabbitmq-clone',
- second_resource => "${::keystone::params::service_name}-clone",
+ first_resource => 'openstack-core-clone',
+ second_resource => "${::apache::params::service_name}-clone",
first_action => 'start',
second_action => 'start',
- require => [Pacemaker::Resource::Ocf['rabbitmq'],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ require => [Pacemaker::Resource::Service[$::apache::params::service_name],
+ Pacemaker::Resource::Ocf['openstack-core']],
}
- pacemaker::constraint::base { 'memcached-then-keystone-constraint':
+ pacemaker::constraint::base { 'memcached-then-openstack-core-constraint':
constraint_type => 'order',
first_resource => 'memcached-clone',
- second_resource => "${::keystone::params::service_name}-clone",
+ second_resource => 'openstack-core-clone',
first_action => 'start',
second_action => 'start',
require => [Pacemaker::Resource::Service['memcached'],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ Pacemaker::Resource::Ocf['openstack-core']],
}
- pacemaker::constraint::base { 'galera-then-keystone-constraint':
+ pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
constraint_type => 'order',
first_resource => 'galera-master',
- second_resource => "${::keystone::params::service_name}-clone",
+ second_resource => 'openstack-core-clone',
first_action => 'promote',
second_action => 'start',
require => [Pacemaker::Resource::Ocf['galera'],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ Pacemaker::Resource::Ocf['openstack-core']],
}
# Cinder
pacemaker::resource::service { $::cinder::params::api_service :
clone_params => 'interleave=true',
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ require => Pacemaker::Resource::Ocf['openstack-core'],
}
pacemaker::resource::service { $::cinder::params::scheduler_service :
clone_params => 'interleave=true',
@@ -1109,12 +1111,12 @@ if hiera('step') >= 4 {
pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
constraint_type => 'order',
- first_resource => "${::keystone::params::service_name}-clone",
+ first_resource => 'openstack-core-clone',
second_resource => "${::cinder::params::api_service}-clone",
first_action => 'start',
second_action => 'start',
- require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ require => [Pacemaker::Resource::Ocf['openstack-core'],
+ Pacemaker::Resource::Service[$::cinder::params::api_service]],
}
pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
constraint_type => 'order',
@@ -1152,25 +1154,46 @@ if hiera('step') >= 4 {
# Sahara
pacemaker::resource::service { $::sahara::params::api_service_name :
clone_params => 'interleave=true',
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ require => Pacemaker::Resource::Ocf['openstack-core'],
}
pacemaker::resource::service { $::sahara::params::engine_service_name :
clone_params => 'interleave=true',
}
pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
constraint_type => 'order',
- first_resource => "${::keystone::params::service_name}-clone",
+ first_resource => 'openstack-core-clone',
second_resource => "${::sahara::params::api_service_name}-clone",
first_action => 'start',
second_action => 'start',
require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ Pacemaker::Resource::Ocf['openstack-core']],
+ }
+ pacemaker::constraint::base { 'sahara-api-then-sahara-engine-constraint':
+ constraint_type => 'order',
+ first_resource => "${::sahara::params::api_service_name}-clone",
+ second_resource => "${::sahara::params::engine_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
+ Pacemaker::Resource::Service[$::sahara::params::engine_service_name]],
}
# Glance
+ if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) {
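+ # Mount the shared Glance file store with the glance_var_lib_t SELinux context (added to the mount options) so the Glance services can write to it.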
+ $secontext = 'context="system_u:object_r:glance_var_lib_t:s0"'
+ pacemaker::resource::filesystem { 'glance-fs':
+ device => hiera('glance_file_pcmk_device'),
+ directory => hiera('glance_file_pcmk_directory'),
+ fstype => hiera('glance_file_pcmk_fstype'),
+ fsoptions => join([$secontext, hiera('glance_file_pcmk_options', '')],','),
+ verify_on_create => true,
+ clone_params => '',
+ }
+ }
+
pacemaker::resource::service { $::glance::params::registry_service_name :
clone_params => 'interleave=true',
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ require => Pacemaker::Resource::Ocf['openstack-core'],
}
pacemaker::resource::service { $::glance::params::api_service_name :
clone_params => 'interleave=true',
@@ -1178,12 +1201,12 @@ if hiera('step') >= 4 {
pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
constraint_type => 'order',
- first_resource => "${::keystone::params::service_name}-clone",
+ first_resource => 'openstack-core-clone',
second_resource => "${::glance::params::registry_service_name}-clone",
first_action => 'start',
second_action => 'start',
require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ Pacemaker::Resource::Ocf['openstack-core']],
}
pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
constraint_type => 'order',
@@ -1202,7 +1225,7 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::glance::params::api_service_name]],
}
- if hiera('step') == 4 {
+ if hiera('step') == 5 {
# Neutron
# NOTE(gfidente): Neutron will try to populate the database with some data
# as soon as neutron-server is started; to avoid races we want to make this
@@ -1221,12 +1244,12 @@ if hiera('step') >= 4 {
} ->
pacemaker::resource::service { $::neutron::params::server_service:
clone_params => 'interleave=true',
- require => Pacemaker::Resource::Service[$::keystone::params::service_name]
+ require => Pacemaker::Resource::Ocf['openstack-core']
}
} else {
pacemaker::resource::service { $::neutron::params::server_service:
clone_params => 'interleave=true',
- require => Pacemaker::Resource::Service[$::keystone::params::service_name]
+ require => Pacemaker::Resource::Ocf['openstack-core']
}
}
if hiera('neutron::enable_l3_agent', true) {
@@ -1298,38 +1321,37 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
}
}
-
pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
constraint_type => 'order',
- first_resource => "${::keystone::params::service_name}-clone",
+ first_resource => 'openstack-core-clone',
second_resource => "${::neutron::params::server_service}-clone",
first_action => 'start',
second_action => 'start',
- require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
+ require => [Pacemaker::Resource::Ocf['openstack-core'],
Pacemaker::Resource::Service[$::neutron::params::server_service]],
}
if hiera('neutron::enable_ovs_agent',true) {
- pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
+ pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
constraint_type => 'order',
- first_resource => "${::neutron::params::server_service}-clone",
- second_resource => "${::neutron::params::ovs_agent_service}-clone",
+ first_resource => "${::neutron::params::ovs_agent_service}-clone",
+ second_resource => "${::neutron::params::dhcp_agent_service}-clone",
first_action => 'start',
second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
+ require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
}
}
if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) {
- pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
+ pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
constraint_type => 'order',
- first_resource => "${::neutron::params::ovs_agent_service}-clone",
- second_resource => "${::neutron::params::dhcp_agent_service}-clone",
+ first_resource => "${::neutron::params::server_service}-clone",
+ second_resource => "${::neutron::params::ovs_agent_service}-clone",
first_action => 'start',
second_action => 'start',
- require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
+ require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
+ Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
+ }
- }
pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
source => "${::neutron::params::dhcp_agent_service}-clone",
target => "${::neutron::params::ovs_agent_service}-clone",
@@ -1338,7 +1360,7 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
}
}
- if hiera('neutron::enable_dhcp_agent',true) and hiera('l3_agent_service',true) {
+ if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_l3_agent',true) {
pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
constraint_type => 'order',
first_resource => "${::neutron::params::dhcp_agent_service}-clone",
@@ -1415,34 +1437,29 @@ if hiera('step') >= 4 {
# Nova
pacemaker::resource::service { $::nova::params::api_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::resource::service { $::nova::params::conductor_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::resource::service { $::nova::params::consoleauth_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ require => Pacemaker::Resource::Ocf['openstack-core'],
}
pacemaker::resource::service { $::nova::params::vncproxy_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::resource::service { $::nova::params::scheduler_service_name :
clone_params => 'interleave=true',
- op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
constraint_type => 'order',
- first_resource => "${::keystone::params::service_name}-clone",
+ first_resource => 'openstack-core-clone',
second_resource => "${::nova::params::consoleauth_service_name}-clone",
first_action => 'start',
second_action => 'start',
require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ Pacemaker::Resource::Ocf['openstack-core']],
}
pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
constraint_type => 'order',
@@ -1509,19 +1526,19 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
}
- # Ceilometer
+ # Ceilometer and Aodh
case downcase(hiera('ceilometer_backend')) {
/mysql/: {
- pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
+ pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
clone_params => 'interleave=true',
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ require => Pacemaker::Resource::Ocf['openstack-core'],
}
}
default: {
- pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
+ pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
clone_params => 'interleave=true',
- require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
- Pacemaker::Resource::Service[$::mongodb::params::service_name]],
+ require => [Pacemaker::Resource::Ocf['openstack-core'],
+ Pacemaker::Resource::Service[$::mongodb::params::service_name]],
}
}
}
@@ -1542,8 +1559,10 @@ if hiera('step') >= 4 {
# Fedora doesn't know `require-all` parameter for constraints yet
if $::operatingsystem == 'Fedora' {
$redis_ceilometer_constraint_params = undef
+ $redis_aodh_constraint_params = undef
} else {
$redis_ceilometer_constraint_params = 'require-all=false'
+ $redis_aodh_constraint_params = 'require-all=false'
}
pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
constraint_type => 'order',
@@ -1555,14 +1574,33 @@ if hiera('step') >= 4 {
require => [Pacemaker::Resource::Ocf['redis'],
Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
}
+ pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
+ constraint_type => 'order',
+ first_resource => 'redis-master',
+ second_resource => "${::aodh::params::evaluator_service_name}-clone",
+ first_action => 'promote',
+ second_action => 'start',
+ constraint_params => $redis_aodh_constraint_params,
+ require => [Pacemaker::Resource::Ocf['redis'],
+ Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
+ }
pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
constraint_type => 'order',
- first_resource => "${::keystone::params::service_name}-clone",
+ first_resource => 'openstack-core-clone',
second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
first_action => 'start',
second_action => 'start',
require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ Pacemaker::Resource::Ocf['openstack-core']],
+ }
+ pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint':
+ constraint_type => 'order',
+ first_resource => 'openstack-core-clone',
+ second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
+ Pacemaker::Resource::Ocf['openstack-core']],
}
pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
constraint_type => 'order',
@@ -1605,6 +1643,64 @@ if hiera('step') >= 4 {
require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
Pacemaker::Resource::Ocf['delay']],
}
+ # Aodh
+ pacemaker::resource::service { $::aodh::params::evaluator_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::resource::service { $::aodh::params::notifier_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::resource::service { $::aodh::params::listener_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::constraint::base { 'aodh-delay-then-aodh-evaluator-constraint':
+ constraint_type => 'order',
+ first_resource => 'delay-clone',
+ second_resource => "${::aodh::params::evaluator_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Ocf['delay']],
+ }
+ pacemaker::constraint::colocation { 'aodh-evaluator-with-aodh-delay-colocation':
+ source => "${::aodh::params::evaluator_service_name}-clone",
+ target => 'delay-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Ocf['delay']],
+ }
+ pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
+ constraint_type => 'order',
+ first_resource => "${::aodh::params::evaluator_service_name}-clone",
+ second_resource => "${::aodh::params::notifier_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
+ }
+ pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation':
+ source => "${::aodh::params::notifier_service_name}-clone",
+ target => "${::aodh::params::evaluator_service_name}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
+ }
+ pacemaker::constraint::base { 'aodh-evaluator-then-aodh-listener-constraint':
+ constraint_type => 'order',
+ first_resource => "${::aodh::params::evaluator_service_name}-clone",
+ second_resource => "${::aodh::params::listener_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
+ }
+ pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation':
+ source => "${::aodh::params::listener_service_name}-clone",
+ target => "${::aodh::params::evaluator_service_name}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
+ }
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
constraint_type => 'order',
@@ -1617,6 +1713,30 @@ if hiera('step') >= 4 {
}
}
+ # gnocchi
+ pacemaker::resource::service { $::gnocchi::params::metricd_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::resource::service { $::gnocchi::params::statsd_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::constraint::base { 'gnocchi-metricd-then-gnocchi-statsd-constraint':
+ constraint_type => 'order',
+ first_resource => "${::gnocchi::params::metricd_service_name}-clone",
+ second_resource => "${::gnocchi::params::statsd_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
+ Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
+ }
+ pacemaker::constraint::colocation { 'gnocchi-statsd-with-metricd-colocation':
+ source => "${::gnocchi::params::statsd_service_name}-clone",
+ target => "${::gnocchi::params::metricd_service_name}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
+ Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
+ }
+
# Heat
pacemaker::resource::service { $::heat::params::api_service_name :
clone_params => 'interleave=true',
@@ -1630,15 +1750,6 @@ if hiera('step') >= 4 {
pacemaker::resource::service { $::heat::params::engine_service_name :
clone_params => 'interleave=true',
}
- pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
- constraint_type => 'order',
- first_resource => "${::keystone::params::service_name}-clone",
- second_resource => "${::heat::params::api_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
- }
pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
constraint_type => 'order',
first_resource => "${::heat::params::api_service_name}-clone",
@@ -1697,9 +1808,13 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
}
- # Horizon
- pacemaker::resource::service { $::horizon::params::http_service:
- clone_params => 'interleave=true',
+ # Horizon and Keystone
+ pacemaker::resource::service { $::apache::params::service_name:
+ clone_params => 'interleave=true',
+ verify_on_create => true,
+ require => [File['/etc/keystone/ssl/certs/ca.pem'],
+ File['/etc/keystone/ssl/private/signing_key.pem'],
+ File['/etc/keystone/ssl/certs/signing_cert.pem']],
}
#VSM
@@ -1729,21 +1844,6 @@ if hiera('step') >= 4 {
}
-} #END STEP 4
-
-if hiera('step') >= 5 {
-
- if $pacemaker_master {
-
- class {'::keystone::roles::admin' :
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
- } ->
- class {'::keystone::endpoint' :
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
- }
-
- }
-
} #END STEP 5
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
index 63ac396e..ae074589 100644
--- a/puppet/manifests/overcloud_object.pp
+++ b/puppet/manifests/overcloud_object.pp
@@ -16,7 +16,9 @@
include ::tripleo::packages
include ::tripleo::firewall
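+# Load any kernel modules listed in hiera and ensure they are loaded before sysctl settings that may depend on them.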
+create_resources(kmod::load, hiera('kernel_modules'), {})
create_resources(sysctl::value, hiera('sysctl_settings'), {})
+Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
if count(hiera('ntp::servers')) > 0 {
include ::ntp
@@ -48,7 +50,7 @@ snmp::snmpv3_user { $snmpd_user:
}
class { '::snmp':
agentaddress => ['udp:161','udp6:[::1]:161'],
- snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
+ snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
hiera_include('object_classes')
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index 5a69725a..134dc43b 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -16,7 +16,9 @@
include ::tripleo::packages
include ::tripleo::firewall
+create_resources(kmod::load, hiera('kernel_modules'), {})
create_resources(sysctl::value, hiera('sysctl_settings'), {})
+Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
if count(hiera('ntp::servers')) > 0 {
include ::ntp
@@ -42,7 +44,7 @@ if $cinder_enable_iscsi {
$cinder_enabled_backends = any2array($cinder_iscsi_backend)
class { '::cinder::backends' :
- enabled_backends => $cinder_enabled_backends,
+ enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
}
$snmpd_user = hiera('snmpd_readonly_user_name')
@@ -52,7 +54,7 @@ snmp::snmpv3_user { $snmpd_user:
}
class { '::snmp':
agentaddress => ['udp:161','udp6:[::1]:161'],
- snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
+ snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
hiera_include('volume_classes')
diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp
index 2d880d33..a623da29 100644
--- a/puppet/manifests/ringbuilder.pp
+++ b/puppet/manifests/ringbuilder.pp
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-include ::tripleo::packages
-
define add_devices(
$swift_zones = '1'
){
@@ -91,6 +89,6 @@ class tripleo::ringbuilder (
}
}
-include ::tripleo::ringbuilder
-
-package_manifest{'/var/lib/tripleo/installed-packages/ringbuilder': ensure => present}
+if hiera('step') >= 3 {
+ include ::tripleo::ringbuilder
+}
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
new file mode 100644
index 00000000..38d2ac64
--- /dev/null
+++ b/puppet/services/README.rst
@@ -0,0 +1,50 @@
+========
+services
+========
+
+Each template in this directory is a TripleO nested stack Heat template that
+encapsulates the generic configuration data needed to configure a specific
+service. This generally includes everything needed to configure the service
+except the local bind ports, which are still managed directly in the
+per-node role templates (controller.yaml, compute.yaml, etc.). All other
+(global) service settings go into the puppet/services templates.
+
+Input Parameters
+----------------
+
+Each service may define its own input parameters and defaults.
+Operators will use the parameter_defaults section of any Heat
+environment to set per-service parameters.
+
+Config Settings
+---------------
+
+Each service may define a config_settings output variable which returns
+Hiera settings to be configured.
+
+Steps
+-----
+
+Each service may define the following output variables. The step_config
+manifest is applied at each of the deployment steps listed below; earlier
+manifests are re-asserted when applying later ones.
+
+ * config_settings: Custom hiera settings for this service.
+
+ * step_config: A puppet manifest that is used to step through the deployment
+ sequence. The current step is exposed via hiera('step') so that puppet
+ classes can decide when to activate themselves (see the example below).
+
+ Steps correlate to the following:
+
+ 1) Load Balancer configuration
+
+ 2) Core Services (Database/Rabbit/NTP/etc.)
+
+ 3) Early OpenStack Service setup (Ringbuilder, etc.)
+
+ 4) General OpenStack Services
+
+ 5) Service activation (Pacemaker)
+
+ 6) Fencing (Pacemaker)
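+
+Example
+-------
+
+The following sketch (the service name, puppet class, and parameter are
+hypothetical) shows the general shape of a service template, modelled on the
+keystone service added in this change::
+
+  heat_template_version: 2016-04-08
+
+  description: Example service configured with Puppet
+
+  parameters:
+    ExampleParam:
+      default: ''
+      description: An example service-specific parameter.
+      type: string
+
+  outputs:
+    role_data:
+      description: Role data for the example service.
+      value:
+        config_settings:
+          example::some_setting: {get_param: ExampleParam}
+        step_config: |
+          include ::tripleo::profile::base::example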
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
new file mode 100644
index 00000000..1654f0e7
--- /dev/null
+++ b/puppet/services/keystone.yaml
@@ -0,0 +1,135 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Keystone service configured with Puppet
+
+parameters:
+ KeystoneCACertificate:
+ default: ''
+ description: Keystone self-signed certificate authority certificate.
+ type: string
+ KeystoneEnableDBPurge:
+ default: true
+ description: |
+ Whether to create a cron job for purging soft-deleted rows in the Keystone database.
+ type: boolean
+ KeystoneSigningCertificate:
+ default: ''
+ description: Keystone certificate for verifying token validity.
+ type: string
+ KeystoneSigningKey:
+ default: ''
+ description: Keystone key for signing tokens.
+ type: string
+ hidden: true
+ KeystoneSSLCertificate:
+ default: ''
+ description: Keystone certificate for verifying token validity.
+ type: string
+ KeystoneSSLCertificateKey:
+ default: ''
+ description: Keystone key for signing tokens.
+ type: string
+ hidden: true
+ KeystoneNotificationDriver:
+ description: Comma-separated list of Oslo notification drivers used by Keystone
+ default: ['messaging']
+ type: comma_delimited_list
+ KeystoneNotificationFormat:
+ description: The Keystone notification format
+ default: 'basic'
+ type: string
+ constraints:
+ - allowed_values: [ 'basic', 'cadf' ]
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ KeystoneWorkers:
+ default: 0
+ description: Number of workers for Keystone service.
+ type: number
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+ Debug:
+ type: string
+ default: ''
+ AdminEmail:
+ default: 'admin@example.com'
+ description: The email for the keystone admin account.
+ type: string
+ hidden: true
+ AdminPassword:
+ description: The password for the keystone admin account, used for monitoring, querying neutron etc.
+ type: string
+ hidden: true
+ AdminToken:
+ description: The keystone auth secret and db password.
+ type: string
+ hidden: true
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+
+outputs:
+ role_data:
+ description: Role data for the Keystone role.
+ value:
+ config_settings:
+ keystone_dsn: &keystone_dsn
+ list_join:
+ - ''
+ - - 'mysql+pymysql://keystone:'
+ - {get_param: AdminToken}
+ - '@'
+ - {get_param: MysqlVirtualIPUri}
+ - '/keystone'
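+ # Resolves to mysql+pymysql://keystone:<AdminToken>@<MysqlVirtualIPUri>/keystone (placeholders shown in angle brackets).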
+ keystone::database_connection: *keystone_dsn
+ keystone::admin_token: {get_param: AdminToken}
+ keystone::roles::admin::password: {get_param: AdminPassword}
+ keystone_ca_certificate: {get_param: KeystoneCACertificate}
+ keystone_signing_key: {get_param: KeystoneSigningKey}
+ keystone_signing_certificate: {get_param: KeystoneSigningCertificate}
+ keystone_ssl_certificate: {get_param: KeystoneSSLCertificate}
+ keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
+ keystone::debug: {get_param: Debug}
+ keystone::db::mysql::password: {get_param: AdminToken}
+ keystone::rabbit_userid: {get_param: RabbitUserName}
+ keystone::rabbit_password: {get_param: RabbitPassword}
+ keystone::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ keystone::rabbit_port: {get_param: RabbitClientPort}
+ keystone::notification_driver: {get_param: KeystoneNotificationDriver}
+ keystone::notification_format: {get_param: KeystoneNotificationFormat}
+ keystone::roles::admin::email: {get_param: AdminEmail}
+ keystone::roles::admin::password: {get_param: AdminPassword}
+ keystone::endpoint::public_url: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]}
+ keystone::endpoint::internal_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ keystone::endpoint::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ keystone::endpoint::region: {get_param: KeystoneRegion}
+ keystone::admin_workers: {get_param: KeystoneWorkers}
+ keystone::public_workers: {get_param: KeystoneWorkers}
+ keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
+ keystone::public_endpoint: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]}
+ step_config: |
+ include ::tripleo::profile::base::keystone
diff --git a/puppet/services/pacemaker/keystone.yaml b/puppet/services/pacemaker/keystone.yaml
new file mode 100644
index 00000000..8fcab15f
--- /dev/null
+++ b/puppet/services/pacemaker/keystone.yaml
@@ -0,0 +1,34 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Keystone service with Pacemaker configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ type: string
+ default: ''
+
+resources:
+
+ KeystoneServiceBase:
+ type: ../keystone.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+ role_data:
+ description: Role data for the Keystone pacemaker role.
+ value:
+ config_settings:
+ map_merge:
+ - get_attr: [KeystoneServiceBase, role_data, config_settings]
+ #-
+ # custom keystone hiera goes here if we need it!?
+ step_config: |
+ include ::tripleo::profile::pacemaker::keystone
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
new file mode 100644
index 00000000..f9681634
--- /dev/null
+++ b/puppet/services/services.yaml
@@ -0,0 +1,40 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Utility stack to convert an array of services into a set of combined
+ role configs.
+
+parameters:
+ Services:
+ default: []
+ description: |
+ List of nested stack service templates.
+ type: comma_delimited_list
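+ # Illustrative example: the keystone templates added in this change could be
+ # listed here (e.g. Services: [keystone.yaml, pacemaker/keystone.yaml]); the
+ # role_data outputs of every listed template are combined in the outputs below.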
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MysqlVirtualIPUri:
+ default: ''
+ type: string
+ description: The virtual IP of the MySQL service, in URI format.
+
+resources:
+
+ ServiceChain:
+ type: OS::Heat::ResourceChain
+ properties:
+ resources: {get_param: Services}
+ concurrent: true
+ resource_properties:
+ EndpointMap: {get_param: EndpointMap}
+ MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+ config_settings:
+ description: Configuration settings.
+ value: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
+ step_config:
+ description: Step configuration.
+ value: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]}
diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml
index a55b3959..eb06b241 100644
--- a/puppet/swift-storage-post.yaml
+++ b/puppet/swift-storage-post.yaml
@@ -12,9 +12,19 @@ parameters:
type: json
description: Value which changes if the node configuration may need to be re-applied
-
resources:
+ StorageArtifactsConfig:
+ type: deploy-artifacts.yaml
+
+ StorageArtifactsDeploy:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: StorageArtifactsConfig}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
+
StoragePuppetConfig:
type: OS::Heat::SoftwareConfig
properties:
@@ -28,6 +38,7 @@ resources:
StorageDeployment_Step1:
type: OS::Heat::StructuredDeployments
+ depends_on: StorageArtifactsDeploy
properties:
name: StorageDeployment_Step1
servers: {get_param: servers}
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 142e47cc..ea226263 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -30,6 +30,10 @@ parameters:
default: 10
description: Partition Power to use when building Swift rings
type: number
+ RingBuild:
+ default: true
+ description: Whether to manage Swift rings or not
+ type: boolean
Replicas:
type: number
default: 3
@@ -68,6 +72,10 @@ parameters:
Hostname:
type: string
default: '' # Defaults to Heat created hostname
+ HostnameMap:
+ type: json
+ default: {}
+ description: Optional mapping to override hostnames
ExtraConfig:
default: {}
description: |
@@ -79,6 +87,9 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
+ SwiftStorageIPs:
+ default: {}
+ type: json
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -107,6 +118,9 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ NodeIndex:
+ type: number
+ default: 0
resources:
@@ -120,7 +134,10 @@ resources:
- network: ctlplane
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
- name: {get_param: Hostname}
+ name:
+ str_replace:
+ template: {get_param: Hostname}
+ params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
metadata: {get_param: ServerMetadata}
scheduler_hints: {get_param: SchedulerHints}
@@ -149,31 +166,43 @@ resources:
type: OS::TripleO::SwiftStorage::Ports::ExternalPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
InternalApiPort:
type: OS::TripleO::SwiftStorage::Ports::InternalApiPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StoragePort:
type: OS::TripleO::SwiftStorage::Ports::StoragePort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
StorageMgmtPort:
type: OS::TripleO::SwiftStorage::Ports::StorageMgmtPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
TenantPort:
type: OS::TripleO::SwiftStorage::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
ManagementPort:
type: OS::TripleO::SwiftStorage::Ports::ManagementPort
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ IPPool: {get_param: SwiftStorageIPs}
+ NodeIndex: {get_param: NodeIndex}
NetworkConfig:
type: OS::TripleO::ObjectStorage::Net::SoftwareConfig
@@ -191,11 +220,17 @@ resources:
properties:
ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+ InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
ManagementIp: {get_attr: [ManagementPort, ip_address]}
+ ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
@@ -232,17 +267,14 @@ resources:
raw_data: {get_file: hieradata/object.yaml}
mapped_data: # data supplied directly to this deployment configuration, etc
swift::swift_hash_suffix: { get_input: swift_hash_suffix }
+ tripleo::ringbuilder::build_ring: { get_input: swift_ring_build }
tripleo::ringbuilder::part_power: { get_input: swift_part_power }
tripleo::ringbuilder::replicas: {get_input: swift_replicas }
- # Swift
swift::storage::all::storage_local_net_ip: {get_input: swift_management_network}
swift_mount_check: {get_input: swift_mount_check }
tripleo::ringbuilder::min_part_hours: { get_input: swift_min_part_hours }
ntp::servers: {get_input: ntp_servers}
timezone::timezone: {get_input: timezone}
- # NOTE(dprince): build_ring support is currently not wired in.
- # See: https://review.openstack.org/#/c/109225/
- tripleo::ringbuilder::build_ring: True
snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
tripleo::packages::enable_install: {get_input: enable_package_install}
@@ -263,6 +295,7 @@ resources:
swift_hash_suffix: {get_param: HashSuffix}
swift_mount_check: {get_param: MountCheck}
swift_min_part_hours: {get_param: MinPartHours}
+ swift_ring_build: {get_param: RingBuild}
swift_part_power: {get_param: PartPower}
swift_replicas: { get_param: Replicas}
ntp_servers: {get_param: NtpServer}
@@ -302,11 +335,54 @@ outputs:
hosts_entry:
value:
str_replace:
- template: "IP HOST.DOMAIN HOST"
+ template: |
+ PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST
+ EXTERNALIP EXTERNALHOST.DOMAIN EXTERNALHOST
+ INTERNAL_APIIP INTERNAL_APIHOST.DOMAIN INTERNAL_APIHOST
+ STORAGEIP STORAGEHOST.DOMAIN STORAGEHOST
+ STORAGE_MGMTIP STORAGE_MGMTHOST.DOMAIN STORAGE_MGMTHOST
+ TENANTIP TENANTHOST.DOMAIN TENANTHOST
+ MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
params:
- IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]}
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]}
DOMAIN: {get_param: CloudDomain}
- HOST: {get_attr: [SwiftStorage, name]}
+ PRIMARYHOST: {get_attr: [SwiftStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - external
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - internalapi
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - storage
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - storagemgmt
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - tenant
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - management
nova_server_resource:
description: Heat resource handle for the swift storage server
value:
@@ -317,7 +393,7 @@ outputs:
str_replace:
template: 'r1z1-IP:%PORT%/d1'
params:
- IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
+ IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
external_ip_address:
description: IP address of the server in the external network
value: {get_attr: [ExternalPort, ip_address]}
diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml
index c49a1047..3e8e9182 100644
--- a/puppet/vip-config.yaml
+++ b/puppet/vip-config.yaml
@@ -16,6 +16,8 @@ resources:
keystone_admin_api_vip: {get_input: keystone_admin_api_vip}
keystone_public_api_vip: {get_input: keystone_public_api_vip}
neutron_api_vip: {get_input: neutron_api_vip}
+ # TODO: pass a `midonet_api_vip` var
+ midonet_api_vip: {get_input: neutron_api_vip}
cinder_api_vip: {get_input: cinder_api_vip}
glance_api_vip: {get_input: glance_api_vip}
glance_registry_vip: {get_input: glance_registry_vip}
@@ -24,6 +26,8 @@ resources:
nova_api_vip: {get_input: nova_api_vip}
nova_metadata_vip: {get_input: nova_metadata_vip}
ceilometer_api_vip: {get_input: ceilometer_api_vip}
+ aodh_api_vip: {get_input: aodh_api_vip}
+ gnocchi_api_vip: {get_input: gnocchi_api_vip}
heat_api_vip: {get_input: heat_api_vip}
horizon_vip: {get_input: horizon_vip}
redis_vip: {get_input: redis_vip}
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index cb5669a7..2da873d0 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -16,26 +16,54 @@ import sys
import traceback
import yaml
-base_path = sys.argv[1]
-exit_val = 0
-failed_files = []
+
+def exit_usage():
+ print('Usage: %s <yaml file or directory>' % sys.argv[0])
+ sys.exit(1)
def validate(filename):
+ print('Validating %s' % filename)
try:
- yaml.load(open(filename).read())
+ tpl = yaml.load(open(filename).read())
except Exception:
print(traceback.format_exc())
return 1
+ # yaml is OK, now walk the parameters and output a warning for unused ones
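+ # (best-effort check: we just look for the quoted parameter name in the string repr of the resources and outputs sections)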
+ for p in tpl.get('parameters', {}):
+ str_p = '\'%s\'' % p
+ in_resources = str_p in str(tpl.get('resources', {}))
+ in_outputs = str_p in str(tpl.get('outputs', {}))
+ if not in_resources and not in_outputs:
+ print('Warning: parameter %s in template %s appears to be unused'
+ % (p, filename))
+
return 0
-for subdir, dirs, files in os.walk(base_path):
- for f in files:
- if f.endswith('.yaml'):
- file_path = os.path.join(subdir, f)
- failed = validate(file_path)
- if failed:
- failed_files.append(file_path)
- exit_val |= failed
+if len(sys.argv) < 2:
+ exit_usage()
+
+path_args = sys.argv[1:]
+exit_val = 0
+failed_files = []
+
+for base_path in path_args:
+ if os.path.isdir(base_path):
+ for subdir, dirs, files in os.walk(base_path):
+ for f in files:
+ if f.endswith('.yaml'):
+ file_path = os.path.join(subdir, f)
+ failed = validate(file_path)
+ if failed:
+ failed_files.append(file_path)
+ exit_val |= failed
+ elif os.path.isfile(base_path) and base_path.endswith('.yaml'):
+ failed = validate(base_path)
+ if failed:
+ failed_files.append(base_path)
+ exit_val |= failed
+ else:
+ print('Unexpected argument %s' % base_path)
+ exit_usage()
if failed_files:
print('Validation failed on:')
diff --git a/tox.ini b/tox.ini
index 974971f6..5d09e0a4 100644
--- a/tox.ini
+++ b/tox.ini
@@ -10,5 +10,5 @@ deps = -r{toxinidir}/requirements.txt
[testenv:venv]
commands = {posargs}
-[testenv:linters]
+[testenv:pep8]
commands = python ./tools/yaml-validate.py .
diff --git a/validation-scripts/all-nodes.sh b/validation-scripts/all-nodes.sh
index 8057f201..1c834e76 100644
--- a/validation-scripts/all-nodes.sh
+++ b/validation-scripts/all-nodes.sh
@@ -1,27 +1,49 @@
#!/bin/bash
+set -e
+
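+# ping_retry <ip> [attempts]: ping an IPv4 or IPv6 address up to <attempts>
+# times (default 10); returns 0 on the first success, 1 if every attempt fails.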
+function ping_retry() {
+ local IP_ADDR=$1
+ local TIMES=${2:-'10'}
+ local COUNT=0
+ local PING_CMD=ping
+ if [[ $IP_ADDR =~ ":" ]]; then
+ PING_CMD=ping6
+ fi
+ until [ $COUNT -ge $TIMES ]; do
+ if $PING_CMD -W 300 -c 1 $IP_ADDR &> /dev/null; then
+ echo "Ping to $IP_ADDR succeeded."
+ return 0
+ fi
+ echo "Ping to $IP_ADDR failed. Retrying..."
+ COUNT=$(($COUNT + 1))
+ done
+ return 1
+}
# For each unique remote IP (specified via Heat) we check to
# see if one of the locally configured networks matches and if so we
# attempt a ping test against the remote network IP.
function ping_controller_ips() {
local REMOTE_IPS=$1
-
for REMOTE_IP in $(echo $REMOTE_IPS | sed -e "s| |\n|g" | sort -u); do
-
- for LOCAL_NETWORK in $(ip r | grep -v default | cut -d " " -f 1); do
- local LOCAL_CIDR=$(echo $LOCAL_NETWORK | cut -d "/" -f 2)
- local LOCAL_NETMASK=$(ipcalc -m $LOCAL_NETWORK | grep NETMASK | cut -d "=" -f 2)
- local REMOTE_NETWORK=$(ipcalc -np $REMOTE_IP $LOCAL_NETMASK | grep NETWORK | cut -d "=" -f 2)
-
- if [ $REMOTE_NETWORK/$LOCAL_CIDR == $LOCAL_NETWORK ]; then
- echo -n "Trying to ping $REMOTE_IP for local network $LOCAL_NETWORK..."
- if ! ping -W 300 -c 1 $REMOTE_IP &> /dev/null; then
- echo "FAILURE"
- echo "$REMOTE_IP is not pingable. Local Network: $LOCAL_NETWORK" >&2
- exit 1
- fi
- echo "SUCCESS"
- fi
+ if [[ $REMOTE_IP =~ ":" ]]; then
+ networks=$(ip -6 r | grep -v default | cut -d " " -f 1 | grep -v "unreachable")
+ else
+ networks=$(ip r | grep -v default | cut -d " " -f 1)
+ fi
+ for LOCAL_NETWORK in $networks; do
+ in_network=$(python -c "import ipaddr; net=ipaddr.IPNetwork('$LOCAL_NETWORK'); addr=ipaddr.IPAddress('$REMOTE_IP'); print(addr in net)")
+ if [[ $in_network == "True" ]]; then
+ echo "Trying to ping $REMOTE_IP for local network ${LOCAL_NETWORK}."
+ set +e
+ if ! ping_retry $REMOTE_IP; then
+ echo "FAILURE"
+ echo "$REMOTE_IP is not pingable. Local Network: $LOCAL_NETWORK" >&2
+ exit 1
+ fi
+ set -e
+ echo "SUCCESS"
+ fi
done
done
}
@@ -32,14 +54,16 @@ function ping_controller_ips() {
# multiple gateways.
function ping_default_gateways() {
DEFAULT_GW=$(ip r | grep ^default | cut -d " " -f 3)
+ set +e
for GW in $DEFAULT_GW; do
echo -n "Trying to ping default gateway ${GW}..."
- if ! ping -c 1 $GW &> /dev/null; then
+ if ! ping_retry $GW; then
echo "FAILURE"
echo "$GW is not pingable."
exit 1
fi
done
+ set -e
echo "SUCCESS"
}