-rw-r--r--  .gitignore | 2
-rw-r--r--  .testr.conf | 4
-rw-r--r--  ci/environments/ceph-min-osds.yaml | 2
-rw-r--r--  ci/environments/multinode-container-upgrade.yaml | 70
-rw-r--r--  ci/environments/multinode.yaml | 2
-rw-r--r--  ci/environments/scenario002-multinode-containers.yaml | 7
-rw-r--r--  ci/environments/scenario003-multinode-containers.yaml | 10
-rw-r--r--  ci/environments/scenario004-multinode-containers.yaml | 7
-rw-r--r--  common/README | 1
-rw-r--r--  deployed-server/deployed-server.yaml | 6
-rwxr-xr-x  docker/docker-puppet.py | 47
-rw-r--r--  docker/docker-steps.j2 | 117
-rwxr-xr-x  docker/docker-toool | 3
-rw-r--r--  docker/services/aodh-api.yaml | 5
-rw-r--r--  docker/services/ceilometer-agent-ipmi.yaml | 113
-rw-r--r--  docker/services/cinder-api.yaml | 156
-rw-r--r--  docker/services/cinder-backup.yaml | 132
-rw-r--r--  docker/services/cinder-scheduler.yaml | 120
-rw-r--r--  docker/services/cinder-volume.yaml | 167
-rw-r--r--  docker/services/collectd.yaml | 20
-rw-r--r--  docker/services/congress-api.yaml | 5
-rw-r--r--  docker/services/containers-common.yaml | 69
-rw-r--r--  docker/services/database/mysql-client.yaml | 66
-rw-r--r--  docker/services/database/mysql.yaml | 5
-rw-r--r--  docker/services/database/redis.yaml | 2
-rw-r--r--  docker/services/ec2-api.yaml | 153
-rw-r--r--  docker/services/glance-api.yaml | 5
-rw-r--r--  docker/services/gnocchi-api.yaml | 5
-rw-r--r--  docker/services/heat-engine.yaml | 5
-rw-r--r--  docker/services/horizon.yaml | 4
-rw-r--r--  docker/services/ironic-api.yaml | 13
-rw-r--r--  docker/services/iscsid.yaml | 109
-rw-r--r--  docker/services/keystone.yaml | 5
-rw-r--r--  docker/services/manila-api.yaml | 125
-rw-r--r--  docker/services/manila-scheduler.yaml | 105
-rw-r--r--  docker/services/memcached.yaml | 1
-rw-r--r--  docker/services/mistral-api.yaml | 8
-rw-r--r--  docker/services/multipathd.yaml | 89
-rw-r--r--  docker/services/neutron-api.yaml | 5
-rw-r--r--  docker/services/nova-api.yaml | 12
-rw-r--r--  docker/services/nova-consoleauth.yaml | 108
-rw-r--r--  docker/services/nova-vnc-proxy.yaml | 108
-rw-r--r--  docker/services/pacemaker/cinder-backup.yaml | 152
-rw-r--r--  docker/services/pacemaker/cinder-volume.yaml | 170
-rw-r--r--  docker/services/pacemaker/haproxy.yaml | 23
-rw-r--r--  docker/services/panko-api.yaml | 5
-rw-r--r--  docker/services/rabbitmq.yaml | 2
-rw-r--r--  docker/services/sahara-api.yaml | 119
-rw-r--r--  docker/services/sahara-engine.yaml | 110
-rw-r--r--  docker/services/sensu-client.yaml | 141
-rw-r--r--  docker/services/services.yaml | 105
-rw-r--r--  docker/services/swift-ringbuilder.yaml | 15
-rw-r--r--  docker/services/swift-storage.yaml | 23
-rw-r--r--  docker/services/tacker.yaml | 5
-rw-r--r--  environments/cinder-dellsc-config.yaml | 1
-rw-r--r--  environments/cinder-netapp-config.yaml | 4
-rw-r--r--  environments/docker-services-tls-everywhere.yaml | 10
-rw-r--r--  environments/docker.yaml | 17
-rw-r--r--  environments/enable-tls.yaml | 4
-rw-r--r--  environments/host-config-and-reboot.j2.yaml | 18
-rw-r--r--  environments/host-config-pre-network.j2.yaml | 16
-rw-r--r--  environments/hyperconverged-ceph.yaml | 1
-rw-r--r--  environments/inject-trust-anchor-hiera.yaml | 4
-rw-r--r--  environments/inject-trust-anchor.yaml | 4
-rw-r--r--  environments/network-isolation.j2.yaml | 37
-rw-r--r--  environments/network-isolation.yaml | 59
-rw-r--r--  environments/networking/neutron-midonet.yaml | 66
-rw-r--r--  environments/neutron-midonet.yaml | 4
-rw-r--r--  environments/neutron-ml2-ovn-ha.yaml | 24
-rw-r--r--  environments/neutron-opendaylight-dpdk.yaml | 37
-rw-r--r--  environments/neutron-ovs-dpdk.yaml | 37
-rw-r--r--  environments/nonha-arch.yaml | 16
-rw-r--r--  environments/overcloud-baremetal.j2.yaml | 19
-rw-r--r--  environments/overcloud-services.yaml | 7
-rw-r--r--  environments/predictable-placement/custom-hostnames.yaml | 33
-rw-r--r--  environments/puppet-ceph-external.yaml | 4
-rw-r--r--  environments/services-docker/ec2-api.yaml | 2
-rw-r--r--  environments/services-docker/manila.yaml | 3
-rw-r--r--  environments/services-docker/sahara.yaml | 3
-rw-r--r--  environments/services-docker/sensu-client.yaml | 3
-rw-r--r--  environments/services-docker/undercloud-ceilometer.yaml | 1
-rw-r--r--  environments/services/ironic.yaml | 3
-rw-r--r--  environments/ssl/enable-tls.yaml | 41
-rw-r--r--  environments/ssl/inject-trust-anchor-hiera.yaml | 22
-rw-r--r--  environments/ssl/inject-trust-anchor.yaml | 20
-rw-r--r--  environments/ssl/tls-endpoints-public-dns.yaml | 131
-rw-r--r--  environments/ssl/tls-endpoints-public-ip.yaml | 131
-rw-r--r--  environments/ssl/tls-everywhere-endpoints-dns.yaml | 131
-rw-r--r--  environments/storage/cinder-netapp-config.yaml | 119
-rw-r--r--  environments/storage/cinder-nfs.yaml | 27
-rw-r--r--  environments/storage/enable-ceph.yaml | 35
-rw-r--r--  environments/storage/external-ceph.yaml | 78
-rw-r--r--  environments/storage/glance-nfs.yaml | 34
-rw-r--r--  environments/tls-endpoints-public-dns.yaml | 10
-rw-r--r--  environments/tls-endpoints-public-ip.yaml | 10
-rw-r--r--  environments/tls-everywhere-endpoints-dns.yaml | 10
-rw-r--r--  environments/undercloud.yaml | 2
-rw-r--r--  extraconfig/pre_network/ansible_host_config.yaml (renamed from extraconfig/pre_network/ansible_host_config.ansible) | 10
-rw-r--r--  extraconfig/pre_network/host_config_and_reboot.role.j2.yaml | 10
-rw-r--r--  extraconfig/pre_network/host_config_and_reboot.yaml | 246
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh | 2
-rw-r--r--  extraconfig/tasks/post_puppet_pacemaker.j2.yaml | 8
-rwxr-xr-x  extraconfig/tasks/yum_update.sh | 2
-rw-r--r--  net-config-bond.yaml | 13
-rw-r--r--  network/config/bond-with-vlans/ceph-storage.yaml | 10
-rw-r--r--  network/config/bond-with-vlans/cinder-storage.yaml | 10
-rw-r--r--  network/config/bond-with-vlans/compute-dpdk.yaml | 5
-rw-r--r--  network/config/bond-with-vlans/compute.yaml | 10
-rw-r--r--  network/config/bond-with-vlans/controller-no-external.yaml | 10
-rw-r--r--  network/config/bond-with-vlans/controller-v6.yaml | 10
-rw-r--r--  network/config/bond-with-vlans/controller.yaml | 10
-rw-r--r--  network/config/bond-with-vlans/swift-storage.yaml | 10
-rw-r--r--  network/endpoints/endpoint_data.yaml | 2
-rw-r--r--  network/endpoints/endpoint_map.yaml | 6
-rw-r--r--  network/ports/ctlplane_vip.yaml | 2
-rw-r--r--  network/ports/net_ip_list_map.yaml | 14
-rwxr-xr-x  network/scripts/run-os-net-config.sh | 2
-rw-r--r--  network/service_net_map.j2.yaml | 2
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml | 7
-rw-r--r--  overcloud.j2.yaml | 51
-rw-r--r--  plan-samples/README.rst | 22
-rw-r--r--  plan-samples/plan-environment-derived-params.yaml | 35
-rw-r--r--  puppet/all-nodes-config.yaml | 18
-rw-r--r--  puppet/blockstorage-role.yaml | 44
-rw-r--r--  puppet/cephstorage-role.yaml | 44
-rw-r--r--  puppet/compute-role.yaml | 46
-rw-r--r--  puppet/controller-role.yaml | 53
-rw-r--r--  puppet/deploy-artifacts.sh | 10
-rw-r--r--  puppet/major_upgrade_steps.j2.yaml | 5
-rw-r--r--  puppet/objectstorage-role.yaml | 45
-rw-r--r--  puppet/post.j2.yaml | 7
-rw-r--r--  puppet/puppet-steps.j2 | 105
-rw-r--r--  puppet/role.role.j2.yaml | 54
-rw-r--r--  puppet/services/README.rst | 24
-rw-r--r--  puppet/services/cinder-backend-dellsc.yaml | 4
-rw-r--r--  puppet/services/cinder-backend-netapp.yaml | 8
-rw-r--r--  puppet/services/cinder-volume.yaml | 16
-rw-r--r--  puppet/services/database/mysql.yaml | 13
-rw-r--r--  puppet/services/database/redis.yaml | 20
-rw-r--r--  puppet/services/gnocchi-base.yaml | 10
-rw-r--r--  puppet/services/haproxy.yaml | 5
-rw-r--r--  puppet/services/horizon.yaml | 3
-rw-r--r--  puppet/services/ironic-api.yaml | 31
-rw-r--r--  puppet/services/ironic-conductor.yaml | 6
-rw-r--r--  puppet/services/ironic-inspector.yaml | 151
-rw-r--r--  puppet/services/keystone.yaml | 41
-rw-r--r--  puppet/services/neutron-ovs-agent.yaml | 10
-rw-r--r--  puppet/services/neutron-ovs-dpdk-agent.yaml | 51
-rw-r--r--  puppet/services/neutron-sriov-agent.yaml | 22
-rw-r--r--  puppet/services/nova-api.yaml | 2
-rw-r--r--  puppet/services/nova-scheduler.yaml | 9
-rw-r--r--  puppet/services/opendaylight-ovs.yaml | 40
-rw-r--r--  puppet/services/openvswitch-upgrade.yaml | 50
-rw-r--r--  puppet/services/openvswitch.yaml | 178
-rw-r--r--  puppet/services/ovn-dbs.yaml | 1
-rw-r--r--  puppet/services/pacemaker/database/mysql.yaml | 7
-rw-r--r--  puppet/services/pacemaker/ovn-dbs.yaml | 61
-rw-r--r--  puppet/services/pacemaker_remote.yaml | 38
-rw-r--r--  puppet/services/panko-api.yaml | 4
-rw-r--r--  releasenotes/notes/Introduce-ManageKeystoneFernetKeys-parameter-2478cf5fc5e64256.yaml | 6
-rw-r--r--  releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml | 4
-rw-r--r--  releasenotes/notes/Use-KeystoneFernetKeys-parameter-bd635a106bb8e00f.yaml | 10
-rw-r--r--  releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml | 5
-rw-r--r--  releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml | 6
-rw-r--r--  releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml | 6
-rw-r--r--  releasenotes/notes/baremetal-cell-hosts-cd5cf5aa8a33643c.yaml | 6
-rw-r--r--  releasenotes/notes/baremetal-role-34cb48cc30d7bdb4.yaml | 5
-rw-r--r--  releasenotes/notes/change-panko-api-port-fb8967420cd036b1.yaml | 4
-rw-r--r--  releasenotes/notes/derive-params-custom-plan-env-3a810ff58a68e0ad.yaml | 4
-rw-r--r--  releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml | 8
-rw-r--r--  releasenotes/notes/fix-glance-api-network-4f9d7c20475a5994.yaml | 3
-rw-r--r--  releasenotes/notes/fix-rpm-deploy-artifact-urls-03d5694073ad159d.yaml | 4
-rw-r--r--  releasenotes/notes/ironic-inspector-43441782bdf0f84e.yaml | 5
-rw-r--r--  releasenotes/notes/max-active-fernet-keys-f960f08838a75eee.yaml | 5
-rw-r--r--  releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml | 3
-rw-r--r--  releasenotes/notes/ovn-ha-c0139ac519680872.yaml | 3
-rw-r--r--  releasenotes/notes/pre-network-config-role-specific-b36cc4bd6383e493.yaml | 11
-rw-r--r--  releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml | 23
-rw-r--r--  releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml | 8
-rw-r--r--  releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml | 7
-rw-r--r--  releasenotes/notes/vipmap-output-4a9ce99930960346.yaml | 5
-rw-r--r--  roles/BlockStorage.yaml | 5
-rw-r--r--  roles/CephStorage.yaml | 3
-rw-r--r--  roles/Compute.yaml | 5
-rw-r--r--  roles/Controller.yaml | 7
-rw-r--r--  roles/ControllerOpenstack.yaml | 6
-rw-r--r--  roles/Database.yaml | 2
-rw-r--r--  roles/IronicConductor.yaml | 21
-rw-r--r--  roles/Messaging.yaml | 2
-rw-r--r--  roles/Networker.yaml | 2
-rw-r--r--  roles/ObjectStorage.yaml | 4
-rw-r--r--  roles/README.rst | 4
-rw-r--r--  roles/Telemetry.yaml | 2
-rw-r--r--  roles/Undercloud.yaml | 1
-rw-r--r--  roles_data.yaml | 24
-rw-r--r--  roles_data_undercloud.yaml | 3
-rw-r--r--  sample-env-generator/README.rst | 160
-rw-r--r--  sample-env-generator/networking.yaml | 32
-rw-r--r--  sample-env-generator/predictable-placement.yaml | 17
-rw-r--r--  sample-env-generator/ssl.yaml | 459
-rw-r--r--  sample-env-generator/storage.yaml | 133
-rw-r--r--  services.yaml (renamed from puppet/services/services.yaml) | 19
-rw-r--r--  test-requirements.txt | 8
-rwxr-xr-x  tools/yaml-validate.py | 94
-rw-r--r--  tox.ini | 10
-rw-r--r--  tripleo_heat_templates/__init__.py | 0
-rwxr-xr-x  tripleo_heat_templates/environment_generator.py | 212
-rw-r--r--  tripleo_heat_templates/tests/__init__.py | 0
-rw-r--r--  tripleo_heat_templates/tests/test_environment_generator.py | 498
209 files changed, 7041 insertions, 684 deletions
diff --git a/.gitignore b/.gitignore
index cea6064d..2d067214 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,8 +22,10 @@ lib64
pip-log.txt
# Unit test / coverage reports
+cover
.coverage
.tox
+.testrepository
nosetests.xml
# Translations
diff --git a/.testr.conf b/.testr.conf
new file mode 100644
index 00000000..5837838f
--- /dev/null
+++ b/.testr.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./tripleo_heat_templates ./tripleo_heat_templates $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/ci/environments/ceph-min-osds.yaml b/ci/environments/ceph-min-osds.yaml
new file mode 100644
index 00000000..4e72d313
--- /dev/null
+++ b/ci/environments/ceph-min-osds.yaml
@@ -0,0 +1,2 @@
+parameter_defaults:
+ CephPoolDefaultSize: 1
diff --git a/ci/environments/multinode-container-upgrade.yaml b/ci/environments/multinode-container-upgrade.yaml
deleted file mode 100644
index 24bb1f49..00000000
--- a/ci/environments/multinode-container-upgrade.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-# NOTE: This is an environment specific for containers CI. Mainly we
-# deploy non-pacemakerized overcloud. Once we are able to deploy and
-# upgrade pacemakerized and containerized overcloud, we should remove
-# this file and use normal CI multinode environments/scenarios.
-
-resource_registry:
- OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
- OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
-
- # NOTE: This is needed because of upgrades from Ocata to Pike. We
- # deploy the initial environment with Ocata templates, and
- # overcloud-resource-registry.yaml there doesn't have this Docker
- # mapping at all. After we stop CI'ing Ocata->Pike upgrade, we can
- # remove this.
- OS::TripleO::Services::Docker: OS::Heat::None
-
-parameter_defaults:
- ControllerServices:
- - OS::TripleO::Services::CephMon
- - OS::TripleO::Services::CephOSD
- - OS::TripleO::Services::CinderApi
- - OS::TripleO::Services::CinderScheduler
- - OS::TripleO::Services::CinderVolume
- - OS::TripleO::Services::Docker
- - OS::TripleO::Services::Kernel
- - OS::TripleO::Services::Keystone
- - OS::TripleO::Services::GlanceApi
- - OS::TripleO::Services::HeatApi
- - OS::TripleO::Services::HeatApiCfn
- - OS::TripleO::Services::HeatApiCloudwatch
- - OS::TripleO::Services::HeatEngine
- - OS::TripleO::Services::MySQL
- - OS::TripleO::Services::MySQLClient
- - OS::TripleO::Services::NeutronDhcpAgent
- - OS::TripleO::Services::NeutronL3Agent
- - OS::TripleO::Services::NeutronMetadataAgent
- - OS::TripleO::Services::NeutronServer
- - OS::TripleO::Services::NeutronCorePlugin
- - OS::TripleO::Services::NeutronOvsAgent
- - OS::TripleO::Services::RabbitMQ
- - OS::TripleO::Services::HAproxy
- - OS::TripleO::Services::Keepalived
- - OS::TripleO::Services::Memcached
- - OS::TripleO::Services::Pacemaker
- - OS::TripleO::Services::NovaConductor
- - OS::TripleO::Services::NovaApi
- - OS::TripleO::Services::NovaPlacement
- - OS::TripleO::Services::NovaMetadata
- - OS::TripleO::Services::NovaScheduler
- - OS::TripleO::Services::Ntp
- - OS::TripleO::Services::SwiftProxy
- - OS::TripleO::Services::SwiftStorage
- - OS::TripleO::Services::SwiftRingBuilder
- - OS::TripleO::Services::Snmp
- - OS::TripleO::Services::Timezone
- - OS::TripleO::Services::TripleoPackages
- - OS::TripleO::Services::NovaCompute
- - OS::TripleO::Services::NovaLibvirt
- - OS::TripleO::Services::Sshd
- ControllerExtraConfig:
- nova::compute::libvirt::services::libvirt_virt_type: qemu
- nova::compute::libvirt::libvirt_virt_type: qemu
- # Required for Centos 7.3 and Qemu 2.6.0
- nova::compute::libvirt::libvirt_cpu_mode: 'none'
- #NOTE(gfidente): not great but we need this to deploy on ext4
- #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
- ceph::profile::params::osd_max_object_name_len: 256
- ceph::profile::params::osd_max_object_namespace_len: 64
- SwiftCeilometerPipelineEnabled: False
- Debug: True
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
index 20e37e37..102787a6 100644
--- a/ci/environments/multinode.yaml
+++ b/ci/environments/multinode.yaml
@@ -52,6 +52,8 @@ parameter_defaults:
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Horizon
+ - OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
diff --git a/ci/environments/scenario002-multinode-containers.yaml b/ci/environments/scenario002-multinode-containers.yaml
index 7191deae..92c834b6 100644
--- a/ci/environments/scenario002-multinode-containers.yaml
+++ b/ci/environments/scenario002-multinode-containers.yaml
@@ -6,9 +6,10 @@
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
- OS::TripleO::Services::BarbicanApi: ../../puppet/services/barbican-api.yaml
- OS::TripleO::Services::Zaqar: ../../puppet/services/zaqar.yaml
- OS::TripleO::Services::Ec2Api: ../../puppet/services/ec2-api.yaml
+ # TODO: Barbican is not yet containerized: https://review.openstack.org/#/c/474327
+ # OS::TripleO::Services::BarbicanApi: ../../docker/services/barbican-api.yaml
+ OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
+ OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
# overcloud-resource-registry.yaml there doesn't have this Docker
diff --git a/ci/environments/scenario003-multinode-containers.yaml b/ci/environments/scenario003-multinode-containers.yaml
index cfb05077..7b917aef 100644
--- a/ci/environments/scenario003-multinode-containers.yaml
+++ b/ci/environments/scenario003-multinode-containers.yaml
@@ -6,11 +6,11 @@
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
- OS::TripleO::Services::SaharaApi: ../../puppet/services/sahara-api.yaml
- OS::TripleO::Services::SaharaEngine: ../../puppet/services/sahara-engine.yaml
- OS::TripleO::Services::MistralApi: ../../puppet/services/mistral-api.yaml
- OS::TripleO::Services::MistralEngine: ../../puppet/services/mistral-engine.yaml
- OS::TripleO::Services::MistralExecutor: ../../puppet/services/mistral-executor.yaml
+ OS::TripleO::Services::SaharaApi: ../../docker/services/sahara-api.yaml
+ OS::TripleO::Services::SaharaEngine: ../../docker/services/sahara-engine.yaml
+ OS::TripleO::Services::MistralApi: ../../docker/services/mistral-api.yaml
+ OS::TripleO::Services::MistralEngine: ../../docker/services/mistral-engine.yaml
+ OS::TripleO::Services::MistralExecutor: ../../docker/services/mistral-executor.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
# overcloud-resource-registry.yaml there doesn't have this Docker
diff --git a/ci/environments/scenario004-multinode-containers.yaml b/ci/environments/scenario004-multinode-containers.yaml
index 7a6724de..1d6d5917 100644
--- a/ci/environments/scenario004-multinode-containers.yaml
+++ b/ci/environments/scenario004-multinode-containers.yaml
@@ -6,6 +6,7 @@
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ # TODO deploy ceph with ceph-ansible: https://review.openstack.org/#/c/465066/
OS::TripleO::Services::CephMds: ../../puppet/services/ceph-mds.yaml
OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
@@ -13,10 +14,12 @@ resource_registry:
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
- OS::TripleO::Services::ManilaApi: ../../puppet/services/manila-api.yaml
- OS::TripleO::Services::ManilaScheduler: ../../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaApi: ../../docker/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../../docker/services/manila-scheduler.yaml
+ # NOTE: being containerized here: https://review.openstack.org/#/c/471527/
OS::TripleO::Services::ManilaShare: ../../puppet/services/manila-share.yaml
OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml
+ # TODO: containerize NeutronBgpVpnApi
OS::TripleO::Services::NeutronBgpVpnApi: ../../puppet/services/neutron-bgpvpn-api.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
diff --git a/common/README b/common/README
new file mode 100644
index 00000000..6a523118
--- /dev/null
+++ b/common/README
@@ -0,0 +1 @@
+This will contain some common templates but it needs to be added to the RPM spec first
diff --git a/deployed-server/deployed-server.yaml b/deployed-server/deployed-server.yaml
index 0847bfbf..16deb7d6 100644
--- a/deployed-server/deployed-server.yaml
+++ b/deployed-server/deployed-server.yaml
@@ -44,6 +44,9 @@ parameters:
Command or script snippet to run on all overcloud nodes to
initialize the upgrade process. E.g. a repository switch.
default: ''
+ deployment_swift_data:
+ type: json
+ default: {}
resources:
deployed-server:
@@ -51,6 +54,7 @@ resources:
properties:
name: {get_param: name}
software_config_transport: {get_param: software_config_transport}
+ deployment_swift_data: {get_param: deployment_swift_data}
UpgradeInitConfig:
type: OS::Heat::SoftwareConfig
@@ -133,3 +137,5 @@ outputs:
- {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]}
name:
value: {get_attr: [HostsEntryDeployment, hostname]}
+ os_collect_config:
+ value: {get_attr: [deployed-server, os_collect_config]}
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
index 340a9e9f..13211676 100755
--- a/docker/docker-puppet.py
+++ b/docker/docker-puppet.py
@@ -190,37 +190,30 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
if [ -n "$PUPPET_TAGS" ]; then
TAGS="--tags \"$PUPPET_TAGS\""
fi
+
+ # workaround LP1696283
+ mkdir -p /etc/ssh
+ touch /etc/ssh/ssh_known_hosts
+
FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
# Disables archiving
if [ -z "$NO_ARCHIVE" ]; then
- rm -Rf /var/lib/config-data/${NAME}
-
- # copying etc should be enough for most services
- mkdir -p /var/lib/config-data/${NAME}/etc
- cp -a /etc/* /var/lib/config-data/${NAME}/etc/
-
- # workaround LP1696283
- mkdir -p /var/lib/config-data/${NAME}/etc/ssh
- touch /var/lib/config-data/${NAME}/etc/ssh/ssh_known_hosts
-
- if [ -d /root/ ]; then
- cp -a /root/ /var/lib/config-data/${NAME}/root/
- fi
- if [ -d /var/lib/ironic/tftpboot/ ]; then
- mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
- cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/tftpboot/
- fi
- if [ -d /var/lib/ironic/httpboot/ ]; then
- mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
- cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/httpboot/
- fi
-
- # apache services may files placed in /var/www/
- if [ -d /var/www/ ]; then
- mkdir -p /var/lib/config-data/${NAME}/var/www
- cp -a /var/www/* /var/lib/config-data/${NAME}/var/www/
- fi
+ archivedirs=("/etc" "/root" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www")
+ rsync_srcs=""
+ for d in "${archivedirs[@]}"; do
+ if [ -d "$d" ]; then
+ rsync_srcs+=" $d"
+ fi
+ done
+ rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME}
+
+ # Also make a copy of files modified during puppet run
+ # This is useful for debugging
+ mkdir -p /var/lib/config-data/puppet-generated/${NAME}
+ rsync -a -R -0 --delay-updates --delete-after \
+ --files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \
+ / /var/lib/config-data/puppet-generated/${NAME}
# Write a checksum of the config-data dir, this is used as a
# salt to trigger container restart when the config changes
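
Note: the archiving logic added above replaces the per-directory cp commands with two rsync passes: a full copy of every existing archive directory, and a second pass that captures only the files puppet actually modified, using /etc/ssh/ssh_known_hosts (touched just before the puppet run) as the "newer than" reference. A minimal Python sketch of the same idea, purely illustrative and not part of the patch:

import os
import subprocess

def archive_puppet_output(name, marker="/etc/ssh/ssh_known_hosts"):
    # Candidate directories; only the ones that actually exist get synced.
    archive_dirs = ["/etc", "/root", "/var/lib/ironic/tftpboot",
                    "/var/lib/ironic/httpboot", "/var/www"]
    srcs = [d for d in archive_dirs if os.path.isdir(d)]

    # Pass 1: full copy into the config-data volume, preserving paths (-R).
    subprocess.run(["rsync", "-a", "-R", "--delay-updates", "--delete-after"]
                   + srcs + ["/var/lib/config-data/%s" % name], check=True)

    # Pass 2: only files newer than the marker, i.e. touched by the puppet
    # run, go into the puppet-generated copy that is kept for debugging.
    newer = subprocess.run(["find"] + srcs + ["-newer", marker, "-print0"],
                           check=True, capture_output=True).stdout
    dest = "/var/lib/config-data/puppet-generated/%s" % name
    os.makedirs(dest, exist_ok=True)
    subprocess.run(["rsync", "-a", "-R", "-0", "--delay-updates",
                    "--delete-after", "--files-from=-", "/", dest],
                   input=newer, check=True)
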
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
index a56ca02b..83772028 100644
--- a/docker/docker-steps.j2
+++ b/docker/docker-steps.j2
@@ -21,6 +21,9 @@ parameters:
servers:
type: json
description: Mapping of Role name e.g Controller to a list of servers
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
@@ -35,6 +38,21 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ ctlplane_service_ips:
+ type: json
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}_Enabled:
+ or:
+ {% for role in roles %}
+ - not:
+ equals:
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+ - ''
+ - False
+ {% endfor %}
+{% endfor %}
resources:
@@ -68,6 +86,53 @@ resources:
- name: bootstrap_server_id
config: {get_file: deploy-steps-playbook.yaml}
+{%- for step in range(1, deploy_steps_max) %}
+# BEGIN service_workflow_tasks handling
+ WorkflowTasks_Step{{step}}:
+ type: OS::Mistral::Workflow
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on:
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
+ properties:
+ name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+ type: direct
+ tasks:
+ yaql:
+ expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+ data:
+ {% for role in roles %}
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+ {% endfor %}
+
+ WorkflowTasks_Step{{step}}_Execution:
+ type: OS::Mistral::ExternalResource
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on: WorkflowTasks_Step{{step}}
+ properties:
+ actions:
+ CREATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ UPDATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ always_update: true
+# END service_workflow_tasks handling
+{% endfor %}
+
{% for role in roles %}
# Post deployment steps for all roles
# A single config is re-applied with an incrementing step number
@@ -139,10 +204,6 @@ resources:
- name: Write kolla config json files
copy: content="{{item.value|to_json}}" dest="{{item.key}}" force=yes
with_dict: "{{kolla_config}}"
- - name: Install paunch FIXME remove when packaged
- shell: |
- yum -y install python-pip
- pip install paunch
########################################################
# Bootstrap tasks, only performed on bootstrap_server_id
########################################################
@@ -199,14 +260,23 @@ resources:
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
depends_on:
- {% for dep in roles %}
+ - WorkflowTasks_Step{{step}}_Execution
+ # TODO(gfidente): the following if/else condition
+ # replicates what is already defined for the
+ # WorkflowTasks_StepX resource and can be remove
+ # if https://bugs.launchpad.net/heat/+bug/1700569
+ # is fixed.
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
- {{dep.name}}Deployment_Step{{step -1}}
- {% endfor %}
- {% endif %}
+ {% endfor %}
+ {% endif %}
properties:
name: {{role.name}}Deployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
@@ -220,26 +290,31 @@ resources:
{% endfor %}
# END CONFIG STEPS
- {{role.name}}PostConfig:
- type: OS::TripleO::Tasks::{{role.name}}PostConfig
+ # Note, this should be the last step to execute configuration changes.
+ # Ensure that all {{role.name}}ExtraConfigPost steps are executed
+ # after all the previous deployment steps.
+ {{role.name}}ExtraConfigPost:
depends_on:
{% for dep in roles %}
- {{dep.name}}Deployment_Step5
{% endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
+ servers: {get_param: [servers, {{role.name}}]}
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- {{role.name}}ExtraConfigPost:
+ # The {{role.name}}PostConfig steps are in charge of
+ # quiescing all services, i.e. in the Controller case,
+ # we should run a full service reload.
+ {{role.name}}PostConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PostConfig
depends_on:
{% for dep in roles %}
- - {{dep.name}}PostConfig
+ - {{dep.name}}ExtraConfigPost
{% endfor %}
- type: OS::TripleO::NodeExtraConfigPost
properties:
- servers: {get_param: [servers, {{role.name}}]}
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
{% endfor %}
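
Note: the WorkflowTasks_Step resources added above use a yaql expression to merge the per-role service_workflow_tasks into a single flat task list for each step. Roughly, $.data.where($ != '').select($.get('stepN')).where($ != null).flatten() behaves like the following Python sketch (illustrative only, not part of the patch):

def workflow_tasks_for_step(role_task_maps, step):
    """Collect the workflow tasks each role defines for a given step."""
    tasks = []
    for role_tasks in role_task_maps:        # one entry per role
        if role_tasks == '':                 # role defines no workflow tasks
            continue
        step_tasks = role_tasks.get('step%d' % step)
        if step_tasks is None:               # nothing for this particular step
            continue
        tasks.extend(step_tasks)             # flatten into a single task list
    return tasks

# e.g. workflow_tasks_for_step([{'step2': [{'task': 'noop'}]}, '', {}], 2)
# returns [{'task': 'noop'}]
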
diff --git a/docker/docker-toool b/docker/docker-toool
index 36aba4a7..0b87ea92 100755
--- a/docker/docker-toool
+++ b/docker/docker-toool
@@ -75,6 +75,9 @@ def parse_opts(argv):
def docker_arg_map(key, value):
value = str(value).encode('ascii', 'ignore')
+ if len(value) == 0:
+ return ''
+
return {
'environment': "--env=%s" % value,
# 'image': value,
diff --git a/docker/services/aodh-api.yaml b/docker/services/aodh-api.yaml
index 4b93ddd7..bda5469a 100644
--- a/docker/services/aodh-api.yaml
+++ b/docker/services/aodh-api.yaml
@@ -86,16 +86,15 @@ outputs:
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
aodh_init_log:
- start_order: 0
image: *aodh_image
user: root
volumes:
- /var/log/containers/aodh:/var/log/aodh
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R aodh:aodh /var/log/aodh']
+ step_3:
aodh_db_sync:
- start_order: 1
image: *aodh_image
net: host
privileged: false
diff --git a/docker/services/ceilometer-agent-ipmi.yaml b/docker/services/ceilometer-agent-ipmi.yaml
new file mode 100644
index 00000000..02793e48
--- /dev/null
+++ b/docker/services/ceilometer-agent-ipmi.yaml
@@ -0,0 +1,113 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Ceilometer Agent Ipmi service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCeilometerIpmiImage:
+ description: image
+ default: 'centos-binary-ceilometer-ipmi:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CeilometerAgentIpmiBase:
+ type: ../../puppet/services/ceilometer-agent-ipmi.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Agent Ipmi role.
+ value:
+ service_name: {get_attr: [CeilometerAgentIpmiBase, role_data, service_name]}
+ config_settings: {get_attr: [CeilometerAgentIpmiBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CeilometerAgentIpmiBase, role_data, step_config]
+ service_config_settings: {get_attr: [CeilometerAgentIpmiBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ceilometer
+ puppet_tags: ceilometer_config
+ step_config: *step_config
+ config_image: &ceilometer_agent_ipmi_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerIpmiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ceilometer-agent-ipmi.json:
+ command: /usr/bin/ceilometer-polling --polling-namespaces ipmi
+ docker_config:
+ step_3:
+ ceilometer_init_log:
+ start_order: 0
+ image: *ceilometer_agent_ipmi_image
+ user: root
+ command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
+ volumes:
+ - /var/log/containers/ceilometer:/var/log/ceilometer
+ step_4:
+ ceilometer_agent_ipmi:
+ image: *ceilometer_agent_ipmi_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_5:
+ ceilometer_gnocchi_upgrade:
+ start_order: 1
+ image: *ceilometer_agent_ipmi_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ - /var/log/containers/ceilometer:/var/log/ceilometer
+ command: "/usr/bin/bootstrap_host_exec ceilometer su ceilometer -s /bin/bash -c '/usr/bin/ceilometer-upgrade --skip-metering-database'"
+ upgrade_tasks:
+ - name: Stop and disable ceilometer agent ipmi service
+ tags: step2
+ service: name=openstack-ceilometer-agent-ipmi state=stopped enabled=no
diff --git a/docker/services/cinder-api.yaml b/docker/services/cinder-api.yaml
new file mode 100644
index 00000000..94bd66d8
--- /dev/null
+++ b/docker/services/cinder-api.yaml
@@ -0,0 +1,156 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderApiImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ # we configure all cinder services in the same cinder base container
+ DockerCinderConfigImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CinderBase:
+ type: ../../puppet/services/cinder-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder API role.
+ value:
+ service_name: {get_attr: [CinderBase, role_data, service_name]}
+ config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CinderBase, role_data, step_config]
+ service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_2:
+ cinder_api_init_logs:
+ image: &cinder_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderApiImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_3:
+ cinder_api_db_sync:
+ image: *cinder_api_image
+ net: host
+ privileged: false
+ detach: false
+ user: root
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+ - /var/log/containers/cinder:/var/log/cinder
+ command:
+ - '/usr/bin/bootstrap_host_exec'
+ - 'cinder_api'
+ - "su cinder -s /bin/bash -c 'cinder-manage db sync'"
+ step_4:
+ cinder_api:
+ image: *cinder_api_image
+ net: host
+ privileged: false
+ restart: always
+ # NOTE(mandre) kolla image changes the user to 'cinder', we need it
+ # to be root to run httpd
+ user: root
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/cinder_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+ - /var/lib/config-data/cinder/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/cinder/var/www/:/var/www/:ro
+ - /var/log/containers/cinder:/var/log/cinder
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/cinder
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable cinder_api service
+ tags: step2
+ service: name=httpd state=stopped enabled=no
diff --git a/docker/services/cinder-backup.yaml b/docker/services/cinder-backup.yaml
new file mode 100644
index 00000000..0958a7e8
--- /dev/null
+++ b/docker/services/cinder-backup.yaml
@@ -0,0 +1,132 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder Backup service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderBackupImage:
+ description: image
+ default: 'centos-binary-cinder-backup:latest'
+ type: string
+ # we configure all cinder services in the same cinder base container
+ DockerCinderConfigImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CinderBase:
+ type: ../../puppet/services/cinder-backup.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Backup role.
+ value:
+ service_name: {get_attr: [CinderBase, role_data, service_name]}
+ config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CinderBase, role_data, step_config]
+ service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_backup.json:
+ command: /usr/bin/cinder-backup --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+ permissions:
+ - path: /var/lib/cinder
+ owner: cinder:cinder
+ recurse: true
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_3:
+ cinder_backup_init_logs:
+ start_order: 0
+ image: &cinder_backup_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderBackupImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_4:
+ cinder_backup:
+ image: *cinder_backup_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/cinder_backup.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+ - /var/lib/config-data/ceph/etc/ceph/:/etc/ceph/:ro #FIXME: we need to generate a ceph.conf with puppet for this
+ - /dev/:/dev/
+ - /run/:/run/
+ - /sys:/sys
+ - /lib/modules:/lib/modules:ro
+ - /etc/iscsi:/etc/iscsi
+ - /var/lib/cinder:/var/lib/cinder
+ - /var/log/containers/cinder:/var/log/cinder
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/lib/cinder
+ - /var/log/containers/cinder
+ upgrade_tasks:
+ - name: Stop and disable cinder_backup service
+ tags: step2
+ service: name=openstack-cinder-backup state=stopped enabled=no
diff --git a/docker/services/cinder-scheduler.yaml b/docker/services/cinder-scheduler.yaml
new file mode 100644
index 00000000..8199c34b
--- /dev/null
+++ b/docker/services/cinder-scheduler.yaml
@@ -0,0 +1,120 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder Scheduler service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderSchedulerImage:
+ description: image
+ default: 'centos-binary-cinder-scheduler:latest'
+ type: string
+ # we configure all cinder services in the same cinder base container
+ DockerCinderConfigImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CinderBase:
+ type: ../../puppet/services/cinder-scheduler.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Scheduler role.
+ value:
+ service_name: {get_attr: [CinderBase, role_data, service_name]}
+ config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CinderBase, role_data, step_config]
+ service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_scheduler.json:
+ command: /usr/bin/cinder-scheduler --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+ permissions:
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_2:
+ cinder_scheduler_init_logs:
+ image: &cinder_scheduler_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderSchedulerImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_4:
+ cinder_scheduler:
+ image: *cinder_scheduler_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/cinder_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+ - /var/log/containers/cinder:/var/log/cinder
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/cinder
+ upgrade_tasks:
+ - name: Stop and disable cinder_scheduler service
+ tags: step2
+ service: name=openstack-cinder-scheduler state=stopped enabled=no
diff --git a/docker/services/cinder-volume.yaml b/docker/services/cinder-volume.yaml
new file mode 100644
index 00000000..26eb10e7
--- /dev/null
+++ b/docker/services/cinder-volume.yaml
@@ -0,0 +1,167 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder Volume service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderVolumeImage:
+ description: image
+ default: 'centos-binary-cinder-volume:latest'
+ type: string
+ # we configure all cinder services in the same cinder base container
+ DockerCinderConfigImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ # custom parameters for the Cinder volume role
+ CinderEnableIscsiBackend:
+ default: true
+ description: Whether to enable or not the Iscsi backend for Cinder
+ type: boolean
+ CinderLVMLoopDeviceSize:
+ default: 10280
+ description: The size of the loopback file used by the cinder LVM driver.
+ type: number
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CinderBase:
+ type: ../../puppet/services/cinder-volume.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Volume role.
+ value:
+ service_name: {get_attr: [CinderBase, role_data, service_name]}
+ config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CinderBase, role_data, step_config]
+ service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_volume.json:
+ command: /usr/bin/cinder-volume --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+ permissions:
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_3:
+ cinder_volume_init_logs:
+ start_order: 0
+ image: &cinder_volume_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderVolumeImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_4:
+ cinder_volume:
+ image: *cinder_volume_image
+ net: host
+ privileged: true
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/cinder_volume.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+ - /var/lib/config-data/ceph/etc/ceph/:/etc/ceph/:ro #FIXME: we need to generate a ceph.conf with puppet for this
+ - /dev/:/dev/
+ - /run/:/run/
+ - /sys:/sys
+ - /etc/iscsi:/etc/iscsi
+ - /var/lib/cinder:/var/lib/cinder
+ - /var/log/containers/cinder:/var/log/cinder
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/cinder
+ - /var/lib/cinder
+ - name: cinder_enable_iscsi_backend fact
+ set_fact:
+ cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
+ - name: cinder create LVM volume group dd
+ command:
+ list_join:
+ - ''
+ - - 'dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek='
+ - str_replace:
+ template: VALUE
+ params:
+ VALUE: {get_param: CinderLVMLoopDeviceSize}
+ - 'M'
+ args:
+ creates: /var/lib/cinder/cinder-volumes
+ when: cinder_enable_iscsi_backend
+ - name: cinder create LVM volume group
+ shell: |
+ if ! losetup /dev/loop2; then
+ losetup /dev/loop2 /var/lib/cinder/cinder-volumes
+ fi
+ if ! pvdisplay | grep cinder-volumes; then
+ pvcreate /dev/loop2
+ fi
+ if ! vgdisplay | grep cinder-volumes; then
+ vgcreate cinder-volumes /dev/loop2
+ fi
+ args:
+ executable: /bin/bash
+ creates: /dev/loop2
+ when: cinder_enable_iscsi_backend
+ upgrade_tasks:
+ - name: Stop and disable cinder_volume service
+ tags: step2
+ service: name=openstack-cinder-volume state=stopped enabled=no
diff --git a/docker/services/collectd.yaml b/docker/services/collectd.yaml
index 7354898b..6c58a589 100644
--- a/docker/services/collectd.yaml
+++ b/docker/services/collectd.yaml
@@ -55,7 +55,11 @@ outputs:
description: Role data for the collectd role.
value:
service_name: {get_attr: [CollectdBase, role_data, service_name]}
- config_settings: {get_attr: [CollectdBase, role_data, config_settings]}
+ config_settings:
+ map_merge:
+ - get_attr: [CollectdBase, role_data, config_settings]
+ - tripleo::profile::base::metrics::collectd::enable_file_logging: true
+ collectd::plugin::logfile::log_file: /var/log/collectd/collectd.log
step_config: &step_config
get_attr: [CollectdBase, role_data, step_config]
service_config_settings: {get_attr: [CollectdBase, role_data, service_config_settings]}
@@ -71,6 +75,10 @@ outputs:
kolla_config:
/var/lib/kolla/config_files/collectd.json:
command: /usr/sbin/collectd -f
+ permissions:
+ - path: /var/log/collectd
+ owner: collectd:collectd
+ recurse: true
docker_config:
step_3:
collectd:
@@ -84,11 +92,17 @@ outputs:
-
- /var/run/docker.sock:/var/run/docker.sock:rw
- /var/lib/kolla/config_files/collectd.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/collectd/etc/collectd/:/etc/collectd/:ro
+ - /var/lib/config-data/collectd/etc/collectd.conf:/etc/collectd.conf:ro
+ - /var/lib/config-data/collectd/etc/collectd.d:/etc/collectd.d:ro
+ - /var/log/containers/collectd:/var/log/collectd:rw
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/collectd
+ state: directory
upgrade_tasks:
- name: Stop and disable collectd service
tags: step2
service: name=collectd.service state=stopped enabled=no
-
diff --git a/docker/services/congress-api.yaml b/docker/services/congress-api.yaml
index 3ee1d91d..92b0eeb9 100644
--- a/docker/services/congress-api.yaml
+++ b/docker/services/congress-api.yaml
@@ -82,9 +82,8 @@ outputs:
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
congress_init_logs:
- start_order: 0
image: &congress_image
list_join:
- '/'
@@ -94,8 +93,8 @@ outputs:
volumes:
- /var/log/containers/congress:/var/log/congress
command: ['/bin/bash', '-c', 'chown -R congress:congress /var/log/congress']
+ step_3:
congress_db_sync:
- start_order: 1
image: *congress_image
net: host
privileged: false
diff --git a/docker/services/containers-common.yaml b/docker/services/containers-common.yaml
index 973d9994..d104853f 100644
--- a/docker/services/containers-common.yaml
+++ b/docker/services/containers-common.yaml
@@ -3,19 +3,64 @@ heat_template_version: pike
description: >
Contains a static list of common things necessary for containers
+parameters:
+
+ # Required parameters
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
outputs:
volumes:
description: Common volumes for the containers.
value:
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- # required for bootstrap_host_exec
- - /etc/puppet:/etc/puppet:ro
- # OpenSSL trusted CAs
- - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
- - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
- - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
- - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
- # Syslog socket
- - /dev/log:/dev/log
- - /etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro
+ list_concat:
+ - - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ # required for bootstrap_host_exec
+ - /etc/puppet:/etc/puppet:ro
+ # OpenSSL trusted CAs
+ - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
+ - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
+ - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
+ - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
+ # Syslog socket
+ - /dev/log:/dev/log
+ - /etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro
+ - if:
+ - internal_tls_enabled
+ - - {get_param: InternalTLSCAFile}
+ - null
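
Note: the common volume list is now built with list_concat plus an if condition, so the CA file named by InternalTLSCAFile is only added when EnableInternalTLS is true and the null branch contributes nothing. As a rough illustration, assuming the default CA path, the resolved list behaves like this sketch (not part of the patch):

def common_volumes(enable_internal_tls, internal_tls_ca_file='/etc/ipa/ca.crt'):
    volumes = [
        '/etc/hosts:/etc/hosts:ro',
        '/etc/localtime:/etc/localtime:ro',
        # ... remaining static bind mounts elided for brevity ...
        '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro',
    ]
    # Mirrors {list_concat: [<static list>, {if: [internal_tls_enabled, [...], null]}]},
    # where the null branch is simply ignored.
    if enable_internal_tls:
        volumes.append(internal_tls_ca_file)
    return volumes
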
diff --git a/docker/services/database/mysql-client.yaml b/docker/services/database/mysql-client.yaml
new file mode 100644
index 00000000..b0ad3760
--- /dev/null
+++ b/docker/services/database/mysql-client.yaml
@@ -0,0 +1,66 @@
+heat_template_version: pike
+
+description: >
+ Configuration for containerized MySQL clients
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMysqlImage:
+ description: image
+ default: 'centos-binary-mariadb:latest'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
+
+outputs:
+ role_data:
+ description: Role for setting mysql client parameters
+ value:
+ service_name: mysql_client
+ config_settings:
+ tripleo::profile::base::database::mysql::client::mysql_client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]}
+ tripleo::profile::base::database::mysql::client::enable_ssl: {get_param: EnableInternalTLS}
+ tripleo::profile::base::database::mysql::client::ssl_ca: {get_param: InternalTLSCAFile}
+ # BEGIN DOCKER SETTINGS #
+ step_config: ""
+ puppet_config:
+ config_volume: mysql_client
+ puppet_tags: file # set this even though file is the default
+ step_config: "include ::tripleo::profile::base::database::mysql::client"
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
+ # no need for a docker config, this service only generates configuration files
+ docker_config: {}
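
Because docker_config is empty, this template only drives the puppet_config step that writes the MySQL client settings (bind address, SSL flag, CA path) into the mysql_client config volume. A hedged sketch of how it could be enabled through the resource registry; the OS::TripleO::Services::MySQLClient key is an assumption for illustration, not part of this change:

    resource_registry:
      # assumed registry key; point it at the new template so hosts running
      # the role get the generated MySQL client configuration
      OS::TripleO::Services::MySQLClient: ../docker/services/database/mysql-client.yaml
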
diff --git a/docker/services/database/mysql.yaml b/docker/services/database/mysql.yaml
index c73db857..9eabb719 100644
--- a/docker/services/database/mysql.yaml
+++ b/docker/services/database/mysql.yaml
@@ -87,17 +87,16 @@ outputs:
recurse: true
docker_config:
# Kolla_bootstrap runs before permissions set by kolla_config
- step_2:
+ step_1:
mysql_init_logs:
- start_order: 0
image: *mysql_image
privileged: false
user: root
volumes:
- /var/log/containers/mysql:/var/log/mariadb
command: ['/bin/bash', '-c', 'chown -R mysql:mysql /var/log/mariadb']
+ step_2:
mysql_bootstrap:
- start_order: 1
detach: false
image: *mysql_image
net: host
diff --git a/docker/services/database/redis.yaml b/docker/services/database/redis.yaml
index 9e84dd5f..9d0d30c8 100644
--- a/docker/services/database/redis.yaml
+++ b/docker/services/database/redis.yaml
@@ -79,6 +79,7 @@ outputs:
step_1:
redis_init_logs:
start_order: 0
+ detach: false
image: *redis_image
privileged: false
user: root
@@ -86,6 +87,7 @@ outputs:
- /var/log/containers/redis:/var/log/redis
command: ['/bin/bash', '-c', 'chown -R redis:redis /var/log/redis']
redis:
+ start_order: 1
image: *redis_image
net: host
privileged: false
diff --git a/docker/services/ec2-api.yaml b/docker/services/ec2-api.yaml
new file mode 100644
index 00000000..bc3654b0
--- /dev/null
+++ b/docker/services/ec2-api.yaml
@@ -0,0 +1,153 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized EC2 API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerEc2ApiImage:
+ description: image
+ default: 'centos-binary-ec2-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ Ec2ApiPuppetBase:
+ type: ../../puppet/services/ec2-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the EC2 API role.
+ value:
+ service_name: {get_attr: [Ec2ApiPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [Ec2ApiPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [Ec2ApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [Ec2ApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ec2api
+ puppet_tags: ec2api_api_paste_ini,ec2api_config
+ step_config: *step_config
+ config_image: &ec2_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerEc2ApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ec2_api.json:
+ command: /usr/bin/ec2-api
+ permissions:
+ - path: /var/log/ec2api
+ owner: ec2api:ec2api
+ recurse: true
+ /var/lib/kolla/config_files/ec2_api_metadata.json:
+ command: /usr/bin/ec2-api-metadata
+ permissions:
+ - path: /var/log/ec2api # default log dir for metadata service as well
+ owner: ec2api:ec2api
+ recurse: true
+ docker_config:
+ # db sync runs before permissions set by kolla_config
+ step_2:
+ ec2_api_init_logs:
+ image: *ec2_api_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/ec2_api:/var/log/ec2api
+ # mount ec2_api_metadata to "ec2api-metadata" only here to fix
+ # permissions of both directories in one go
+ - /var/log/containers/ec2_api_metadata:/var/log/ec2api-metadata
+ command: ['/bin/bash', '-c', 'chown -R ec2api:ec2api /var/log/ec2api /var/log/ec2api-metadata']
+ step_3:
+ ec2_api_db_sync:
+ image: *ec2_api_image
+ net: host
+ detach: false
+ privileged: false
+ user: root
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/ec2_api/etc/ec2api/:/etc/ec2api/:ro
+ - /var/log/containers/ec2_api:/var/log/ec2api
+ command: "/usr/bin/bootstrap_host_exec ec2_api su ec2api -s /bin/bash -c '/usr/bin/ec2-api-manage db_sync'"
+ step_4:
+ ec2_api:
+ image: *ec2_api_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ec2_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ec2_api/etc/ec2api/:/etc/ec2api/:ro
+ - /var/log/containers/ec2_api:/var/log/ec2api
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ ec2_api_metadata:
+ image: *ec2_api_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ec2_api_metadata.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ec2_api/etc/ec2api/:/etc/ec2api/:ro
+ - /var/log/containers/ec2_api_metadata:/var/log/ec2api
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent log directories
+ file:
+ path: /var/log/containers/{{ item }}
+ state: directory
+ with_items:
+ - ec2_api
+ - ec2_api_metadata
+ upgrade_tasks:
+ - name: Stop and disable EC2-API services
+ tags: step2
+ service: name={{ item }} state=stopped enabled=no
+ with_items:
+ - openstack-ec2-api
+ - openstack-ec2-api-metadata
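
The config_image anchor resolves the DockerNamespace/DockerEc2ApiImage pair through list_join with '/'; with the defaults declared above, the containers in this template pull the image reference below (a sketch of the resolved value):

    config_image: tripleoupstream/centos-binary-ec2-api:latest
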
diff --git a/docker/services/glance-api.yaml b/docker/services/glance-api.yaml
index c3af5231..5c244012 100644
--- a/docker/services/glance-api.yaml
+++ b/docker/services/glance-api.yaml
@@ -85,17 +85,16 @@ outputs:
command: /usr/sbin/httpd -DFOREGROUND
docker_config:
# Kolla_bootstrap/db_sync runs before permissions set by kolla_config
- step_3:
+ step_2:
glance_init_logs:
- start_order: 0
image: *glance_image
privileged: false
user: root
volumes:
- /var/log/containers/glance:/var/log/glance
command: ['/bin/bash', '-c', 'chown -R glance:glance /var/log/glance']
+ step_3:
glance_api_db_sync:
- start_order: 1
image: *glance_image
net: host
privileged: false
diff --git a/docker/services/gnocchi-api.yaml b/docker/services/gnocchi-api.yaml
index e3b72bc5..bd1c3168 100644
--- a/docker/services/gnocchi-api.yaml
+++ b/docker/services/gnocchi-api.yaml
@@ -86,16 +86,15 @@ outputs:
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
gnocchi_init_log:
- start_order: 0
image: *gnocchi_image
user: root
volumes:
- /var/log/containers/gnocchi:/var/log/gnocchi
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R gnocchi:gnocchi /var/log/gnocchi']
+ step_3:
gnocchi_db_sync:
- start_order: 1
image: *gnocchi_image
net: host
detach: false
diff --git a/docker/services/heat-engine.yaml b/docker/services/heat-engine.yaml
index 0adad538..7a3312dd 100644
--- a/docker/services/heat-engine.yaml
+++ b/docker/services/heat-engine.yaml
@@ -80,16 +80,15 @@ outputs:
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
heat_init_log:
- start_order: 0
image: *heat_engine_image
user: root
volumes:
- /var/log/containers/heat:/var/log/heat
command: ['/bin/bash', '-c', 'chown -R heat:heat /var/log/heat']
+ step_3:
heat_engine_db_sync:
- start_order: 1
image: *heat_engine_image
net: host
privileged: false
diff --git a/docker/services/horizon.yaml b/docker/services/horizon.yaml
index 022eb5dd..13bd091c 100644
--- a/docker/services/horizon.yaml
+++ b/docker/services/horizon.yaml
@@ -86,7 +86,7 @@ outputs:
owner: apache:apache
recurse: false
docker_config:
- step_3:
+ step_2:
horizon_fix_perms:
image: *horizon_image
user: root
@@ -99,8 +99,8 @@ outputs:
volumes:
- /var/log/containers/horizon:/var/log/horizon
- /var/lib/config-data/horizon/etc/:/etc/
+ step_3:
horizon:
- start_order: 1
image: *horizon_image
net: host
privileged: false
diff --git a/docker/services/ironic-api.yaml b/docker/services/ironic-api.yaml
index 650ce4cf..1c8aa5bd 100644
--- a/docker/services/ironic-api.yaml
+++ b/docker/services/ironic-api.yaml
@@ -61,6 +61,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [IronicApiBase, role_data, config_settings]
+ - apache::default_vhost: false
step_config: &step_config
get_attr: [IronicApiBase, role_data, step_config]
service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]}
@@ -75,16 +76,15 @@ outputs:
- [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/ironic_api.json:
- command: /usr/bin/ironic-api
+ command: /usr/sbin/httpd -DFOREGROUND
permissions:
- path: /var/log/ironic
owner: ironic:ironic
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
ironic_init_logs:
- start_order: 0
image: &ironic_image
list_join:
- '/'
@@ -94,6 +94,7 @@ outputs:
volumes:
- /var/log/containers/ironic:/var/log/ironic
command: ['/bin/bash', '-c', 'chown -R ironic:ironic /var/log/ironic']
+ step_3:
ironic_db_sync:
start_order: 1
image: *ironic_image
@@ -113,7 +114,7 @@ outputs:
start_order: 10
image: *ironic_image
net: host
- privileged: false
+ user: root
restart: always
volumes:
list_concat:
@@ -121,6 +122,10 @@ outputs:
-
- /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/ironic/etc/ironic:/etc/ironic:ro
+ - /var/lib/config-data/ironic/etc/httpd/conf/:/etc/httpd/conf/:ro
+ - /var/lib/config-data/ironic/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+ - /var/lib/config-data/ironic/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
+ - /var/lib/config-data/ironic/var/www/:/var/www/:ro
- /var/log/containers/ironic:/var/log/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/iscsid.yaml b/docker/services/iscsid.yaml
new file mode 100644
index 00000000..53f5aff2
--- /dev/null
+++ b/docker/services/iscsid.yaml
@@ -0,0 +1,109 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Iscsid service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerIscsidImage:
+ description: image
+ default: 'centos-binary-iscsid:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Iscsid role.
+ value:
+ service_name: iscsid
+ config_settings: {}
+ step_config: ''
+ service_config_settings: {}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: iscsid
+ #puppet_tags: file
+ step_config: ''
+ config_image: &iscsid_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIscsidImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/iscsid.json:
+ command: /usr/sbin/iscsid -f
+ docker_config:
+ step_3:
+ iscsid:
+ start_order: 2
+ image: *iscsid_image
+ net: host
+ privileged: true
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro
+ - /dev/:/dev/
+ - /run/:/run/
+ - /sys:/sys
+ - /lib/modules:/lib/modules:ro
+ - /etc/iscsi:/etc/iscsi
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /etc/iscsi
+ file:
+ path: /etc/iscsi
+ state: directory
+ - name: stat /lib/systemd/system/iscsid.socket
+ stat: path=/lib/systemd/system/iscsid.socket
+ register: stat_iscsid_socket
+ - name: Stop and disable iscsid.socket service
+ service: name=iscsid.socket state=stopped enabled=no
+ when: stat_iscsid_socket.stat.exists
+ upgrade_tasks:
+ - name: stat /lib/systemd/system/iscsid.service
+ stat: path=/lib/systemd/system/iscsid.service
+ register: stat_iscsid_service
+ - name: Stop and disable iscsid service
+ tags: step2
+ service: name=iscsid state=stopped enabled=no
+ when: stat_iscsid_service.stat.exists
+ - name: stat /lib/systemd/system/iscsid.socket
+ stat: path=/lib/systemd/system/iscsid.socket
+ register: stat_iscsid_socket
+ - name: Stop and disable iscsid.socket service
+ tags: step2
+ service: name=iscsid.socket state=stopped enabled=no
+ when: stat_iscsid_socket.stat.exists
+ metadata_settings: {}
diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml
index 5b253b46..4cd44f21 100644
--- a/docker/services/keystone.yaml
+++ b/docker/services/keystone.yaml
@@ -95,16 +95,15 @@ outputs:
command: /usr/sbin/httpd -DFOREGROUND
docker_config:
# Kolla_bootstrap/db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
keystone_init_log:
- start_order: 0
image: *keystone_image
user: root
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R keystone:keystone /var/log/keystone']
volumes:
- /var/log/containers/keystone:/var/log/keystone
+ step_3:
keystone_db_sync:
- start_order: 1
image: *keystone_image
net: host
privileged: false
diff --git a/docker/services/manila-api.yaml b/docker/services/manila-api.yaml
new file mode 100644
index 00000000..62fdaaf0
--- /dev/null
+++ b/docker/services/manila-api.yaml
@@ -0,0 +1,125 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Manila API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerManilaApiImage:
+ description: image
+ default: 'centos-binary-manila-api:latest'
+ type: string
+ DockerManilaConfigImage:
+ description: image
+ default: 'centos-binary-manila-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ ManilaApiPuppetBase:
+ type: ../../puppet/services/manila-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Manila API role.
+ value:
+ service_name: {get_attr: [ManilaApiPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [ManilaApiPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ {get_attr: [ManilaApiPuppetBase, role_data, step_config]}
+ service_config_settings: {get_attr: [ManilaApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: manila
+ puppet_tags: manila_config,manila_api_paste_ini
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerManilaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/manila_api.json:
+ command: /usr/bin/manila-api --config-file /usr/share/manila/manila-dist.conf --config-file /etc/manila/manila.conf
+ permissions:
+ - path: /var/log/manila
+ owner: manila:manila
+ recurse: true
+ docker_config:
+ step_2:
+ manila_init_logs:
+ image: &manila_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerManilaApiImage} ]
+ user: root
+ volumes:
+ - /var/log/containers/manila:/var/log/manila
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R manila:manila /var/log/manila']
+ step_3:
+ manila_api_db_sync:
+ user: root
+ image: *manila_api_image
+ net: host
+ detach: false
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+ - /var/log/containers/manila:/var/log/manila
+ command: "/usr/bin/bootstrap_host_exec manila_api su manila -s /bin/bash -c '/usr/bin/manila-manage db sync'"
+ step_4:
+ manila_api:
+ image: *manila_api_image
+ net: host
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/manila_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+ - /var/log/containers/manila:/var/log/manila
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: Create persistent manila logs directory
+ file:
+ path: /var/log/containers/manila
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable manila_api service
+ tags: step2
+ service: name=openstack-manila-api state=stopped enabled=no
diff --git a/docker/services/manila-scheduler.yaml b/docker/services/manila-scheduler.yaml
new file mode 100644
index 00000000..fbc80fc5
--- /dev/null
+++ b/docker/services/manila-scheduler.yaml
@@ -0,0 +1,105 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Manila Scheduler service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerManilaSchedulerImage:
+ description: image
+ default: 'centos-binary-manila-scheduler:latest'
+ type: string
+ DockerManilaConfigImage:
+ description: image
+ default: 'centos-binary-manila-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ ManilaSchedulerPuppetBase:
+ type: ../../puppet/services/manila-scheduler.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Manila Scheduler role.
+ value:
+ service_name: {get_attr: [ManilaSchedulerPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [ManilaSchedulerPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ {get_attr: [ManilaSchedulerPuppetBase, role_data, step_config]}
+ service_config_settings: {get_attr: [ManilaSchedulerPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: manila
+ puppet_tags: manila_config,manila_scheduler_paste_ini
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerManilaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/manila_scheduler.json:
+ command: /usr/bin/manila-scheduler --config-file /usr/share/manila/manila-dist.conf --config-file /etc/manila/manila.conf
+ permissions:
+ - path: /var/log/manila
+ owner: manila:manila
+ recurse: true
+ docker_config:
+ step_4:
+ manila_scheduler:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerManilaSchedulerImage} ]
+ net: host
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/manila_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+ - /var/log/containers/manila:/var/log/manila
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: Create persistent manila logs directory
+ file:
+ path: /var/log/containers/manila
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable manila_scheduler service
+ tags: step2
+ service: name=openstack-manila-scheduler state=stopped enabled=no
diff --git a/docker/services/memcached.yaml b/docker/services/memcached.yaml
index d85a0878..d4539649 100644
--- a/docker/services/memcached.yaml
+++ b/docker/services/memcached.yaml
@@ -72,6 +72,7 @@ outputs:
step_1:
memcached_init_logs:
start_order: 0
+ detach: false
image: *memcached_image
privileged: false
user: root
diff --git a/docker/services/mistral-api.yaml b/docker/services/mistral-api.yaml
index cc7e613e..30c3cde1 100644
--- a/docker/services/mistral-api.yaml
+++ b/docker/services/mistral-api.yaml
@@ -82,9 +82,8 @@ outputs:
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
mistral_init_logs:
- start_order: 0
image: &mistral_image
list_join:
- '/'
@@ -94,8 +93,9 @@ outputs:
volumes:
- /var/log/containers/mistral:/var/log/mistral
command: ['/bin/bash', '-c', 'chown -R mistral:mistral /var/log/mistral']
+ step_3:
mistral_db_sync:
- start_order: 1
+ start_order: 0
image: *mistral_image
net: host
privileged: false
@@ -109,7 +109,7 @@ outputs:
- /var/log/containers/mistral:/var/log/mistral
command: "/usr/bin/bootstrap_host_exec mistral_api su mistral -s /bin/bash -c 'mistral-db-manage --config-file /etc/mistral/mistral.conf upgrade head'"
mistral_db_populate:
- start_order: 2
+ start_order: 1
image: *mistral_image
net: host
privileged: false
diff --git a/docker/services/multipathd.yaml b/docker/services/multipathd.yaml
new file mode 100644
index 00000000..d8927d4b
--- /dev/null
+++ b/docker/services/multipathd.yaml
@@ -0,0 +1,89 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Multipathd service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMultipathdImage:
+ description: image
+ default: 'centos-binary-multipathd:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Multipathd role.
+ value:
+ service_name: multipathd
+ config_settings: {}
+ step_config: ''
+ service_config_settings: {}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: multipathd
+ #puppet_tags: file
+ step_config: ''
+ config_image: &multipathd_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMultipathdImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/multipathd.json:
+ command: /usr/sbin/multipathd -d
+ docker_config:
+ step_3:
+ multipathd:
+ start_order: 1
+ image: *multipathd_image
+ net: host
+ privileged: true
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro
+ - /dev/:/dev/
+ - /run/:/run/
+ - /sys:/sys
+ - /lib/modules:/lib/modules:ro
+ - /etc/iscsi:/etc/iscsi
+ - /var/lib/cinder:/var/lib/cinder
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ upgrade_tasks:
+ - name: Stop and disable multipathd service
+ tags: step2
+ service: name=multipathd state=stopped enabled=no
+ metadata_settings: {}
diff --git a/docker/services/neutron-api.yaml b/docker/services/neutron-api.yaml
index fbdf75ab..6c2d4cae 100644
--- a/docker/services/neutron-api.yaml
+++ b/docker/services/neutron-api.yaml
@@ -92,9 +92,8 @@ outputs:
command: /usr/sbin/httpd -DFOREGROUND
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
neutron_init_logs:
- start_order: 0
image: &neutron_api_image
list_join:
- '/'
@@ -104,8 +103,8 @@ outputs:
volumes:
- /var/log/containers/neutron:/var/log/neutron
command: ['/bin/bash', '-c', 'chown -R neutron:neutron /var/log/neutron']
+ step_3:
neutron_db_sync:
- start_order: 1
image: *neutron_api_image
net: host
privileged: false
diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml
index 2375dada..c97f45de 100644
--- a/docker/services/nova-api.yaml
+++ b/docker/services/nova-api.yaml
@@ -86,9 +86,8 @@ outputs:
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
nova_init_logs:
- start_order: 0
image: &nova_api_image
list_join:
- '/'
@@ -98,8 +97,9 @@ outputs:
volumes:
- /var/log/containers/nova:/var/log/nova
command: ['/bin/bash', '-c', 'chown -R nova:nova /var/log/nova']
+ step_3:
nova_api_db_sync:
- start_order: 1
+ start_order: 0
image: *nova_api_image
net: host
detach: false
@@ -116,7 +116,7 @@ outputs:
# to be capable of upgrading a baremetal setup. This is to ensure the name
# of the cell is 'default'
nova_api_map_cell0:
- start_order: 2
+ start_order: 1
image: *nova_api_image
net: host
detach: false
@@ -124,7 +124,7 @@ outputs:
volumes: *nova_api_volumes
command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 map_cell0'"
nova_api_create_default_cell:
- start_order: 3
+ start_order: 2
image: *nova_api_image
net: host
detach: false
@@ -136,7 +136,7 @@ outputs:
user: root
command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 create_cell --name=default'"
nova_db_sync:
- start_order: 4
+ start_order: 3
image: *nova_api_image
net: host
detach: false
diff --git a/docker/services/nova-consoleauth.yaml b/docker/services/nova-consoleauth.yaml
new file mode 100644
index 00000000..19f25d8e
--- /dev/null
+++ b/docker/services/nova-consoleauth.yaml
@@ -0,0 +1,108 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Nova Consoleauth service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaConsoleauthImage:
+ description: image
+ default: 'centos-binary-nova-consoleauth:latest'
+ type: string
+ DockerNovaConfigImage:
+ description: image
+ default: 'centos-binary-nova-base:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ NovaConsoleauthPuppetBase:
+ type: ../../puppet/services/nova-consoleauth.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Consoleauth service.
+ value:
+ service_name: {get_attr: [NovaConsoleauthPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [NovaConsoleauthPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [NovaConsoleauthPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [NovaConsoleauthPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: nova
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_consoleauth.json:
+ command: /usr/bin/nova-consoleauth
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
+ docker_config:
+ step_4:
+ nova_consoleauth:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConsoleauthImage} ]
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_consoleauth.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /var/log/containers/nova:/var/log/nova
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/nova
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable nova_consoleauth service
+ tags: step2
+ service: name=openstack-nova-consoleauth state=stopped enabled=no
diff --git a/docker/services/nova-vnc-proxy.yaml b/docker/services/nova-vnc-proxy.yaml
new file mode 100644
index 00000000..97d2d154
--- /dev/null
+++ b/docker/services/nova-vnc-proxy.yaml
@@ -0,0 +1,108 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Nova Vncproxy service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaVncProxyImage:
+ description: image
+ default: 'centos-binary-nova-novncproxy:latest'
+ type: string
+ DockerNovaConfigImage:
+ description: image
+ default: 'centos-binary-nova-base:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ NovaVncProxyPuppetBase:
+ type: ../../puppet/services/nova-vnc-proxy.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Vncproxy service.
+ value:
+ service_name: {get_attr: [NovaVncProxyPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [NovaVncProxyPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [NovaVncProxyPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [NovaVncProxyPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: nova
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_vnc_proxy.json:
+ command: /usr/bin/nova-novncproxy --web /usr/share/novnc/
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
+ docker_config:
+ step_4:
+ nova_vnc_proxy:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaVncProxyImage} ]
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_vnc_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /var/log/containers/nova:/var/log/nova
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/nova
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable nova_vnc_proxy service
+ tags: step2
+ service: name=openstack-nova-novncproxy state=stopped enabled=no
diff --git a/docker/services/pacemaker/cinder-backup.yaml b/docker/services/pacemaker/cinder-backup.yaml
new file mode 100644
index 00000000..7cac9d48
--- /dev/null
+++ b/docker/services/pacemaker/cinder-backup.yaml
@@ -0,0 +1,152 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder Backup service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderBackupImage:
+ description: image
+ default: 'centos-binary-cinder-backup:latest'
+ type: string
+ # we configure all cinder services in the same cinder base container
+ DockerCinderConfigImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ CinderBackupBackend:
+ default: swift
+ description: The short name of the Cinder Backup backend to use.
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'ceph']
+ CinderBackupRbdPoolName:
+ default: backups
+ type: string
+ CephClientUserName:
+ default: openstack
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+
+resources:
+
+ CinderBackupBase:
+ type: ../../../puppet/services/cinder-backup.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ CinderBackupBackend: {get_param: CinderBackupBackend}
+ CinderBackupRbdPoolName: {get_param: CinderBackupRbdPoolName}
+ CephClientUserName: {get_param: CephClientUserName}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Backup role.
+ value:
+ service_name: {get_attr: [CinderBackupBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [CinderBackupBase, role_data, config_settings]
+ - tripleo::profile::pacemaker::cinder::backup_bundle::cinder_backup_docker_image: &cinder_backup_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderBackupImage} ]
+ cinder::backup::manage_service: false
+ cinder::backup::enabled: false
+ step_config: ""
+ service_config_settings: {get_attr: [CinderBackupBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: {get_attr: [CinderBackupBase, role_data, step_config]}
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_backup.json:
+ command: /usr/bin/cinder-backup --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+ permissions:
+ - path: /var/lib/cinder
+ owner: cinder:cinder
+ recurse: true
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_3:
+ cinder_backup_init_logs:
+ start_order: 0
+ image: *cinder_backup_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_5:
+ cinder_backup_init_bundle:
+ start_order: 1
+ detach: false
+ net: host
+ user: root
+ command:
+ - '/bin/bash'
+ - '-c'
+ - str_replace:
+ template:
+ list_join:
+ - '; '
+ - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 5}' > /etc/puppet/hieradata/docker.json"
+ - "FACTER_uuid=docker puppet apply --tags file_line,concat,augeas,TAGS --debug -v -e 'CONFIG'"
+ params:
+ TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::constraint::location'
+ CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::cinder::backup_bundle'
+ image: *cinder_backup_image
+ volumes:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/puppet:/tmp/puppet-etc:ro
+ - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+ - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+ - /dev/shm:/dev/shm:rw
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/lib/cinder
+ - /var/log/containers/cinder
+ upgrade_tasks:
+ - name: Stop and disable cinder_backup service
+ tags: step2
+ service: name=openstack-cinder-backup state=stopped enabled=no
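
The cinder_backup_init_bundle container builds its command through str_replace over a list_join, which is hard to read in template form. With the TAGS and CONFIG params substituted, the container effectively runs the following (a sketch of the resolved value, shown as YAML):

    command:
      - '/bin/bash'
      - '-c'
      - >-
        cp -a /tmp/puppet-etc/* /etc/puppet;
        echo '{"step": 5}' > /etc/puppet/hieradata/docker.json;
        FACTER_uuid=docker puppet apply
        --tags file_line,concat,augeas,pacemaker::resource::bundle,pacemaker::property,pacemaker::constraint::location
        --debug -v
        -e 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::cinder::backup_bundle'
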
diff --git a/docker/services/pacemaker/cinder-volume.yaml b/docker/services/pacemaker/cinder-volume.yaml
new file mode 100644
index 00000000..987ebaf0
--- /dev/null
+++ b/docker/services/pacemaker/cinder-volume.yaml
@@ -0,0 +1,170 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder Volume service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderVolumeImage:
+ description: image
+ default: 'centos-binary-cinder-volume:latest'
+ type: string
+ # we configure all cinder services in the same cinder base container
+ DockerCinderConfigImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ # custom parameters for the Cinder volume role
+ CinderEnableIscsiBackend:
+ default: true
+ description: Whether or not to enable the Iscsi backend for Cinder
+ type: boolean
+ CinderLVMLoopDeviceSize:
+ default: 10280
+ description: The size of the loopback file used by the cinder LVM driver.
+ type: number
+
+resources:
+
+ CinderBase:
+ type: ../../../puppet/services/cinder-volume.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Volume role.
+ value:
+ service_name: {get_attr: [CinderBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [CinderBase, role_data, config_settings]
+ - tripleo::profile::pacemaker::cinder::volume_bundle::cinder_volume_docker_image: &cinder_volume_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderVolumeImage} ]
+ cinder::volume::manage_service: false
+ cinder::volume::enabled: false
+ cinder::host: hostgroup
+ step_config: ""
+ service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: {get_attr: [CinderBase, role_data, step_config]}
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_volume.json:
+ command: /usr/bin/cinder-volume --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+ permissions:
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_3:
+ cinder_volume_init_logs:
+ start_order: 0
+ image: *cinder_volume_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_5:
+ cinder_volume_init_bundle:
+ start_order: 0
+ detach: false
+ net: host
+ user: root
+ command:
+ - '/bin/bash'
+ - '-c'
+ - str_replace:
+ template:
+ list_join:
+ - '; '
+ - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 5}' > /etc/puppet/hieradata/docker.json"
+ - "FACTER_uuid=docker puppet apply --tags file_line,concat,augeas,TAGS --debug -v -e 'CONFIG'"
+ params:
+ TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::constraint::location'
+ CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::cinder::volume_bundle'
+ image: *cinder_volume_image
+ volumes:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/puppet:/tmp/puppet-etc:ro
+ - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+ - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+ - /dev/shm:/dev/shm:rw
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/cinder
+ - /var/lib/cinder
+ #FIXME: all of this should be conditional on the CinderEnableIscsiBackend value being set to true
+ - name: cinder create LVM volume group dd
+ command:
+ list_join:
+ - ''
+ - - 'dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek='
+ - str_replace:
+ template: VALUE
+ params:
+ VALUE: {get_param: CinderLVMLoopDeviceSize}
+ - 'M'
+ args:
+ creates: /var/lib/cinder/cinder-volumes
+ - name: cinder create LVM volume group
+ shell: |
+ if ! losetup /dev/loop2; then
+ losetup /dev/loop2 /var/lib/cinder/cinder-volumes
+ fi
+ if ! pvdisplay | grep cinder-volumes; then
+ pvcreate /dev/loop2
+ fi
+ if ! vgdisplay | grep cinder-volumes; then
+ vgcreate cinder-volumes /dev/loop2
+ fi
+ args:
+ executable: /bin/bash
+ creates: /dev/loop2
+ upgrade_tasks:
+ - name: Stop and disable cinder_volume service
+ tags: step2
+ service: name=openstack-cinder-volume state=stopped enabled=no
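
The loop-device setup in host_prep_tasks composes the dd invocation from CinderLVMLoopDeviceSize via list_join and str_replace; with the default of 10280 the first task resolves to the plain Ansible task below (a sketch of the rendered value), and the follow-up shell task then attaches /dev/loop2 and creates the cinder-volumes volume group on top of it.

    - name: cinder create LVM volume group dd
      command: dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek=10280M
      args:
        creates: /var/lib/cinder/cinder-volumes
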
diff --git a/docker/services/pacemaker/haproxy.yaml b/docker/services/pacemaker/haproxy.yaml
index ae19652e..7557afd6 100644
--- a/docker/services/pacemaker/haproxy.yaml
+++ b/docker/services/pacemaker/haproxy.yaml
@@ -60,11 +60,7 @@ outputs:
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerHAProxyImage} ]
- step_config:
- list_join:
- - "\n"
- - - &noop_pcmk "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
- - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
+ step_config: ""
service_config_settings: {get_attr: [HAProxyBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
puppet_config:
@@ -74,8 +70,8 @@ outputs:
list_join:
- "\n"
- - "exec {'wait-for-settle': command => '/bin/true' }"
- - &noop_firewall "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}"
- - *noop_pcmk
+ - "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}"
+ - "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
- 'include ::tripleo::profile::pacemaker::haproxy_bundle'
config_image: *haproxy_image
kolla_config:
@@ -88,6 +84,7 @@ outputs:
detach: false
net: host
user: root
+ privileged: true
command:
- '/bin/bash'
- '-c'
@@ -98,14 +95,20 @@ outputs:
- - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
- "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
params:
- TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+ TAGS: 'tripleo::firewall::rule,pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
CONFIG:
list_join:
- ';'
- - - *noop_firewall
- - 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::haproxy_bundle'
+ - - 'include ::tripleo::profile::base::pacemaker'
+ - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
image: *haproxy_image
volumes:
+ # puppet saves iptables rules in /etc/sysconfig
+ - /etc/sysconfig:/etc/sysconfig:rw
+ # saving rules requires access to /usr/libexec/iptables/iptables.init, so bind-mount
+ # the necessary bits and prevent systemd from trying to reload the service in the container
+ - /usr/libexec/iptables:/usr/libexec/iptables:ro
+ - /usr/libexec/initscripts/legacy-actions:/usr/libexec/initscripts/legacy-actions:ro
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /etc/puppet:/tmp/puppet-etc:ro
diff --git a/docker/services/panko-api.yaml b/docker/services/panko-api.yaml
index c381c0da..585148e5 100644
--- a/docker/services/panko-api.yaml
+++ b/docker/services/panko-api.yaml
@@ -87,16 +87,15 @@ outputs:
owner: panko:panko
recurse: true
docker_config:
- step_3:
+ step_2:
panko_init_log:
- start_order: 0
image: *panko_image
user: root
volumes:
- /var/log/containers/panko:/var/log/panko
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R panko:panko /var/log/panko']
+ step_3:
panko_db_sync:
- start_order: 1
image: *panko_image
net: host
detach: false
diff --git a/docker/services/rabbitmq.yaml b/docker/services/rabbitmq.yaml
index 609aec06..06d663c9 100644
--- a/docker/services/rabbitmq.yaml
+++ b/docker/services/rabbitmq.yaml
@@ -89,6 +89,7 @@ outputs:
step_1:
rabbitmq_init_logs:
start_order: 0
+ detach: false
image: *rabbitmq_image
privileged: false
user: root
@@ -97,6 +98,7 @@ outputs:
command: ['/bin/bash', '-c', 'chown -R rabbitmq:rabbitmq /var/log/rabbitmq']
rabbitmq_bootstrap:
start_order: 1
+ detach: false
image: *rabbitmq_image
net: host
privileged: false
diff --git a/docker/services/sahara-api.yaml b/docker/services/sahara-api.yaml
new file mode 100644
index 00000000..10670796
--- /dev/null
+++ b/docker/services/sahara-api.yaml
@@ -0,0 +1,119 @@
+heat_template_version: pike
+
+description: >
+ OpenStack Sahara service configured with Puppet
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerSaharaApiImage:
+ description: image
+ default: 'centos-binary-sahara-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ SaharaApiPuppetBase:
+ type: ../../puppet/services/sahara-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Sahara API role.
+ value:
+ service_name: {get_attr: [SaharaApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [SaharaApiPuppetBase, role_data, config_settings]
+ - sahara::sync_db: false
+ step_config: &step_config
+ get_attr: [SaharaApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [SaharaApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: sahara
+ puppet_tags: sahara_api_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
+ step_config: *step_config
+ config_image: &sahara_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSaharaApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/sahara-api.json:
+ command: /usr/bin/sahara-api --config-file /etc/sahara/sahara.conf
+ permissions:
+ - path: /var/lib/sahara
+ owner: sahara:sahara
+ recurse: true
+ - path: /var/log/sahara
+ owner: sahara:sahara
+ recurse: true
+ docker_config:
+ step_3:
+ sahara_db_sync:
+ image: *sahara_image
+ net: host
+ privileged: false
+ detach: false
+ volumes: &sahara_volumes
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/sahara-api.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/config-data/sahara/etc/sahara/:/etc/sahara/:ro
+ - /lib/modules:/lib/modules:ro
+ - /var/lib/sahara:/var/lib/sahara
+ - /var/log/containers/sahara:/var/log/sahara
+ command: "/usr/bin/bootstrap_host_exec sahara_api su sahara -s /bin/bash -c 'sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head'"
+ step_4:
+ sahara_api:
+ image: *sahara_image
+ net: host
+ privileged: false
+ restart: always
+ volumes: *sahara_volumes
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /var/lib/sahara
+ file:
+ path: /var/lib/sahara
+ state: directory
+ - name: create persistent sahara logs directory
+ file:
+ path: /var/log/containers/sahara
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable sahara_api service
+ tags: step2
+ service: name=openstack-sahara-api state=stopped enabled=no
diff --git a/docker/services/sahara-engine.yaml b/docker/services/sahara-engine.yaml
new file mode 100644
index 00000000..41b5790b
--- /dev/null
+++ b/docker/services/sahara-engine.yaml
@@ -0,0 +1,110 @@
+heat_template_version: pike
+
+description: >
+ OpenStack Sahara service configured with Puppet
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerSaharaEngineImage:
+ description: image
+ default: 'centos-binary-sahara-engine:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ SaharaEnginePuppetBase:
+ type: ../../puppet/services/sahara-engine.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Sahara Engine role.
+ value:
+ service_name: {get_attr: [SaharaEnginePuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [SaharaEnginePuppetBase, role_data, config_settings]
+ - sahara::sync_db: false
+ step_config: &step_config
+ get_attr: [SaharaEnginePuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [SaharaEnginePuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: sahara
+ puppet_tags: sahara_engine_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
+ step_config: *step_config
+ config_image: &sahara_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSaharaEngineImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/sahara-engine.json:
+ command: /usr/bin/sahara-engine --config-file /etc/sahara/sahara.conf
+ permissions:
+ - path: /var/lib/sahara
+ owner: sahara:sahara
+ recurse: true
+ - path: /var/log/sahara
+ owner: sahara:sahara
+ recurse: true
+ docker_config:
+ step_4:
+ sahara_engine:
+ image: *sahara_image
+ net: host
+ privileged: false
+ restart: always
+ volumes: &sahara_volumes
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/sahara-engine.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/config-data/sahara/etc/sahara/:/etc/sahara/:ro
+ - /var/lib/sahara:/var/lib/sahara
+ - /var/log/containers/sahara:/var/log/sahara
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /var/lib/sahara
+ file:
+ path: /var/lib/sahara
+ state: directory
+ - name: create persistent sahara logs directory
+ file:
+ path: /var/log/containers/sahara
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable sahara_engine service
+ tags: step2
+ service: name=openstack-sahara-engine state=stopped enabled=no
diff --git a/docker/services/sensu-client.yaml b/docker/services/sensu-client.yaml
new file mode 100644
index 00000000..db6daf99
--- /dev/null
+++ b/docker/services/sensu-client.yaml
@@ -0,0 +1,141 @@
+heat_template_version: pike
+
+description: >
+ Containerized Sensu client service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerSensuClientImage:
+ description: image
+ default: 'centos-binary-sensu-client:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ SensuDockerCheckCommand:
+ type: string
+ default: |
+ for i in $(docker ps --format '{{.ID}}'); do
+ if result=$(docker inspect --format='{{.State.Health.Status}}' $i 2>/dev/null); then
+ if [ "$result" != 'healthy' ]; then
+ echo "$(docker inspect --format='{{.Name}}' $i) ($i): $(docker inspect --format='{{json .State}}' $i)" && exit 2;
+ fi
+ fi
+ done
+ SensuDockerCheckInterval:
+ type: number
+ description: The interval in seconds at which the docker health check is executed.
+ default: 10
+ SensuDockerCheckHandlers:
+ default: []
+ description: The Sensu event handlers to use for events
+ created by the docker health check.
+ type: comma_delimited_list
+ SensuDockerCheckOccurrences:
+ type: number
+ description: The number of event occurrences before a sensu-plugin-aware handler takes action.
+ default: 3
+ SensuDockerCheckRefresh:
+ type: number
+ description: The number of seconds sensu-plugin-aware handlers should wait before taking a second action.
+ default: 90
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ SensuClientBase:
+ type: ../../puppet/services/monitoring/sensu-client.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Sensu client role.
+ value:
+ service_name: {get_attr: [SensuClientBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [SensuClientBase, role_data, config_settings]
+ - sensu::checks:
+ check-docker-health:
+ standalone: true
+ command: {get_param: SensuDockerCheckCommand}
+ interval: {get_param: SensuDockerCheckInterval}
+ handlers: {get_param: SensuDockerCheckHandlers}
+ occurrences: {get_param: SensuDockerCheckOccurrences}
+ refresh: {get_param: SensuDockerCheckRefresh}
+ step_config: &step_config
+ get_attr: [SensuClientBase, role_data, step_config]
+ service_config_settings: {get_attr: [SensuClientBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: sensu
+ puppet_tags: sensu_rabbitmq_config,sensu_client_config,sensu_check_config,sensu_check
+ step_config: *step_config
+ config_image: &sensu_client_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSensuClientImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/sensu-client.json:
+ command: /usr/bin/sensu-client -d /etc/sensu/conf.d/ -l /var/log/sensu/sensu-client.log
+ permissions:
+ - path: /var/log/sensu
+ owner: sensu:sensu
+ recurse: true
+ docker_config:
+ step_3:
+ sensu_client:
+ image: *sensu_client_image
+ net: host
+ privileged: true
+ # NOTE(mmagr) the kolla image changes the user to 'sensu', but we need it
+ # to be root and have rw permission on docker.sock so that the
+ # "docker inspect" command runs successfully
+ user: root
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/run/docker.sock:/var/run/docker.sock:rw
+ - /var/lib/kolla/config_files/sensu-client.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/sensu/etc/sensu/:/etc/sensu/:ro
+ - /var/log/containers/sensu:/var/log/sensu:rw
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/sensu
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable sensu-client service
+ tags: step2
+ service: name=sensu-client.service state=stopped enabled=no
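The health check parameters declared above can be overridden per deployment once the service is mapped in (environments/services-docker/sensu-client.yaml, added later in this change, does the mapping). A minimal sketch of a parameter_defaults override; the 'email' handler name is hypothetical and must exist in the Sensu server configuration:

parameter_defaults:
  # Run the container health check every 30 seconds instead of the default 10.
  SensuDockerCheckInterval: 30
  # Handlers to notify for check-docker-health events (example handler name).
  SensuDockerCheckHandlers: ['email']
  # Require 5 occurrences before a sensu-plugin-aware handler acts.
  SensuDockerCheckOccurrences: 5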
diff --git a/docker/services/services.yaml b/docker/services/services.yaml
deleted file mode 100644
index 2ad3b63d..00000000
--- a/docker/services/services.yaml
+++ /dev/null
@@ -1,105 +0,0 @@
-heat_template_version: pike
-
-description: >
- Utility stack to convert an array of services into a set of combined
- role configs.
-
-parameters:
- Services:
- default: []
- description: |
- List nested stack service templates.
- type: comma_delimited_list
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- DefaultPasswords:
- default: {}
- description: Mapping of service -> default password. Used to help
- pass top level passwords managed by Heat into services.
- type: json
- RoleName:
- default: ''
- description: Role name on which the service is applied
- type: string
- RoleParameters:
- default: {}
- description: Parameters specific to the role
- type: json
-
-resources:
-
- PuppetServices:
- type: ../../puppet/services/services.yaml
- properties:
- Services: {get_param: Services}
- ServiceNetMap: {get_param: ServiceNetMap}
- EndpointMap: {get_param: EndpointMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- RoleName: {get_param: RoleName}
- RoleParameters: {get_param: RoleParameters}
-
- ServiceChain:
- type: OS::Heat::ResourceChain
- properties:
- resources: {get_param: Services}
- concurrent: true
- resource_properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- EndpointMap: {get_param: EndpointMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- RoleName: {get_param: RoleName}
- RoleParameters: {get_param: RoleParameters}
-
-outputs:
- role_data:
- description: Combined Role data for this set of services.
- value:
- service_names:
- {get_attr: [PuppetServices, role_data, service_names]}
- monitoring_subscriptions:
- {get_attr: [PuppetServices, role_data, monitoring_subscriptions]}
- logging_sources:
- {get_attr: [PuppetServices, role_data, logging_sources]}
- logging_groups:
- {get_attr: [PuppetServices, role_data, logging_groups]}
- service_config_settings:
- {get_attr: [PuppetServices, role_data, service_config_settings]}
- config_settings:
- {get_attr: [PuppetServices, role_data, config_settings]}
- global_config_settings:
- {get_attr: [PuppetServices, role_data, global_config_settings]}
- step_config:
- {get_attr: [ServiceChain, role_data, step_config]}
- puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]}
- kolla_config:
- map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
- docker_config:
- {get_attr: [ServiceChain, role_data, docker_config]}
- docker_puppet_tasks:
- {get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
- host_prep_tasks:
- yaql:
- # Note we use distinct() here to filter any identical tasks
- expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
- data: {get_attr: [ServiceChain, role_data]}
- upgrade_tasks:
- yaql:
- # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
- expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
- data: {get_attr: [ServiceChain, role_data]}
- upgrade_batch_tasks:
- yaql:
- # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
- expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
- data: {get_attr: [ServiceChain, role_data]}
- service_metadata_settings:
- get_attr: [PuppetServices, role_data, service_metadata_settings]
diff --git a/docker/services/swift-ringbuilder.yaml b/docker/services/swift-ringbuilder.yaml
index bfd445d0..075d8d7c 100644
--- a/docker/services/swift-ringbuilder.yaml
+++ b/docker/services/swift-ringbuilder.yaml
@@ -58,6 +58,14 @@ parameters:
default: true
description: 'Use a local directory for Swift storage services when building rings'
type: boolean
+ SwiftRingGetTempurl:
+ default: ''
+ description: A temporary Swift URL to download rings from.
+ type: string
+ SwiftRingPutTempurl:
+ default: ''
+ description: A temporary Swift URL to upload rings to.
+ type: string
resources:
@@ -75,14 +83,17 @@ outputs:
description: Role data for Swift Ringbuilder configuration in containers.
value:
service_name: {get_attr: [SwiftRingbuilderBase, role_data, service_name]}
- config_settings: {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
+ config_settings:
+ map_merge:
+ - {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
+ - tripleo::profile::base::swift::ringbuilder::skip_consistency_check: true
step_config: &step_config
get_attr: [SwiftRingbuilderBase, role_data, step_config]
service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
puppet_config:
config_volume: 'swift'
- puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance
+ puppet_tags: exec,fetch_swift_ring_tarball,extract_swift_ring_tarball,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance,create_swift_ring_tarball,upload_swift_ring_tarball
step_config: *step_config
config_image:
list_join:
diff --git a/docker/services/swift-storage.yaml b/docker/services/swift-storage.yaml
index 017fb123..55aea208 100644
--- a/docker/services/swift-storage.yaml
+++ b/docker/services/swift-storage.yaml
@@ -46,6 +46,11 @@ parameters:
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
type: json
+ SwiftRawDisks:
+ default: {}
+ description: 'A hash of additional raw devices to use as Swift backend (e.g. {sdb: {}})'
+ type: json
+
resources:
@@ -66,7 +71,11 @@ outputs:
description: Role data for the swift storage services.
value:
service_name: {get_attr: [SwiftStorageBase, role_data, service_name]}
- config_settings: {get_attr: [SwiftStorageBase, role_data, config_settings]}
+ config_settings:
+ map_merge:
+ - {get_attr: [SwiftStorageBase, role_data, config_settings]}
+ # FIXME (cschwede): re-enable this once the check works inside containers
+ - swift::storage::all::mount_check: false
step_config: &step_config
get_attr: [SwiftStorageBase, role_data, step_config]
service_config_settings: {get_attr: [SwiftStorageBase, role_data, service_config_settings]}
@@ -348,6 +357,18 @@ outputs:
with_items:
- /var/log/containers/swift
- /srv/node
+ - name: Format and mount devices defined in SwiftRawDisks
+ mount:
+ name: /srv/node/{{ item }}
+ src: /dev/{{ item }}
+ fstype: xfs
+ opts: noatime
+ state: mounted
+ with_items:
+ - repeat:
+ template: 'DEVICE'
+ for_each:
+ DEVICE: {get_param: SwiftRawDisks}
upgrade_tasks:
- name: Stop and disable swift storage services
tags: step2
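The SwiftRawDisks map drives the host_prep_tasks entry above: each key names a block device that is mounted with the xfs filesystem type under /srv/node/<device>. A minimal sketch of passing extra disks (the device names are examples and depend on the local hardware):

parameter_defaults:
  SwiftRawDisks:
    sdb: {}
    sdc: {}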
diff --git a/docker/services/tacker.yaml b/docker/services/tacker.yaml
index 2fc99d6f..df9750c9 100644
--- a/docker/services/tacker.yaml
+++ b/docker/services/tacker.yaml
@@ -82,9 +82,8 @@ outputs:
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
tacker_init_logs:
- start_order: 0
image: &tacker_image
list_join:
- '/'
@@ -94,8 +93,8 @@ outputs:
volumes:
- /var/log/containers/tacker:/var/log/tacker
command: ['/bin/bash', '-c', 'chown -R tacker:tacker /var/log/tacker']
+ step_3:
tacker_db_sync:
- start_order: 1
image: *tacker_image
net: host
privileged: false
diff --git a/environments/cinder-dellsc-config.yaml b/environments/cinder-dellsc-config.yaml
index 99e517bf..681a2fe9 100644
--- a/environments/cinder-dellsc-config.yaml
+++ b/environments/cinder-dellsc-config.yaml
@@ -19,3 +19,4 @@ parameter_defaults:
CinderDellScSecondarySanLogin: 'Admin'
CinderDellScSecondarySanPassword: ''
CinderDellScSecondaryScApiPort: 3033
+ CinderDellScExcludedDomainIp: ''
diff --git a/environments/cinder-netapp-config.yaml b/environments/cinder-netapp-config.yaml
index dfd15893..83ecbbe4 100644
--- a/environments/cinder-netapp-config.yaml
+++ b/environments/cinder-netapp-config.yaml
@@ -1,3 +1,7 @@
+# *************************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/storage/cinder-netapp-config.yaml
+# instead.
+# *************************************************************************************
# A Heat environment file which can be used to enable a
# a Cinder NetApp backend, configured via puppet
resource_registry:
diff --git a/environments/docker-services-tls-everywhere.yaml b/environments/docker-services-tls-everywhere.yaml
index 2740664c..3ca04697 100644
--- a/environments/docker-services-tls-everywhere.yaml
+++ b/environments/docker-services-tls-everywhere.yaml
@@ -35,13 +35,11 @@ resource_registry:
OS::TripleO::PostDeploySteps: ../docker/post.yaml
OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
- OS::TripleO::Services: ../docker/services/services.yaml
-
parameter_defaults:
- # Defaults to 'tripleoupstream'. Specify a local docker registry
- # Example: 192.168.24.1:8787/tripleoupstream
- DockerNamespace: tripleoupstream
- DockerNamespaceIsRegistry: false
+ # To specify a local docker registry, enable these
+ # where 192.168.24.1 is the host running docker-distribution
+ #DockerNamespace: 192.168.24.1:8787/tripleoupstream
+ #DockerNamespaceIsRegistry: true
ComputeServices:
- OS::TripleO::Services::CACerts
diff --git a/environments/docker.yaml b/environments/docker.yaml
index 28527945..03713e83 100644
--- a/environments/docker.yaml
+++ b/environments/docker.yaml
@@ -2,10 +2,11 @@ resource_registry:
# This can be used when you don't want to run puppet on the host,
# e.g atomic, but it has been replaced with OS::TripleO::Services::Docker
# OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
- OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
# The compute node still needs extra initialization steps
OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+ OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
+
#NOTE (dprince) add roles to be docker enabled as we support them
OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml
OS::TripleO::Services::ComputeNeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
@@ -18,7 +19,9 @@ resource_registry:
OS::TripleO::Services::NovaApi: ../docker/services/nova-api.yaml
OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml
OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml
+ OS::TripleO::Services::NovaConsoleauth: ../docker/services/nova-consoleauth.yaml
OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml
+ OS::TripleO::Services::NovaVncProxy: ../docker/services/nova-vnc-proxy.yaml
OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
@@ -27,7 +30,7 @@ resource_registry:
OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
- OS::TripleO::Services::HAProxy: ../docker/services/haproxy.yaml
+ OS::TripleO::Services::MySQLClient: ../docker/services/database/mysql-client.yaml
OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml
@@ -44,15 +47,21 @@ resource_registry:
OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
OS::TripleO::Services::CeilometerAgentCentral: ../docker/services/ceilometer-agent-central.yaml
+ OS::TripleO::Services::CeilometerAgentIpmi: ../docker/services/ceilometer-agent-ipmi.yaml
OS::TripleO::Services::CeilometerAgentCompute: ../docker/services/ceilometer-agent-compute.yaml
OS::TripleO::Services::CeilometerAgentNotification: ../docker/services/ceilometer-agent-notification.yaml
OS::TripleO::Services::Horizon: ../docker/services/horizon.yaml
+ OS::TripleO::Services::Iscsid: ../docker/services/iscsid.yaml
+ OS::TripleO::Services::Multipathd: ../docker/services/multipathd.yaml
+ # FIXME: Had to remove these to unblock containers CI. They should be put back when fixed.
+ # OS::TripleO::Services::CinderApi: ../docker/services/cinder-api.yaml
+ # OS::TripleO::Services::CinderScheduler: ../docker/services/cinder-scheduler.yaml
+ # OS::TripleO::Services::CinderBackup: ../docker/services/cinder-backup.yaml
+ # OS::TripleO::Services::CinderVolume: ../docker/services/cinder-volume.yaml
OS::TripleO::PostDeploySteps: ../docker/post.yaml
OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
- OS::TripleO::Services: ../docker/services/services.yaml
-
parameter_defaults:
# To specify a local docker registry, enable these
# where 192.168.24.1 is the host running docker-distribution
diff --git a/environments/enable-tls.yaml b/environments/enable-tls.yaml
index 39ded654..175e1fd7 100644
--- a/environments/enable-tls.yaml
+++ b/environments/enable-tls.yaml
@@ -1,7 +1,11 @@
+# ********************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/enable-tls.yaml instead.
+# ********************************************************************************
# Use this environment to pass in certificates for SSL deployments.
# For these values to take effect, one of the tls-endpoints-*.yaml environments
# must also be used.
parameter_defaults:
+ HorizonSecureCookies: True
SSLCertificate: |
The contents of your certificate go here
SSLIntermediateCertificate: ''
diff --git a/environments/host-config-and-reboot.j2.yaml b/environments/host-config-and-reboot.j2.yaml
new file mode 100644
index 00000000..d5f69ec5
--- /dev/null
+++ b/environments/host-config-and-reboot.j2.yaml
@@ -0,0 +1,18 @@
+resource_registry:
+{% for role in roles %}
+ OS::TripleO::{{role.name}}::PreNetworkConfig: ../extraconfig/pre_network/host_config_and_reboot.yaml
+{% endfor %}
+
+#parameter_defaults:
+ # Note: There are no global parameters which can be applied to all roles, as
+ # this configuration has to be specific to each role.
+
+ # Sample parameters for Compute and ComputeOvsDpdk roles
+ #ComputeParameters:
+ #KernelArgs: ""
+ #TunedProfileName: ""
+ #HostIsolatedCoreList: ""
+ #ComputeOvsDpdkParameters:
+ #KernelArgs: ""
+ #TunedProfileName: ""
+ #HostIsolatedCoreList: ""
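A minimal sketch of the role-specific parameters the commented block above refers to, filled in for the Compute role; the KernelArgs value mirrors the DPDK hugepage/IOMMU example used elsewhere in this change, while the tuned profile and isolated core list are placeholders that depend on the hardware:

parameter_defaults:
  ComputeParameters:
    KernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
    TunedProfileName: "cpu-partitioning"   # placeholder profile name
    HostIsolatedCoreList: "2-19"           # placeholder core range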
diff --git a/environments/host-config-pre-network.j2.yaml b/environments/host-config-pre-network.j2.yaml
deleted file mode 100644
index c79e28b4..00000000
--- a/environments/host-config-pre-network.j2.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-resource_registry:
-# Create the registry only for roles with the word "Compute" in it. Like ComputeOvsDpdk, ComputeSriov, etc.,
-{%- for role in roles -%}
-{% if "Compute" in role.name %}
- OS::TripleO::{{role.name}}::PreNetworkConfig: ../extraconfig/pre_network/{{role.name.lower()}}-host_config_and_reboot.yaml
-{%- endif -%}
-{% endfor %}
-
-#parameter_defaults:
- # Sample parameters for Compute and ComputeOvsDpdk roles
- #ComputeKernelArgs: ""
- #ComputeTunedProfileName: ""
- #ComputeHostCpuList: ""
- #ComputeOvsDpdkKernelArgs: ""
- #ComputeOvsDpdkTunedProfileName: ""
- #ComputeOvsDpdkHostCpuList: ""
diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml
index 0f7e1143..05a3a391 100644
--- a/environments/hyperconverged-ceph.yaml
+++ b/environments/hyperconverged-ceph.yaml
@@ -36,3 +36,4 @@ parameter_defaults:
- OS::TripleO::Services::NeutronVppAgent
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::Docker
+ - OS::TripleO::Services::Iscsid
diff --git a/environments/inject-trust-anchor-hiera.yaml b/environments/inject-trust-anchor-hiera.yaml
index b4908c1b..95d2de95 100644
--- a/environments/inject-trust-anchor-hiera.yaml
+++ b/environments/inject-trust-anchor-hiera.yaml
@@ -1,3 +1,7 @@
+# **************************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/inject-trust-anchor-hiera.yaml
+# instead.
+# **************************************************************************************
parameter_defaults:
CAMap:
first-ca-name:
diff --git a/environments/inject-trust-anchor.yaml b/environments/inject-trust-anchor.yaml
index 3ecb0d27..1b0f7066 100644
--- a/environments/inject-trust-anchor.yaml
+++ b/environments/inject-trust-anchor.yaml
@@ -1,3 +1,7 @@
+# ********************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/inject-trust-anchor.yaml
+# instead.
+# ********************************************************************************
parameter_defaults:
SSLRootCertificate: |
The contents of your root CA certificate go here
diff --git a/environments/network-isolation.j2.yaml b/environments/network-isolation.j2.yaml
new file mode 100644
index 00000000..6a7318fc
--- /dev/null
+++ b/environments/network-isolation.j2.yaml
@@ -0,0 +1,37 @@
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+ {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+ {%- set _ = primary_role.pop() -%}
+ {%- set _ = primary_role.append(role) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# Enable the creation of Neutron networks for isolated Overcloud
+# traffic and configure each role to assign ports (related
+# to that role) on these networks.
+# primary role is: {{primary_role_name}}
+resource_registry:
+ # networks as defined in network_data.yaml
+ {%- for network in networks if network.enabled|default(true) %}
+ OS::TripleO::Network::{{network.name}}: ../network/{{network.name_lower|default(network.name.lower())}}.yaml
+ {%- endfor %}
+
+ # Port assignments for the VIPs
+ {%- for network in networks if network.vip %}
+ OS::TripleO::Network::Ports::{{network.name}}VipPort: ../network/ports/{{network.name_lower|default(network.name.lower())}}.yaml
+ {%- endfor %}
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+
+ OS::TripleO::{{primary_role_name}}::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+{%- for role in roles %}
+ # Port assignments for the {{role.name}}
+ {%- for network in networks %}
+ {%- if network.name in role.networks|default([]) and network.enabled|default(true) %}
+ OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: ../network/ports/{{network.name_lower|default(network.name.lower())}}.yaml
+ {%- else %}
+ OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: ../network/ports/noop.yaml
+ {%- endif %}
+ {%- endfor %}
+{%- endfor %}
diff --git a/environments/network-isolation.yaml b/environments/network-isolation.yaml
deleted file mode 100644
index a6b4b8ae..00000000
--- a/environments/network-isolation.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Enable the creation of Neutron networks for isolated Overcloud
-# traffic and configure each role to assign ports (related
-# to that role) on these networks.
-resource_registry:
- OS::TripleO::Network::External: ../network/external.yaml
- OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
- OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml
- OS::TripleO::Network::Storage: ../network/storage.yaml
- OS::TripleO::Network::Tenant: ../network/tenant.yaml
- # Management network is optional and disabled by default.
- # To enable it, include environments/network-management.yaml
- #OS::TripleO::Network::Management: ../network/management.yaml
-
- # Port assignments for the VIPs
- OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
- OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
- OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
- OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
-
- # Port assignments for the controller role
- OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
- OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml
- OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml
- #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
-
- # Port assignments for the compute role
- OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
- OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
- OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
- OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml
- #OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
-
- # Port assignments for the ceph storage role
- OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
- #OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
- # Port assignments for the swift storage role
- OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
- OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
- #OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
- # Port assignments for the block storage role
- OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
- OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
- OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
- #OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
diff --git a/environments/networking/neutron-midonet.yaml b/environments/networking/neutron-midonet.yaml
new file mode 100644
index 00000000..ad8da8cf
--- /dev/null
+++ b/environments/networking/neutron-midonet.yaml
@@ -0,0 +1,66 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable the Neutron MidoNet Services
+# description: |
+# A Heat environment that can be used to deploy MidoNet Services
+parameter_defaults:
+ # Native Transport Port
+ # Type: string
+ CassandraClientPort: 9042
+
+ # The port for the Thrift RPC service, which is used for client connections
+ # Type: string
+ CassandraClientPortThrift: 9160
+
+ # The SSL port for encrypted communication. Unused unless enabled in encryption_options
+ # Type: string
+ CassandraSslStoragePort: 7001
+
+ # The Cassandra port for inter-node communication
+ # Type: string
+ CassandraStoragePort: 7000
+
+ # Name of the tunnel zone used to tunnel packets
+ # Type: string
+ TunnelZoneName: tunnelzone_tripleo
+
+ # Type of the tunnels on the overlay. Choose between `gre` and `vxlan`
+ # Type: string
+ TunnelZoneType: vxlan
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ # Whether to enable the Cassandra cluster on the Controller
+ # Type: boolean
+ EnableCassandraOnController: True
+
+ # Whether to enable the Zookeeper cluster on the Controller
+ # Type: boolean
+ EnableZookeeperOnController: True
+
+ # The core plugin for Neutron. The value should be the entrypoint to be loaded
+ # from neutron.core_plugins namespace.
+ # Type: string
+ NeutronCorePlugin: midonet.neutron.plugin_v1.MidonetPluginV2
+
+ # If True, DHCP provides a metadata route to the VM.
+ # Type: boolean
+ NeutronEnableIsolatedMetadata: True
+
+ # *********************
+ # End static parameters
+ # *********************
+resource_registry:
+ OS::TripleO::AllNodesExtraConfig: ../../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../../net-config-linux-bridge.yaml
+ OS::TripleO::Services::ComputeNeutronCorePlugin: ../../puppet/services/neutron-compute-plugin-midonet.yaml
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginMidonet
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
diff --git a/environments/neutron-midonet.yaml b/environments/neutron-midonet.yaml
index c120d0b3..64cea2a6 100644
--- a/environments/neutron-midonet.yaml
+++ b/environments/neutron-midonet.yaml
@@ -1,3 +1,7 @@
+# ******************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/networking/neutron-midonet
+# instead.
+# ******************************************************************************
# A Heat environment that can be used to deploy MidoNet Services
resource_registry:
OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
diff --git a/environments/neutron-ml2-ovn-ha.yaml b/environments/neutron-ml2-ovn-ha.yaml
new file mode 100644
index 00000000..c592d576
--- /dev/null
+++ b/environments/neutron-ml2-ovn-ha.yaml
@@ -0,0 +1,24 @@
+# A Heat environment file which can be used to enable OVN
+# extensions, configured via puppet
+resource_registry:
+ OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginML2OVN
+ OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-ovn.yaml
+ OS::TripleO::Services::OVNDBs: ../puppet/services/pacemaker/ovn-dbs.yaml
+# Disabling Neutron services that overlap with OVN
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+
+parameter_defaults:
+ NeutronMechanismDrivers: ovn
+ OVNVifType: ovs
+ OVNNeutronSyncMode: log
+ OVNQosDriver: ovn-qos
+ OVNTunnelEncapType: geneve
+ NeutronEnableDHCPAgent: false
+ NeutronTypeDrivers: 'geneve,vxlan,vlan,flat'
+ NeutronNetworkType: 'geneve'
+ NeutronServicePlugins: 'qos,ovn-router'
+ NeutronVniRanges: ['1:65536', ]
diff --git a/environments/neutron-opendaylight-dpdk.yaml b/environments/neutron-opendaylight-dpdk.yaml
new file mode 100644
index 00000000..9ee4eb7e
--- /dev/null
+++ b/environments/neutron-opendaylight-dpdk.yaml
@@ -0,0 +1,37 @@
+# A Heat environment that can be used to deploy OpenDaylight with L3 DVR and DPDK
+resource_registry:
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+ OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
+ OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+
+parameter_defaults:
+ NeutronEnableForceMetadata: true
+ NeutronMechanismDrivers: 'opendaylight_v2'
+ NeutronServicePlugins: 'odl-router_v2'
+ NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+ ## Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+ ## It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+ ## This can be done using ComputeKernelArgs as shown below.
+ ComputeParameters:
+ #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+ ## Attempting to deploy DPDK without appropriate values for the below parameters may lead to unstable deployments
+ ## due to CPU contention of DPDK PMD threads.
+ OvsEnableDpdk: True
+ ## It is highly recommended to enable isolcpus (via ComputeKernelArgs) on compute overcloud nodes and set the following parameters:
+ #OvsDpdkSocketMemory: "" # Sets the amount of hugepage memory to assign per NUMA node.
+ # It is recommended to use the socket closest to the PCIe slot used for the
+ # desired DPDK NIC. Format should be comma separated per socket string such as:
+ # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+ #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+ #OvsPmdCoreList: "" # List or range of CPU cores for PMD threads to be pinned to. Note, NIC
+ # location to cores on socket, number of hyper-threaded logical cores, and
+ # desired number of PMD threads can all play a role in configuring this setting.
+ # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+ # If using hyperthreading then specify both logical cores that would equal the
+ # physical core. Also, specifying more than one core will trigger multiple PMD
+ # threads to be spawned, which may improve dataplane performance.
+ #NovaVcpuPinSet: "" # Cores to pin Nova instances to. For maximum performance, select cores
+ # on the same NUMA node(s) selected for previous settings.
diff --git a/environments/neutron-ovs-dpdk.yaml b/environments/neutron-ovs-dpdk.yaml
index 6706bccc..ecfd0fea 100644
--- a/environments/neutron-ovs-dpdk.yaml
+++ b/environments/neutron-ovs-dpdk.yaml
@@ -1,18 +1,31 @@
-## A Heat environment that can be used to deploy DPDK with OVS
+# A Heat environment that can be used to deploy DPDK with OVS
+# Deploying DPDK requires enabling hugepages for the overcloud nodes
resource_registry:
OS::TripleO::Services::ComputeNeutronOvsAgent: ../puppet/services/neutron-ovs-dpdk-agent.yaml
parameter_defaults:
- ## NeutronDpdkCoreList and NeutronDpdkMemoryChannels are REQUIRED settings.
- ## Attempting to deploy DPDK without appropriate values will cause deployment to fail or lead to unstable deployments.
- #NeutronDpdkCoreList: ""
- #NeutronDpdkMemoryChannels: ""
-
NeutronDatapathType: "netdev"
NeutronVhostuserSocketDir: "/var/lib/vhost_sockets"
-
- #NeutronDpdkSocketMemory: ""
- #NeutronDpdkDriverType: "vfio-pci"
- #NovaReservedHostMemory: 4096
- #NovaVcpuPinSet: ""
-
+ NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+ ## Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+ ## It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+ ## This can be done using ComputeKernelArgs as shown below.
+ #ComputeParameters:
+ #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+ ## Attempting to deploy DPDK without appropriate values for the below parameters may lead to unstable deployments
+ ## due to CPU contention of DPDK PMD threads.
+ ## It is highly recommended to enable isolcpus (via ComputeKernelArgs) on compute overcloud nodes and set the following parameters:
+ #OvsDpdkSocketMemory: "" # Sets the amount of hugepage memory to assign per NUMA node.
+ # It is recommended to use the socket closest to the PCIe slot used for the
+ # desired DPDK NIC. Format should be comma separated per socket string such as:
+ # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+ #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+ #OvsPmdCoreList: "" # List or range of CPU cores for PMD threads to be pinned to. Note, NIC
+ # location to cores on socket, number of hyper-threaded logical cores, and
+ # desired number of PMD threads can all play a role in configuring this setting.
+ # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+ # If using hyperthreading then specify both logical cores that would equal the
+ # physical core. Also, specifying more than one core will trigger multiple PMD
+ # threads to be spawned, which may improve dataplane performance.
+ #NovaVcpuPinSet: "" # Cores to pin Nova instances to. For maximum performance, select cores
+ # on the same NUMA node(s) selected for previous settings.
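A minimal sketch of the commented parameters above filled in for a hypothetical two-socket compute node; the socket memory and driver type follow the examples in the comments, while the PMD core list and vCPU pin set are placeholders that must match the actual NUMA layout and NIC placement:

parameter_defaults:
  OvsDpdkSocketMemory: "1024,0"
  OvsDpdkDriverType: "vfio-pci"
  OvsPmdCoreList: "2,22"        # placeholder: a physical core and its sibling thread
  NovaVcpuPinSet: "4-19,24-39"  # placeholder core ranges for instance vCPUs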
diff --git a/environments/nonha-arch.yaml b/environments/nonha-arch.yaml
new file mode 100644
index 00000000..7fdcc100
--- /dev/null
+++ b/environments/nonha-arch.yaml
@@ -0,0 +1,16 @@
+# An environment which creates an Overcloud without the use of pacemaker
+# (i.e. only with keepalived and systemd for all resources)
+resource_registry:
+ OS::TripleO::Tasks::ControllerPreConfig: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPostConfig: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: OS::Heat::None
+
+ OS::TripleO::Services::CinderVolume: ../puppet/services/cinder-volume.yaml
+ OS::TripleO::Services::RabbitMQ: ../puppet/services/rabbitmq.yaml
+ OS::TripleO::Services::HAproxy: ../puppet/services/haproxy.yaml
+ OS::TripleO::Services::Redis: ../puppet/services/database/redis.yaml
+ OS::TripleO::Services::MySQL: ../puppet/services/database/mysql.yaml
+ OS::TripleO::Services::Keepalived: OS::Heat::None
+ OS::TripleO::Services::Pacemaker: OS::Heat::None
+ OS::TripleO::Services::PacemakerRemote: OS::Heat::None
+
diff --git a/environments/overcloud-baremetal.j2.yaml b/environments/overcloud-baremetal.j2.yaml
new file mode 100644
index 00000000..668e28de
--- /dev/null
+++ b/environments/overcloud-baremetal.j2.yaml
@@ -0,0 +1,19 @@
+resource_registry:
+ OS::TripleO::AllNodes::SoftwareConfig: OS::Heat::None
+ OS::TripleO::PostDeploySteps: OS::Heat::None
+ OS::TripleO::DefaultPasswords: OS::Heat::None
+ OS::TripleO::RandomString: OS::Heat::None
+ OS::TripleO::AllNodesDeployment: OS::Heat::None
+
+parameter_defaults:
+ # Deploy no services
+{% for role in roles %}
+ {{role.name}}Services: []
+{% endfor %}
+
+ # Consistent Hostname format
+ ControllerHostnameFormat: overcloud-controller-%index%
+ ComputeHostnameFormat: overcloud-novacompute-%index%
+ ObjectStorageHostnameFormat: overcloud-objectstorage-%index%
+ CephStorageHostnameFormat: overcloud-cephstorage-%index%
+ BlockStorageHostnameFormat: overcloud-blockstorage-%index%
diff --git a/environments/overcloud-services.yaml b/environments/overcloud-services.yaml
new file mode 100644
index 00000000..c409b899
--- /dev/null
+++ b/environments/overcloud-services.yaml
@@ -0,0 +1,7 @@
+parameter_defaults:
+ # Consistent Hostname format
+ ControllerDeployedServerHostnameFormat: overcloud-controller-%index%
+ ComputeDeployedServerHostnameFormat: overcloud-novacompute-%index%
+ ObjectStorageDeployedServerHostnameFormat: overcloud-objectstorage-%index%
+ CephStorageDeployedServerHostnameFormat: overcloud-cephstorage-%index%
+ BlockStorageDeployedServerHostnameFormat: overcloud-blockstorage-%index%
diff --git a/environments/predictable-placement/custom-hostnames.yaml b/environments/predictable-placement/custom-hostnames.yaml
new file mode 100644
index 00000000..0d9d520b
--- /dev/null
+++ b/environments/predictable-placement/custom-hostnames.yaml
@@ -0,0 +1,33 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Custom Hostnames
+# description: |
+# Hostname format for each role
+# Note %index% is translated into the index of the node, e.g 0/1/2 etc
+# and %stackname% is replaced with OS::stack_name in the template below.
+# If you want to use the heat generated names, pass '' (empty string).
+parameter_defaults:
+ # Format for BlockStorage node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
+ # Type: string
+ BlockStorageHostnameFormat: '%stackname%-blockstorage-%index%'
+
+ # Format for CephStorage node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
+ # Type: string
+ CephStorageHostnameFormat: '%stackname%-cephstorage-%index%'
+
+ # Format for Compute node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
+ # Type: string
+ ComputeHostnameFormat: '%stackname%-novacompute-%index%'
+
+ # Format for Controller node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
+ # Type: string
+ ControllerHostnameFormat: '%stackname%-controller-%index%'
+
+ # Format for ObjectStorage node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
+ # Type: string
+ ObjectStorageHostnameFormat: '%stackname%-objectstorage-%index%'
+
diff --git a/environments/puppet-ceph-external.yaml b/environments/puppet-ceph-external.yaml
index 5f8b02ad..2f577c26 100644
--- a/environments/puppet-ceph-external.yaml
+++ b/environments/puppet-ceph-external.yaml
@@ -1,3 +1,7 @@
+# ******************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/storage/ceph-external.yaml
+# instead.
+# ******************************************************************************
# A Heat environment file which can be used to enable the
# use of an externally managed Ceph cluster.
resource_registry:
diff --git a/environments/services-docker/ec2-api.yaml b/environments/services-docker/ec2-api.yaml
new file mode 100644
index 00000000..24cbb032
--- /dev/null
+++ b/environments/services-docker/ec2-api.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
diff --git a/environments/services-docker/manila.yaml b/environments/services-docker/manila.yaml
new file mode 100644
index 00000000..795309f6
--- /dev/null
+++ b/environments/services-docker/manila.yaml
@@ -0,0 +1,3 @@
+resource_registry:
+ OS::TripleO::Services::ManilaApi: ../../docker/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../../docker/services/manila-scheduler.yaml
diff --git a/environments/services-docker/sahara.yaml b/environments/services-docker/sahara.yaml
new file mode 100644
index 00000000..d0bf9fe1
--- /dev/null
+++ b/environments/services-docker/sahara.yaml
@@ -0,0 +1,3 @@
+resource_registry:
+ OS::TripleO::Services::SaharaApi: ../../docker/services/sahara-api.yaml
+ OS::TripleO::Services::SaharaEngine: ../../docker/services/sahara-engine.yaml
diff --git a/environments/services-docker/sensu-client.yaml b/environments/services-docker/sensu-client.yaml
new file mode 100644
index 00000000..c03104d2
--- /dev/null
+++ b/environments/services-docker/sensu-client.yaml
@@ -0,0 +1,3 @@
+
+resource_registry:
+ OS::TripleO::Services::SensuClient: ../../docker/services/sensu-client.yaml
diff --git a/environments/services-docker/undercloud-ceilometer.yaml b/environments/services-docker/undercloud-ceilometer.yaml
index 07a61c20..ca55b4d9 100644
--- a/environments/services-docker/undercloud-ceilometer.yaml
+++ b/environments/services-docker/undercloud-ceilometer.yaml
@@ -1,3 +1,4 @@
resource_registry:
OS::TripleO::Services::UndercloudCeilometerAgentCentral: ../../docker/services/ceilometer-agent-central.yaml
OS::TripleO::Services::UndercloudCeilometerAgentNotification: ../../docker/services/ceilometer-agent-notification.yaml
+ OS::TripleO::Services::UndercloudCeilometerAgentIpmi: ../../docker/services/ceilometer-agent-ipmi.yaml
diff --git a/environments/services/ironic.yaml b/environments/services/ironic.yaml
index b1317382..b81b0269 100644
--- a/environments/services/ironic.yaml
+++ b/environments/services/ironic.yaml
@@ -1,5 +1,6 @@
resource_registry:
OS::TripleO::Services::IronicApi: ../../puppet/services/ironic-api.yaml
OS::TripleO::Services::IronicConductor: ../../puppet/services/ironic-conductor.yaml
- OS::TripleO::Services::IronicPxe: ../../puppet/services/ironic-pxe.yaml
OS::TripleO::Services::NovaIronic: ../../puppet/services/nova-ironic.yaml
+parameter_defaults:
+ NovaSchedulerDiscoverHostsInCellsInterval: 15
diff --git a/environments/ssl/enable-tls.yaml b/environments/ssl/enable-tls.yaml
new file mode 100644
index 00000000..c8ed2bd2
--- /dev/null
+++ b/environments/ssl/enable-tls.yaml
@@ -0,0 +1,41 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable SSL on OpenStack Public Endpoints
+# description: |
+# Use this environment to pass in certificates for SSL deployments.
+# For these values to take effect, one of the tls-endpoints-*.yaml environments
+# must also be used.
+parameter_defaults:
+ # The content of the SSL certificate (without Key) in PEM format.
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ SSLCertificate: |
+ The contents of your certificate go here
+
+ # The content of an SSL intermediate CA certificate in PEM format.
+ # Type: string
+ SSLIntermediateCertificate: ''
+
+ # The content of the SSL Key in PEM format.
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ SSLKey: |
+ The contents of the private key go here
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ # The filepath of the certificate as it will be stored in the controller.
+ # Type: string
+ DeployedSSLCertificatePath: /etc/pki/tls/private/overcloud_endpoint.pem
+
+ # *********************
+ # End static parameters
+ # *********************
+resource_registry:
+ OS::TripleO::NodeTLSData: ../../puppet/extraconfig/tls/tls-cert-inject.yaml
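When the sample above is used, SSLCertificate and SSLKey take literal PEM blocks. A minimal sketch with truncated placeholder content (not real key material):

parameter_defaults:
  SSLCertificate: |
    -----BEGIN CERTIFICATE-----
    MIID...truncated...
    -----END CERTIFICATE-----
  SSLKey: |
    -----BEGIN RSA PRIVATE KEY-----
    MIIE...truncated...
    -----END RSA PRIVATE KEY-----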
diff --git a/environments/ssl/inject-trust-anchor-hiera.yaml b/environments/ssl/inject-trust-anchor-hiera.yaml
new file mode 100644
index 00000000..db3f2677
--- /dev/null
+++ b/environments/ssl/inject-trust-anchor-hiera.yaml
@@ -0,0 +1,22 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Inject SSL Trust Anchor on Overcloud Nodes
+# description: |
+# When using an SSL certificate signed by a CA that is not in the default
+# list of CAs, this environment allows adding a custom CA certificate to
+# the overcloud nodes.
+parameter_defaults:
+ # Map containing the CA certs and information needed for deploying them.
+ # Type: json
+ CAMap:
+ first-ca-name:
+ content: |
+ The content of the CA cert goes here
+ second-ca-name:
+ content: |
+ The content of the CA cert goes here
+
diff --git a/environments/ssl/inject-trust-anchor.yaml b/environments/ssl/inject-trust-anchor.yaml
new file mode 100644
index 00000000..521a4191
--- /dev/null
+++ b/environments/ssl/inject-trust-anchor.yaml
@@ -0,0 +1,20 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Inject SSL Trust Anchor on Overcloud Nodes
+# description: |
+# When using an SSL certificate signed by a CA that is not in the default
+# list of CAs, this environment allows adding a custom CA certificate to
+# the overcloud nodes.
+parameter_defaults:
+ # The content of a CA's SSL certificate file in PEM format. This is evaluated on the client side.
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ SSLRootCertificate: |
+ The contents of your certificate go here
+
+resource_registry:
+ OS::TripleO::NodeTLSCAData: ../../puppet/extraconfig/tls/ca-inject.yaml
diff --git a/environments/ssl/tls-endpoints-public-dns.yaml b/environments/ssl/tls-endpoints-public-dns.yaml
new file mode 100644
index 00000000..216afece
--- /dev/null
+++ b/environments/ssl/tls-endpoints-public-dns.yaml
@@ -0,0 +1,131 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Deploy Public SSL Endpoints as DNS Names
+# description: |
+# Use this environment when deploying an SSL-enabled overcloud where the public
+# endpoint is a DNS name.
+parameter_defaults:
+ # Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.
+ # Type: json
+ EndpointMap:
+ AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
+ CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+ CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
+ GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+ GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+ HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+ HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+ IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
+ KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+ KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+ ManilaAdmin: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaInternal: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+ MistralAdmin: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralInternal: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'}
+ NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+ NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
+ NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+ SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+ SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+
diff --git a/environments/ssl/tls-endpoints-public-ip.yaml b/environments/ssl/tls-endpoints-public-ip.yaml
new file mode 100644
index 00000000..d216ab7f
--- /dev/null
+++ b/environments/ssl/tls-endpoints-public-ip.yaml
@@ -0,0 +1,131 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Deploy Public SSL Endpoints as IP Addresses
+# description: |
+# Use this environment when deploying an SSL-enabled overcloud where the public
+# endpoint is an IP address.
+parameter_defaults:
+ # Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.
+ # Type: json
+ EndpointMap:
+ AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'IP_ADDRESS'}
+ BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'IP_ADDRESS'}
+ CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'IP_ADDRESS'}
+ CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+ CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'IP_ADDRESS'}
+ CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'IP_ADDRESS'}
+ GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'IP_ADDRESS'}
+ GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'IP_ADDRESS'}
+ HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'IP_ADDRESS'}
+ HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'IP_ADDRESS'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'IP_ADDRESS'}
+ IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'IP_ADDRESS'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'IP_ADDRESS'}
+ KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+ KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'IP_ADDRESS'}
+ ManilaAdmin: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaInternal: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'IP_ADDRESS'}
+ MistralAdmin: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralInternal: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'}
+ NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'IP_ADDRESS'}
+ NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'IP_ADDRESS'}
+ NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'IP_ADDRESS'}
+ NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'IP_ADDRESS'}
+ OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
+ PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'}
+ SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'}
+ SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+ TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
+ ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'IP_ADDRESS'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'IP_ADDRESS'}
+
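In the IP-based variants above, the IP_ADDRESS token is a placeholder, not a value to edit by hand: the endpoint map template substitutes the VIP of whichever network each endpoint is mapped to. A rough sketch of how one Public row resolves, assuming a hypothetical external VIP of 10.0.0.4:

parameter_defaults:
  EndpointMap:
    # one row shown for brevity; the generated file carries the full map
    GlancePublic: {protocol: 'https', port: '13292', host: 'IP_ADDRESS'}
# At deploy time IP_ADDRESS is replaced with the VIP of the network this
# endpoint is mapped to, yielding e.g. https://10.0.0.4:13292 for an
# external VIP of 10.0.0.4 (the address is an assumed example).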
diff --git a/environments/ssl/tls-everywhere-endpoints-dns.yaml b/environments/ssl/tls-everywhere-endpoints-dns.yaml
new file mode 100644
index 00000000..63157ddd
--- /dev/null
+++ b/environments/ssl/tls-everywhere-endpoints-dns.yaml
@@ -0,0 +1,131 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Deploy All SSL Endpoints as DNS Names
+# description: |
+# Use this environment when deploying an overcloud where all the endpoints are
+# DNS names and there's TLS in all endpoint types.
+parameter_defaults:
+ # Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.
+ # Type: json
+ EndpointMap:
+ AodhAdmin: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+ AodhInternal: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanInternal: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
+ CeilometerAdmin: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+ CeilometerInternal: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+ CephRgwAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ CephRgwInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ CinderAdmin: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+ CinderInternal: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ CongressAdmin: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+ CongressInternal: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+ Ec2ApiInternal: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
+ GlanceAdmin: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+ GlanceInternal: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+ GnocchiAdmin: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+ GnocchiInternal: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+ HeatAdmin: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+ HeatInternal: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+ HeatCfnAdmin: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+ HeatCfnInternal: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+ IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+ IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
+ KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
+ KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+ ManilaAdmin: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+ ManilaInternal: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+ MistralAdmin: {protocol: 'https', port: '8989', host: 'CLOUDNAME'}
+ MistralInternal: {protocol: 'https', port: '8989', host: 'CLOUDNAME'}
+ MistralPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'CLOUDNAME'}
+ NeutronAdmin: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+ NeutronInternal: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+ NovaAdmin: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+ NovaInternal: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaPlacementAdmin: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+ NovaPlacementInternal: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
+ NovaVNCProxyAdmin: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+ NovaVNCProxyInternal: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+ PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+ SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+ SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+ SwiftAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ SwiftInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ TackerAdmin: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+ TackerInternal: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ ZaqarAdmin: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+ ZaqarInternal: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketInternal: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+
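In the DNS-based variants, CLOUDNAME is resolved from the cloud-name parameters rather than from a VIP. A minimal sketch, assuming the deployment sets CloudName to overcloud.example.com (the hostname is an example, not a default):

parameter_defaults:
  CloudName: overcloud.example.com   # assumed value; used for CLOUDNAME substitution
# With that value a row such as
#   KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
# resolves to https://overcloud.example.com:13000 at deploy time.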
diff --git a/environments/storage/cinder-netapp-config.yaml b/environments/storage/cinder-netapp-config.yaml
new file mode 100644
index 00000000..4cdba09b
--- /dev/null
+++ b/environments/storage/cinder-netapp-config.yaml
@@ -0,0 +1,119 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable the Cinder NetApp Backend
+# description: |
+# A Heat environment file which can be used to enable a
+# Cinder NetApp backend, configured via puppet
+parameter_defaults:
+ #
+ # Type: string
+ CinderNetappBackendName: tripleo_netapp
+
+ #
+ # Type: string
+ CinderNetappControllerIps: ''
+
+ #
+ # Type: string
+ CinderNetappCopyOffloadToolPath: ''
+
+ #
+ # Type: string
+ CinderNetappEseriesHostType: linux_dm_mp
+
+ #
+ # Type: string
+ CinderNetappHostType: ''
+
+ #
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ CinderNetappLogin: <None>
+
+ #
+ # Type: string
+ CinderNetappNfsMountOptions: ''
+
+ #
+ # Type: string
+ CinderNetappNfsShares: ''
+
+ #
+ # Type: string
+ CinderNetappNfsSharesConfig: /etc/cinder/shares.conf
+
+ #
+ # Type: string
+ CinderNetappPartnerBackendName: ''
+
+ #
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ CinderNetappPassword: <None>
+
+ #
+ # Type: string
+ CinderNetappSaPassword: ''
+
+ #
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ CinderNetappServerHostname: <None>
+
+ #
+ # Type: string
+ CinderNetappServerPort: 80
+
+ #
+ # Type: string
+ CinderNetappSizeMultiplier: 1.2
+
+ #
+ # Type: string
+ CinderNetappStorageFamily: ontap_cluster
+
+ #
+ # Type: string
+ CinderNetappStoragePools: ''
+
+ #
+ # Type: string
+ CinderNetappStorageProtocol: nfs
+
+ #
+ # Type: string
+ CinderNetappTransportType: http
+
+ #
+ # Type: string
+ CinderNetappVfiler: ''
+
+ #
+ # Type: string
+ CinderNetappVolumeList: ''
+
+ #
+ # Type: string
+ CinderNetappVserver: ''
+
+ #
+ # Type: string
+ CinderNetappWebservicePath: /devmgr/v2
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ #
+ # Type: boolean
+ CinderEnableNetappBackend: True
+
+ # *********************
+ # End static parameters
+ # *********************
+resource_registry:
+ OS::TripleO::ControllerExtraConfigPre: ../../puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
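The parameters flagged Mandatory above default to <None> and must be supplied before the environment is usable. A minimal sketch of a user copy with the required values filled in (credentials and hostname are placeholders, not defaults):

parameter_defaults:
  CinderNetappLogin: admin                       # placeholder
  CinderNetappPassword: replace-with-password    # placeholder
  CinderNetappServerHostname: netapp.example.com # placeholder
resource_registry:
  OS::TripleO::ControllerExtraConfigPre: ../../puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml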
diff --git a/environments/storage/cinder-nfs.yaml b/environments/storage/cinder-nfs.yaml
new file mode 100644
index 00000000..2de3e78c
--- /dev/null
+++ b/environments/storage/cinder-nfs.yaml
@@ -0,0 +1,27 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable Cinder NFS Backend
+# description: |
+# Configure and include this environment to enable the use of an NFS
+# share as the backend for Cinder.
+parameter_defaults:
+ # Whether to enable or not the Iscsi backend for Cinder
+ # Type: boolean
+ CinderEnableIscsiBackend: False
+
+ # Whether to enable or not the NFS backend for Cinder
+ # Type: boolean
+ CinderEnableNfsBackend: True
+
+ # Mount options for NFS mounts used by Cinder NFS backend. Effective when CinderEnableNfsBackend is true.
+ # Type: string
+ CinderNfsMountOptions: ''
+
+ # NFS servers used by Cinder NFS backend. Effective when CinderEnableNfsBackend is true.
+ # Type: comma_delimited_list
+ CinderNfsServers: 192.168.122.1:/export/cinder
+
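CinderNfsServers is a comma_delimited_list, so more than one export can be listed. A short sketch with hypothetical exports and mount options:

parameter_defaults:
  CinderEnableIscsiBackend: false
  CinderEnableNfsBackend: true
  CinderNfsMountOptions: 'rw,sync'   # assumed options
  CinderNfsServers: '192.168.122.1:/export/cinder,192.168.122.2:/export/cinder'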
diff --git a/environments/storage/enable-ceph.yaml b/environments/storage/enable-ceph.yaml
new file mode 100644
index 00000000..c629f74b
--- /dev/null
+++ b/environments/storage/enable-ceph.yaml
@@ -0,0 +1,35 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable Ceph Storage Backend
+# description: |
+# Include this environment to enable Ceph as the backend for
+# Cinder, Nova, Gnocchi, and Glance.
+parameter_defaults:
+ # The short name of the Cinder Backup backend to use.
+ # Type: string
+ CinderBackupBackend: rbd
+
+ # Whether to enable or not the Iscsi backend for Cinder
+ # Type: boolean
+ CinderEnableIscsiBackend: False
+
+ # Whether to enable or not the Rbd backend for Cinder
+ # Type: boolean
+ CinderEnableRbdBackend: True
+
+ # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+ # Type: string
+ GlanceBackend: rbd
+
+ # The short name of the Gnocchi backend to use. Should be one of swift, rbd, or file
+ # Type: string
+ GnocchiBackend: rbd
+
+ # Whether to enable or not the Rbd backend for Nova
+ # Type: boolean
+ NovaEnableRbdBackend: True
+
diff --git a/environments/storage/external-ceph.yaml b/environments/storage/external-ceph.yaml
new file mode 100644
index 00000000..f1c9d516
--- /dev/null
+++ b/environments/storage/external-ceph.yaml
@@ -0,0 +1,78 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Deploy Using an External Ceph Cluster
+# description: |
+# A Heat environment file which can be used to enable the
+# use of an externally managed Ceph cluster.
+parameter_defaults:
+ # The Ceph admin client key. Can be created with ceph-authtool --gen-print-key.
+ # Type: string
+ CephAdminKey: ''
+
+ # The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring.
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ CephClientKey: <None>
+
+ #
+ # Type: string
+ CephClientUserName: openstack
+
+ # The Ceph cluster FSID. Must be a UUID.
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ CephClusterFSID: <None>
+
+ # List of externally managed Ceph Mon Host IPs. Only used for external Ceph deployments.
+ # Type: string
+ CephExternalMonHost: ''
+
+ # Whether to enable or not the Iscsi backend for Cinder
+ # Type: boolean
+ CinderEnableIscsiBackend: False
+
+ # Whether to enable or not the Rbd backend for Cinder
+ # Type: boolean
+ CinderEnableRbdBackend: True
+
+ #
+ # Type: string
+ CinderRbdPoolName: volumes
+
+ # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+ # Type: string
+ GlanceBackend: rbd
+
+ #
+ # Type: string
+ GlanceRbdPoolName: images
+
+ # The short name of the Gnocchi backend to use. Should be one of swift, rbd, or file
+ # Type: string
+ GnocchiBackend: rbd
+
+ #
+ # Type: string
+ GnocchiRbdPoolName: metrics
+
+ # Whether to enable or not the Rbd backend for Nova
+ # Type: boolean
+ NovaEnableRbdBackend: True
+
+ #
+ # Type: string
+ NovaRbdPoolName: vms
+
+ # The default features enabled when creating a block device image. Only applies to format 2 images. Set to '1' for Jewel clients using older Ceph servers.
+ # Type: string
+ RbdDefaultFeatures: ''
+
+resource_registry:
+ OS::TripleO::Services::CephClient: OS::Heat::None
+ OS::TripleO::Services::CephExternal: ../../puppet/services/ceph-external.yaml
+ OS::TripleO::Services::CephMon: OS::Heat::None
+ OS::TripleO::Services::CephOSD: OS::Heat::None
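Here too the Mandatory parameters have no usable default; they describe the existing, externally managed cluster. A sketch of a filled-in copy (key, FSID and monitor addresses are placeholders):

parameter_defaults:
  CephClientKey: 'replace-with-client-keyring-secret'        # placeholder
  CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'    # placeholder UUID
  CephExternalMonHost: '172.16.1.7,172.16.1.8,172.16.1.9'    # placeholder monitor IPs
  CephClientUserName: openstack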
diff --git a/environments/storage/glance-nfs.yaml b/environments/storage/glance-nfs.yaml
new file mode 100644
index 00000000..3c139306
--- /dev/null
+++ b/environments/storage/glance-nfs.yaml
@@ -0,0 +1,34 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable Glance NFS Backend
+# description: |
+# Configure and include this environment to enable the use of an NFS
+# share as the backend for Glance.
+parameter_defaults:
+ # NFS mount options for image storage (when GlanceNfsEnabled is true)
+ # Type: string
+ GlanceNfsOptions: intr,context=system_u:object_r:glance_var_lib_t:s0
+
+ # NFS share to mount for image storage (when GlanceNfsEnabled is true)
+ # Type: string
+ GlanceNfsShare: ''
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+ # Type: string
+ GlanceBackend: file
+
+ # When using GlanceBackend 'file', mount NFS share for image storage.
+ # Type: boolean
+ GlanceNfsEnabled: True
+
+ # *********************
+ # End static parameters
+ # *********************
diff --git a/environments/tls-endpoints-public-dns.yaml b/environments/tls-endpoints-public-dns.yaml
index 44432210..83b32495 100644
--- a/environments/tls-endpoints-public-dns.yaml
+++ b/environments/tls-endpoints-public-dns.yaml
@@ -1,3 +1,7 @@
+# *************************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/tls-endpoints-public-dns.yaml
+# instead.
+# *************************************************************************************
# Use this environment when deploying an SSL-enabled overcloud where the public
# endpoint is a DNS name.
parameter_defaults:
@@ -100,9 +104,9 @@ parameter_defaults:
OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
- PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
- PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
- PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13977', host: 'CLOUDNAME'}
SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
diff --git a/environments/tls-endpoints-public-ip.yaml b/environments/tls-endpoints-public-ip.yaml
index 5ac2918b..8e502972 100644
--- a/environments/tls-endpoints-public-ip.yaml
+++ b/environments/tls-endpoints-public-ip.yaml
@@ -1,3 +1,7 @@
+# *************************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/tls-endpoints-public-ip.yaml
+# instead.
+# *************************************************************************************
# Use this environment when deploying an SSL-enabled overcloud where the public
# endpoint is an IP address.
parameter_defaults:
@@ -100,9 +104,9 @@ parameter_defaults:
OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
- PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
- PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
- PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'}
+ PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13977', host: 'IP_ADDRESS'}
SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'}
diff --git a/environments/tls-everywhere-endpoints-dns.yaml b/environments/tls-everywhere-endpoints-dns.yaml
index 865ed4c3..84cabf10 100644
--- a/environments/tls-everywhere-endpoints-dns.yaml
+++ b/environments/tls-everywhere-endpoints-dns.yaml
@@ -72,8 +72,8 @@ parameter_defaults:
IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
- IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
- IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorAdmin: {protocol: 'https', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorInternal: {protocol: 'https', port: '5050', host: 'CLOUDNAME'}
IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
@@ -100,9 +100,9 @@ parameter_defaults:
OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
- PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
- PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
- PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'https', port: '8977', host: 'CLOUDNAME'}
+ PankoInternal: {protocol: 'https', port: '8977', host: 'CLOUDNAME'}
+ PankoPublic: {protocol: 'https', port: '13977', host: 'CLOUDNAME'}
SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
diff --git a/environments/undercloud.yaml b/environments/undercloud.yaml
index 7a2716da..559d81df 100644
--- a/environments/undercloud.yaml
+++ b/environments/undercloud.yaml
@@ -18,3 +18,5 @@ parameter_defaults:
HeatConvergenceEngine: false
HeatMaxResourcesPerStack: -1
HeatMaxJsonBodySize: 2097152
+ IronicInspectorInterface: br-ctlplane
+ IronicInspectorIpRange: '192.168.24.100,192.168.24.200'
diff --git a/extraconfig/pre_network/ansible_host_config.ansible b/extraconfig/pre_network/ansible_host_config.yaml
index c126c1a1..2d862613 100644
--- a/extraconfig/pre_network/ansible_host_config.ansible
+++ b/extraconfig/pre_network/ansible_host_config.yaml
@@ -28,10 +28,10 @@
lineinfile:
dest: /etc/tuned/cpu-partitioning-variables.conf
regexp: '^isolated_cores=.*'
- line: 'isolated_cores={{ _HOST_CPUS_LIST_ }}'
- when: _HOST_CPUS_LIST_|default("") != ""
+ line: 'isolated_cores={{ _TUNED_CORES_ }}'
+ when: _TUNED_CORES_|default("") != ""
- - name: Tune-d provile activation
+ - name: Tune-d profile activation
shell: tuned-adm profile {{ _TUNED_PROFILE_NAME_ }}
become: true
when: _TUNED_PROFILE_NAME_|default("") != ""
@@ -52,7 +52,7 @@
when:
- item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') != "lo"
# This condition will list all the interfaces except the one with valid IP (which is Provisioning network at this stage)
- # Simpler Version - hostvars[inventory_hostname]['ansible_' + iface_name ]['ipv4']['address'] is undefined
- - hostvars[inventory_hostname]['ansible_' + item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') ]['ipv4']['address'] is undefined
+ # Simpler Version - hostvars[inventory_hostname]['ansible_' + iface_name ]['ipv4'] is undefined
+ - hostvars[inventory_hostname]['ansible_' + item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') ]['ipv4'] is undefined
with_items:
- "{{ ifcfg_files.files }}"
diff --git a/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml b/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml
index 658fea77..41d8f4f6 100644
--- a/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml
+++ b/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml
@@ -7,6 +7,9 @@ description: >
parameters:
server:
type: string
+ # Deprecated parameters: these configurations are deprecated in favor of role-specific parameters.
+ # Use: extraconfig/pre_network/host_config_and_reboot.yaml.
+ # Deprecated in Pike and will be removed in Queens.
{{role}}KernelArgs:
type: string
default: ""
@@ -17,6 +20,13 @@ parameters:
type: string
default: ""
+parameter_groups:
+ - label: deprecated
+ parameters:
+ - {{role}}KernelArgs
+ - {{role}}TunedProfileName
+ - {{role}}HostCpusList
+
conditions:
param_exists:
or:
diff --git a/extraconfig/pre_network/host_config_and_reboot.yaml b/extraconfig/pre_network/host_config_and_reboot.yaml
new file mode 100644
index 00000000..009a0879
--- /dev/null
+++ b/extraconfig/pre_network/host_config_and_reboot.yaml
@@ -0,0 +1,246 @@
+heat_template_version: pike
+
+description: >
+ All configurations which require reboot should be initiated via PreNetworkConfig. After
+ this configuration is completed, the corresponding node will be rebooted.
+
+parameters:
+ server:
+ type: string
+ RoleParameters:
+ type: json
+ description: Role Specific parameters
+ default: {}
+ ServiceNames:
+ type: comma_delimited_list
+ default: []
+ IsolCpusList:
+ default: "0"
+ description: List of cores to be isolated by tuned
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]+"
+ OvsEnableDpdk:
+ default: false
+ description: Whether or not to enable DPDK in OVS
+ type: boolean
+ OvsDpdkCoreList:
+ description: >
+ List of cores to be used for DPDK lcore threads. Note, these threads
+ are used by the OVS control path for validation and handling functions.
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ""
+ OvsDpdkMemoryChannels:
+ description: Number of memory channels per socket to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ""
+ OvsDpdkSocketMemory:
+ default: ""
+ description: >
+ Sets the amount of hugepage memory to assign per NUMA node. It is
+ recommended to use the socket closest to the PCIe slot used for the
+ desired DPDK NIC. The format should be in "<socket 0 mem>, <socket 1
+ mem>, <socket n mem>", where the value is specified in MB. For example:
+ "1024,0".
+ type: string
+ OvsDpdkDriverType:
+ default: "vfio-pci"
+ description: >
+ DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+ this UIO/PMD driver.
+ type: string
+ OvsPmdCoreList:
+ description: >
+ A list or range of CPU cores for PMD threads to be pinned to. Note, NIC
+ location to cores on socket, number of hyper-threaded logical cores, and
+ desired number of PMD threads can all play a role in configuring this
+ setting. These cores should be on the same socket where
+ OvsDpdkSocketMemory is assigned. If using hyperthreading then specify
+ both logical cores that would equal the physical core. Also, specifying
+ more than one core will trigger multiple PMD threads to be spawned which
+ may improve dataplane performance.
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ type: string
+ default: ""
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Queens cycle.
+ HostCpusList:
+ description: List of cores to be used for host process
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]+"
+ default: '0'
+ NeutronDpdkCoreList:
+ description: List of cores to be used for DPDK Poll Mode Driver
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkMemoryChannels:
+ description: Number of memory channels to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ''
+ NeutronDpdkSocketMemory:
+ default: ''
+ description: Memory allocated for each socket
+ type: string
+ NeutronDpdkDriverType:
+ default: "vfio-pci"
+ description: DPDK Driver type
+ type: string
+
+conditions:
+ is_host_config_required: {not: {equals: [{get_param: [RoleParameters, KernelArgs]}, ""]}}
+ # YAQL is enabled in conditions with https://review.openstack.org/#/c/467506/
+ is_dpdk_config_required:
+ or:
+ - yaql:
+ expression: $.data.service_names.contains('neutron_ovs_dpdk_agent')
+ data:
+ service_names: {get_param: ServiceNames}
+ - {get_param: OvsEnableDpdk}
+ - {get_param: [RoleParameters, OvsEnableDpdk]}
+ is_reboot_config_required:
+ or:
+ - is_host_config_required
+ - is_dpdk_config_required
+ l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+ pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+ mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+ socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+ driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+ isol_cpus_empty: {equals: [{get_param: IsolCpusList}, '0']}
+
+resources:
+ RoleParametersValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_replace:
+ - map_replace:
+ - IsolCpusList: IsolCpusList
+ OvsDpdkCoreList: OvsDpdkCoreList
+ OvsDpdkMemoryChannels: OvsDpdkMemoryChannels
+ OvsDpdkSocketMemory: OvsDpdkSocketMemory
+ OvsDpdkDriverType: OvsDpdkDriverType
+ OvsPmdCoreList: OvsDpdkCoreList
+ - values: {get_param: [RoleParameters]}
+ - values:
+ IsolCpusList: {if: [isol_cpus_empty, {get_param: HostCpusList}, {get_param: IsolCpusList}]}
+ OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+ OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+ OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+ OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+ OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
+ HostParametersConfig:
+ type: OS::Heat::SoftwareConfig
+ condition: is_host_config_required
+ properties:
+ group: ansible
+ inputs:
+ - name: _KERNEL_ARGS_
+ - name: _TUNED_PROFILE_NAME_
+ - name: _TUNED_CORES_
+ outputs:
+ - name: result
+ config:
+ get_file: ansible_host_config.yaml
+
+ HostParametersDeployment:
+ type: OS::Heat::SoftwareDeployment
+ condition: is_host_config_required
+ properties:
+ name: HostParametersDeployment
+ server: {get_param: server}
+ config: {get_resource: HostParametersConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+ input_values:
+ _KERNEL_ARGS_: {get_param: [RoleParameters, KernelArgs]}
+ _TUNED_PROFILE_NAME_: {get_param: [RoleParameters, TunedProfileName]}
+ _TUNED_CORES_: {get_param: [RoleParameters, IsolCpusList]}
+
+ EnableDpdkConfig:
+ type: OS::Heat::SoftwareConfig
+ condition: is_dpdk_config_required
+ properties:
+ group: script
+ config:
+ str_replace:
+ template: |
+ #!/bin/bash
+ set -x
+ # DO NOT use --detailed-exitcodes
+ puppet apply --logdest console \
+ --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
+ -e '
+ class {"vswitch::dpdk":
+ host_core_list => "$HOST_CORES",
+ pmd_core_list => "$PMD_CORES",
+ memory_channels => "$MEMORY_CHANNELS",
+ socket_mem => "$SOCKET_MEMORY",
+ }
+ '
+ params:
+ $HOST_CORES: {get_attr: [RoleParametersValue, value, OvsDpdkCoreList]}
+ $PMD_CORES: {get_attr: [RoleParametersValue, value, OvsPmdCoreList]}
+ $MEMORY_CHANNELS: {get_attr: [RoleParametersValue, value, OvsDpdkMemoryChannels]}
+ $SOCKET_MEMORY: {get_attr: [RoleParametersValue, value, OvsDpdkSocketMemory]}
+
+ EnableDpdkDeployment:
+ type: OS::Heat::SoftwareDeployment
+ condition: is_dpdk_config_required
+ properties:
+ name: EnableDpdkDeployment
+ server: {get_param: server}
+ config: {get_resource: EnableDpdkConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+
+ RebootConfig:
+ type: OS::Heat::SoftwareConfig
+ condition: is_reboot_config_required
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ # Stop os-collect-config to avoid any race collecting another
+ # deployment before reboot happens
+ systemctl stop os-collect-config.service
+ /sbin/reboot
+
+ RebootDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: HostParametersDeployment
+ condition: is_reboot_config_required
+ properties:
+ name: RebootDeployment
+ server: {get_param: server}
+ config: {get_resource: RebootConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+ signal_transport: NO_SIGNAL
+
+outputs:
+ result:
+ condition: is_host_config_required
+ value:
+ get_attr: [HostParametersDeployment, result]
+ stdout:
+ condition: is_host_config_required
+ value:
+ get_attr: [HostParametersDeployment, deploy_stdout]
+ stderr:
+ condition: is_host_config_required
+ value:
+ get_attr: [HostParametersDeployment, deploy_stderr]
+ status_code:
+ condition: is_host_config_required
+ value:
+ get_attr: [HostParametersDeployment, deploy_status_code]
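The template above only acts when RoleParameters carries the relevant keys: KernelArgs triggers the host-config and reboot path, while the Ovs* keys (or the neutron_ovs_dpdk_agent service) trigger the DPDK path. A rough sketch of role-specific parameters that would exercise both paths, assuming a role named ComputeOvsDpdk (the role name and all values are assumptions, not defaults):

parameter_defaults:
  ComputeOvsDpdkParameters:
    KernelArgs: "default_hugepagesz=1GB hugepagesz=1G hugepages=32 intel_iommu=on iommu=pt"
    TunedProfileName: "cpu-partitioning"
    IsolCpusList: "2-19,22-39"
    OvsEnableDpdk: true
    OvsDpdkCoreList: "0,20"
    OvsPmdCoreList: "2,22"
    OvsDpdkMemoryChannels: "4"
    OvsDpdkSocketMemory: "1024,1024"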
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index f17a073a..d1dd5d1d 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -11,7 +11,7 @@ function log_debug {
}
function is_bootstrap_node {
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid | tr '[:upper:]' '[:lower:]')" = "$(facter hostname | tr '[:upper:]' '[:lower:]')" ]; then
log_debug "Node is bootstrap"
echo "true"
fi
diff --git a/extraconfig/tasks/post_puppet_pacemaker.j2.yaml b/extraconfig/tasks/post_puppet_pacemaker.j2.yaml
index 7fc258d6..6bf5afb0 100644
--- a/extraconfig/tasks/post_puppet_pacemaker.j2.yaml
+++ b/extraconfig/tasks/post_puppet_pacemaker.j2.yaml
@@ -10,8 +10,8 @@ parameters:
resources:
-{%- for role in roles -%}
-{% if "controller" in role.tags %}
+{%- for role in roles %}
+ {%- if 'controller' in role.tags %}
{{role.name}}PostPuppetMaintenanceModeConfig:
type: OS::Heat::SoftwareConfig
properties:
@@ -37,6 +37,6 @@ resources:
properties:
servers: {get_param: [servers, {{role.name}}]}
input_values: {get_param: input_values}
-{%- endif -%}
-{% endfor %}
+ {%- endif %}
+{%- endfor %}
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index cb9cc5b1..0c4a7928 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -49,7 +49,7 @@ fi
# of packages to update (the check for -z "$update_identifier" guarantees that this
# is run only on overcloud stack update -i)
if [[ "$pacemaker_status" == "active" && \
- "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name)" == "$(facter hostname)" ]] ; then \
+ "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name | tr '[:upper:]' '[:lower:]')" == "$(facter hostname | tr '[:upper:]' '[:lower:]')" ]] ; then \
# OCF scripts don't cope with -eu
echo "Verifying if we need to fix up any IPv6 VIPs"
set +eu
diff --git a/net-config-bond.yaml b/net-config-bond.yaml
index f92f9a13..95b47455 100644
--- a/net-config-bond.yaml
+++ b/net-config-bond.yaml
@@ -4,19 +4,14 @@ description: >
parameters:
BondInterfaceOvsOptions:
default: ''
- description: 'The ovs_options string for the bond interface. Set things like
-
- lacp=active and/or bond_mode=balance-slb using this option.
-
- '
+ description: The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
- description: 'The balance-tcp bond mode is known to cause packet loss and
-
+ description: The balance-tcp bond mode is known to cause packet loss and
should not be used in BondInterfaceOvsOptions.
-
- '
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
diff --git a/network/config/bond-with-vlans/ceph-storage.yaml b/network/config/bond-with-vlans/ceph-storage.yaml
index 97177c41..9683456a 100644
--- a/network/config/bond-with-vlans/ceph-storage.yaml
+++ b/network/config/bond-with-vlans/ceph-storage.yaml
@@ -32,16 +32,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/cinder-storage.yaml b/network/config/bond-with-vlans/cinder-storage.yaml
index 5456c2cc..3ad6d653 100644
--- a/network/config/bond-with-vlans/cinder-storage.yaml
+++ b/network/config/bond-with-vlans/cinder-storage.yaml
@@ -32,16 +32,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/compute-dpdk.yaml b/network/config/bond-with-vlans/compute-dpdk.yaml
index 607d346f..095c4973 100644
--- a/network/config/bond-with-vlans/compute-dpdk.yaml
+++ b/network/config/bond-with-vlans/compute-dpdk.yaml
@@ -32,8 +32,9 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
ExternalNetworkVlanID:
default: 10
diff --git a/network/config/bond-with-vlans/compute.yaml b/network/config/bond-with-vlans/compute.yaml
index 448d4e2a..8fff1378 100644
--- a/network/config/bond-with-vlans/compute.yaml
+++ b/network/config/bond-with-vlans/compute.yaml
@@ -32,16 +32,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/controller-no-external.yaml b/network/config/bond-with-vlans/controller-no-external.yaml
index 8ac5cda7..4901f94d 100644
--- a/network/config/bond-with-vlans/controller-no-external.yaml
+++ b/network/config/bond-with-vlans/controller-no-external.yaml
@@ -32,16 +32,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/controller-v6.yaml b/network/config/bond-with-vlans/controller-v6.yaml
index 25796484..33c6fa65 100644
--- a/network/config/bond-with-vlans/controller-v6.yaml
+++ b/network/config/bond-with-vlans/controller-v6.yaml
@@ -34,16 +34,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: bond_mode=active-backup
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/controller.yaml b/network/config/bond-with-vlans/controller.yaml
index e4b30120..100821b7 100644
--- a/network/config/bond-with-vlans/controller.yaml
+++ b/network/config/bond-with-vlans/controller.yaml
@@ -32,16 +32,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: bond_mode=active-backup
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/swift-storage.yaml b/network/config/bond-with-vlans/swift-storage.yaml
index 6371ceb5..0ede081f 100644
--- a/network/config/bond-with-vlans/swift-storage.yaml
+++ b/network/config/bond-with-vlans/swift-storage.yaml
@@ -32,16 +32,14 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
- description: 'The balance-tcp bond mode is known to cause packet loss and
-
+ description: The balance-tcp bond mode is known to cause packet loss and
should not be used in BondInterfaceOvsOptions.
-
- '
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
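The reworded ``BondInterfaceOvsOptions`` description now covers both OVS and Linux bonds. A minimal, hypothetical environment-file override using only the values named in that description (not part of this change) could look like::

    parameter_defaults:
      # OVS bond: LACP with source/destination-MAC load balancing
      BondInterfaceOvsOptions: "lacp=active bond_mode=balance-slb"
      # For a Linux bond, 802.3ad aggregation would instead be:
      # BondInterfaceOvsOptions: "mode=4"

The allowed_pattern constraint above still rejects any value containing balance-tcp.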
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
index f5f2b97e..ece40085 100644
--- a/network/endpoints/endpoint_data.yaml
+++ b/network/endpoints/endpoint_data.yaml
@@ -134,7 +134,7 @@ Panko:
net_param: Public
Admin:
net_param: PankoApi
- port: 8779
+ port: 8977
Cinder:
Internal:
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index 4509bca9..42d1fbd0 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -117,9 +117,9 @@ parameters:
OctaviaAdmin: {protocol: http, port: '9876', host: IP_ADDRESS}
OctaviaInternal: {protocol: http, port: '9876', host: IP_ADDRESS}
OctaviaPublic: {protocol: http, port: '9876', host: IP_ADDRESS}
- PankoAdmin: {protocol: http, port: '8779', host: IP_ADDRESS}
- PankoInternal: {protocol: http, port: '8779', host: IP_ADDRESS}
- PankoPublic: {protocol: http, port: '8779', host: IP_ADDRESS}
+ PankoAdmin: {protocol: http, port: '8977', host: IP_ADDRESS}
+ PankoInternal: {protocol: http, port: '8977', host: IP_ADDRESS}
+ PankoPublic: {protocol: http, port: '8977', host: IP_ADDRESS}
SaharaAdmin: {protocol: http, port: '8386', host: IP_ADDRESS}
SaharaInternal: {protocol: http, port: '8386', host: IP_ADDRESS}
SaharaPublic: {protocol: http, port: '8386', host: IP_ADDRESS}
diff --git a/network/ports/ctlplane_vip.yaml b/network/ports/ctlplane_vip.yaml
index 386520cf..bb54ca62 100644
--- a/network/ports/ctlplane_vip.yaml
+++ b/network/ports/ctlplane_vip.yaml
@@ -34,7 +34,7 @@ parameters:
resources:
VipPort:
- type: OS::Neutron::Port
+ type: OS::TripleO::Network::Ports::ControlPlaneVipPort
properties:
network: {get_param: ControlPlaneNetwork}
name: {get_param: PortName}
diff --git a/network/ports/net_ip_list_map.yaml b/network/ports/net_ip_list_map.yaml
index c3734afe..a9111ed9 100644
--- a/network/ports/net_ip_list_map.yaml
+++ b/network/ports/net_ip_list_map.yaml
@@ -133,6 +133,20 @@ outputs:
SERVICE: {get_attr: [EnabledServicesValue, value]}
- values: {get_param: ServiceNetMap}
- values: {get_attr: [NetIpMapValue, value]}
+ ctlplane_service_ips:
+ description: >
+ Map of enabled services to a list of their ctlplane IP addresses
+ value:
+ yaql:
+ expression: dict($.data.map.items().where(len($[1]) > 0))
+ data:
+ map:
+ map_merge:
+ repeat:
+ template:
+ SERVICE_ctlplane_node_ips: {get_param: ControlPlaneIpList}
+ for_each:
+ SERVICE: {get_attr: [EnabledServicesValue, value]}
service_hostnames:
description: >
Map of enabled services to a list of hostnames where they're running
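The new ``ctlplane_service_ips`` output repeats ``ControlPlaneIpList`` under a ``SERVICE_ctlplane_node_ips`` key for every enabled service, and the yaql expression then drops any entry whose IP list is empty (for example when a role has no nodes). A sketch of the intermediate and final data, with hypothetical service names and addresses::

    # after repeat/map_merge
    haproxy_ctlplane_node_ips: [192.168.24.10, 192.168.24.11]
    ntp_ctlplane_node_ips: []
    # after dict($.data.map.items().where(len($[1]) > 0))
    haproxy_ctlplane_node_ips: [192.168.24.10, 192.168.24.11]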
diff --git a/network/scripts/run-os-net-config.sh b/network/scripts/run-os-net-config.sh
index 8fe2d270..864da24b 100755
--- a/network/scripts/run-os-net-config.sh
+++ b/network/scripts/run-os-net-config.sh
@@ -110,7 +110,7 @@ EOF_CAT
}
if [ -n '$network_config' ]; then
- if [ -z "${disable_configure_safe_defaults:-''}" ]; then
+ if [ -z "${disable_configure_safe_defaults:-}" ]; then
trap configure_safe_defaults EXIT
fi
diff --git a/network/service_net_map.j2.yaml b/network/service_net_map.j2.yaml
index d3d8cbdb..ba8e5568 100644
--- a/network/service_net_map.j2.yaml
+++ b/network/service_net_map.j2.yaml
@@ -42,7 +42,7 @@ parameters:
CinderApiNetwork: internal_api
CinderIscsiNetwork: storage
CongressApiNetwork: internal_api
- GlanceApiNetwork: storage
+ GlanceApiNetwork: internal_api
IronicApiNetwork: ctlplane
IronicNetwork: ctlplane
IronicInspectorNetwork: ctlplane
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index d4c301bb..b1a35293 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -4,10 +4,12 @@ resource_registry:
OS::TripleO::PostDeploySteps: puppet/post.yaml
OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
+ OS::TripleO::AllNodesDeployment: OS::Heat::StructuredDeployments
OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
OS::TripleO::Ssh::HostPubKey: extraconfig/tasks/ssh/host_public_key.yaml
OS::TripleO::Ssh::KnownHostsConfig: extraconfig/tasks/ssh/known_hosts_config.yaml
OS::TripleO::DefaultPasswords: default_passwords.yaml
+ OS::TripleO::RandomString: OS::Heat::RandomString
# Tasks (for internal TripleO usage)
OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
@@ -106,7 +108,7 @@ resource_registry:
OS::TripleO::UpgradeConfig: puppet/upgrade_config.yaml
# services
- OS::TripleO::Services: puppet/services/services.yaml
+ OS::TripleO::Services: services.yaml
OS::TripleO::Services::Apache: puppet/services/apache.yaml
OS::TripleO::Services::CACerts: puppet/services/ca-certs.yaml
OS::TripleO::Services::CephMds: OS::Heat::None
@@ -200,6 +202,7 @@ resource_registry:
# Undercloud Telemetry services
OS::TripleO::Services::UndercloudCeilometerAgentCentral: OS::Heat::None
OS::TripleO::Services::UndercloudCeilometerAgentNotification: OS::Heat::None
+ OS::TripleO::Services::UndercloudCeilometerAgentIpmi: OS::Heat::None
#Gnocchi services
OS::TripleO::Services::GnocchiApi: puppet/services/gnocchi-api.yaml
@@ -236,6 +239,7 @@ resource_registry:
OS::TripleO::Services::MistralExecutor: OS::Heat::None
OS::TripleO::Services::IronicApi: OS::Heat::None
OS::TripleO::Services::IronicConductor: OS::Heat::None
+ OS::TripleO::Services::IronicInspector: OS::Heat::None
OS::TripleO::Services::NovaIronic: OS::Heat::None
OS::TripleO::Services::TripleoPackages: puppet/services/tripleo-packages.yaml
OS::TripleO::Services::TripleoFirewall: puppet/services/tripleo-firewall.yaml
@@ -263,6 +267,7 @@ resource_registry:
OS::TripleO::Services::NeutronVppAgent: OS::Heat::None
OS::TripleO::Services::Docker: OS::Heat::None
OS::TripleO::Services::CertmongerUser: OS::Heat::None
+ OS::TripleO::Services::Iscsid: OS::Heat::None
parameter_defaults:
EnablePackageInstall: false
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index f8655b18..1848e09a 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -101,8 +101,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
ServerMetadata:
default: {}
description: >
@@ -242,15 +242,15 @@ resources:
HOST: {get_param: CloudNameStorageManagement}
HeatAuthEncryptionKey:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
PcsdPassword:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 16
HorizonSecret:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 10
@@ -334,7 +334,7 @@ resources:
servers: {get_attr: [{{role.name}}Servers, value]}
{{role.name}}AllNodesDeployment:
- type: OS::Heat::StructuredDeployments
+ type: OS::TripleO::AllNodesDeployment
depends_on:
{% for role_inner in roles %}
- {{role_inner.name}}HostsDeployment
@@ -446,6 +446,7 @@ resources:
MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChainRoleData, value, monitoring_subscriptions]}
ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_metadata_settings]}
DeploymentServerBlacklistDict: {get_attr: [DeploymentServerBlacklistDict, value]}
+ RoleParameters: {get_param: {{role.name}}Parameters}
{% endfor %}
{% for role in roles %}
@@ -574,12 +575,12 @@ resources:
UpdateIdentifier: {get_param: UpdateIdentifier}
MysqlRootPassword:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 10
RabbitCookie:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 20
salt: {get_param: RabbitCookieSalt}
@@ -734,12 +735,34 @@ resources:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}Servers, value]}
{% endfor %}
+ stack_name: {get_param: 'OS::stack_name'}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+ ctlplane_service_ips:
+ # Note (shardy) this somewhat complex yaql may be replaced
+ # with a map_deep_merge function in ocata. It merges the
+ # list of maps, but appends to colliding lists when a service
+ # is deployed on more than one role
+ yaql:
+ expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
+ data:
+ l:
+{% for role in roles %}
+ - {get_attr: [{{role.name}}IpListMap, ctlplane_service_ips]}
+{% endfor %}
role_data:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]}
{% endfor %}
+ ServerOsCollectConfigData:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
+{% endfor %}
+
outputs:
ManagedEndpoints:
description: Asserts that the keystone endpoints have been provisioned.
@@ -790,3 +813,15 @@ outputs:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
{% endfor %}
+ ServerOsCollectConfigData:
+ description: The os-collect-config configuration associated with each server resource
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
+{% endfor %}
+ VipMap:
+ description: Mapping of each network to VIP addresses. Also includes the Redis VIP.
+ value:
+ map_merge:
+ - {get_attr: [VipMap, net_ip_map]}
+ - redis: {get_attr: [RedisVirtualIP, ip_address]}
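As the inline note explains, the ``ctlplane_service_ips`` yaql merges the per-role maps produced above and concatenates the lists when the same service runs on more than one role. A rough illustration with hypothetical roles and addresses::

    # input: one map per role
    - mysql_ctlplane_node_ips: [192.168.24.10]
    - mysql_ctlplane_node_ips: [192.168.24.20]
      swift_storage_ctlplane_node_ips: [192.168.24.20]
    # merged result (the ctlplane_service_ips passed into puppet/post.j2.yaml)
    mysql_ctlplane_node_ips: [192.168.24.10, 192.168.24.20]
    swift_storage_ctlplane_node_ips: [192.168.24.20]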
diff --git a/plan-samples/README.rst b/plan-samples/README.rst
new file mode 100644
index 00000000..44b9d0cd
--- /dev/null
+++ b/plan-samples/README.rst
@@ -0,0 +1,22 @@
+=================================
+Samples for plan-environment.yaml
+=================================
+
+The ``plan-environment.yaml`` file provides the details of the plan to be
+deployed by TripleO. Along with the details of the heat environments and
+parameters, it is also possible to provide workflow-specific parameters to the
+TripleO mistral workflows. A new section ``workflow_parameters`` has been
+added for this purpose, giving a clear separation between heat environment
+parameters and workflow-only parameters.
+These customized plan environment files can be provided with the ``-p`` option
+to the ``openstack overcloud deploy`` and ``openstack overcloud plan create``
+commands. The sample format for providing workflow-specific parameters is::
+
+ workflow_parameters:
+ tripleo.derive_params.v1.derive_parameters:
+ # DPDK Parameters
+ number_of_pmd_cpu_threads_per_numa_node: 2
+
+
+All the parameters specified under the workflow name will be passed as
+``user_input`` to the workflow when it is invoked from the tripleoclient.
\ No newline at end of file
diff --git a/plan-samples/plan-environment-derived-params.yaml b/plan-samples/plan-environment-derived-params.yaml
new file mode 100644
index 00000000..964e57d2
--- /dev/null
+++ b/plan-samples/plan-environment-derived-params.yaml
@@ -0,0 +1,35 @@
+version: 1.0
+
+name: overcloud
+description: >
+ Default Deployment plan
+template: overcloud.yaml
+environments:
+ - path: overcloud-resource-registry-puppet.yaml
+workflow_parameters:
+ tripleo.derive_params.v1.derive_parameters:
+ ######### DPDK Parameters #########
+ # Specifies the minimum number of CPU threads to be allocated for DPDK
+ # PMD threads. The actual allocation is based on the network config: if
+ # a DPDK port is associated with a NUMA node, then this configuration
+ # will be used, else 0.
+ number_of_pmd_cpu_threads_per_numa_node: 4
+ # Amount of memory to be configured as huge pages, in percentage. Out of the
+ # total available memory (excluding the NovaReservedHostMemory), the
+ # specified percentage of the remaining is configured as huge pages.
+ huge_page_allocation_percentage: 90
+ ######### HCI Parameters #########
+ hci_profile: default
+ hci_profile_config:
+ default:
+ average_guest_memory_size_in_mb: 2048
+ average_guest_cpu_utilization_percentage: 50
+ many_small_vms:
+ average_guest_memory_size_in_mb: 1024
+ average_guest_cpu_utilization_percentage: 20
+ few_large_vms:
+ average_guest_memory_size_in_mb: 4096
+ average_guest_cpu_utilization_percentage: 80
+ nfv_default:
+ average_guest_memory_size_in_mb: 8192
+ average_guest_cpu_utilization_percentage: 90
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index baafe03d..b1284452 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -12,10 +12,8 @@ parameters:
type: string
cloud_name_ctlplane:
type: string
- # FIXME(shardy) this can be comma_delimited_list when
- # https://bugs.launchpad.net/heat/+bug/1617019 is fixed
enabled_services:
- type: string
+ type: comma_delimited_list
controller_ips:
type: comma_delimited_list
logging_groups:
@@ -118,7 +116,10 @@ resources:
map_merge:
- tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: logging_sources}
- tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: logging_groups}
- - enabled_services: {get_param: enabled_services}
+ - enabled_services:
+ yaql:
+ expression: $.data.distinct()
+ data: {get_param: enabled_services}
# This writes out a mapping of service_name_enabled: 'true'
# For any services not enabled, hiera foo_enabled will
# return nil, as it's undefined
@@ -129,8 +130,7 @@ resources:
# https://bugs.launchpad.net/heat/+bug/1617203
SERVICE_enabled: 'true'
for_each:
- SERVICE:
- str_split: [',', {get_param: enabled_services}]
+ SERVICE: {get_param: enabled_services}
# Dynamically generate per-service network data
# This works as follows (outer->inner functions)
# yaql - filters services where no mapping exists in ServiceNetMap
@@ -150,8 +150,7 @@ resources:
template:
SERVICE_network: SERVICE_network
for_each:
- SERVICE:
- str_split: [',', {get_param: enabled_services}]
+ SERVICE: {get_param: enabled_services}
- values: {get_param: ServiceNetMap}
# Keystone doesn't provide separate entries for the public
# and admin endpoints, so we need to add them here manually
@@ -203,8 +202,7 @@ resources:
template:
SERVICE_vip: SERVICE_network
for_each:
- SERVICE:
- str_split: [',', {get_param: enabled_services}]
+ SERVICE: {get_param: enabled_services}
- values: {get_param: ServiceNetMap}
- values: {get_param: NetVipMap}
- keystone_admin_api_vip:
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index 60ddeb8a..612a4a01 100644
--- a/puppet/blockstorage-role.yaml
+++ b/puppet/blockstorage-role.yaml
@@ -69,8 +69,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
BlockStorageServerMetadata:
default: {}
description: >
@@ -139,6 +139,28 @@ parameters:
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
@@ -146,6 +168,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
BlockStorage:
@@ -174,6 +202,12 @@ resources:
- {get_param: BlockStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: BlockStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -372,6 +406,8 @@ resources:
type: OS::TripleO::BlockStorage::PreNetworkConfig
properties:
server: {get_resource: BlockStorage}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
@@ -466,6 +502,7 @@ resources:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -608,3 +645,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [BlockStorage, os_collect_config]}
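Each role now accepts ``RoleParameters`` and ``DeploymentSwiftDataMap``. The latter is keyed by the Heat-assigned hostname; when a server has no entry, the ``deployment_swift_data`` server property is left as an empty map. A hypothetical ``parameter_defaults`` entry following the format shown in the parameter description::

    parameter_defaults:
      DeploymentSwiftDataMap:
        overcloud-blockstorage-0:
          container: overcloud-blockstorage
          object: 0

The same pattern applies to the other role templates changed below.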
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index 9d30ab29..e7afcb40 100644
--- a/puppet/cephstorage-role.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -75,8 +75,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
CephStorageServerMetadata:
default: {}
description: >
@@ -145,6 +145,28 @@ parameters:
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
@@ -152,6 +174,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
CephStorage:
@@ -180,6 +208,12 @@ resources:
- {get_param: CephStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: CephStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -378,6 +412,8 @@ resources:
type: OS::TripleO::CephStorage::PreNetworkConfig
properties:
server: {get_resource: CephStorage}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
@@ -471,6 +507,7 @@ resources:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -619,3 +656,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [CephStorage, os_collect_config]}
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index 06a31ec9..5a662e86 100644
--- a/puppet/compute-role.yaml
+++ b/puppet/compute-role.yaml
@@ -37,7 +37,7 @@ parameters:
type: string
NeutronPublicInterface:
default: nic1
- description: A port to add to the NeutronPhysicalBridge.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
NodeIndex:
type: number
@@ -90,8 +90,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
NovaComputeServerMetadata:
default: {}
description: >
@@ -157,8 +157,36 @@ parameters:
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
server_not_blacklisted:
not:
equals:
@@ -194,6 +222,12 @@ resources:
- {get_param: NovaComputeServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: NovaComputeSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -381,6 +415,8 @@ resources:
type: OS::TripleO::Compute::PreNetworkConfig
properties:
server: {get_resource: NovaCompute}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkConfig:
type: OS::TripleO::Compute::Net::SoftwareConfig
@@ -479,6 +515,7 @@ resources:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
NovaComputeDeployment:
type: OS::TripleO::SoftwareDeployment
@@ -645,3 +682,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
value:
{get_resource: NovaCompute}
condition: server_not_blacklisted
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [NovaCompute, os_collect_config]}
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index cccfdef1..09e5b2b9 100644
--- a/puppet/controller-role.yaml
+++ b/puppet/controller-role.yaml
@@ -58,9 +58,13 @@ parameters:
type: string
constraints:
- custom_constraint: nova.keypair
+ NeutronPhysicalBridge:
+ default: 'br-ex'
+ description: An OVS bridge to create for accessing external networks.
+ type: string
NeutronPublicInterface:
default: nic1
- description: What interface to bridge onto br-ex for network nodes.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
ServiceNetMap:
default: {}
@@ -104,8 +108,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
ControllerServerMetadata:
default: {}
description: >
@@ -171,6 +175,28 @@ parameters:
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
parameter_groups:
- label: deprecated
@@ -184,7 +210,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
-
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
@@ -214,6 +245,12 @@ resources:
- {get_param: ControllerServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ControllerSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -401,6 +438,8 @@ resources:
type: OS::TripleO::Controller::PreNetworkConfig
properties:
server: {get_resource: Controller}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkConfig:
type: OS::TripleO::Controller::Net::SoftwareConfig
@@ -426,7 +465,7 @@ resources:
- {get_param: NetworkDeploymentActions}
- []
input_values:
- bridge_name: br-ex
+ bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
# Resource for site-specific injection of root certificate
@@ -541,6 +580,7 @@ resources:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
# Hook for site-specific additional pre-deployment config, e.g extra hieradata
ControllerExtraConfigPre:
@@ -691,3 +731,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
tls_cert_modulus_md5:
description: MD5 checksum of the TLS Certificate Modulus
value: {get_attr: [NodeTLSData, cert_modulus_md5]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [Controller, os_collect_config]}
diff --git a/puppet/deploy-artifacts.sh b/puppet/deploy-artifacts.sh
index 4e1ad89f..e4d20b49 100644
--- a/puppet/deploy-artifacts.sh
+++ b/puppet/deploy-artifacts.sh
@@ -10,16 +10,20 @@ if [ -n "$artifact_urls" ]; then
for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
curl --globoff -o $TMP_DATA/file_data "$URL"
if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
- yum install -y $TMP_DATA/file_data
+ mv $TMP_DATA/file_data $TMP_DATA/file_data.rpm
+ yum install -y $TMP_DATA/file_data.rpm
+ rm $TMP_DATA/file_data.rpm
elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
pushd /
tar xvzf $TMP_DATA/file_data
popd
else
- echo "ERROR: Unsupported file format."
+ echo "ERROR: Unsupported file format: $URL"
exit 1
fi
- rm $TMP_DATA/file_data
+ if [ -f $TMP_DATA/file_data ]; then
+ rm $TMP_DATA/file_data
+ fi
done
else
echo "No artifact_urls was set. Skipping..."
diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml
index b44095bd..b9e0860e 100644
--- a/puppet/major_upgrade_steps.j2.yaml
+++ b/puppet/major_upgrade_steps.j2.yaml
@@ -8,7 +8,9 @@ description: 'Upgrade steps for all roles'
parameters:
servers:
type: json
-
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
@@ -206,6 +208,7 @@ resources:
{%- endfor %}
properties:
servers: {get_param: servers}
+ stack_name: {get_param: stack_name}
role_data: {get_param: role_data}
outputs:
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 19ea1b65..4a1670f8 100644
--- a/puppet/objectstorage-role.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -69,8 +69,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
SwiftStorageServerMetadata:
default: {}
description: >
@@ -139,6 +139,29 @@ parameters:
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ default: {}
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
@@ -146,6 +169,12 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
@@ -174,6 +203,12 @@ resources:
- {get_param: SwiftStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ObjectStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -372,6 +407,8 @@ resources:
type: OS::TripleO::ObjectStorage::PreNetworkConfig
properties:
server: {get_resource: SwiftStorage}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
@@ -451,6 +488,7 @@ resources:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
SwiftStorageHieraDeploy:
type: OS::Heat::StructuredDeployment
@@ -607,3 +645,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [SwiftStorage, os_collect_config]}
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
index 3a15cec6..67e1ecfd 100644
--- a/puppet/post.j2.yaml
+++ b/puppet/post.j2.yaml
@@ -8,7 +8,9 @@ parameters:
servers:
type: json
description: Mapping of Role name e.g Controller to a list of servers
-
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
@@ -23,6 +25,7 @@ parameters:
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
+ ctlplane_service_ips:
+ type: json
-resources:
{% include 'puppet-steps.j2' %}
diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2
index 360c633a..82c6171e 100644
--- a/puppet/puppet-steps.j2
+++ b/puppet/puppet-steps.j2
@@ -1,3 +1,19 @@
+{% set deploy_steps_max = 6 %}
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}_Enabled:
+ or:
+ {% for role in roles %}
+ - not:
+ equals:
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+ - ''
+ - False
+ {% endfor %}
+{% endfor %}
+
+resources:
# Post deployment steps for all roles
# A single config is re-applied with an incrementing step number
{% for role in roles %}
@@ -24,17 +40,26 @@
StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
# Step through a series of configuration steps
-{% for step in range(1, 6) %}
+{% for step in range(1, deploy_steps_max) %}
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
depends_on:
+ - WorkflowTasks_Step{{step}}_Execution
+ # TODO(gfidente): the following if/else condition
+ # replicates what is already defined for the
+ # WorkflowTasks_StepX resource and can be removed
+ # if https://bugs.launchpad.net/heat/+bug/1700569
+ # is fixed.
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
{% for dep in roles %}
- {{dep.name}}Deployment_Step{{step -1}}
{% endfor %}
- {% endif %}
+ {% endif %}
properties:
name: {{role.name}}Deployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
@@ -44,26 +69,78 @@
update_identifier: {get_param: DeployIdentifier}
{% endfor %}
+ # Note, this should be the last step to execute configuration changes.
+ # Ensure that all {{role.name}}ExtraConfigPost steps are executed
+ # after all the previous deployment steps.
+ {{role.name}}ExtraConfigPost:
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ {% endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+
+ # The {{role.name}}PostConfig steps are in charge of
+ # quiescing all services, i.e. in the Controller case,
+ # we should run a full service reload.
{{role.name}}PostConfig:
type: OS::TripleO::Tasks::{{role.name}}PostConfig
depends_on:
{% for dep in roles %}
- - {{dep.name}}Deployment_Step5
+ - {{dep.name}}ExtraConfigPost
{% endfor %}
properties:
servers: {get_param: servers}
input_values:
update_identifier: {get_param: DeployIdentifier}
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- {{role.name}}ExtraConfigPost:
+
+{% endfor %}
+
+# BEGIN service_workflow_tasks handling
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}:
+ type: OS::Mistral::Workflow
+ condition: WorkflowTasks_Step{{step}}_Enabled
depends_on:
- {% for dep in roles %}
- - {{dep.name}}PostConfig
- {% endfor %}
- type: OS::TripleO::NodeExtraConfigPost
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
properties:
- servers: {get_param: [servers, {{role.name}}]}
+ name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+ type: direct
+ tasks:
+ yaql:
+ expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+ data:
+ {% for role in roles %}
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+ {% endfor %}
+ WorkflowTasks_Step{{step}}_Execution:
+ type: OS::Mistral::ExternalResource
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on: WorkflowTasks_Step{{step}}
+ properties:
+ actions:
+ CREATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ UPDATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ always_update: true
{% endfor %}
+# END service_workflow_tasks handling
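For each step, ``WorkflowTasks_Step{{step}}`` collects the ``step{{step}}`` entries from every role's ``service_workflow_tasks`` and flattens them into a single Mistral task list, while the matching condition disables the workflow when no role defines tasks for that step. A sketch with hypothetical role data, reusing the echo task from the services README change below::

    # role_data.*.service_workflow_tasks gathered for step 2
    - ''                                    # role with no workflow tasks
    - step2:
        - {name: echo, action: std.echo output=Hello}
    # tasks given to WorkflowTasks_Step2
    - {name: echo, action: std.echo output=Hello}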
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 7af90e24..b45736c1 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -28,9 +28,13 @@ parameters:
constraints:
- custom_constraint: nova.keypair
{% endif %}
+ NeutronPhysicalBridge:
+ default: 'br-ex'
+ description: An OVS bridge to create for accessing tenant networks.
+ type: string
NeutronPublicInterface:
default: nic1
- description: What interface to bridge onto br-ex for network nodes.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
ServiceNetMap:
default: {}
@@ -85,8 +89,8 @@ parameters:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
{{role}}ServerMetadata:
default: {}
description: >
@@ -161,6 +165,28 @@ parameters:
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
@@ -168,10 +194,16 @@ conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
{{role}}:
- type: OS::TripleO::{{role.name}}Server
+ type: OS::TripleO::{{role}}Server
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
@@ -196,6 +228,12 @@ resources:
- {get_param: {{role}}ServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: {{role}}SchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -394,6 +432,8 @@ resources:
type: OS::TripleO::{{role}}::PreNetworkConfig
properties:
server: {get_resource: {{role}}}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
@@ -404,7 +444,7 @@ resources:
server: {get_resource: {{role}}}
actions: {get_param: NetworkDeploymentActions}
input_values:
- bridge_name: br-ex
+ bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
actions:
if:
@@ -493,6 +533,7 @@ resources:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -642,3 +683,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [{{role}}, os_collect_config]}
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index 7a18ef0c..d55414b7 100644
--- a/puppet/services/README.rst
+++ b/puppet/services/README.rst
@@ -95,6 +95,30 @@ are re-asserted when applying latter ones.
5) Service activation (Pacemaker)
+It is also possible to use Mistral actions or workflows together with
+a deployment step; these are executed before the main configuration run.
+To describe actions or workflows from within a service use:
+
+ * service_workflow_tasks: One or more workflow task properties
+
+which expects a map where the key is the step and the value is a list of
+dictionaries, each describing a workflow task, for example::
+
+ service_workflow_tasks:
+ step2:
+ - name: echo
+ action: std.echo output=Hello
+ step3:
+ - name: external
+ workflow: my-pre-existing-workflow-name
+ input:
+ workflow_param1: value
+ workflow_param2: value
+
+The Heat guide for the `OS::Mistral::Workflow task property
+<https://docs.openstack.org/developer/heat/template_guide/openstack.html#OS::Mistral::Workflow-prop-tasks>`_
+has more details about the expected dictionary.
+
Batch Upgrade Steps
-------------------
diff --git a/puppet/services/cinder-backend-dellsc.yaml b/puppet/services/cinder-backend-dellsc.yaml
index a201134c..c0bffb18 100644
--- a/puppet/services/cinder-backend-dellsc.yaml
+++ b/puppet/services/cinder-backend-dellsc.yaml
@@ -61,6 +61,9 @@ parameters:
CinderDellScSecondaryScApiPort:
type: number
default: 3033
+ CinderDellScExcludedDomainIp:
+ type: string
+ default: ''
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -105,5 +108,6 @@ outputs:
cinder::backend::dellsc_iscsi::secondary_san_login: {get_param: CinderDellScSecondarySanLogin}
cinder::backend::dellsc_iscsi::secondary_san_password: {get_param: CinderDellScSecondarySanPassword}
cinder::backend::dellsc_iscsi::secondary_sc_api_port: {get_param: CinderDellScSecondaryScApiPort}
+ cinder::backend::dellsc_iscsi::excluded_domain_ip: {get_param: CinderDellScExcludedDomainIp}
step_config: |
include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/cinder-backend-netapp.yaml b/puppet/services/cinder-backend-netapp.yaml
index bddc8e1a..fbde4c0a 100644
--- a/puppet/services/cinder-backend-netapp.yaml
+++ b/puppet/services/cinder-backend-netapp.yaml
@@ -93,6 +93,12 @@ parameters:
CinderNetappWebservicePath:
type: string
default: '/devmgr/v2'
+ CinderNetappNasSecureFileOperations:
+ type: string
+ default: 'false'
+ CinderNetappNasSecureFilePermissions:
+ type: string
+ default: 'false'
# DEPRECATED options for compatibility with older versions
CinderNetappEseriesHostType:
type: string
@@ -133,5 +139,7 @@ outputs:
cinder::backend::netapp::netapp_storage_pools: {get_param: CinderNetappStoragePools}
cinder::backend::netapp::netapp_host_type: {get_param: CinderNetappHostType}
cinder::backend::netapp::netapp_webservice_path: {get_param: CinderNetappWebservicePath}
+ cinder::backend::netapp::nas_secure_file_operations: {get_param: CinderNetappNasSecureFileOperations}
+ cinder::backend::netapp::nas_secure_file_permissions: {get_param: CinderNetappNasSecureFilePermissions}
step_config: |
include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml
index fe95222b..1f8c345d 100644
--- a/puppet/services/cinder-volume.yaml
+++ b/puppet/services/cinder-volume.yaml
@@ -40,6 +40,20 @@ parameters:
NFS servers used by Cinder NFS backend. Effective when
CinderEnableNfsBackend is true.
type: comma_delimited_list
+ CinderNasSecureFileOperations:
+ default: false
+ description: >
+ Controls whether security enhanced NFS file operations are enabled.
+ Valid values are 'auto', 'true' or 'false'. Effective when
+ CinderEnableNfsBackend is true.
+ type: string
+ CinderNasSecureFilePermissions:
+ default: false
+ description: >
+ Controls whether security enhanced NFS file permissions are enabled.
+ Valid values are 'auto', 'true' or 'false'. Effective when
+ CinderEnableNfsBackend is true.
+ type: string
CinderRbdPoolName:
default: volumes
type: string
@@ -105,6 +119,8 @@ outputs:
tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: {get_param: CinderNfsServers}
+ tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_operations: {get_param: CinderNasSecureFileOperations}
+ tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_permissions: {get_param: CinderNasSecureFilePermissions}
tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml
index 2bde9033..882ba299 100644
--- a/puppet/services/database/mysql.yaml
+++ b/puppet/services/database/mysql.yaml
@@ -118,6 +118,16 @@ outputs:
template: "%{hiera('cloud_name_NETWORK')}"
params:
NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ dnsnames:
+ - str_replace:
+ template: "%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ - str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
principal:
str_replace:
template: "mysql/%{hiera('cloud_name_NETWORK')}"
@@ -132,6 +142,9 @@ outputs:
- service: mysql
network: {get_param: [ServiceNetMap, MysqlNetwork]}
type: vip
+ - service: mysql
+ network: {get_param: [ServiceNetMap, MysqlNetwork]}
+ type: node
- null
upgrade_tasks:
- name: Check for galera root password
diff --git a/puppet/services/database/redis.yaml b/puppet/services/database/redis.yaml
index df406a8c..9567a73f 100644
--- a/puppet/services/database/redis.yaml
+++ b/puppet/services/database/redis.yaml
@@ -52,3 +52,23 @@ outputs:
- 26379
step_config: |
include ::tripleo::profile::base::database::redis
+ upgrade_tasks:
+ - name: Check if redis is deployed
+ command: systemctl is-enabled redis
+ tags: common
+ ignore_errors: True
+ register: redis_enabled
+ - name: "PreUpgrade step0,validation: Check if redis is running"
+ shell: >
+ /usr/bin/systemctl show 'redis' --property ActiveState |
+ grep '\bactive\b'
+ when: redis_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop redis service
+ tags: step1
+ when: redis_enabled.rc == 0
+ service: name=redis state=stopped
+ - name: Install redis package if it was disabled
+ tags: step3
+ yum: name=redis state=latest
+ when: redis_enabled.rc != 0
diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml
index f4067ef6..b4af7e85 100644
--- a/puppet/services/gnocchi-base.yaml
+++ b/puppet/services/gnocchi-base.yaml
@@ -34,6 +34,10 @@ parameters:
default: 30
description: Delay between processing metrics.
type: number
+ NumberOfStorageSacks:
+ default: 128
+ description: Number of storage sacks to create.
+ type: number
GnocchiPassword:
description: The password for the gnocchi service and db account.
type: string
@@ -87,7 +91,11 @@ outputs:
query:
read_default_file: /etc/my.cnf.d/tripleo.cnf
read_default_group: tripleo
- gnocchi::db::sync::extra_opts: ''
+ gnocchi::db::sync::extra_opts:
+ str_replace:
+ template: " --sacks-number NUM_SACKS"
+ params:
+ NUM_SACKS: {get_param: NumberOfStorageSacks}
gnocchi::storage::metric_processing_delay: {get_param: MetricProcessingDelay}
gnocchi::storage::swift::swift_user: 'service:gnocchi'
gnocchi::storage::swift::swift_auth_version: 3
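With the new ``NumberOfStorageSacks`` parameter, the ``str_replace`` above expands the configured count into the extra option handed to gnocchi's db sync; with the default of 128 the resulting hiera value would be::

    gnocchi::db::sync::extra_opts: " --sacks-number 128"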
diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml
index 619cf131..5bdc3b88 100644
--- a/puppet/services/haproxy.yaml
+++ b/puppet/services/haproxy.yaml
@@ -38,6 +38,10 @@ parameters:
default: /dev/log
description: Syslog address where HAproxy will send its log
type: string
+ HAProxyStatsEnabled:
+ default: true
+ description: Whether or not to enable the HAProxy stats interface.
+ type: boolean
RedisPassword:
description: The password for Redis
type: string
@@ -95,6 +99,7 @@ outputs:
tripleo::haproxy::redis_password: {get_param: RedisPassword}
tripleo::haproxy::ca_bundle: {get_param: InternalTLSCAFile}
tripleo::haproxy::crl_file: {get_param: InternalTLSCRLPEMFile}
+ tripleo::haproxy::haproxy_stats: {get_param: HAProxyStatsEnabled}
tripleo::profile::base::haproxy::certificates_specs:
map_merge:
- get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index 93bced8b..1f97b8ba 100644
--- a/puppet/services/horizon.yaml
+++ b/puppet/services/horizon.yaml
@@ -55,7 +55,7 @@ parameters:
HorizonSecureCookies:
description: Set CSRF_COOKIE_SECURE / SESSION_COOKIE_SECURE in Horizon
type: boolean
- default: true
+ default: false
MemcachedIPv6:
default: false
description: Enable IPv6 features in Memcached.
@@ -89,7 +89,6 @@ outputs:
horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
horizon::vhost_extra_params:
- add_listen: false
priority: 10
access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
options: ['FollowSymLinks','MultiViews']
diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml
index 945033a1..0e8eacf1 100644
--- a/puppet/services/ironic-api.yaml
+++ b/puppet/services/ironic-api.yaml
@@ -43,8 +43,21 @@ parameters:
e.g. { ironic-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
default: {}
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
IronicBase:
type: ./ironic-base.yaml
properties:
@@ -63,6 +76,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [IronicBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
- ironic::api::authtoken::password: {get_param: IronicPassword}
ironic::api::authtoken::project_name: 'service'
ironic::api::authtoken::user_domain_name: 'Default'
@@ -80,7 +94,17 @@ outputs:
ironic::api::port: {get_param: [EndpointMap, IronicInternal, port]}
# This is used to build links in responses
ironic::api::public_endpoint: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
+ ironic::api::service_name: 'httpd'
ironic::policy::policies: {get_param: IronicApiPolicies}
+ ironic::wsgi::apache::bind_host: {get_param: [ServiceNetMap, IronicApiNetwork]}
+ ironic::wsgi::apache::port: {get_param: [EndpointMap, IronicInternal, port]}
+ ironic::wsgi::apache::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, IronicApiNetwork]}
+ ironic::wsgi::apache::ssl: {get_param: EnableInternalTLS}
tripleo.ironic_api.firewall_rules:
'133 ironic api':
dport:
@@ -106,6 +130,9 @@ outputs:
- '%'
- "%{hiera('mysql_bind_host')}"
upgrade_tasks:
- - name: Stop ironic_api service
+ - name: Stop ironic_api service (before httpd support)
+ tags: step1
+ service: name=openstack-ironic-api state=stopped enabled=no
+ - name: Stop ironic_api service (running under httpd)
tags: step1
- service: name=openstack-ironic-api state=stopped
+ service: name=httpd state=stopped
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index b1676715..0e8c8e12 100644
--- a/puppet/services/ironic-conductor.yaml
+++ b/puppet/services/ironic-conductor.yaml
@@ -164,6 +164,12 @@ outputs:
ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
# Credentials to access other services
+ ironic::cinder::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::cinder::username: 'ironic'
+ ironic::cinder::password: {get_param: IronicPassword}
+ ironic::cinder::project_name: 'service'
+ ironic::cinder::user_domain_name: 'Default'
+ ironic::cinder::project_domain_name: 'Default'
ironic::glance::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
ironic::glance::username: 'ironic'
ironic::glance::password: {get_param: IronicPassword}
diff --git a/puppet/services/ironic-inspector.yaml b/puppet/services/ironic-inspector.yaml
new file mode 100644
index 00000000..e8537a29
--- /dev/null
+++ b/puppet/services/ironic-inspector.yaml
@@ -0,0 +1,151 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Ironic Inspector configured with Puppet (EXPERIMENTAL)
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MonitoringSubscriptionIronicInspector:
+ default: 'overcloud-ironic-inspector'
+ type: string
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ IronicInspectorInterface:
+ default: br-ex
+ description: |
+ Network interface on which inspection dnsmasq will listen. Should allow
+ access to untagged traffic from nodes booted for inspection. The default
+ value only makes sense if you don't modify any networking configuration.
+ type: string
+ IronicInspectorIPXEEnabled:
+ default: true
+ description: Whether to use iPXE for inspection.
+ type: boolean
+ IronicInspectorIpRange:
+ description: |
+ Temporary IP range that will be given to nodes during the inspection
+ process. This should not overlap with any range that Neutron's DHCP
+ gives away, but it has to be routable back to the ironic-inspector API.
+ This option has no meaningful defaults, and thus is required.
+ type: string
+ IronicInspectorUseSwift:
+ default: true
+ description: Whether to use Swift for storing introspection data.
+ type: boolean
+ IronicIPXEPort:
+ default: 8088
+ description: Port to use for serving images when iPXE is used.
+ type: string
+ IronicPassword:
+ description: The password for the Ironic service and db account, used by the Ironic services
+ type: string
+ hidden: true
+
+conditions:
+ enable_ipxe: {equals : [{get_param: IronicInspectorIPXEEnabled}, true]}
+ use_swift: {equals : [{get_param: IronicInspectorUseSwift}, true]}
+
+outputs:
+ role_data:
+ description: Role data for the Ironic Inspector role.
+ value:
+ service_name: ironic_inspector
+ monitoring_subscription: {get_param: MonitoringSubscriptionIronicInspector}
+ config_settings:
+ map_merge:
+ - ironic::inspector::listen_address: {get_param: [ServiceNetMap, IronicInspectorNetwork]}
+ ironic::inspector::dnsmasq_local_ip: {get_param: [ServiceNetMap, IronicInspectorNetwork]}
+ ironic::inspector::dnsmasq_ip_range: {get_param: IronicInspectorIpRange}
+ ironic::inspector::dnsmasq_interface: {get_param: IronicInspectorInterface}
+ ironic::inspector::debug: {get_param: Debug}
+ ironic::inspector::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ironic::inspector::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::inspector::authtoken::username: 'ironic'
+ ironic::inspector::authtoken::password: {get_param: IronicPassword}
+ ironic::inspector::authtoken::project_name: 'service'
+ ironic::inspector::authtoken::user_domain_name: 'Default'
+ ironic::inspector::authtoken::project_domain_name: 'Default'
+ tripleo.ironic_inspector.firewall_rules:
+ '137 ironic-inspector':
+ dport:
+ - 5050
+ ironic::inspector::ironic_username: 'ironic'
+ ironic::inspector::ironic_password: {get_param: IronicPassword}
+ ironic::inspector::ironic_tenant_name: 'service'
+ ironic::inspector::ironic_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::inspector::ironic_max_retries: 6
+ ironic::inspector::ironic_retry_interval: 10
+ ironic::inspector::ironic_user_domain_name: 'Default'
+ ironic::inspector::ironic_project_domain_name: 'Default'
+ ironic::inspector::http_port: {get_param: IronicIPXEPort}
+ ironic::inspector::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://ironic-inspector:'
+ - {get_param: IronicPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/ironic-inspector'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ -
+ if:
+ - enable_ipxe
+ - ironic::inspector::pxe_transfer_protocol: 'http'
+ - {}
+ -
+ if:
+ - use_swift
+ - ironic::inspector::store_data: 'swift'
+ ironic::inspector::swift_username: 'ironic'
+ ironic::inspector::swift_password: {get_param: IronicPassword}
+ ironic::inspector::swift_tenant_name: 'service'
+ ironic::inspector::swift_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::inspector::swift_user_domain_name: 'Default'
+ ironic::inspector::swift_project_domain_name: 'Default'
+ - {}
+ step_config: |
+ include ::tripleo::profile::base::ironic_inspector
+ service_config_settings:
+ keystone:
+ ironic::keystone::auth_inspector::tenant: 'service'
+ ironic::keystone::auth_inspector::public_url: {get_param: [EndpointMap, IronicInspectorPublic, uri]}
+ ironic::keystone::auth_inspector::internal_url: {get_param: [EndpointMap, IronicInspectorInternal, uri]}
+ ironic::keystone::auth_inspector::admin_url: {get_param: [EndpointMap, IronicInspectorAdmin, uri]}
+ ironic::keystone::auth_inspector::password: {get_param: IronicPassword}
+ ironic::keystone::auth_inspector::region: {get_param: KeystoneRegion}
+ mysql:
+ ironic::inspector::db::mysql::password: {get_param: IronicPassword}
+ ironic::inspector::db::mysql::user: ironic-inspector
+ ironic::inspector::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ ironic::inspector::db::mysql::dbname: ironic-inspector
+ ironic::inspector::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
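IronicInspectorIpRange has no usable default and must be supplied by the deployer. A minimal, illustrative parameter_defaults sketch follows; the range, interface, and resource-registry path are placeholders that depend on the deployment's network layout and on where the environment file lives:

    resource_registry:
      OS::TripleO::Services::IronicInspector: ../puppet/services/ironic-inspector.yaml

    parameter_defaults:
      # placeholder range; must not overlap with any range Neutron's DHCP hands out
      IronicInspectorIpRange: '192.168.24.100,192.168.24.120'
      # placeholder interface; the template above defaults to br-ex
      IronicInspectorInterface: 'br-ctlplane'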
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index f3a9cbc4..60d194bc 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -113,10 +113,27 @@ parameters:
description: The second Keystone credential key. Must be a valid key.
KeystoneFernetKey0:
type: string
- description: The first Keystone fernet key. Must be a valid key.
+ default: ''
+ description: (DEPRECATED) The first Keystone fernet key. Must be a valid key.
KeystoneFernetKey1:
type: string
- description: The second Keystone fernet key. Must be a valid key.
+ default: ''
+ description: (DEPRECATED) The second Keystone fernet key. Must be a valid key.
+ KeystoneFernetKeys:
+ type: json
+ description: Mapping containing keystone's fernet keys and their paths.
+ KeystoneFernetMaxActiveKeys:
+ type: number
+ description: The maximum active keys in the keystone fernet key repository.
+ default: 5
+ ManageKeystoneFernetKeys:
+ type: boolean
+ default: true
+ description: Whether TripleO should manage the keystone fernet keys or not.
+ If set to true, the fernet keys will get the values from the
+ saved keys repository in mistral (the KeystoneFernetKeys
+ variable). If set to false, only the stack creation
+ initializes the keys, but subsequent updates won't touch them.
KeystoneLoggingSource:
type: json
default:
@@ -187,6 +204,17 @@ parameters:
default: {}
hidden: true
+parameter_groups:
+- label: deprecated
+ description: |
+ The following parameters are deprecated and will be removed. They should not
+ be relied on for new deployments. If you have concerns regarding deprecated
+ parameters, please contact the TripleO development team on IRC or the
+ OpenStack mailing list.
+ parameters:
+ - KeystoneFernetKey0
+ - KeystoneFernetKey1
+
resources:
ApacheServiceBase:
@@ -234,6 +262,7 @@ outputs:
keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
keystone::token_provider: {get_param: KeystoneTokenProvider}
keystone::enable_fernet_setup: {if: [keystone_fernet_tokens, true, false]}
+ keystone::fernet_max_active_keys: {get_param: KeystoneFernetMaxActiveKeys}
keystone::enable_proxy_headers_parsing: true
keystone::enable_credential_setup: true
keystone::credential_keys:
@@ -241,12 +270,8 @@ outputs:
content: {get_param: KeystoneCredential0}
'/etc/keystone/credential-keys/1':
content: {get_param: KeystoneCredential1}
- keystone::fernet_keys:
- '/etc/keystone/fernet-keys/0':
- content: {get_param: KeystoneFernetKey0}
- '/etc/keystone/fernet-keys/1':
- content: {get_param: KeystoneFernetKey1}
- keystone::fernet_replace_keys: false
+ keystone::fernet_keys: {get_param: KeystoneFernetKeys}
+ keystone::fernet_replace_keys: {get_param: ManageKeystoneFernetKeys}
keystone::debug:
if:
- service_debug_unset
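KeystoneFernetKeys replaces the two fixed per-key parameters with a map of key paths to key contents, mirroring the structure the removed keystone::fernet_keys block used. An illustrative sketch (the key material is a placeholder; in practice the value is generated and stored by mistral):

    parameter_defaults:
      KeystoneFernetKeys:
        /etc/keystone/fernet-keys/0:
          content: 'PLACEHOLDER_FERNET_KEY_0'
        /etc/keystone/fernet-keys/1:
          content: 'PLACEHOLDER_FERNET_KEY_1'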
diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml
index 76d5c269..4493721c 100644
--- a/puppet/services/neutron-ovs-agent.yaml
+++ b/puppet/services/neutron-ovs-agent.yaml
@@ -92,8 +92,12 @@ resources:
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
@@ -138,7 +142,7 @@ outputs:
expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
data:
ovs_upgrade:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
neutron_ovs_upgrade:
- name: Check if neutron_ovs_agent is deployed
command: systemctl is-enabled neutron-openvswitch-agent
diff --git a/puppet/services/neutron-ovs-dpdk-agent.yaml b/puppet/services/neutron-ovs-dpdk-agent.yaml
index 29c10469..da7a4d68 100644
--- a/puppet/services/neutron-ovs-dpdk-agent.yaml
+++ b/puppet/services/neutron-ovs-dpdk-agent.yaml
@@ -26,32 +26,6 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- HostCpusList:
- default: "0"
- description: List of cores to be used for host process
- type: string
- constraints:
- - allowed_pattern: "[0-9,-]+"
- NeutronDpdkCoreList:
- default: ""
- description: List of cores to be used for DPDK Poll Mode Driver
- type: string
- constraints:
- - allowed_pattern: "[0-9,-]*"
- NeutronDpdkMemoryChannels:
- default: ""
- description: Number of memory channels to be used for DPDK
- type: string
- constraints:
- - allowed_pattern: "[0-9]*"
- NeutronDpdkSocketMemory:
- default: ""
- description: Memory allocated for each socket
- type: string
- NeutronDpdkDriverType:
- default: "vfio-pci"
- description: DPDK Driver type
- type: string
# below parameters has to be set in neutron agent only for compute nodes.
# as of now there is no other usecase for these parameters except dpdk.
# should be moved to compute only ovs agent in case of any other usecases.
@@ -75,9 +49,6 @@ resources:
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
-
# Merging role-specific parameters (RoleParameters) with the default parameters.
# RoleParameters will have the precedence over the default parameters.
RoleParametersValue:
@@ -89,20 +60,19 @@ resources:
- map_replace:
- neutron::agents::ml2::ovs::datapath_type: NeutronDatapathType
neutron::agents::ml2::ovs::vhostuser_socket_dir: NeutronVhostuserSocketDir
- vswitch::dpdk::driver_type: NeutronDpdkDriverType
- vswitch::dpdk::host_core_list: HostCpusList
- vswitch::dpdk::pmd_core_list: NeutronDpdkCoreList
- vswitch::dpdk::memory_channels: NeutronDpdkMemoryChannels
- vswitch::dpdk::socket_mem: NeutronDpdkSocketMemory
- values: {get_param: [RoleParameters]}
- values:
NeutronDatapathType: {get_param: NeutronDatapathType}
NeutronVhostuserSocketDir: {get_param: NeutronVhostuserSocketDir}
- NeutronDpdkDriverType: {get_param: NeutronDpdkDriverType}
- HostCpusList: {get_param: HostCpusList}
- NeutronDpdkCoreList: {get_param: NeutronDpdkCoreList}
- NeutronDpdkMemoryChannels: {get_param: NeutronDpdkMemoryChannels}
- NeutronDpdkSocketMemory: {get_param: NeutronDpdkSocketMemory}
+
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
@@ -116,7 +86,8 @@ outputs:
- keys:
tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
- neutron::agents::ml2::ovs::enable_dpdk: true
+ - get_attr: [Ovs, role_data, config_settings]
- get_attr: [RoleParametersValue, value]
step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]}
upgrade_tasks:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
diff --git a/puppet/services/neutron-sriov-agent.yaml b/puppet/services/neutron-sriov-agent.yaml
index c124d1e6..090640ed 100644
--- a/puppet/services/neutron-sriov-agent.yaml
+++ b/puppet/services/neutron-sriov-agent.yaml
@@ -65,6 +65,24 @@ resources:
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
+ # Merging role-specific parameters (RoleParameters) with the default parameters.
+ # RoleParameters will have the precedence over the default parameters.
+ RoleParametersValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_replace:
+ - map_replace:
+ - neutron::agents::ml2::sriov::physical_device_mappings: NeutronPhysicalDevMappings
+ neutron::agents::ml2::sriov::exclude_devices: NeutronExcludeDevices
+ tripleo::host::sriov::number_of_vfs: NeutronSriovNumVFs
+ - values: {get_param: [RoleParameters]}
+ - values:
+ NeutronPhysicalDevMappings: {get_param: NeutronPhysicalDevMappings}
+ NeutronExcludeDevices: {get_param: NeutronExcludeDevices}
+ NeutronSriovNumVFs: {get_param: NeutronSriovNumVFs}
+
outputs:
role_data:
description: Role data for the Neutron SR-IOV nic agent service.
@@ -73,8 +91,6 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- - neutron::agents::ml2::sriov::physical_device_mappings: {get_param: NeutronPhysicalDevMappings}
- neutron::agents::ml2::sriov::exclude_devices: {get_param: NeutronExcludeDevices}
- tripleo::host::sriov::number_of_vfs: {get_param: NeutronSriovNumVFs}
+ - get_attr: [RoleParametersValue, value]
step_config: |
include ::tripleo::profile::base::neutron::sriov
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index 835edf0a..fe2f2946 100644
--- a/puppet/services/nova-api.yaml
+++ b/puppet/services/nova-api.yaml
@@ -210,7 +210,7 @@ outputs:
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
- set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}}
+ set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- name: Extra migration for nova tripleo/+bug/1656791
tags: step0,pre-upgrade
when: is_bootstrap_node
diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml
index 5da6d43e..72a1fce7 100644
--- a/puppet/services/nova-scheduler.yaml
+++ b/puppet/services/nova-scheduler.yaml
@@ -45,6 +45,14 @@ parameters:
default:
tag: openstack.nova.scheduler
path: /var/log/nova/nova-scheduler.log
+ NovaSchedulerDiscoverHostsInCellsInterval:
+ type: number
+ default: -1
+ description: >
+ This value controls how often (in seconds) the scheduler should
+ attempt to discover new hosts that have been added to cells.
+ The default value of -1 disables the periodic task completely.
+ It is recommended to set this parameter for deployments using Ironic.
resources:
NovaBase:
@@ -71,6 +79,7 @@ outputs:
- nova::ram_allocation_ratio: '1.0'
nova::scheduler::filter::scheduler_available_filters: {get_param: NovaSchedulerAvailableFilters}
nova::scheduler::filter::scheduler_default_filters: {get_param: NovaSchedulerDefaultFilters}
+ nova::scheduler::discover_hosts_in_cells_interval: {get_param: NovaSchedulerDiscoverHostsInCellsInterval}
step_config: |
include tripleo::profile::base::nova::scheduler
upgrade_tasks:
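For Ironic deployments, the parameter description above recommends enabling the periodic discovery; an illustrative override (the 120-second interval is only an example value):

    parameter_defaults:
      NovaSchedulerDiscoverHostsInCellsInterval: 120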
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index 0d859be1..1a8754a5 100644
--- a/puppet/services/opendaylight-ovs.yaml
+++ b/puppet/services/opendaylight-ovs.yaml
@@ -57,8 +57,14 @@ parameters:
type: json
resources:
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
@@ -66,19 +72,21 @@ outputs:
value:
service_name: opendaylight_ovs
config_settings:
- opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
- opendaylight::username: {get_param: OpenDaylightUsername}
- opendaylight::password: {get_param: OpenDaylightPassword}
- opendaylight_check_url: {get_param: OpenDaylightCheckURL}
- opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
- neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
- neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
- tripleo.opendaylight_ovs.firewall_rules:
- '118 neutron vxlan networks':
- proto: 'udp'
- dport: 4789
- '136 neutron gre networks':
- proto: 'gre'
+ map_merge:
+ - opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
+ opendaylight::username: {get_param: OpenDaylightUsername}
+ opendaylight::password: {get_param: OpenDaylightPassword}
+ opendaylight_check_url: {get_param: OpenDaylightCheckURL}
+ opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
+ neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
+ neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
+ tripleo.opendaylight_ovs.firewall_rules:
+ '118 neutron vxlan networks':
+ proto: 'udp'
+ dport: 4789
+ '136 neutron gre networks':
+ proto: 'gre'
+ - get_attr: [Ovs, role_data, config_settings]
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
upgrade_tasks:
@@ -86,7 +94,7 @@ outputs:
expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade
data:
ovs_upgrade:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
opendaylight_upgrade:
- name: Check if openvswitch is deployed
command: systemctl is-enabled openvswitch
diff --git a/puppet/services/openvswitch-upgrade.yaml b/puppet/services/openvswitch-upgrade.yaml
deleted file mode 100644
index f6e78462..00000000
--- a/puppet/services/openvswitch-upgrade.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-heat_template_version: pike
-
-description: >
- Openvswitch package special handling for upgrade.
-
-outputs:
- role_data:
- description: Upgrade task for special handling of Openvswitch (OVS) upgrade.
- value:
- service_name: openvswitch_upgrade
- upgrade_tasks:
- - name: Check openvswitch version.
- tags: step2
- register: ovs_version
- ignore_errors: true
- shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
- - name: Check openvswitch packaging.
- tags: step2
- shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
- register: ovs_packaging_issue
- ignore_errors: true
- - block:
- - name: "Ensure empty directory: emptying."
- file:
- state: absent
- path: /root/OVS_UPGRADE
- - name: "Ensure empty directory: creating."
- file:
- state: directory
- path: /root/OVS_UPGRADE
- owner: root
- group: root
- mode: 0750
- - name: Download OVS packages.
- command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
- - name: Get rpm list for manual upgrade of OVS.
- shell: ls -1 /root/OVS_UPGRADE/*.rpm
- register: ovs_list_of_rpms
- - name: Manual upgrade of OVS
- shell: |
- rpm -U --test {{item}} 2>&1 | grep "already installed" || \
- rpm -U --replacepkgs --notriggerun --nopostun {{item}};
- args:
- chdir: /root/OVS_UPGRADE
- with_items:
- - "{{ovs_list_of_rpms.stdout_lines}}"
- tags: step2
- when: "'2.5.0-14' in '{{ovs_version.stdout}}'
- or
- ovs_packaging_issue|succeeded"
diff --git a/puppet/services/openvswitch.yaml b/puppet/services/openvswitch.yaml
new file mode 100644
index 00000000..36aa5db7
--- /dev/null
+++ b/puppet/services/openvswitch.yaml
@@ -0,0 +1,178 @@
+heat_template_version: pike
+
+description: >
+ Open vSwitch Configuration
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OvsDpdkCoreList:
+ description: >
+      List of cores to be used for DPDK lcore threads. Note that these
+      threads are used by the OVS control path for validation and handling
+      functions.
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ""
+ OvsDpdkMemoryChannels:
+ description: Number of memory channels per socket to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ""
+ OvsDpdkSocketMemory:
+ default: ""
+ description: >
+ Sets the amount of hugepage memory to assign per NUMA node. It is
+ recommended to use the socket closest to the PCIe slot used for the
+      desired DPDK NIC. The format is "<socket 0 mem>, <socket 1
+      mem>, <socket n mem>", where each value is specified in MB. For example:
+ "1024,0".
+ type: string
+ OvsDpdkDriverType:
+ default: "vfio-pci"
+ description: >
+ DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+ this UIO/PMD driver.
+ type: string
+ OvsPmdCoreList:
+ description: >
+      A list or range of CPU cores for PMD threads to be pinned to. Note that
+      the NIC's location relative to the socket's cores, the number of
+      hyper-threaded logical cores, and the desired number of PMD threads can
+      all play a role in configuring this setting. These cores should be on
+      the same socket where OvsDpdkSocketMemory is assigned. If
+      hyper-threading is used, specify both logical cores that map to the
+      same physical core. Also, specifying more than one core will spawn
+      multiple PMD threads, which may improve dataplane performance.
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ type: string
+ default: ""
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Queens cycle.
+ HostCpusList:
+ description: List of cores to be used for host process
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkCoreList:
+ description: List of cores to be used for DPDK Poll Mode Driver
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkMemoryChannels:
+ description: Number of memory channels to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ''
+ NeutronDpdkSocketMemory:
+ default: ''
+ description: Memory allocated for each socket
+ type: string
+ NeutronDpdkDriverType:
+ default: "vfio-pci"
+ description: DPDK Driver type
+ type: string
+
+parameter_groups:
+- label: deprecated
+ description: Do not use deprecated params, they will be removed.
+ parameters:
+ - HostCpusList
+ - NeutronDpdkCoreList
+ - NeutronDpdkMemoryChannels
+ - NeutronDpdkSocketMemory
+ - NeutronDpdkDriverType
+
+conditions:
+ l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+ pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+ mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+ socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+ driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+
+outputs:
+ role_data:
+ description: Role data for the Open vSwitch service.
+ value:
+ service_name: openvswitch
+ config_settings:
+ map_replace:
+ - map_replace:
+ - vswitch::dpdk::driver_type: OvsDpdkDriverType
+ vswitch::dpdk::host_core_list: OvsDpdkCoreList
+ vswitch::dpdk::pmd_core_list: OvsPmdCoreList
+ vswitch::dpdk::memory_channels: OvsDpdkMemoryChannels
+ vswitch::dpdk::socket_mem: OvsDpdkSocketMemory
+ - values: {get_param: [RoleParameters]}
+ - values:
+ OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+ OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+ OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+ OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+ OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
+ upgrade_tasks:
+ - name: Check openvswitch version.
+ tags: step2
+ register: ovs_version
+ ignore_errors: true
+ shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
+ - name: Check openvswitch packaging.
+ tags: step2
+ shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
+ register: ovs_packaging_issue
+ ignore_errors: true
+ - block:
+ - name: "Ensure empty directory: emptying."
+ file:
+ state: absent
+ path: /root/OVS_UPGRADE
+ - name: "Ensure empty directory: creating."
+ file:
+ state: directory
+ path: /root/OVS_UPGRADE
+ owner: root
+ group: root
+ mode: 0750
+ - name: Download OVS packages.
+ command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
+ - name: Get rpm list for manual upgrade of OVS.
+ shell: ls -1 /root/OVS_UPGRADE/*.rpm
+ register: ovs_list_of_rpms
+ - name: Manual upgrade of OVS
+ shell: |
+ rpm -U --test {{item}} 2>&1 | grep "already installed" || \
+ rpm -U --replacepkgs --notriggerun --nopostun {{item}};
+ args:
+ chdir: /root/OVS_UPGRADE
+ with_items:
+ - "{{ovs_list_of_rpms.stdout_lines}}"
+ tags: step2
+ when: "'2.5.0-14' in '{{ovs_version.stdout}}'
+ or
+ ovs_packaging_issue|succeeded"
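With this template in place, DPDK tuning is expressed through the new Ovs* parameters instead of the deprecated Neutron* ones. A hedged sketch of a parameter_defaults block using the new names (all core, channel, and memory values are illustrative and depend on the host's NUMA topology):

    parameter_defaults:
      OvsDpdkCoreList: "0,16"          # lcore threads for the OVS control path
      OvsPmdCoreList: "1,17"           # PMD threads, same socket as the DPDK NIC
      OvsDpdkMemoryChannels: "4"
      OvsDpdkSocketMemory: "1024,0"    # MB of hugepage memory per NUMA node
      OvsDpdkDriverType: "vfio-pci"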
diff --git a/puppet/services/ovn-dbs.yaml b/puppet/services/ovn-dbs.yaml
index 20c38d8a..df234c77 100644
--- a/puppet/services/ovn-dbs.yaml
+++ b/puppet/services/ovn-dbs.yaml
@@ -44,6 +44,7 @@ outputs:
ovn::northbound::port: {get_param: OVNNorthboundServerPort}
ovn::southbound::port: {get_param: OVNSouthboundServerPort}
ovn::northd::dbs_listen_ip: {get_param: [ServiceNetMap, OvnDbsNetwork]}
+ tripleo::haproxy::ovn_dbs_manage_lb: true
tripleo.ovn_dbs.firewall_rules:
'121 OVN DB server ports':
proto: 'tcp'
diff --git a/puppet/services/pacemaker/database/mysql.yaml b/puppet/services/pacemaker/database/mysql.yaml
index d8e942d0..0a7659e0 100644
--- a/puppet/services/pacemaker/database/mysql.yaml
+++ b/puppet/services/pacemaker/database/mysql.yaml
@@ -27,6 +27,11 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
resources:
@@ -61,6 +66,8 @@ outputs:
# internal_api_subnet - > IP/CIDR
tripleo::profile::pacemaker::database::mysql::gmcast_listen_addr:
get_param: [ServiceNetMap, MysqlNetwork]
+ tripleo::profile::pacemaker::database::mysql::ca_file:
+ get_param: InternalTLSCAFile
step_config: |
include ::tripleo::profile::pacemaker::database::mysql
metadata_settings:
diff --git a/puppet/services/pacemaker/ovn-dbs.yaml b/puppet/services/pacemaker/ovn-dbs.yaml
new file mode 100644
index 00000000..1cbb4763
--- /dev/null
+++ b/puppet/services/pacemaker/ovn-dbs.yaml
@@ -0,0 +1,61 @@
+heat_template_version: ocata
+
+description: >
+ OVN databases configured with puppet in HA mode
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OVNNorthboundServerPort:
+ description: Port of the OVN Northbound DB server
+ type: number
+ default: 6641
+ OVNSouthboundServerPort:
+ description: Port of the OVN Southbound DB server
+ type: number
+ default: 6642
+
+resources:
+
+ OVNDBsBase:
+ type: ../ovn-dbs.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the OVN northd service
+ value:
+ service_name: ovn_dbs
+ config_settings:
+ map_merge:
+ - get_attr: [OVNDBsBase, role_data, config_settings]
+ - tripleo::haproxy::ovn_dbs_manage_lb: false
+ tripleo::profile::pacemaker::ovn_northd::nb_db_port: {get_param: OVNNorthboundServerPort}
+ tripleo::profile::pacemaker::ovn_northd::sb_db_port: {get_param: OVNSouthboundServerPort}
+ step_config: |
+ include ::tripleo::profile::pacemaker::ovn_northd
diff --git a/puppet/services/pacemaker_remote.yaml b/puppet/services/pacemaker_remote.yaml
index 74aaf599..c49b0848 100644
--- a/puppet/services/pacemaker_remote.yaml
+++ b/puppet/services/pacemaker_remote.yaml
@@ -34,6 +34,42 @@ parameters:
MonitoringSubscriptionPacemakerRemote:
default: 'overcloud-pacemaker_remote'
type: string
+ EnableFencing:
+ default: false
+ description: Whether to enable fencing in Pacemaker or not.
+ type: boolean
+ FencingConfig:
+ default: {}
+ description: |
+ Pacemaker fencing configuration. The JSON should have
+ the following structure:
+ {
+ "devices": [
+ {
+ "agent": "AGENT_NAME",
+ "host_mac": "HOST_MAC_ADDRESS",
+ "params": {"PARAM_NAME": "PARAM_VALUE"}
+ }
+ ]
+ }
+ For instance:
+ {
+ "devices": [
+ {
+ "agent": "fence_xvm",
+ "host_mac": "52:54:00:aa:bb:cc",
+ "params": {
+ "multicast_address": "225.0.0.12",
+ "port": "baremetal_0",
+ "manage_fw": true,
+ "manage_key_file": true,
+ "key_file": "/etc/fence_xvm.key",
+ "key_file_password": "abcdef"
+ }
+ }
+ ]
+ }
+ type: json
PacemakerRemoteLoggingSource:
type: json
default:
@@ -60,6 +96,8 @@ outputs:
proto: 'tcp'
dport:
- 3121
+ tripleo::fencing::config: {get_param: FencingConfig}
+ enable_fencing: {get_param: EnableFencing}
tripleo::profile::base::pacemaker_remote::remote_authkey: {get_param: PacemakerRemoteAuthkey}
step_config: |
include ::tripleo::profile::base::pacemaker_remote
diff --git a/puppet/services/panko-api.yaml b/puppet/services/panko-api.yaml
index a41e34f7..0289b7a7 100644
--- a/puppet/services/panko-api.yaml
+++ b/puppet/services/panko-api.yaml
@@ -84,8 +84,8 @@ outputs:
tripleo.panko_api.firewall_rules:
'140 panko-api':
dport:
- - 8779
- - 13779
+ - 8977
+ - 13977
panko::api::host:
str_replace:
template:
diff --git a/releasenotes/notes/Introduce-ManageKeystoneFernetKeys-parameter-2478cf5fc5e64256.yaml b/releasenotes/notes/Introduce-ManageKeystoneFernetKeys-parameter-2478cf5fc5e64256.yaml
new file mode 100644
index 00000000..64a4d7e7
--- /dev/null
+++ b/releasenotes/notes/Introduce-ManageKeystoneFernetKeys-parameter-2478cf5fc5e64256.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - This introduces the ManageKeystoneFernetKeys parameter, which tells
+    heat/puppet whether it should replace the existing fernet keys on a
+    stack deployment. This is useful if the deployer wants to do key
+    rotations out of band.
diff --git a/releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml b/releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml
new file mode 100644
index 00000000..193154d0
--- /dev/null
+++ b/releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - The HAProxy stats interface can now be enabled/disabled with the
+ HAProxyStatsEnabled flag. Note that it's still enabled by default.
diff --git a/releasenotes/notes/Use-KeystoneFernetKeys-parameter-bd635a106bb8e00f.yaml b/releasenotes/notes/Use-KeystoneFernetKeys-parameter-bd635a106bb8e00f.yaml
new file mode 100644
index 00000000..1e2673f1
--- /dev/null
+++ b/releasenotes/notes/Use-KeystoneFernetKeys-parameter-bd635a106bb8e00f.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - The KeystoneFernetKeys parameter was introduced, which can take any
+    number of keys as long as they are in the right format. It is generated
+    by the same mechanism as the rest of the passwords, so its value is also
+    available via mistral's "password" environment variable. This also
+    allows rotations to be done via mistral and via stack updates.
+deprecations:
+ - The individual keystone fernet key parameters (KeystoneFernetKey0 and
+ KeystoneFernetKey1) were deprecated in favor of KeystoneFernetKeys.
diff --git a/releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml b/releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml
new file mode 100644
index 00000000..73b9f9c9
--- /dev/null
+++ b/releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - Add parameters to control the Cinder NAS security settings associated
+ with the NFS and NetApp Cinder back ends. The settings are disabled
+ by default.
diff --git a/releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml b/releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml
new file mode 100644
index 00000000..67a55cd8
--- /dev/null
+++ b/releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - Added the new DeploymentSwiftDataMap parameter, which is used to set the
+    deployment_swift_data property on the Server resources. The parameter is
+    a map where the keys are the Heat-assigned hostnames and each value is a
+    map of the container and object name in Swift.
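An illustrative shape for the new parameter, assuming the deployment_swift_data property takes container and object keys (the hostname and names below are placeholders):

    parameter_defaults:
      DeploymentSwiftDataMap:
        overcloud-controller-0:
          container: overcloud-controller-0-deploy-data
          object: deploy-data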
diff --git a/releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml b/releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml
new file mode 100644
index 00000000..cd352ac1
--- /dev/null
+++ b/releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - Adds a new output, ServerOsCollectConfigData, which is the
+ os-collect-config configuration associated with each server resource.
+    This can be used to (pre)configure the os-collect-config agents on
+    deployed-server nodes.
diff --git a/releasenotes/notes/baremetal-cell-hosts-cd5cf5aa8a33643c.yaml b/releasenotes/notes/baremetal-cell-hosts-cd5cf5aa8a33643c.yaml
new file mode 100644
index 00000000..98ba86d7
--- /dev/null
+++ b/releasenotes/notes/baremetal-cell-hosts-cd5cf5aa8a33643c.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+    When ``environments/services/ironic.yaml`` is used, enable the periodic
+    task in nova-scheduler to automatically discover new nodes. Otherwise a
+    user has to run the nova management command on the controllers each time
+    new nodes are added.
diff --git a/releasenotes/notes/baremetal-role-34cb48cc30d7bdb4.yaml b/releasenotes/notes/baremetal-role-34cb48cc30d7bdb4.yaml
new file mode 100644
index 00000000..51176426
--- /dev/null
+++ b/releasenotes/notes/baremetal-role-34cb48cc30d7bdb4.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add an example role ``roles/IronicConductor.yaml`` for a node with only
+ ironic-conductor and its (i)PXE service.
diff --git a/releasenotes/notes/change-panko-api-port-fb8967420cd036b1.yaml b/releasenotes/notes/change-panko-api-port-fb8967420cd036b1.yaml
new file mode 100644
index 00000000..353d16db
--- /dev/null
+++ b/releasenotes/notes/change-panko-api-port-fb8967420cd036b1.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - Changed the panko-api port to 8977 instead of 8779, since 8779 is
+    reserved for trove. The change avoids port conflicts.
diff --git a/releasenotes/notes/derive-params-custom-plan-env-3a810ff58a68e0ad.yaml b/releasenotes/notes/derive-params-custom-plan-env-3a810ff58a68e0ad.yaml
new file mode 100644
index 00000000..d8fcbfec
--- /dev/null
+++ b/releasenotes/notes/derive-params-custom-plan-env-3a810ff58a68e0ad.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - Added a custom plan-environment file for providing workflow-specific
+    inputs for the derived parameters workflow.
diff --git a/releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml b/releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml
new file mode 100644
index 00000000..4cb9b801
--- /dev/null
+++ b/releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - DPDK is enabled in OvS before the NetworkDeployment to ensure DPDK
+ is ready to handle new port additions.
+upgrade:
+  - A new parameter, ServiceNames, is added to the PreNetworkConfig resource.
+    All templates associated with PreNetworkConfig should add this new
+    parameter during the upgrade.
diff --git a/releasenotes/notes/fix-glance-api-network-4f9d7c20475a5994.yaml b/releasenotes/notes/fix-glance-api-network-4f9d7c20475a5994.yaml
new file mode 100644
index 00000000..18474cf3
--- /dev/null
+++ b/releasenotes/notes/fix-glance-api-network-4f9d7c20475a5994.yaml
@@ -0,0 +1,3 @@
+---
+fixes:
+  - Fixed the incorrect network used for the Glance API service.
diff --git a/releasenotes/notes/fix-rpm-deploy-artifact-urls-03d5694073ad159d.yaml b/releasenotes/notes/fix-rpm-deploy-artifact-urls-03d5694073ad159d.yaml
new file mode 100644
index 00000000..25016e83
--- /dev/null
+++ b/releasenotes/notes/fix-rpm-deploy-artifact-urls-03d5694073ad159d.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fix support for RPMs to be installed via DeployArtifactURLs. LP#1697102
diff --git a/releasenotes/notes/ironic-inspector-43441782bdf0f84e.yaml b/releasenotes/notes/ironic-inspector-43441782bdf0f84e.yaml
new file mode 100644
index 00000000..1fbdd1f8
--- /dev/null
+++ b/releasenotes/notes/ironic-inspector-43441782bdf0f84e.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add basic support for **ironic-inspector** in the overcloud. It is highly
+ experimental and is not yet recommended for production use.
diff --git a/releasenotes/notes/max-active-fernet-keys-f960f08838a75eee.yaml b/releasenotes/notes/max-active-fernet-keys-f960f08838a75eee.yaml
new file mode 100644
index 00000000..4c10753a
--- /dev/null
+++ b/releasenotes/notes/max-active-fernet-keys-f960f08838a75eee.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - KeystoneFernetMaxActiveKeys was introduced as a parameter to the keystone
+    profile. It sets the max_active_keys value in keystone.conf and will
+    subsequently be used by mistral to purge excess keys during key rotation.
diff --git a/releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml b/releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml
new file mode 100644
index 00000000..e5adb6a9
--- /dev/null
+++ b/releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml
@@ -0,0 +1,3 @@
+---
+features:
+  - Add support for configuring the number of storage sacks in gnocchi.
diff --git a/releasenotes/notes/ovn-ha-c0139ac519680872.yaml b/releasenotes/notes/ovn-ha-c0139ac519680872.yaml
new file mode 100644
index 00000000..d36f8364
--- /dev/null
+++ b/releasenotes/notes/ovn-ha-c0139ac519680872.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Support HA for OVN db servers and ovn-northd using Pacemaker.
diff --git a/releasenotes/notes/pre-network-config-role-specific-b36cc4bd6383e493.yaml b/releasenotes/notes/pre-network-config-role-specific-b36cc4bd6383e493.yaml
new file mode 100644
index 00000000..95e9260b
--- /dev/null
+++ b/releasenotes/notes/pre-network-config-role-specific-b36cc4bd6383e493.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - PreNetworkConfig is modified to support role-specific parameters.
+upgrade:
+ - PreNetworkConfig takes a new parameter, RoleParameters. All the templates
+    associated with PreNetworkConfig should add this new parameter during
+    the upgrade.
+deprecations:
+  - Parameters {{role}}KernelArgs, {{role}}TunedProfileName and
+    {{role}}HostCpusList are deprecated. Instead, role-specific
+    parameter support has been added under the same names.
diff --git a/releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml b/releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml
new file mode 100644
index 00000000..1e44d926
--- /dev/null
+++ b/releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml
@@ -0,0 +1,23 @@
+---
+features:
+  - Adds a common openvswitch service template to be
+    inherited by other services.
+  - Adds an environment file to be used for deploying
+    OpenDaylight + OVS DPDK.
+  - Adds first-boot and OVS configuration scripts.
+deprecations:
+ - The ``HostCpusList`` parameter is deprecated in
+ favor of ``OvsDpdkCoreList`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkCoreList`` parameter is deprecated in
+ favor of ``OvsPmdCoreList`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkMemoryChannels`` parameter is deprecated in
+ favor of ``OvsDpdkMemoryChannels`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkSocketMemory`` parameter is deprecated in
+ favor of ``OvsDpdkSocketMemory`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkDriverType`` parameter is deprecated in
+ favor of ``OvsDpdkDriverType`` and will be removed
+ in a future release.
diff --git a/releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml b/releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml
new file mode 100644
index 00000000..cf99ec5d
--- /dev/null
+++ b/releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ It is now possible to trigger Mistral workflows or workflow actions
+ before a deployment step is applied. This can be defined within the
+ scope of a service template and is described as a task property
+    for the Heat OS::Mistral::Workflow resource. For more details, see
+    the puppet/services/README.rst file.
\ No newline at end of file
diff --git a/releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml b/releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml
new file mode 100644
index 00000000..1bc99371
--- /dev/null
+++ b/releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml
@@ -0,0 +1,7 @@
+---
+features:
+  - Add two new example environments to facilitate deploying split-stack:
+    environments/overcloud-baremetal.j2.yaml and
+    environments/overcloud-services.yaml. The environments are used to deploy
+    two separate Heat stacks, one for just the baremetal+network configuration
+    and one for the service configuration.
diff --git a/releasenotes/notes/vipmap-output-4a9ce99930960346.yaml b/releasenotes/notes/vipmap-output-4a9ce99930960346.yaml
new file mode 100644
index 00000000..1f49bacd
--- /dev/null
+++ b/releasenotes/notes/vipmap-output-4a9ce99930960346.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - Add a VipMap output to the top-level stack outputs. VipMap is a mapping
+    from each network to the VIP address on that network. It also includes
+    the Redis VIP.
diff --git a/roles/BlockStorage.yaml b/roles/BlockStorage.yaml
index d242a5bb..b0117400 100644
--- a/roles/BlockStorage.yaml
+++ b/roles/BlockStorage.yaml
@@ -4,6 +4,10 @@
- name: BlockStorage
description: |
Cinder Block Storage node role
+ networks:
+ - InternalApi
+ - Storage
+ - StorageMgmt
ServicesDefault:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::BlockStorageCinderVolume
@@ -12,6 +16,7 @@
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Docker
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::Ntp
diff --git a/roles/CephStorage.yaml b/roles/CephStorage.yaml
index d3de6bae..647c4d5a 100644
--- a/roles/CephStorage.yaml
+++ b/roles/CephStorage.yaml
@@ -4,6 +4,9 @@
- name: CephStorage
description: |
Ceph OSD Storage node role
+ networks:
+ - Storage
+ - StorageMgmt
ServicesDefault:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::CACerts
diff --git a/roles/Compute.yaml b/roles/Compute.yaml
index 73ec6595..75a6f608 100644
--- a/roles/Compute.yaml
+++ b/roles/Compute.yaml
@@ -5,6 +5,10 @@
description: |
Basic Compute Node role
CountDefault: 1
+ networks:
+ - InternalApi
+ - Tenant
+ - Storage
HostnameFormatDefault: '%stackname%-novacompute-%index%'
disable_upgrade_deployment: True
ServicesDefault:
@@ -21,6 +25,7 @@
- OS::TripleO::Services::ComputeNeutronOvsAgent
- OS::TripleO::Services::Docker
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronLinuxbridgeAgent
diff --git a/roles/Controller.yaml b/roles/Controller.yaml
index 7511d4c0..b0a13138 100644
--- a/roles/Controller.yaml
+++ b/roles/Controller.yaml
@@ -9,6 +9,12 @@
tags:
- primary
- controller
+ networks:
+ - External
+ - InternalApi
+ - Storage
+ - StorageMgmt
+ - Tenant
HostnameFormatDefault: '%stackname%-controller-%index%'
ServicesDefault:
- OS::TripleO::Services::AodhApi
@@ -57,6 +63,7 @@
- OS::TripleO::Services::Horizon
- OS::TripleO::Services::IronicApi
- OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Keepalived
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/roles/ControllerOpenstack.yaml b/roles/ControllerOpenstack.yaml
index 2d1702e8..6cf2120e 100644
--- a/roles/ControllerOpenstack.yaml
+++ b/roles/ControllerOpenstack.yaml
@@ -9,6 +9,12 @@
tags:
- primary
- controller
+ networks:
+ - External
+ - InternalApi
+ - Storage
+ - StorageMgmt
+ - Tenant
HostnameFormatDefault: '%stackname%-controller-%index%'
ServicesDefault:
- OS::TripleO::Services::AodhApi
diff --git a/roles/Database.yaml b/roles/Database.yaml
index 3ef751a7..75b26a8c 100644
--- a/roles/Database.yaml
+++ b/roles/Database.yaml
@@ -4,6 +4,8 @@
- name: Database
description: |
Standalone database role with the database being managed via Pacemaker
+ networks:
+ - InternalApi
HostnameFormatDefault: '%stackname%-database-%index%'
ServicesDefault:
- OS::TripleO::Services::AuditD
diff --git a/roles/IronicConductor.yaml b/roles/IronicConductor.yaml
new file mode 100644
index 00000000..8a29b337
--- /dev/null
+++ b/roles/IronicConductor.yaml
@@ -0,0 +1,21 @@
+###############################################################################
+# Role: IronicConductor #
+###############################################################################
+- name: IronicConductor
+ description: |
+ Ironic Conductor node role
+ HostnameFormatDefault: '%stackname%-ironic-%index%'
+ ServicesDefault:
+ - OS::TripleO::Services::AuditD
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Collectd
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::TripleoPackages
diff --git a/roles/Messaging.yaml b/roles/Messaging.yaml
index cbef61ab..5b06063f 100644
--- a/roles/Messaging.yaml
+++ b/roles/Messaging.yaml
@@ -4,6 +4,8 @@
- name: Messaging
description: |
Standalone messaging role with RabbitMQ being managed via Pacemaker
+ networks:
+ - InternalApi
HostnameFormatDefault: '%stackname%-messaging-%index%'
ServicesDefault:
- OS::TripleO::Services::AuditD
diff --git a/roles/Networker.yaml b/roles/Networker.yaml
index b393fa7b..a28eaa63 100644
--- a/roles/Networker.yaml
+++ b/roles/Networker.yaml
@@ -5,6 +5,8 @@
description: |
    Standalone networking role to run Neutron services on their own. Includes
Pacemaker integration via PacemakerRemote
+ networks:
+ - InternalApi
HostnameFormatDefault: '%stackname%-networker-%index%'
ServicesDefault:
- OS::TripleO::Services::AuditD
diff --git a/roles/ObjectStorage.yaml b/roles/ObjectStorage.yaml
index 3741ca66..27dc1233 100644
--- a/roles/ObjectStorage.yaml
+++ b/roles/ObjectStorage.yaml
@@ -4,6 +4,10 @@
- name: ObjectStorage
description: |
Swift Object Storage node role
+ networks:
+ - InternalApi
+ - Storage
+ - StorageMgmt
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::AuditD
diff --git a/roles/README.rst b/roles/README.rst
index 6c742332..cd1fcb47 100644
--- a/roles/README.rst
+++ b/roles/README.rst
@@ -58,6 +58,10 @@ Role Options
 * description: (string) a few sentences describing the role and information
pertaining to the usage of the role.
+  * networks: (list) optional list of networks to which the role will have
+    access when network isolation is enabled. The names should match
+ those defined in network_data.yaml.
+
Working with Roles
==================
The tripleoclient provides a series of commands that can be used to view
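For example, the Database role in this change declares the single network it needs:

    - name: Database
      description: |
        Standalone database role with the database being managed via Pacemaker
      networks:
        - InternalApi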
diff --git a/roles/Telemetry.yaml b/roles/Telemetry.yaml
index 0f60364b..d23ab6e3 100644
--- a/roles/Telemetry.yaml
+++ b/roles/Telemetry.yaml
@@ -4,6 +4,8 @@
- name: Telemetry
description: |
Telemetry role that has all the telemetry services.
+ networks:
+ - InternalApi
HostnameFormatDefault: '%stackname%-telemetry-%index%'
ServicesDefault:
- OS::TripleO::Services::AodhApi
diff --git a/roles/Undercloud.yaml b/roles/Undercloud.yaml
index 0a9bcadf..bcdedc71 100644
--- a/roles/Undercloud.yaml
+++ b/roles/Undercloud.yaml
@@ -19,6 +19,7 @@
- OS::TripleO::Services::IronicApi
- OS::TripleO::Services::IronicConductor
- OS::TripleO::Services::IronicPxe
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::MistralApi
diff --git a/roles_data.yaml b/roles_data.yaml
index c536e834..f96e5625 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -12,6 +12,12 @@
tags:
- primary
- controller
+ networks:
+ - External
+ - InternalApi
+ - Storage
+ - StorageMgmt
+ - Tenant
HostnameFormatDefault: '%stackname%-controller-%index%'
ServicesDefault:
- OS::TripleO::Services::AodhApi
@@ -60,6 +66,7 @@
- OS::TripleO::Services::Horizon
- OS::TripleO::Services::IronicApi
- OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Keepalived
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
@@ -128,6 +135,10 @@
description: |
Basic Compute Node role
CountDefault: 1
+ networks:
+ - InternalApi
+ - Tenant
+ - Storage
HostnameFormatDefault: '%stackname%-novacompute-%index%'
disable_upgrade_deployment: True
ServicesDefault:
@@ -144,6 +155,7 @@
- OS::TripleO::Services::ComputeNeutronOvsAgent
- OS::TripleO::Services::Docker
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronLinuxbridgeAgent
@@ -167,6 +179,10 @@
- name: BlockStorage
description: |
Cinder Block Storage node role
+ networks:
+ - InternalApi
+ - Storage
+ - StorageMgmt
ServicesDefault:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::BlockStorageCinderVolume
@@ -175,6 +191,7 @@
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Docker
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::Ntp
@@ -191,6 +208,10 @@
- name: ObjectStorage
description: |
Swift Object Storage node role
+ networks:
+ - InternalApi
+ - Storage
+ - StorageMgmt
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::AuditD
@@ -217,6 +238,9 @@
- name: CephStorage
description: |
Ceph OSD Storage node role
+ networks:
+ - Storage
+ - StorageMgmt
ServicesDefault:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::CACerts
diff --git a/roles_data_undercloud.yaml b/roles_data_undercloud.yaml
index ad760fd6..783df91d 100644
--- a/roles_data_undercloud.yaml
+++ b/roles_data_undercloud.yaml
@@ -21,7 +21,9 @@
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::IronicApi
- OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::IronicInspector
- OS::TripleO::Services::IronicPxe
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::MistralApi
@@ -50,6 +52,7 @@
- OS::TripleO::Services::UndercloudAodhListener
- OS::TripleO::Services::UndercloudAodhNotifier
- OS::TripleO::Services::UndercloudCeilometerAgentCentral
+ - OS::TripleO::Services::UndercloudCeilometerAgentIpmi
- OS::TripleO::Services::UndercloudCeilometerAgentNotification
- OS::TripleO::Services::UndercloudGnocchiApi
- OS::TripleO::Services::UndercloudGnocchiMetricd
diff --git a/sample-env-generator/README.rst b/sample-env-generator/README.rst
new file mode 100644
index 00000000..32e94f98
--- /dev/null
+++ b/sample-env-generator/README.rst
@@ -0,0 +1,160 @@
+Sample Environment Generator
+----------------------------
+
+This is a tool to automate the generation of our sample environment
+files. It takes a yaml file as input, and based on the environments
+defined in that file generates a number of sample environment files
+from the parameters in the Heat templates.
+
+Usage
+=====
+
+The simplest case is when an existing sample environment needs to be
+updated to reflect changes in the templates. Use the tox ``genconfig``
+target to do this::
+
+ tox -e genconfig
+
+.. note:: The tool should be run from the root directory of the
+ ``tripleo-heat-templates`` project.
+
+If a new sample environment is needed, it should be added to the
+appropriate file in the ``sample-env-generator/`` directory. The existing
+entries in the files can be used as examples, and a more detailed
+explanation of the different available keys is below:
+
+Top-level:
+- **environments**: This is the top-level key in the file. All other keys
+ below should appear in a list of dictionaries that define environments.
+
+Environment-specific:
+- **name**: the output file will be this name + .yaml, in the
+ ``environments`` directory.
+- **title**: a human-readable title for the environment.
+- **description**: A description of the environment. Will be included
+ as a comment at the top of the sample file.
+- **files**: The Heat templates containing the parameter definitions
+ for the environment. Should be specified as a path relative to the
+ root of the ``tripleo-heat-templates`` project. For example:
+  ``puppet/extraconfig/tls/tls-cert-inject.yaml:``. Each filename key
+  should map to a YAML dictionary that contains a ``parameters`` entry.
+- **parameters**: There should be one ``parameters`` entry per file in the
+ ``files`` section (see the example configuration below).
+ This can be either a list of parameters related to
+ the environment, which is necessary for templates like
+ overcloud.yaml, or the string 'all', which indicates that all
+ parameters from the file should be included.
+- **static**: Can be used to specify that certain parameters must
+ not be changed. Examples would be the EnableSomething params
+ in the templates. When writing a sample config for Something,
+ ``EnableSomething: True`` would be a static param, since it
+ would be nonsense to include the environment with it set to any other
+ value.
+- **sample_values**: Sometimes it is useful to include a sample value
+ for a parameter that is not the parameter's actual default.
+ An example of this is the SSLCertificate param in the enable-tls
+ environment file.
+- **resource_registry**: Many environments also need to pass
+ resource_registry entries when they are used. This can be used
+ to specify that in the configuration file.
+- **children**: For environments that share a lot of common values but may
+ need minor variations for different use cases, sample environment entries
+ can be nested. ``children`` takes a list of environments with the same
+ structure as the top-level ``environments`` key. The main difference is
+ that all keys are optional, and any that are omitted will be inherited from
+ the parent environment definition.
+
+Some behavioral notes:
+
+- Parameters without default values will be marked as mandatory to indicate
+ that the user must set a value for them.
+- It is no longer recommended to set parameters using the ``parameters``
+ section. Instead, all parameters should be set as ``parameter_defaults``
+ which will work regardless of whether the parameter is top-level or nested.
+ Therefore, the tool will always set parameters in the ``parameter_defaults``
+ section.
+- Parameters whose name begins with the _ character are treated as private.
+ This indicates that the parameter value will be passed in from another
+ template and does not need to be exposed directly to the user.
+
+If adding a new environment, don't forget to add the new file to the
+git repository so it will be included with the review.
+
+Example
+=======
+
+Given a Heat template named ``example.yaml`` that looks like::
+
+ parameters:
+ EnableExample:
+ default: False
+ description: Enable the example feature
+ type: boolean
+ ParamOne:
+ default: one
+ description: First example param
+ type: string
+ ParamTwo:
+ description: Second example param
+ type: number
+ _PrivateParam:
+ default: does not matter
+ description: Will not show up
+ type: string
+
+And an environment generator entry that looks like::
+
+ environments:
+ -
+ name: example
+ title: Example Environment
+ description: |
+ An example environment demonstrating how to use the sample
+ environment generator. This text will be included at the top
+ of the generated file as a comment.
+ files:
+ example.yaml:
+ parameters: all
+ sample_values:
+ EnableExample: True
+ static:
+ - EnableExample
+ resource_registry:
+ OS::TripleO::ExampleData: ../extraconfig/example.yaml
+
+The generated environment file would look like::
+
+ # *******************************************************************
+ # This file was created automatically by the sample environment
+ # generator. Developers should use `tox -e genconfig` to update it.
+ # Users are recommended to make changes to a copy of the file instead
+ # of the original, if any customizations are needed.
+ # *******************************************************************
+ # title: Example Environment
+ # description: |
+ # An example environment demonstrating how to use the sample
+ # environment generator. This text will be included at the top
+ # of the generated file as a comment.
+ parameter_defaults:
+ # First example param
+ # Type: string
+ ParamOne: one
+
+ # Second example param
+ # Mandatory. This parameter must be set by the user.
+ # Type: number
+ ParamTwo: <None>
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ # Enable the example feature
+ # Type: boolean
+ EnableExample: True
+
+ # *********************
+ # End static parameters
+ # *********************
+ resource_registry:
+ OS::TripleO::ExampleData: ../extraconfig/example.yaml
diff --git a/sample-env-generator/networking.yaml b/sample-env-generator/networking.yaml
new file mode 100644
index 00000000..ea7042b4
--- /dev/null
+++ b/sample-env-generator/networking.yaml
@@ -0,0 +1,32 @@
+environments:
+ -
+ name: networking/neutron-midonet
+ title: Enable the Neutron MidoNet Services
+ description: A Heat environment that can be used to deploy MidoNet Services
+ files:
+ puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml:
+ parameters: all
+ puppet/services/neutron-base.yaml:
+ parameters:
+ - NeutronCorePlugin
+ puppet/services/neutron-dhcp.yaml:
+ parameters:
+ - NeutronEnableIsolatedMetadata
+ sample_values:
+ NeutronCorePlugin: 'midonet.neutron.plugin_v1.MidonetPluginV2'
+ NeutronEnableIsolatedMetadata: true
+ EnableZookeeperOnController: true
+ EnableCassandraOnController: true
+ static:
+ - NeutronCorePlugin
+ - NeutronEnableIsolatedMetadata
+ - EnableZookeeperOnController
+ - EnableCassandraOnController
+ resource_registry:
+ OS::TripleO::AllNodesExtraConfig: ../../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../../net-config-linux-bridge.yaml
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginMidonet
+ OS::TripleO::Services::ComputeNeutronCorePlugin: ../../puppet/services/neutron-compute-plugin-midonet.yaml
diff --git a/sample-env-generator/predictable-placement.yaml b/sample-env-generator/predictable-placement.yaml
new file mode 100644
index 00000000..ffda7aca
--- /dev/null
+++ b/sample-env-generator/predictable-placement.yaml
@@ -0,0 +1,17 @@
+environments:
+ -
+ name: predictable-placement/custom-hostnames
+ title: Custom Hostnames
+ files:
+ overcloud.yaml:
+ parameters:
+ - ControllerHostnameFormat
+ - ComputeHostnameFormat
+ - BlockStorageHostnameFormat
+ - ObjectStorageHostnameFormat
+ - CephStorageHostnameFormat
+ description: |
+ Hostname format for each role.
+ Note %index% is translated into the index of the node, e.g. 0/1/2 etc,
+ and %stackname% is replaced with OS::stack_name in the template below.
+ If you want to use the Heat-generated names, pass '' (empty string).
diff --git a/sample-env-generator/ssl.yaml b/sample-env-generator/ssl.yaml
new file mode 100644
index 00000000..6963e842
--- /dev/null
+++ b/sample-env-generator/ssl.yaml
@@ -0,0 +1,459 @@
+environments:
+ -
+ name: ssl/enable-tls
+ title: Enable SSL on OpenStack Public Endpoints
+ description: |
+ Use this environment to pass in certificates for SSL deployments.
+ For these values to take effect, one of the tls-endpoints-*.yaml environments
+ must also be used.
+ files:
+ puppet/extraconfig/tls/tls-cert-inject.yaml:
+ parameters: all
+ static:
+ # This should probably be private, but it is marked static for now so
+ # that static parameter handling gets exercised.
+ - DeployedSSLCertificatePath
+ sample_values:
+ SSLCertificate: |-
+ |
+ The contents of your certificate go here
+ SSLKey: |-
+ |
+ The contents of the private key go here
+ resource_registry:
+ OS::TripleO::NodeTLSData: ../../puppet/extraconfig/tls/tls-cert-inject.yaml
+ - name: ssl/inject-trust-anchor
+ title: Inject SSL Trust Anchor on Overcloud Nodes
+ description: |
+ When using an SSL certificate signed by a CA that is not in the default
+ list of CAs, this environment allows adding a custom CA certificate to
+ the overcloud nodes.
+ files:
+ puppet/extraconfig/tls/ca-inject.yaml:
+ parameters:
+ - SSLRootCertificate
+ sample_values:
+ SSLRootCertificate: |-
+ |
+ The contents of your certificate go here
+ resource_registry:
+ OS::TripleO::NodeTLSCAData: ../../puppet/extraconfig/tls/ca-inject.yaml
+ children:
+ - name: ssl/inject-trust-anchor-hiera
+ files:
+ puppet/services/ca-certs.yaml:
+ parameters:
+ - CAMap
+ # Need to clear this so we don't inherit the parent registry
+ resource_registry: {}
+ sample_values:
+ CAMap: |-2
+
+ first-ca-name:
+ content: |
+ The content of the CA cert goes here
+ second-ca-name:
+ content: |
+ The content of the CA cert goes here
+ -
+ name: ssl/tls-endpoints-public-ip
+ title: Deploy Public SSL Endpoints as IP Addresses
+ description: |
+ Use this environment when deploying an SSL-enabled overcloud where the public
+ endpoint is an IP address.
+ files:
+ network/endpoints/endpoint_map.yaml:
+ parameters:
+ - EndpointMap
+ sample_values:
+ # NOTE(bnemec): This is a bit odd, but it's the only way I've found that
+ # works. The |-2 tells YAML to strip two spaces off the indentation of
+ # the value, which because it's indented six spaces gets us to the four
+ # that we actually want. Note that zero is not a valid value here, so
+ # two seemed like the most sane option.
+ EndpointMap: |-2
+
+ AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'IP_ADDRESS'}
+ BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'IP_ADDRESS'}
+ CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'IP_ADDRESS'}
+ CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+ CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'IP_ADDRESS'}
+ CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'IP_ADDRESS'}
+ GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'IP_ADDRESS'}
+ GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'IP_ADDRESS'}
+ HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'IP_ADDRESS'}
+ HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'IP_ADDRESS'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'IP_ADDRESS'}
+ IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'IP_ADDRESS'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'IP_ADDRESS'}
+ KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+ KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'IP_ADDRESS'}
+ ManilaAdmin: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaInternal: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'IP_ADDRESS'}
+ MistralAdmin: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralInternal: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'}
+ NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'IP_ADDRESS'}
+ NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'IP_ADDRESS'}
+ NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'IP_ADDRESS'}
+ NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'IP_ADDRESS'}
+ OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
+ PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'}
+ SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'}
+ SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+ TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
+ ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'IP_ADDRESS'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'IP_ADDRESS'}
+ -
+ name: ssl/tls-endpoints-public-dns
+ title: Deploy Public SSL Endpoints as DNS Names
+ description: |
+ Use this environment when deploying an SSL-enabled overcloud where the public
+ endpoint is a DNS name.
+ files:
+ network/endpoints/endpoint_map.yaml:
+ parameters:
+ - EndpointMap
+ sample_values:
+ # NOTE(bnemec): This is a bit odd, but it's the only way I've found that
+ # works. The |-2 tells YAML to strip two spaces off the indentation of
+ # the value, which because it's indented six spaces gets us to the four
+ # that we actually want. Note that zero is not a valid value here, so
+ # two seemed like the most sane option.
+ EndpointMap: |-2
+
+ AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
+ CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+ CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
+ GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+ GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+ HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+ HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+ IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
+ KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+ KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+ ManilaAdmin: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaInternal: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+ MistralAdmin: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralInternal: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'}
+ NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+ NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
+ NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+ SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+ SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ -
+ name: ssl/tls-everywhere-endpoints-dns
+ title: Deploy All SSL Endpoints as DNS Names
+ description: |
+ Use this environment when deploying an overcloud where all the endpoints are
+ DNS names and TLS is used for all endpoint types.
+ files:
+ network/endpoints/endpoint_map.yaml:
+ parameters:
+ - EndpointMap
+ sample_values:
+ # NOTE(bnemec): This is a bit odd, but it's the only way I've found that
+ # works. The |-2 tells YAML to strip two spaces off the indentation of
+ # the value, which because it's indented six spaces gets us to the four
+ # that we actually want. Note that zero is not a valid value here, so
+ # two seemed like the most sane option.
+ EndpointMap: |-2
+
+ AodhAdmin: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+ AodhInternal: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanInternal: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
+ CeilometerAdmin: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+ CeilometerInternal: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+ CephRgwAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ CephRgwInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ CinderAdmin: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+ CinderInternal: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ CongressAdmin: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+ CongressInternal: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+ Ec2ApiInternal: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
+ GlanceAdmin: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+ GlanceInternal: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+ GnocchiAdmin: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+ GnocchiInternal: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+ HeatAdmin: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+ HeatInternal: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+ HeatCfnAdmin: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+ HeatCfnInternal: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+ IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+ IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
+ KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
+ KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+ ManilaAdmin: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+ ManilaInternal: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+ MistralAdmin: {protocol: 'https', port: '8989', host: 'CLOUDNAME'}
+ MistralInternal: {protocol: 'https', port: '8989', host: 'CLOUDNAME'}
+ MistralPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'CLOUDNAME'}
+ NeutronAdmin: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+ NeutronInternal: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+ NovaAdmin: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+ NovaInternal: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaPlacementAdmin: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+ NovaPlacementInternal: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
+ NovaVNCProxyAdmin: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+ NovaVNCProxyInternal: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+ PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+ SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+ SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+ SwiftAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ SwiftInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ TackerAdmin: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+ TackerInternal: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ ZaqarAdmin: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+ ZaqarInternal: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketInternal: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
diff --git a/sample-env-generator/storage.yaml b/sample-env-generator/storage.yaml
new file mode 100644
index 00000000..aa0385cc
--- /dev/null
+++ b/sample-env-generator/storage.yaml
@@ -0,0 +1,133 @@
+environments:
+ -
+ name: storage/enable-ceph
+ title: Enable Ceph Storage Backend
+ files:
+ puppet/services/cinder-volume.yaml:
+ parameters:
+ - CinderEnableIscsiBackend
+ - CinderEnableRbdBackend
+ puppet/services/cinder-backup.yaml:
+ parameters:
+ - CinderBackupBackend
+ puppet/services/nova-compute.yaml:
+ parameters:
+ - NovaEnableRbdBackend
+ puppet/services/glance-api.yaml:
+ parameters:
+ - GlanceBackend
+ puppet/services/gnocchi-api.yaml:
+ parameters:
+ - GnocchiBackend
+ sample_values:
+ CinderEnableIscsiBackend: False
+ CinderEnableRbdBackend: True
+ CinderBackupBackend: rbd
+ NovaEnableRbdBackend: True
+ GlanceBackend: rbd
+ GnocchiBackend: rbd
+ description: |
+ Include this environment to enable Ceph as the backend for
+ Cinder, Nova, Gnocchi, and Glance.
+ -
+ name: storage/cinder-nfs
+ title: Enable Cinder NFS Backend
+ files:
+ puppet/services/cinder-volume.yaml:
+ parameters:
+ - CinderNfsMountOptions
+ - CinderNfsServers
+ - CinderEnableNfsBackend
+ - CinderEnableIscsiBackend
+ sample_values:
+ CinderEnableNfsBackend: True
+ CinderEnableIscsiBackend: False
+ CinderNfsServers: '192.168.122.1:/export/cinder'
+ description: |
+ Configure and include this environment to enable the use of an NFS
+ share as the backend for Cinder.
+ -
+ name: storage/glance-nfs
+ title: Enable Glance NFS Backend
+ files:
+ puppet/services/glance-api.yaml:
+ parameters:
+ - GlanceBackend
+ - GlanceNfsEnabled
+ - GlanceNfsShare
+ - GlanceNfsOptions
+ sample_values:
+ GlanceBackend: file
+ GlanceNfsEnabled: True
+ static:
+ - GlanceBackend
+ - GlanceNfsEnabled
+ description: |
+ Configure and include this environment to enable the use of an NFS
+ share as the backend for Glance.
+ -
+ name: storage/external-ceph
+ title: Deploy Using an External Ceph Cluster
+ files:
+ puppet/services/nova-compute.yaml:
+ parameters:
+ - NovaRbdPoolName
+ - NovaEnableRbdBackend
+ - CephClientUserName
+ puppet/services/cinder-volume.yaml:
+ parameters:
+ - CinderRbdPoolName
+ - CinderEnableIscsiBackend
+ - CinderEnableRbdBackend
+ puppet/services/glance-api.yaml:
+ parameters:
+ - GlanceRbdPoolName
+ - GlanceBackend
+ puppet/services/gnocchi-api.yaml:
+ parameters:
+ - GnocchiBackend
+ puppet/services/gnocchi-base.yaml:
+ parameters:
+ - GnocchiRbdPoolName
+ puppet/services/ceph-external.yaml:
+ parameters:
+ - CephClusterFSID
+ - CephClientKey
+ - CephExternalMonHost
+ - RbdDefaultFeatures
+ puppet/services/ceph-base.yaml:
+ parameters:
+ - CephAdminKey
+ sample_values:
+ CinderEnableIscsiBackend: False
+ CinderEnableRbdBackend: True
+ NovaEnableRbdBackend: True
+ GlanceBackend: rbd
+ GnocchiBackend: rbd
+ NovaRbdPoolName: vms
+ CinderRbdPoolName: volumes
+ GlanceRbdPoolName: images
+ GnocchiRbdPoolName: metrics
+ CephClientUserName: openstack
+ CephAdminKey: ''
+ description: |
+ A Heat environment file which can be used to enable the
+ use of an externally managed Ceph cluster.
+ resource_registry:
+ OS::TripleO::Services::CephExternal: ../../puppet/services/ceph-external.yaml
+ OS::TripleO::Services::CephMon: OS::Heat::None
+ OS::TripleO::Services::CephClient: OS::Heat::None
+ OS::TripleO::Services::CephOSD: OS::Heat::None
+ -
+ name: storage/cinder-netapp-config
+ title: Enable the Cinder NetApp Backend
+ description: |
+ A Heat environment file which can be used to enable a
+ Cinder NetApp backend, configured via Puppet.
+ files:
+ puppet/services/cinder-backend-netapp.yaml:
+ parameters: all
+ static:
+ - CinderEnableNetappBackend
+ resource_registry:
+ OS::TripleO::ControllerExtraConfigPre: ../../puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
diff --git a/puppet/services/services.yaml b/services.yaml
index 0e7b6d2b..4d3ca8d1 100644
--- a/puppet/services/services.yaml
+++ b/services.yaml
@@ -1,3 +1,4 @@
+#FIXME move into common when specfile adds it
heat_template_version: pike
description: >
@@ -115,6 +116,10 @@ outputs:
yaql:
expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
data: {role_data: {get_attr: [ServiceChain, role_data]}}
+ service_workflow_tasks:
+ yaql:
+ expression: $.data.role_data.where($ != null).select($.get('service_workflow_tasks')).where($ != null).reduce($1.mergeWith($2), {})
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
step_config: {get_attr: [ServiceChain, role_data, step_config]}
upgrade_tasks:
yaql:
@@ -127,3 +132,17 @@ outputs:
expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
+
+ # Keys to support docker/services
+ puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]}
+ kolla_config:
+ map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
+ docker_config:
+ {get_attr: [ServiceChain, role_data, docker_config]}
+ docker_puppet_tasks:
+ {get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
+ host_prep_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks
+ expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
diff --git a/test-requirements.txt b/test-requirements.txt
index 76f03d75..81136356 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,3 +7,11 @@ six>=1.9.0 # MIT
sphinx!=1.6.1,>=1.5.1 # BSD
oslosphinx>=4.7.0 # Apache-2.0
reno!=2.3.1,>=1.8.0 # Apache-2.0
+coverage!=4.4,>=4.0 # Apache-2.0
+fixtures>=3.0.0 # Apache-2.0/BSD
+python-subunit>=0.0.18 # Apache-2.0/BSD
+testrepository>=0.0.18 # Apache-2.0/BSD
+testscenarios>=0.4 # Apache-2.0/BSD
+testtools>=1.4.0 # MIT
+mock>=2.0 # BSD
+oslotest>=1.10.0 # Apache-2.0
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index 92d76d23..396998a0 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -20,8 +20,15 @@ import yaml
required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords',
'RoleName', 'RoleParameters']
+# NOTE(bnemec): The duplication in this list is intentional. For the
+# transition to generated environments we have two copies of these files,
+# so they need to be listed twice. Once the deprecated versions are removed,
+# the duplicate entries can be removed as well.
envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
'tls-endpoints-public-ip.yaml',
+ 'tls-everywhere-endpoints-dns.yaml',
+ 'tls-endpoints-public-dns.yaml',
+ 'tls-endpoints-public-ip.yaml',
'tls-everywhere-endpoints-dns.yaml']
ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
REQUIRED_DOCKER_SECTIONS = ['service_name', 'docker_config', 'puppet_config',
@@ -32,6 +39,19 @@ OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
'config_image']
+OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = ['puppet_tags']
+# Mapping of parameter names to a list of the fields we should _not_ enforce
+# consistency across files on. This should only contain parameters whose
+# definition we cannot change for backwards compatibility reasons. New
+# parameters to the templates should not be added to this list.
+PARAMETER_DEFINITION_EXCLUSIONS = {'ManagementNetCidr': ['default'],
+ 'ManagementAllocationPools': ['default'],
+ 'ExternalNetCidr': ['default'],
+ 'ExternalAllocationPools': ['default'],
+ 'StorageNetCidr': ['default'],
+ 'StorageAllocationPools': ['default'],
+ 'StorageMgmtNetCidr': ['default'],
+ 'StorageMgmtAllocationPools': ['default'],
+ }
def exit_usage():
@@ -204,7 +224,30 @@ def validate_service(filename, tpl):
return 0
-def validate(filename):
+def validate(filename, param_map):
+ """Validate a Heat template
+
+ :param filename: The path to the file to validate
+ :param param_map: A dict which will be populated with the details of the
+ parameters in the template. The dict will have the
+ following structure:
+
+ {'ParameterName': [
+ {'filename': ./file1.yaml,
+ 'data': {'description': '',
+ 'type': string,
+ 'default': '',
+ ...}
+ },
+ {'filename': ./file2.yaml,
+ 'data': {'description': '',
+ 'type': string,
+ 'default': '',
+ ...}
+ },
+ ...
+ ]}
+ """
print('Validating %s' % filename)
retval = 0
try:
@@ -219,12 +262,10 @@ def validate(filename):
# qdr aliases rabbitmq service to provide alternative messaging backend
if (filename.startswith('./puppet/services/') and
- filename not in ['./puppet/services/services.yaml',
- './puppet/services/qdr.yaml']):
+ filename not in ['./puppet/services/qdr.yaml']):
retval = validate_service(filename, tpl)
- if (filename.startswith('./docker/services/') and
- filename != './docker/services/services.yaml'):
+ if filename.startswith('./docker/services/'):
retval = validate_docker_service(filename, tpl)
if filename.endswith('hyperconverged-ceph.yaml'):
@@ -235,7 +276,9 @@ def validate(filename):
return 1
# yaml is OK, now walk the parameters and output a warning for unused ones
if 'heat_template_version' in tpl:
- for p in tpl.get('parameters', {}):
+ for p, data in tpl.get('parameters', {}).items():
+ definition = {'data': data, 'filename': filename}
+ param_map.setdefault(p, []).append(definition)
if p in required_params:
continue
str_p = '\'%s\'' % p
@@ -255,6 +298,7 @@ exit_val = 0
failed_files = []
base_endpoint_map = None
env_endpoint_maps = list()
+param_map = {}
for base_path in path_args:
if os.path.isdir(base_path):
@@ -262,7 +306,7 @@ for base_path in path_args:
for f in files:
if f.endswith('.yaml') and not f.endswith('.j2.yaml'):
file_path = os.path.join(subdir, f)
- failed = validate(file_path)
+ failed = validate(file_path, param_map)
if failed:
failed_files.append(file_path)
exit_val |= failed
@@ -273,7 +317,7 @@ for base_path in path_args:
if env_endpoint_map:
env_endpoint_maps.append(env_endpoint_map)
elif os.path.isfile(base_path) and base_path.endswith('.yaml'):
- failed = validate(base_path)
+ failed = validate(base_path, param_map)
if failed:
failed_files.append(base_path)
exit_val |= failed
@@ -294,9 +338,9 @@ if base_endpoint_map and \
else:
print("%s matches base endpoint map" % env_endpoint_map['file'])
else:
- print("ERROR: Can't validate endpoint maps since a file is missing. "
- "If you meant to delete one of these files you should update this "
- "tool as well.")
+ print("ERROR: Did not find expected number of environments containing the "
+ "EndpointMap parameter. If you meant to add or remove one of these "
+ "environments then you also need to update this tool.")
if not base_endpoint_map:
failed_files.append(ENDPOINT_MAP_FILE)
if len(env_endpoint_maps) != len(envs_containing_endpoint_map):
@@ -305,6 +349,34 @@ else:
failed_files.extend(set(envs_containing_endpoint_map) - matched_files)
exit_val |= 1
+# Validate that duplicate parameters defined in multiple files all have the
+# same definition.
+mismatch_count = 0
+for p, defs in param_map.items():
+ # Nothing to validate if the parameter is only defined once
+ if len(defs) == 1:
+ continue
+ check_data = [d['data'] for d in defs]
+ # Override excluded fields so they don't affect the result
+ exclusions = PARAMETER_DEFINITION_EXCLUSIONS.get(p, [])
+ ex_dict = {}
+ for field in exclusions:
+ ex_dict[field] = 'IGNORED'
+ for d in check_data:
+ d.update(ex_dict)
+ # If all items in the list are not == the first, then the check fails
+ if check_data.count(check_data[0]) != len(check_data):
+ mismatch_count += 1
+ # TODO(bnemec): Make this a hard failure once all the templates have
+ # been fixed.
+ #exit_val |= 1
+ #failed_files.extend([d['filename'] for d in defs])
+ print('Mismatched parameter definitions found for "%s"' % p)
+ print('Definitions found:')
+ for d in defs:
+ print(' %s:\n %s' % (d['filename'], d['data']))
+print('Mismatched parameter definitions: %d' % mismatch_count)
+
if failed_files:
print('Validation failed on:')
for f in failed_files:
diff --git a/tox.ini b/tox.ini
index b92e5456..c87bf7be 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,12 +1,14 @@
[tox]
minversion = 1.6
skipsdist = True
+envlist = py35,py27,pep8
[testenv]
usedevelop = True
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
+commands = python setup.py testr --slowest --testr-args='{posargs}'
[testenv:venv]
commands = {posargs}
@@ -22,3 +24,11 @@ commands = python ./tools/process-templates.py
[testenv:releasenotes]
commands = bash -c tools/releasenotes_tox.sh
+
+[testenv:cover]
+commands = python setup.py test --coverage --coverage-package-name=tripleo_heat_templates --testr-args='{posargs}'
+
+[testenv:genconfig]
+commands =
+ python ./tools/process-templates.py
+ python ./tripleo_heat_templates/environment_generator.py sample-env-generator/
diff --git a/tripleo_heat_templates/__init__.py b/tripleo_heat_templates/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tripleo_heat_templates/__init__.py
diff --git a/tripleo_heat_templates/environment_generator.py b/tripleo_heat_templates/environment_generator.py
new file mode 100755
index 00000000..876dd854
--- /dev/null
+++ b/tripleo_heat_templates/environment_generator.py
@@ -0,0 +1,212 @@
+#!/usr/bin/env python
+
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import os
+import sys
+import yaml
+
+
+_PARAM_FORMAT = u""" # %(description)s
+ %(mandatory)s# Type: %(type)s
+ %(name)s:%(default)s
+"""
+_STATIC_MESSAGE_START = (
+ ' # ******************************************************\n'
+ ' # Static parameters - these are values that must be\n'
+ ' # included in the environment but should not be changed.\n'
+ ' # ******************************************************\n'
+ )
+_STATIC_MESSAGE_END = (' # *********************\n'
+ ' # End static parameters\n'
+ ' # *********************\n'
+ )
+_FILE_HEADER = (
+ '# *******************************************************************\n'
+ '# This file was created automatically by the sample environment\n'
+ '# generator. Developers should use `tox -e genconfig` to update it.\n'
+ '# Users are recommended to make changes to a copy of the file instead\n'
+ '# of the original, if any customizations are needed.\n'
+ '# *******************************************************************\n'
+ )
+# Certain parameter names can't be changed, but shouldn't be shown because
+# they are never intended for direct user input.
+_PRIVATE_OVERRIDES = ['server', 'servers', 'NodeIndex', 'DefaultPasswords']
+# Hidden params are not included by default when the 'all' option is used,
+# but can be explicitly included by referencing them in sample_values or
+# static. This allows us to generate sample environments using them when
+# necessary, but they won't be improperly included by accident.
+_HIDDEN_PARAMS = ['EndpointMap', 'RoleName', 'RoleParameters',
+ 'ServiceNetMap',
+ ]
+
+
+def _create_output_dir(target_file):
+ try:
+ os.makedirs(os.path.dirname(target_file))
+ except OSError as e:
+ if e.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+
+
+def _generate_environment(input_env, parent_env=None):
+ if parent_env is None:
+ parent_env = {}
+ env = dict(parent_env)
+ env.pop('children', None)
+ env.update(input_env)
+ parameter_defaults = {}
+ param_names = []
+ sample_values = env.get('sample_values', {})
+ static_names = env.get('static', [])
+ for template_file, template_data in env['files'].items():
+ with open(template_file) as f:
+ f_data = yaml.safe_load(f)
+ f_params = f_data['parameters']
+ parameter_defaults.update(f_params)
+ if template_data['parameters'] == 'all':
+ new_names = [k for k, v in f_params.items()]
+ for hidden in _HIDDEN_PARAMS:
+ # Drop hidden params unless they are explicitly referenced
+ # in the static or sample_values sections.
+ if (hidden in new_names and
+ hidden not in static_names and
+ hidden not in sample_values):
+ new_names.remove(hidden)
+ else:
+ new_names = template_data['parameters']
+ missing_params = [name for name in new_names
+ if name not in f_params]
+ if missing_params:
+ raise RuntimeError('Did not find specified parameter names %s '
+ 'in file %s for environment %s' %
+ (missing_params, template_file,
+ env['name']))
+ param_names += new_names
+
+ static_defaults = {k: v for k, v in parameter_defaults.items()
+ if k in param_names and
+ k in static_names
+ }
+ parameter_defaults = {k: v for k, v in parameter_defaults.items()
+ if k in param_names and
+ k not in _PRIVATE_OVERRIDES and
+ not k.startswith('_') and
+ k not in static_names
+ }
+
+ for k, v in sample_values.items():
+ if k in parameter_defaults:
+ parameter_defaults[k]['sample'] = v
+ if k in static_defaults:
+ static_defaults[k]['sample'] = v
+
+ def write_sample_entry(f, name, value):
+ default = value.get('default')
+ mandatory = ''
+ if default is None:
+ mandatory = ('# Mandatory. This parameter must be set by the '
+ 'user.\n ')
+ default = '<None>'
+ if value.get('sample') is not None:
+ default = value['sample']
+ # We ultimately cast this to str for output anyway
+ default = str(default)
+ if default == '':
+ default = "''"
+ # If the default value is something like %index%, yaml won't
+ # parse the output correctly unless we wrap it in quotes.
+ # However, not all default values can be wrapped so we need to
+ # do it conditionally.
+ if default.startswith('%'):
+ default = "'%s'" % default
+ if not default.startswith('\n'):
+ default = ' ' + default
+
+ values = {'name': name,
+ 'type': value['type'],
+ 'description':
+ value.get('description', '').rstrip().replace('\n',
+ '\n # '),
+ 'default': default,
+ 'mandatory': mandatory,
+ }
+ f.write(_PARAM_FORMAT % values + '\n')
+
+ target_file = os.path.join('environments', env['name'] + '.yaml')
+ _create_output_dir(target_file)
+ with open(target_file, 'w') as env_file:
+ env_file.write(_FILE_HEADER)
+ # TODO(bnemec): Once Heat allows the title and description to live in
+ # the environment itself, uncomment these entries and make them
+ # top-level keys in the YAML.
+ env_title = env.get('title', '')
+ env_file.write(u'# title: %s\n' % env_title)
+ env_desc = env.get('description', '')
+ env_file.write(u'# description: |\n')
+ for line in env_desc.splitlines():
+ env_file.write(u'# %s\n' % line)
+
+ if parameter_defaults:
+ env_file.write(u'parameter_defaults:\n')
+ for name, value in sorted(parameter_defaults.items()):
+ write_sample_entry(env_file, name, value)
+ if static_defaults:
+ env_file.write(_STATIC_MESSAGE_START)
+ for name, value in sorted(static_defaults.items()):
+ write_sample_entry(env_file, name, value)
+ if static_defaults:
+ env_file.write(_STATIC_MESSAGE_END)
+
+ if env.get('resource_registry'):
+ env_file.write(u'resource_registry:\n')
+ for res, value in sorted(env.get('resource_registry', {}).items()):
+ env_file.write(u' %s: %s\n' % (res, value))
+ print('Wrote sample environment "%s"' % target_file)
+
+ for e in env.get('children', []):
+ _generate_environment(e, env)
+
+
+def generate_environments(config_path):
+ if os.path.isdir(config_path):
+ config_files = os.listdir(config_path)
+ config_files = [os.path.join(config_path, i) for i in config_files
+ if os.path.splitext(i)[1] == '.yaml']
+ else:
+ config_files = [config_path]
+ for config_file in config_files:
+ print('Reading environment definitions from %s' % config_file)
+ with open(config_file) as f:
+ config = yaml.safe_load(f)
+ for env in config['environments']:
+ _generate_environment(env)
+
+
+def usage(exit_code=1):
+ print('Usage: %s [<filename.yaml> | <directory>]' % sys.argv[0])
+ sys.exit(exit_code)
+
+
+def main():
+ try:
+ config_path = sys.argv[1]
+ except IndexError:
+ usage()
+ generate_environments(config_path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tripleo_heat_templates/tests/__init__.py b/tripleo_heat_templates/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tripleo_heat_templates/tests/__init__.py
diff --git a/tripleo_heat_templates/tests/test_environment_generator.py b/tripleo_heat_templates/tests/test_environment_generator.py
new file mode 100644
index 00000000..94d13c71
--- /dev/null
+++ b/tripleo_heat_templates/tests/test_environment_generator.py
@@ -0,0 +1,498 @@
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import io
+import tempfile
+
+import mock
+from oslotest import base
+import six
+import testscenarios
+
+from tripleo_heat_templates import environment_generator
+
+load_tests = testscenarios.load_tests_apply_scenarios
+
+basic_template = '''
+parameters:
+ FooParam:
+ default: foo
+ description: Foo description
+ type: string
+ BarParam:
+ default: 42
+ description: Bar description
+ type: number
+ EndpointMap:
+ default: {}
+ description: Parameter that should not be included by default
+ type: json
+resources:
+ # None
+'''
+basic_private_template = '''
+parameters:
+ FooParam:
+ default: foo
+ description: Foo description
+ type: string
+ _BarParam:
+ default: 42
+ description: Bar description
+ type: number
+resources:
+ # None
+'''
+mandatory_template = '''
+parameters:
+ FooParam:
+ description: Mandatory param
+ type: string
+resources:
+ # None
+'''
+index_template = '''
+parameters:
+ FooParam:
+ description: Param with %index% as its default
+ type: string
+ default: '%index%'
+resources:
+ # None
+'''
+multiline_template = '''
+parameters:
+ FooParam:
+ description: |
+ Parameter with
+ multi-line description
+ type: string
+ default: ''
+resources:
+ # None
+'''
+
+
+class GeneratorTestCase(base.BaseTestCase):
+ content_scenarios = [
+ ('basic',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Bar description
+ # Type: number
+ BarParam: 42
+
+ # Foo description
+ # Type: string
+ FooParam: foo
+
+''',
+ }),
+ ('basic-one-param',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters:
+ - FooParam
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Foo description
+ # Type: string
+ FooParam: foo
+
+''',
+ }),
+ ('basic-static-param',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+ static:
+ - BarParam
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Foo description
+ # Type: string
+ FooParam: foo
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ # Bar description
+ # Type: number
+ BarParam: 42
+
+ # *********************
+ # End static parameters
+ # *********************
+''',
+ }),
+ ('basic-static-param-sample',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+ static:
+ - BarParam
+ sample_values:
+ BarParam: 1
+ FooParam: ''
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Foo description
+ # Type: string
+ FooParam: ''
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ # Bar description
+ # Type: number
+ BarParam: 1
+
+ # *********************
+ # End static parameters
+ # *********************
+''',
+ }),
+ ('basic-private',
+ {'template': basic_private_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Foo description
+ # Type: string
+ FooParam: foo
+
+''',
+ }),
+ ('mandatory',
+ {'template': mandatory_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Mandatory param
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ FooParam: <None>
+
+''',
+ }),
+ ('basic-sample',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+ sample_values:
+ FooParam: baz
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Bar description
+ # Type: number
+ BarParam: 42
+
+ # Foo description
+ # Type: string
+ FooParam: baz
+
+''',
+ }),
+ ('basic-resource-registry',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+ resource_registry:
+ OS::TripleO::FakeResource: fake-filename.yaml
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Bar description
+ # Type: number
+ BarParam: 42
+
+ # Foo description
+ # Type: string
+ FooParam: foo
+
+resource_registry:
+ OS::TripleO::FakeResource: fake-filename.yaml
+''',
+ }),
+ ('basic-hidden',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+ sample_values:
+ EndpointMap: |-2
+
+ foo: bar
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Bar description
+ # Type: number
+ BarParam: 42
+
+ # Parameter that should not be included by default
+ # Type: json
+ EndpointMap:
+ foo: bar
+
+ # Foo description
+ # Type: string
+ FooParam: foo
+
+''',
+ }),
+ ('missing-param',
+ {'template': basic_template,
+ 'exception': RuntimeError,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters:
+ - SomethingNonexistent
+''',
+ 'expected_output': None,
+ }),
+ ('percent-index',
+ {'template': index_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Param with %index% as its default
+ # Type: string
+ FooParam: '%index%'
+
+''',
+ }),
+ ('nested',
+ {'template': multiline_template,
+ 'exception': None,
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+ children:
+ - name: nested
+ title: Nested Environment
+ description: Nested description
+ sample_values:
+ FooParam: bar
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Parameter with
+ # multi-line description
+ # Type: string
+ FooParam: ''
+
+''',
+ 'nested_output': '''# title: Nested Environment
+# description: |
+# Nested description
+parameter_defaults:
+ # Parameter with
+ # multi-line description
+ # Type: string
+ FooParam: bar
+
+''',
+ }),
+ ('multi-line-desc',
+ {'template': multiline_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Parameter with
+ # multi-line description
+ # Type: string
+ FooParam: ''
+
+''',
+ }),
+ ]
+
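+    # With a single scenario list, multiply_scenarios() effectively returns
+    # that list unchanged; a scenario-aware loader (presumably a load_tests
+    # hook elsewhere in this module) then uses cls.scenarios to run
+    # test_generator() once per scenario.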
+ @classmethod
+ def generate_scenarios(cls):
+ cls.scenarios = testscenarios.multiply_scenarios(
+ cls.content_scenarios)
+
+ def test_generator(self):
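+        # Strategy: patch open() inside the environment_generator module so
+        # that successive calls return, in order, the environment definition
+        # (input_file), the Heat template, and a real temporary output file
+        # that can be read back and compared against expected_output.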
+ fake_input = io.StringIO(six.text_type(self.input_file))
+ fake_template = io.StringIO(six.text_type(self.template))
+ _, fake_output_path = tempfile.mkstemp()
+ fake_output = open(fake_output_path, 'w')
+ with mock.patch('tripleo_heat_templates.environment_generator.open',
+ create=True) as mock_open:
+ mock_se = [fake_input, fake_template, fake_output]
+ if self.nested_output:
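+                # A 'children' scenario re-reads the template and writes a
+                # second environment file, so two more fakes are queued.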
+ _, fake_nested_output_path = tempfile.mkstemp()
+ fake_nested_output = open(fake_nested_output_path, 'w')
+ fake_template2 = io.StringIO(six.text_type(self.template))
+ mock_se = [fake_input, fake_template, fake_output,
+ fake_template2, fake_nested_output]
+ mock_open.side_effect = mock_se
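+            # Scenarios that declare an expected exception (e.g.
+            # 'missing-param') only assert that it is raised; there is no
+            # generated output to compare.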
+ if not self.exception:
+ environment_generator.generate_environments('ignored.yaml')
+ else:
+ self.assertRaises(self.exception,
+ environment_generator.generate_environments,
+ 'ignored.yaml')
+ return
+ expected = environment_generator._FILE_HEADER + self.expected_output
+ with open(fake_output_path) as f:
+ self.assertEqual(expected, f.read())
+ if self.nested_output:
+ with open(fake_nested_output_path) as f:
+ expected = (environment_generator._FILE_HEADER +
+ self.nested_output)
+ self.assertEqual(expected, f.read())
+
+GeneratorTestCase.generate_scenarios()