-rw-r--r--  capabilities-map.yaml | 18
-rw-r--r--  ci/environments/multinode-3nodes.yaml | 1
-rw-r--r--  ci/environments/multinode-containers.yaml | 1
-rw-r--r--  ci/environments/multinode.yaml | 1
-rw-r--r--  ci/environments/multinode_major_upgrade.yaml | 1
-rw-r--r--  ci/environments/scenario001-multinode-containers.yaml | 8
-rw-r--r--  ci/environments/scenario001-multinode.yaml | 4
-rw-r--r--  ci/environments/scenario002-multinode-containers.yaml | 2
-rw-r--r--  ci/environments/scenario002-multinode.yaml | 1
-rw-r--r--  ci/environments/scenario003-multinode-containers.yaml | 1
-rw-r--r--  ci/environments/scenario003-multinode.yaml | 1
-rw-r--r--  ci/environments/scenario004-multinode-containers.yaml | 11
-rw-r--r--  ci/environments/scenario004-multinode.yaml | 1
-rw-r--r--  ci/environments/scenario006-multinode-containers.yaml | 1
-rw-r--r--  ci/environments/scenario007-multinode-containers.yaml | 1
-rw-r--r--  ci/environments/scenario007-multinode.yaml | 1
-rw-r--r--  common/deploy-steps-tasks.yaml | 2
-rw-r--r--  common/deploy-steps.j2 | 146
-rw-r--r--  common/major_upgrade_steps.j2.yaml | 68
-rw-r--r--  common/post-upgrade.j2.yaml | 2
-rw-r--r--  common/services.yaml | 17
-rw-r--r--  deployed-server/deployed-server-roles-data.yaml | 3
-rwxr-xr-x  docker/docker-puppet.py | 25
-rw-r--r--  docker/services/aodh-api.yaml | 10
-rw-r--r--  docker/services/ceilometer-agent-central.yaml | 7
-rw-r--r--  docker/services/ceilometer-agent-compute.yaml | 14
-rw-r--r--  docker/services/ceilometer-agent-notification.yaml | 17
-rw-r--r--  docker/services/ceph-ansible/ceph-base.yaml | 49
-rw-r--r--  docker/services/ceph-ansible/ceph-client.yaml | 2
-rw-r--r--  docker/services/ceph-ansible/ceph-external.yaml | 66
-rw-r--r--  docker/services/ceph-ansible/ceph-mds.yaml | 2
-rw-r--r--  docker/services/ceph-ansible/ceph-mon.yaml | 2
-rw-r--r--  docker/services/ceph-ansible/ceph-osd.yaml | 4
-rw-r--r--  docker/services/ceph-ansible/ceph-rgw.yaml | 87
-rw-r--r--  docker/services/cinder-api.yaml | 11
-rw-r--r--  docker/services/containers-common.yaml | 1
-rw-r--r--  docker/services/database/mongodb.yaml | 5
-rw-r--r--  docker/services/database/redis.yaml | 87
-rw-r--r--  docker/services/glance-api.yaml | 7
-rw-r--r--  docker/services/gnocchi-api.yaml | 10
-rw-r--r--  docker/services/gnocchi-metricd.yaml | 2
-rw-r--r--  docker/services/gnocchi-statsd.yaml | 2
-rw-r--r--  docker/services/haproxy.yaml | 5
-rw-r--r--  docker/services/heat-api-cfn.yaml | 6
-rw-r--r--  docker/services/heat-api.yaml | 9
-rw-r--r--  docker/services/horizon.yaml | 7
-rw-r--r--  docker/services/ironic-api.yaml | 8
-rw-r--r--  docker/services/ironic-pxe.yaml | 3
-rw-r--r--  docker/services/iscsid.yaml | 4
-rw-r--r--  docker/services/keystone.yaml | 12
-rw-r--r--  docker/services/manila-api.yaml | 10
-rw-r--r--  docker/services/neutron-api.yaml | 8
-rw-r--r--  docker/services/neutron-sriov-agent.yaml | 108
-rw-r--r--  docker/services/nova-api.yaml | 15
-rw-r--r--  docker/services/nova-compute.yaml | 11
-rw-r--r--  docker/services/nova-conductor.yaml | 12
-rw-r--r--  docker/services/nova-libvirt.yaml | 25
-rw-r--r--  docker/services/nova-placement.yaml | 6
-rw-r--r--  docker/services/octavia-api.yaml | 8
-rw-r--r--  docker/services/opendaylight-api.yaml | 15
-rw-r--r--  docker/services/pacemaker/cinder-backup.yaml | 6
-rw-r--r--  docker/services/pacemaker/cinder-volume.yaml | 6
-rw-r--r--  docker/services/pacemaker/database/mysql.yaml | 24
-rw-r--r--  docker/services/pacemaker/database/redis.yaml | 111
-rw-r--r--  docker/services/pacemaker/haproxy.yaml | 24
-rw-r--r--  docker/services/pacemaker/ovn-dbs.yaml | 140
-rw-r--r--  docker/services/pacemaker/rabbitmq.yaml | 18
-rw-r--r--  docker/services/panko-api.yaml | 10
-rw-r--r--  docker/services/swift-proxy.yaml | 2
-rw-r--r--  docker/services/swift-ringbuilder.yaml | 23
-rw-r--r--  docker/services/zaqar.yaml | 11
-rw-r--r--  environments/ceph-ansible/ceph-ansible-external.yaml | 30
-rw-r--r--  environments/ceph-ansible/ceph-rgw.yaml | 5
-rw-r--r--  environments/cinder-dellemc-vmax-iscsi-config.yaml | 9
-rw-r--r--  environments/contrail/roles_data_contrail.yaml | 2
-rw-r--r--  environments/docker.yaml | 5
-rw-r--r--  environments/enable-internal-tls.yaml | 4
-rw-r--r--  environments/hyperconverged-ceph.yaml | 1
-rw-r--r--  environments/manila-cephfsnative-config-docker.yaml | 20
-rw-r--r--  environments/manila-isilon-config.yaml | 17
-rw-r--r--  environments/manila-netapp-config-docker.yaml | 32
-rw-r--r--  environments/manila-vmax-config.yaml | 19
-rw-r--r--  environments/network-environment-v6.yaml | 60
-rw-r--r--  environments/network-isolation-no-tunneling.j2.yaml | 34
-rw-r--r--  environments/network-isolation-no-tunneling.yaml | 61
-rw-r--r--  environments/network-isolation.j2.yaml | 3
-rw-r--r--  environments/neutron-nuage-config.yaml | 2
-rw-r--r--  environments/neutron-opendaylight-dpdk.yaml | 4
-rw-r--r--  environments/neutron-opendaylight-sriov.yaml | 1
-rw-r--r--  environments/neutron-opendaylight.yaml | 1
-rwxr-xr-x  environments/neutron-sriov.yaml | 15
-rw-r--r--  environments/puppet-ceph-external.yaml | 1
-rw-r--r--  environments/services-docker/neutron-opendaylight.yaml | 3
-rw-r--r--  environments/services-docker/neutron-ovn-ha.yaml | 28
-rw-r--r--  environments/services-docker/neutron-sriov.yaml | 12
-rw-r--r--  environments/services/neutron-lbaasv2.yaml | 2
-rw-r--r--  environments/ssl/enable-internal-tls.yaml | 36
-rw-r--r--  environments/ssl/enable-tls.yaml | 1
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh | 62
-rw-r--r--  extraconfig/tasks/tripleo_upgrade_node.sh | 46
-rwxr-xr-x  extraconfig/tasks/yum_update.sh | 3
-rw-r--r--  j2_excludes.yaml | 7
-rw-r--r--  network/external_v6.yaml | 76
-rw-r--r--  network/internal_api_v6.yaml | 72
-rw-r--r--  network/management_v6.yaml | 71
-rw-r--r--  network/network.j2 | 105
-rw-r--r--  network/network.network.j2.yaml | 92
-rw-r--r--  network/network_v6.network.j2.yaml | 2
-rw-r--r--  network/storage_mgmt_v6.yaml | 72
-rw-r--r--  network/storage_v6.yaml | 72
-rw-r--r--  network/tenant_v6.yaml | 72
-rw-r--r--  network_data.yaml | 54
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml | 4
-rw-r--r--  overcloud.j2.yaml | 88
-rw-r--r--  puppet/all-nodes-config.yaml | 6
-rw-r--r--  puppet/role.role.j2.yaml | 30
-rw-r--r--  puppet/services/README.rst | 4
-rw-r--r--  puppet/services/aodh-api.yaml | 15
-rw-r--r--  puppet/services/barbican-api.yaml | 35
-rw-r--r--  puppet/services/ceilometer-api.yaml | 15
-rw-r--r--  puppet/services/ceph-base.yaml | 7
-rw-r--r--  puppet/services/cinder-api.yaml | 53
-rw-r--r--  puppet/services/cinder-backend-dellemc-vmax-iscsi.yaml | 65
-rw-r--r--  puppet/services/database/redis-base.yaml | 20
-rw-r--r--  puppet/services/database/redis.yaml | 41
-rw-r--r--  puppet/services/disabled/mongodb-disabled.yaml | 5
-rw-r--r--  puppet/services/gnocchi-api.yaml | 15
-rw-r--r--  puppet/services/haproxy-public-tls-certmonger.yaml | 17
-rw-r--r--  puppet/services/keystone.yaml | 15
-rw-r--r--  puppet/services/manila-backend-isilon.yaml | 72
-rw-r--r--  puppet/services/manila-backend-vmax.yaml | 74
-rw-r--r--  puppet/services/manila-scheduler.yaml | 2
-rw-r--r--  puppet/services/neutron-base.yaml | 5
-rw-r--r--  puppet/services/neutron-ovs-agent.yaml | 35
-rw-r--r--  puppet/services/neutron-plugin-ml2-nuage.yaml | 12
-rw-r--r--  puppet/services/neutron-plugin-nsx.yaml | 20
-rw-r--r--  puppet/services/neutron-sriov-agent.yaml | 14
-rw-r--r--  puppet/services/neutron-sriov-host-config.yaml | 78
-rw-r--r--  puppet/services/nova-api.yaml | 167
-rw-r--r--  puppet/services/opendaylight-api.yaml | 10
-rw-r--r--  puppet/services/opendaylight-ovs.yaml | 35
-rw-r--r--  puppet/services/openvswitch.yaml | 4
-rw-r--r--  puppet/services/pacemaker/cinder-volume.yaml | 20
-rw-r--r--  puppet/services/pacemaker/database/redis.yaml | 11
-rw-r--r--  puppet/services/panko-api.yaml | 45
-rw-r--r--  puppet/services/rabbitmq.yaml | 1
-rw-r--r--  puppet/services/tacker.yaml | 1
-rw-r--r--  puppet/services/zaqar-api.yaml | 68
-rw-r--r--  releasenotes/notes/adds-post_upgrade_tasks-eba0656012c861a1.yaml | 12
-rw-r--r--  releasenotes/notes/configuring-snat-in-opendaylight-d5ed4d62275e1876.yaml | 5
-rw-r--r--  releasenotes/notes/containerized-services-logs-0dc652513870f46d.yaml | 11
-rw-r--r--  releasenotes/notes/fix-missing-tacker-password-c2ce555cdd52c102.yaml | 4
-rw-r--r--  releasenotes/notes/fix-neutron_admin_auth_url-c88224251d8eb807.yaml | 4
-rw-r--r--  releasenotes/notes/fix-odl-websocket-firewall-9e2f78ebaa39313f.yaml | 6
-rw-r--r--  releasenotes/notes/ipv6_defaults-7dbb62113f4e5084.yaml | 10
-rw-r--r--  releasenotes/notes/isilon_manila_e9677898724a11e7.yaml | 4
-rw-r--r--  releasenotes/notes/odl-qos-48b70c804755e3a5.yaml | 4
-rw-r--r--  releasenotes/notes/ovn-container-support-3ab333fff6e90dc4.yaml | 1
-rw-r--r--  releasenotes/notes/vmax_cinder_a6672898724a11e7.yaml | 4
-rw-r--r--  releasenotes/notes/vmax_manila_2967789872aa11e8.yaml | 4
-rw-r--r--  releasenotes/notes/workflow_tasks-4da5830821b7154b.yaml (renamed from releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml) | 0
-rw-r--r--  roles/Compute.yaml | 1
-rw-r--r--  roles/ComputeHCI.yaml | 1
-rw-r--r--  roles/Controller.yaml | 3
-rw-r--r--  roles/ControllerOpenstack.yaml | 7
-rw-r--r--  roles/Database.yaml | 1
-rw-r--r--  roles/Networker.yaml | 7
-rw-r--r--  roles_data.yaml | 4
-rw-r--r--  sample-env-generator/ssl.yaml | 133
-rw-r--r--  test-requirements.txt | 2
-rwxr-xr-x  tools/yaml-validate.py | 11
-rwxr-xr-x  tripleo_heat_templates/environment_generator.py | 2
172 files changed, 2523 insertions, 1390 deletions
diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index 1af6be9e..26100639 100644
--- a/capabilities-map.yaml
+++ b/capabilities-map.yaml
@@ -316,6 +316,10 @@ topics:
title: Cinder Dell EMC Unity backend
description: >
Enables a Cinder Dell EMC Unity backend,
+ - file: environments/cinder-dellemc-vmax-iscsi-config.yaml
+ title: Cinder Dell EMC VMAX ISCSI backend
+ description: >
+ Enables a Cinder Dell EMC VMAX ISCSI backend,
configured via puppet
requires:
- overcloud-resource-registry-puppet.yaml
@@ -415,6 +419,20 @@ topics:
- file: environments/manila-vnx-config.yaml
title: Deploys Manila with VNX driver
description: Deploys Manila and configures VNX as its default backend.
+ - title: Manila with VMAX
+ description: >
+ Deploys Manila and configures it with the VMAX driver.
+ environments:
+ - file: environments/manila-vmax-config.yaml
+ title: Deploys Manila with VMAX driver
+ description: Deploys Manila and configures VMAX as its default backend.
+ - title: Manila with Isilon
+ description: >
+ Deploys Manila and configures it with the Isilon driver.
+ environments:
+ - file: environments/manila-isilon-config.yaml
+ title: Deploys Manila with Isilon driver
+ description: Deploys Manila and configures Isilon as its default backend.
requires:
- overcloud-resource-registry-puppet.yaml
- title: Glance backends
diff --git a/ci/environments/multinode-3nodes.yaml b/ci/environments/multinode-3nodes.yaml
index e040b015..647b1c65 100644
--- a/ci/environments/multinode-3nodes.yaml
+++ b/ci/environments/multinode-3nodes.yaml
@@ -24,6 +24,7 @@
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/ci/environments/multinode-containers.yaml b/ci/environments/multinode-containers.yaml
index 03baf4aa..f050d9a2 100644
--- a/ci/environments/multinode-containers.yaml
+++ b/ci/environments/multinode-containers.yaml
@@ -25,6 +25,7 @@ parameter_defaults:
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
index f945a021..eeeefc07 100644
--- a/ci/environments/multinode.yaml
+++ b/ci/environments/multinode.yaml
@@ -21,6 +21,7 @@ parameter_defaults:
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml
index 81301349..78127a4f 100644
--- a/ci/environments/multinode_major_upgrade.yaml
+++ b/ci/environments/multinode_major_upgrade.yaml
@@ -14,6 +14,7 @@ resource_registry:
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/ci/environments/scenario001-multinode-containers.yaml b/ci/environments/scenario001-multinode-containers.yaml
index 0cdbef13..e061c0a5 100644
--- a/ci/environments/scenario001-multinode-containers.yaml
+++ b/ci/environments/scenario001-multinode-containers.yaml
@@ -12,7 +12,6 @@ resource_registry:
OS::TripleO::Services::CephClient: ../../docker/services/ceph-ansible/ceph-client.yaml
OS::TripleO::Services::PankoApi: ../../docker/services/panko-api.yaml
OS::TripleO::Services::Collectd: ../../docker/services/collectd.yaml
- OS::TripleO::Services::Tacker: ../../docker/services/tacker.yaml
OS::TripleO::Services::Congress: ../../docker/services/congress.yaml
# TODO fluentd is being containerized: https://review.openstack.org/#/c/467072/
OS::TripleO::Services::FluentdClient: ../../puppet/services/logging/fluentd-client.yaml
@@ -30,6 +29,7 @@ resource_registry:
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
@@ -86,7 +86,6 @@ parameter_defaults:
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
- OS::TripleO::Services::Collectd
- - OS::TripleO::Services::Tacker
- OS::TripleO::Services::Congress
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
@@ -97,6 +96,8 @@ parameter_defaults:
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
+ # This makes the job twice as fast
+ ceilometer::agent::polling::polling_interval: 15
Debug: true
#NOTE(gfidente): not great but we need this to deploy on ext4
#http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
@@ -105,6 +106,7 @@ parameter_defaults:
- /dev/loop3
journal_size: 512
journal_collocation: true
+ osd_scenario: collocated
CephAnsibleExtraConfig:
ceph_conf_overrides:
global:
@@ -122,12 +124,14 @@ parameter_defaults:
CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
CephPoolDefaultSize: 1
+ DockerCephDaemonImage: ceph/daemon:tag-build-master-jewel-centos-7
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
CinderBackupBackend: ceph
GlanceBackend: rbd
GnocchiBackend: rbd
CinderEnableIscsiBackend: false
+ GnocchiArchivePolicy: 'high'
BannerText: |
******************************************************************
* This system is for the use of authorized users only. Usage of *
diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml
index bab08a30..a9fbdfb7 100644
--- a/ci/environments/scenario001-multinode.yaml
+++ b/ci/environments/scenario001-multinode.yaml
@@ -27,6 +27,7 @@ resource_registry:
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
@@ -94,6 +95,8 @@ parameter_defaults:
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
+ # This makes the job twice as fast
+ ceilometer::agent::polling::polling_interval: 15
Debug: true
#NOTE(gfidente): not great but we need this to deploy on ext4
#http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
@@ -114,6 +117,7 @@ parameter_defaults:
GlanceBackend: rbd
GnocchiBackend: rbd
CinderEnableIscsiBackend: false
+ GnocchiArchivePolicy: 'high'
BannerText: |
******************************************************************
* This system is for the use of authorized users only. Usage of *
diff --git a/ci/environments/scenario002-multinode-containers.yaml b/ci/environments/scenario002-multinode-containers.yaml
index 43acf6dc..0ca67d00 100644
--- a/ci/environments/scenario002-multinode-containers.yaml
+++ b/ci/environments/scenario002-multinode-containers.yaml
@@ -9,6 +9,7 @@ resource_registry:
OS::TripleO::Services::BarbicanApi: ../../docker/services/barbican-api.yaml
OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
+ OS::TripleO::Services::MongoDb: ../../docker/services/database/mongodb.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
# overcloud-resource-registry.yaml there doesn't have this Docker
@@ -22,6 +23,7 @@ resource_registry:
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/ci/environments/scenario002-multinode.yaml b/ci/environments/scenario002-multinode.yaml
index 5670c213..6c7f4ebb 100644
--- a/ci/environments/scenario002-multinode.yaml
+++ b/ci/environments/scenario002-multinode.yaml
@@ -22,6 +22,7 @@ resource_registry:
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/ci/environments/scenario003-multinode-containers.yaml b/ci/environments/scenario003-multinode-containers.yaml
index e3789ea8..107b66b2 100644
--- a/ci/environments/scenario003-multinode-containers.yaml
+++ b/ci/environments/scenario003-multinode-containers.yaml
@@ -24,6 +24,7 @@ resource_registry:
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/ci/environments/scenario003-multinode.yaml b/ci/environments/scenario003-multinode.yaml
index 5e797b40..968a881b 100644
--- a/ci/environments/scenario003-multinode.yaml
+++ b/ci/environments/scenario003-multinode.yaml
@@ -20,6 +20,7 @@ resource_registry:
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/ci/environments/scenario004-multinode-containers.yaml b/ci/environments/scenario004-multinode-containers.yaml
index 5590de26..e2be75cc 100644
--- a/ci/environments/scenario004-multinode-containers.yaml
+++ b/ci/environments/scenario004-multinode-containers.yaml
@@ -16,10 +16,8 @@ resource_registry:
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
OS::TripleO::Services::ManilaApi: ../../docker/services/manila-api.yaml
OS::TripleO::Services::ManilaScheduler: ../../docker/services/manila-scheduler.yaml
- OS::TripleO::Services::ManilaShare: ../../docker/services/manila-share.yaml
+ OS::TripleO::Services::ManilaShare: ../../docker/services/pacemaker/manila-share.yaml
OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml
- # TODO: containerize NeutronBgpVpnApi
- OS::TripleO::Services::NeutronBgpVpnApi: ../../puppet/services/neutron-bgpvpn-api.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
# overcloud-resource-registry.yaml there doesn't have this Docker
@@ -38,6 +36,7 @@ parameter_defaults:
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::CephRgw
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
@@ -48,7 +47,6 @@ parameter_defaults:
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
- - OS::TripleO::Services::NeutronBgpVpnApi
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
@@ -79,6 +77,9 @@ parameter_defaults:
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Iscsid
+ # TODO: in Queens, re-add bgp-vpn and l2gw services when
+ # containerized.
+ # https://bugs.launchpad.net/bugs/1713612
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
@@ -97,6 +98,4 @@ parameter_defaults:
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
CephPoolDefaultSize: 1
SwiftCeilometerPipelineEnabled: false
- NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
- BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
NotificationDriver: 'noop'
diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml
index bd30347a..a81f05e1 100644
--- a/ci/environments/scenario004-multinode.yaml
+++ b/ci/environments/scenario004-multinode.yaml
@@ -36,6 +36,7 @@ parameter_defaults:
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::CephRgw
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/ci/environments/scenario006-multinode-containers.yaml b/ci/environments/scenario006-multinode-containers.yaml
index 4715e339..d0a952d5 100644
--- a/ci/environments/scenario006-multinode-containers.yaml
+++ b/ci/environments/scenario006-multinode-containers.yaml
@@ -9,6 +9,7 @@ resource_registry:
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/ci/environments/scenario007-multinode-containers.yaml b/ci/environments/scenario007-multinode-containers.yaml
index 8e1e6b6c..faf56ba4 100644
--- a/ci/environments/scenario007-multinode-containers.yaml
+++ b/ci/environments/scenario007-multinode-containers.yaml
@@ -15,6 +15,7 @@ resource_registry:
OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/ci/environments/scenario007-multinode.yaml b/ci/environments/scenario007-multinode.yaml
index dd73f476..69a63d8e 100644
--- a/ci/environments/scenario007-multinode.yaml
+++ b/ci/environments/scenario007-multinode.yaml
@@ -22,6 +22,7 @@ resource_registry:
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/common/deploy-steps-tasks.yaml b/common/deploy-steps-tasks.yaml
index f0729425..73d3036c 100644
--- a/common/deploy-steps-tasks.yaml
+++ b/common/deploy-steps-tasks.yaml
@@ -5,7 +5,7 @@
# Per step puppet configuration of the baremetal host
#####################################################
- name: Write the config_step hieradata
- copy: content="{{dict(step=step|int)|to_json}}" dest=/etc/puppet/hieradata/config_step.json force=true
+ copy: content="{{dict(step=step|int)|to_json}}" dest=/etc/puppet/hieradata/config_step.json force=true mode=0600
- name: Run puppet host configuration for step {{step}}
command: >-
puppet apply
diff --git a/common/deploy-steps.j2 b/common/deploy-steps.j2
index 32d1afcf..c076a09d 100644
--- a/common/deploy-steps.j2
+++ b/common/deploy-steps.j2
@@ -1,7 +1,12 @@
# certain initialization steps (run in a container) will occur
# on the role marked as primary controller or the first role listed
-{%- set primary_role = [roles[0]] -%}
-{%- for role in roles -%}
+{%- if enabled_roles is not defined -%}
+ # On upgrade certain roles can be disabled for operator driven upgrades
+ # See major_upgrade_steps.j2.yaml and post-upgrade.j2.yaml
+ {%- set enabled_roles = roles -%}
+{%- endif -%}
+{%- set primary_role = [enabled_roles[0]] -%}
+{%- for role in enabled_roles -%}
{%- if 'primary' in role.tags and 'controller' in role.tags -%}
{%- set _ = primary_role.pop() -%}
{%- set _ = primary_role.append(role) -%}
@@ -55,10 +60,10 @@ conditions:
{% for step in range(1, deploy_steps_max) %}
WorkflowTasks_Step{{step}}_Enabled:
or:
- {%- for role in roles %}
+ {%- for role in enabled_roles %}
- not:
equals:
- - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+ - get_param: [role_data, {{role.name}}, workflow_tasks, step{{step}}]
- ''
- False
{%- endfor %}
@@ -90,30 +95,30 @@ resources:
_TASKS: {get_file: deploy-steps-tasks.yaml}
{%- for step in range(1, deploy_steps_max) %}
-# BEGIN service_workflow_tasks handling
+# BEGIN workflow_tasks handling
WorkflowTasks_Step{{step}}:
type: OS::Mistral::Workflow
condition: WorkflowTasks_Step{{step}}_Enabled
depends_on:
{%- if step == 1 %}
- {%- for dep in roles %}
+ {%- for dep in enabled_roles %}
- {{dep.name}}PreConfig
- {{dep.name}}ArtifactsDeploy
{%- endfor %}
{%- else %}
- {%- for dep in roles %}
+ {%- for dep in enabled_roles %}
- {{dep.name}}Deployment_Step{{step -1}}
{%- endfor %}
{%- endif %}
properties:
- name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+ name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflow_tasks", "step{{step}}"]]}
type: direct
tasks:
yaql:
expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
data:
- {%- for role in roles %}
- - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+ {%- for role in enabled_roles %}
+ - get_param: [role_data, {{role.name}}, workflow_tasks]
{%- endfor %}
WorkflowTasks_Step{{step}}_Execution:
@@ -143,13 +148,14 @@ resources:
{%- endfor %}
evaluate_env: false
always_update: true
-# END service_workflow_tasks handling
+# END workflow_tasks handling
{% endfor %}
+# Artifacts config and HostPrepConfig is done on all roles, not only
+# enabled_roles, because on upgrade we need to write the json files
+# for the operator driven upgrade scripts (the ansible steps consume them)
{% for role in roles %}
- # Post deployment steps for all roles
- # A single config is re-applied with an incrementing step number
- # {{role.name}} Role steps
+ # Prepare host tasks for {{role.name}}
{{role.name}}ArtifactsConfig:
type: ../puppet/deploy-artifacts.yaml
@@ -182,54 +188,52 @@ resources:
puppet_step_config: {get_param: [role_data, {{role.name}}, step_config]}
tasks:
# Join host_prep_tasks with the other per-host configuration
- yaql:
- expression: $.data.host_prep_tasks + $.data.template_tasks
- data:
- host_prep_tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
- template_tasks:
+ list_concat:
+ - {get_param: [role_data, {{role.name}}, host_prep_tasks]}
+ -
{%- raw %}
- # Write the manifest for baremetal puppet configuration
- - name: Create /var/lib/tripleo-config directory
- file: path=/var/lib/tripleo-config state=directory
- - name: Write the puppet step_config manifest
- copy: content="{{puppet_step_config}}" dest=/var/lib/tripleo-config/puppet_step_config.pp force=yes
- # this creates a JSON config file for our docker-puppet.py script
- - name: Create /var/lib/docker-puppet
- file: path=/var/lib/docker-puppet state=directory
- - name: Write docker-puppet-tasks json files
- copy: content="{{puppet_config | to_json}}" dest=/var/lib/docker-puppet/docker-puppet.json force=yes
- # FIXME: can we move docker-puppet somewhere so it's installed via a package?
- - name: Write docker-puppet.py
- copy: content="{{docker_puppet_script}}" dest=/var/lib/docker-puppet/docker-puppet.py force=yes
- # Here we are dumping all the docker container startup configuration data
- # so that we can have access to how they are started outside of heat
- # and docker-cmd. This lets us create command line tools to test containers.
- # FIXME do we need the docker-container-startup-configs.json or is the new per-step
- # data consumed by paunch enough?
- - name: Write docker-container-startup-configs
- copy: content="{{docker_startup_configs | to_json}}" dest=/var/lib/docker-container-startup-configs.json force=yes
- - name: Write per-step docker-container-startup-configs
- copy: content="{{item.value|to_json}}" dest="/var/lib/tripleo-config/docker-container-startup-config-{{item.key}}.json" force=yes
- with_dict: "{{docker_startup_configs}}"
- - name: Create /var/lib/kolla/config_files directory
- file: path=/var/lib/kolla/config_files state=directory
- - name: Write kolla config json files
- copy: content="{{item.value|to_json}}" dest="{{item.key}}" force=yes
- with_dict: "{{kolla_config}}"
- ########################################################
- # Bootstrap tasks, only performed on bootstrap_server_id
- ########################################################
- - name: Clean /var/lib/docker-puppet/docker-puppet-tasks*.json files
- file:
- path: "{{item}}"
- state: absent
- with_fileglob:
- - /var/lib/docker-puppet/docker-puppet-tasks*.json
- when: deploy_server_id == bootstrap_server_id
- - name: Write docker-puppet-tasks json files
- copy: content="{{item.value|to_json}}" dest=/var/lib/docker-puppet/docker-puppet-tasks{{item.key.replace("step_", "")}}.json force=yes
- with_dict: "{{docker_puppet_tasks}}"
- when: deploy_server_id == bootstrap_server_id
+ # Write the manifest for baremetal puppet configuration
+ - name: Create /var/lib/tripleo-config directory
+ file: path=/var/lib/tripleo-config state=directory
+ - name: Write the puppet step_config manifest
+ copy: content="{{puppet_step_config}}" dest=/var/lib/tripleo-config/puppet_step_config.pp force=yes mode=0600
+ # this creates a JSON config file for our docker-puppet.py script
+ - name: Create /var/lib/docker-puppet
+ file: path=/var/lib/docker-puppet state=directory
+ - name: Write docker-puppet-tasks json files
+ copy: content="{{puppet_config | to_json}}" dest=/var/lib/docker-puppet/docker-puppet.json force=yes mode=0600
+ # FIXME: can we move docker-puppet somewhere so it's installed via a package?
+ - name: Write docker-puppet.py
+ copy: content="{{docker_puppet_script}}" dest=/var/lib/docker-puppet/docker-puppet.py force=yes mode=0600
+ # Here we are dumping all the docker container startup configuration data
+ # so that we can have access to how they are started outside of heat
+ # and docker-cmd. This lets us create command line tools to test containers.
+ # FIXME do we need the docker-container-startup-configs.json or is the new per-step
+ # data consumed by paunch enough?
+ - name: Write docker-container-startup-configs
+ copy: content="{{docker_startup_configs | to_json}}" dest=/var/lib/docker-container-startup-configs.json force=yes mode=0600
+ - name: Write per-step docker-container-startup-configs
+ copy: content="{{item.value|to_json}}" dest="/var/lib/tripleo-config/docker-container-startup-config-{{item.key}}.json" force=yes mode=0600
+ with_dict: "{{docker_startup_configs}}"
+ - name: Create /var/lib/kolla/config_files directory
+ file: path=/var/lib/kolla/config_files state=directory
+ - name: Write kolla config json files
+ copy: content="{{item.value|to_json}}" dest="{{item.key}}" force=yes mode=0600
+ with_dict: "{{kolla_config}}"
+ ########################################################
+ # Bootstrap tasks, only performed on bootstrap_server_id
+ ########################################################
+ - name: Clean /var/lib/docker-puppet/docker-puppet-tasks*.json files
+ file:
+ path: "{{item}}"
+ state: absent
+ with_fileglob:
+ - /var/lib/docker-puppet/docker-puppet-tasks*.json
+ when: deploy_server_id == bootstrap_server_id
+ - name: Write docker-puppet-tasks json files
+ copy: content="{{item.value|to_json}}" dest=/var/lib/docker-puppet/docker-puppet-tasks{{item.key.replace("step_", "")}}.json force=yes mode=0600
+ with_dict: "{{docker_puppet_tasks}}"
+ when: deploy_server_id == bootstrap_server_id
{%- endraw %}
{{role.name}}HostPrepDeployment:
@@ -237,9 +241,10 @@ resources:
properties:
servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: {{role.name}}HostPrepConfig}
+{% endfor %}
- # BEGIN CONFIG STEPS
-
+ # BEGIN CONFIG STEPS, only on enabled_roles
+{%- for role in enabled_roles %}
{{role.name}}PreConfig:
type: OS::TripleO::Tasks::{{role.name}}PreConfig
depends_on: {{role.name}}HostPrepDeployment
@@ -248,6 +253,8 @@ resources:
input_values:
update_identifier: {get_param: DeployIdentifier}
+ # Deployment steps for {{role.name}}
+ # A single config is re-applied with an incrementing step number
{% for step in range(1, deploy_steps_max) %}
{{role.name}}Deployment_Step{{step}}:
type: OS::TripleO::DeploymentSteps
@@ -259,12 +266,12 @@ resources:
# if https://bugs.launchpad.net/heat/+bug/1700569
# is fixed.
{%- if step == 1 %}
- {%- for dep in roles %}
+ {%- for dep in enabled_roles %}
- {{dep.name}}PreConfig
- {{dep.name}}ArtifactsDeploy
{%- endfor %}
{%- else %}
- {%- for dep in roles %}
+ {%- for dep in enabled_roles %}
- {{dep.name}}Deployment_Step{{step -1}}
{%- endfor %}
{%- endif %}
@@ -287,7 +294,7 @@ resources:
# after all the previous deployment steps.
{{role.name}}ExtraConfigPost:
depends_on:
- {%- for dep in roles %}
+ {%- for dep in enabled_roles %}
- {{dep.name}}Deployment_Step5
{%- endfor %}
type: OS::TripleO::NodeExtraConfigPost
@@ -300,7 +307,7 @@ resources:
{{role.name}}PostConfig:
type: OS::TripleO::Tasks::{{role.name}}PostConfig
depends_on:
- {%- for dep in roles %}
+ {%- for dep in enabled_roles %}
- {{dep.name}}ExtraConfigPost
{%- endfor %}
properties:
@@ -356,8 +363,3 @@ outputs:
with_sequence: start=0 end={{upgrade_steps_max-1}}
loop_control:
loop_var: step
- - include: deploy_steps_tasks.yaml
- with_sequence: start=0 end={{deploy_steps_max-1}}
- loop_control:
- loop_var: step
-
diff --git a/common/major_upgrade_steps.j2.yaml b/common/major_upgrade_steps.j2.yaml
index 11113eec..36b342f9 100644
--- a/common/major_upgrade_steps.j2.yaml
+++ b/common/major_upgrade_steps.j2.yaml
@@ -54,7 +54,6 @@ resources:
params:
ROLE_NAME: {{role.name}}
- get_file: ../extraconfig/tasks/pacemaker_common_functions.sh
- - get_file: ../extraconfig/tasks/run_puppet.sh
- get_file: ../extraconfig/tasks/tripleo_upgrade_node.sh
{{role.name}}DeliverUpgradeScriptDeployment:
@@ -129,32 +128,6 @@ resources:
{%- endfor %}
{%- endfor %}
-# Dump the puppet manifests to be apply later when disable_upgrade_deployment
-# is to true
-{% for role in roles if role.disable_upgrade_deployment|default(false) %}
- {{role.name}}DeliverPuppetConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- cat > /root/{{role.name}}_puppet_config.pp << ENDOFCAT
- PUPPET_CLASSES
- ENDOFCAT
- params:
- PUPPET_CLASSES: {get_param: [role_data, {{role.name}}, step_config]}
-
- {{role.name}}DeliverPuppetDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}DeliverPuppetConfig}
-{% endfor %}
-
# Upgrade Steps for all roles
{%- for step in range(0, upgrade_steps_max) %}
# Config resources for step {{step}}
@@ -214,6 +187,43 @@ resources:
role_data: {get_param: role_data}
ctlplane_service_ips: {get_param: ctlplane_service_ips}
+{%- for step in range(0, upgrade_steps_max) %}
+ {%- for role in roles %}
+ {{role.name}}PostUpgradeConfig_Config{{step}}:
+ type: OS::TripleO::UpgradeConfig
+ depends_on:
+ {%- for role_inside in enabled_roles %}
+ {%- if step > 0 %}
+ - {{role_inside.name}}PostUpgradeConfig_Deployment{{step -1}}
+ {%- else %}
+ - AllNodesPostUpgradeSteps
+ {%- endif %}
+ {%- endfor %}
+ properties:
+ UpgradeStepConfig: {get_param: [role_data, {{role.name}}, post_upgrade_tasks]}
+ step: {{step}}
+ {%- endfor %}
+
+ {%- for role in enabled_roles %}
+ {{role.name}}PostUpgradeConfig_Deployment{{step}}:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on:
+ {%- for role_inside in enabled_roles %}
+ {%- if step > 0 %}
+ - {{role_inside.name}}PostUpgradeConfig_Deployment{{step -1}}
+ {%- else %}
+ - AllNodesPostUpgradeSteps
+ {%- endif %}
+ {%- endfor %}
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}PostUpgradeConfig_Config{{step}}}
+ input_values:
+ role: {{role.name}}
+ update_identifier: {get_param: UpdateIdentifier}
+ {%- endfor %}
+{%- endfor %}
+
outputs:
# Output the config for each role, just use Step1 as the config should be
# the same for all steps (only the tag provided differs)
@@ -223,3 +233,7 @@ outputs:
{% for role in roles %}
{{role.name.lower()}}: {get_attr: [{{role.name}}UpgradeConfig_Step1, upgrade_config]}
{% endfor %}
+ RoleConfig:
+ description: Mapping of config data for all roles
+ value: {get_attr: [AllNodesPostUpgradeSteps, RoleConfig]}
+
diff --git a/common/post-upgrade.j2.yaml b/common/post-upgrade.j2.yaml
index 7cd6abdf..af47c6ea 100644
--- a/common/post-upgrade.j2.yaml
+++ b/common/post-upgrade.j2.yaml
@@ -1,4 +1,4 @@
# Note the include here is the same as post.j2.yaml but the data used at
# # the time of rendering is different if any roles disable upgrades
-{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% set enabled_roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
{% include 'deploy-steps.j2' %}
diff --git a/common/services.yaml b/common/services.yaml
index a8186e43..a0015c7e 100644
--- a/common/services.yaml
+++ b/common/services.yaml
@@ -174,13 +174,13 @@ resources:
expression: coalesce($.data.role_data, []).where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
data: {role_data: {get_attr: [ServiceChain, role_data]}}
- ServiceWorkflowTasks:
+ WorkflowTasks:
type: OS::Heat::Value
properties:
type: json
value:
yaql:
- expression: coalesce($.data.role_data, []).where($ != null).select($.get('service_workflow_tasks')).where($ != null).reduce($1.mergeWith($2), {})
+ expression: coalesce($.data.role_data, []).where($ != null).select($.get('workflow_tasks')).where($ != null).reduce($1.mergeWith($2), {})
data: {role_data: {get_attr: [ServiceChain, role_data]}}
UpgradeTasks:
@@ -193,6 +193,16 @@ resources:
expression: coalesce($.data, []).where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
+ PostUpgradeTasks:
+ type: OS::Heat::Value
+ properties:
+ type: comma_delimited_list
+ value:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
+ expression: coalesce($.data, []).where($ != null).select($.get('post_upgrade_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+
UpdateTasks:
type: OS::Heat::Value
properties:
@@ -260,9 +270,10 @@ outputs:
config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
global_config_settings: {get_attr: [GlobalConfigSettings, value]}
service_config_settings: {get_attr: [ServiceConfigSettings, value]}
- service_workflow_tasks: {get_attr: [ServiceWorkflowTasks, value]}
+ workflow_tasks: {get_attr: [WorkflowTasks, value]}
step_config: {get_attr: [PuppetStepConfig, value]}
upgrade_tasks: {get_attr: [UpgradeTasks, value]}
+ post_upgrade_tasks: {get_attr: [PostUpgradeTasks, value]}
update_tasks: {get_attr: [UpdateTasks, value]}
upgrade_batch_tasks: {get_attr: [UpgradeBatchTasks, value]}
service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
diff --git a/deployed-server/deployed-server-roles-data.yaml b/deployed-server/deployed-server-roles-data.yaml
index d4bc1df1..73dc9099 100644
--- a/deployed-server/deployed-server-roles-data.yaml
+++ b/deployed-server/deployed-server-roles-data.yaml
@@ -82,10 +82,12 @@
- OS::TripleO::Services::ManilaApi
- OS::TripleO::Services::ManilaScheduler
- OS::TripleO::Services::ManilaBackendGeneric
+ - OS::TripleO::Services::ManilaBackendIsilon
- OS::TripleO::Services::ManilaBackendNetapp
- OS::TripleO::Services::ManilaBackendUnity
- OS::TripleO::Services::ManilaBackendCephFs
- OS::TripleO::Services::ManilaBackendVNX
+ - OS::TripleO::Services::ManilaBackendVMAX
- OS::TripleO::Services::ManilaShare
- OS::TripleO::Services::AodhApi
- OS::TripleO::Services::AodhEvaluator
@@ -131,6 +133,7 @@
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::NeutronSriovAgent
+ - OS::TripleO::Services::NeutronSriovHostConfig
- OS::TripleO::Services::OpenDaylightOvs
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
index 4659cf53..cc247031 100755
--- a/docker/docker-puppet.py
+++ b/docker/docker-puppet.py
@@ -26,6 +26,7 @@ import sys
import subprocess
import sys
import tempfile
+import time
import multiprocessing
logger = None
@@ -59,10 +60,23 @@ def short_hostname():
def pull_image(name):
log.info('Pulling image: %s' % name)
- subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- cmd_stdout, cmd_stderr = subproc.communicate()
+ retval = -1
+ count = 0
+ while retval != 0:
+ count += 1
+ subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ cmd_stdout, cmd_stderr = subproc.communicate()
+ retval = subproc.returncode
+ if retval != 0:
+ time.sleep(3)
+ log.warning('docker pull failed: %s' % cmd_stderr)
+ log.warning('retrying pulling image: %s' % name)
+ if count >= 5:
+ log.error('Failed to pull image: %s' % name)
+ break
if cmd_stdout:
log.debug(cmd_stdout)
if cmd_stderr:
@@ -257,7 +271,7 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
'--volume', '%s:/etc/config.pp:ro' % tmp_man.name,
'--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',
'--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
- '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw',
+ '--volume', '%s:/var/lib/config-data/:rw' % os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data'),
'--volume', 'tripleo_logs:/var/log/tripleo/',
# Syslog socket for puppet logs
'--volume', '/dev/log:/dev/log',
@@ -366,6 +380,7 @@ for infile in infiles:
outfile = os.path.join(os.path.dirname(infile), "hashed-" + os.path.basename(infile))
with open(outfile, 'w') as out_f:
+ os.chmod(out_f.name, 0600)
json.dump(infile_data, out_f)
if not success:
diff --git a/docker/services/aodh-api.yaml b/docker/services/aodh-api.yaml
index da4b981c..49c5f9c5 100644
--- a/docker/services/aodh-api.yaml
+++ b/docker/services/aodh-api.yaml
@@ -102,7 +102,8 @@ outputs:
user: root
volumes:
- /var/log/containers/aodh:/var/log/aodh
- command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R aodh:aodh /var/log/aodh']
+ - /var/log/containers/httpd/aodh-api:/var/log/httpd
+ command: ['/bin/bash', '-c', 'chown -R aodh:aodh /var/log/aodh']
step_3:
aodh_db_sync:
image: *aodh_api_image
@@ -117,6 +118,7 @@ outputs:
- /var/lib/config-data/aodh/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro
- /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
- /var/log/containers/aodh:/var/log/aodh
+ - /var/log/containers/httpd/aodh-api:/var/log/httpd
command: "/usr/bin/bootstrap_host_exec aodh_api su aodh -s /bin/bash -c /usr/bin/aodh-dbsync"
step_4:
aodh_api:
@@ -131,6 +133,7 @@ outputs:
- /var/lib/kolla/config_files/aodh_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/aodh/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/aodh:/var/log/aodh
+ - /var/log/containers/httpd/aodh-api:/var/log/httpd
-
if:
- internal_tls_enabled
@@ -146,8 +149,11 @@ outputs:
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/aodh
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/aodh
+ - /var/log/containers/httpd/aodh-api
upgrade_tasks:
- name: Stop and disable aodh service (running under httpd)
tags: step2
diff --git a/docker/services/ceilometer-agent-central.yaml b/docker/services/ceilometer-agent-central.yaml
index 424c316f..d772462d 100644
--- a/docker/services/ceilometer-agent-central.yaml
+++ b/docker/services/ceilometer-agent-central.yaml
@@ -69,7 +69,7 @@ outputs:
config_image: {get_param: DockerCeilometerConfigImage}
kolla_config:
/var/lib/kolla/config_files/ceilometer_agent_central.json:
- command: /usr/bin/ceilometer-polling --polling-namespaces central
+ command: /usr/bin/ceilometer-polling --polling-namespaces central --logfile /var/log/ceilometer/central.log
config_files:
- source: "/var/lib/kolla/config_files/src/*"
dest: "/"
@@ -116,6 +116,11 @@ outputs:
- '/usr/bin/bootstrap_host_exec'
- 'ceilometer_agent_central'
- "su ceilometer -s /bin/bash -c 'for n in {1..10}; do /usr/bin/ceilometer-upgrade --skip-metering-database && exit 0 || sleep 5; done; exit 1'"
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/ceilometer
+ state: directory
upgrade_tasks:
- name: Stop and disable ceilometer agent central service
tags: step2
diff --git a/docker/services/ceilometer-agent-compute.yaml b/docker/services/ceilometer-agent-compute.yaml
index 535b1693..90b30d46 100644
--- a/docker/services/ceilometer-agent-compute.yaml
+++ b/docker/services/ceilometer-agent-compute.yaml
@@ -69,7 +69,7 @@ outputs:
config_image: {get_param: DockerCeilometerConfigImage}
kolla_config:
/var/lib/kolla/config_files/ceilometer_agent_compute.json:
- command: /usr/bin/ceilometer-polling --polling-namespaces compute
+ command: /usr/bin/ceilometer-polling --polling-namespaces compute --logfile /var/log/ceilometer/compute.log
config_files:
- source: "/var/lib/kolla/config_files/src/*"
dest: "/"
@@ -89,8 +89,14 @@ outputs:
- /var/lib/kolla/config_files/ceilometer_agent_compute.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/ceilometer/:/var/lib/kolla/config_files/src:ro
- /var/run/libvirt:/var/run/libvirt:ro
+ - /var/log/containers/ceilometer:/var/log/ceilometer
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/ceilometer
+ state: directory
upgrade_tasks:
- name: Check if openstack-ceilometer-compute is deployed
command: systemctl is-enabled openstack-ceilometer-compute
@@ -99,14 +105,14 @@ outputs:
register: openstack_ceilometer_compute_enabled
- name: Check if openstack-ceilometer-polling is deployed
command: systemctl is-enabled openstack-ceilometer-polling
- tags: step2
+ tags: step2
ignore_errors: True
register: openstack_ceilometer_polling_enabled
- name: Stop and disable ceilometer compute agent
tags: step2
service: name=openstack-ceilometer-compute state=stopped enabled=no
- when: openstack_ceilometer_compute_enabled.rc == 0
+ when: openstack_ceilometer_compute_enabled.rc|default('') == 0
- name: Stop and disable ceilometer polling agent
tags: step2
service: name=openstack-ceilometer-polling state=stopped enabled=no
- when: openstack_ceilometer_polling_enabled.rc == 0
+ when: openstack_ceilometer_polling_enabled.rc|default('') == 0
diff --git a/docker/services/ceilometer-agent-notification.yaml b/docker/services/ceilometer-agent-notification.yaml
index 7f1d442a..891750ad 100644
--- a/docker/services/ceilometer-agent-notification.yaml
+++ b/docker/services/ceilometer-agent-notification.yaml
@@ -69,12 +69,20 @@ outputs:
config_image: {get_param: DockerCeilometerConfigImage}
kolla_config:
/var/lib/kolla/config_files/ceilometer_agent_notification.json:
- command: /usr/bin/ceilometer-agent-notification
+ command: /usr/bin/ceilometer-agent-notification --logfile /var/log/ceilometer/agent-notification.log
config_files:
- source: "/var/lib/kolla/config_files/src/*"
dest: "/"
merge: true
preserve_properties: true
+ - source: "/var/lib/kolla/config_files/src-panko/*"
+ dest: "/"
+ merge: true
+ preserve_properties: true
+ permissions:
+ - path: /etc/panko
+ owner: root:ceilometer
+ recurse: true
docker_config:
step_3:
ceilometer_init_log:
@@ -96,8 +104,15 @@ outputs:
-
- /var/lib/kolla/config_files/ceilometer_agent_notification.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/ceilometer/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/puppet-generated/panko/:/var/lib/kolla/config_files/src-panko:ro
+ - /var/log/containers/ceilometer:/var/log/ceilometer
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/ceilometer
+ state: directory
upgrade_tasks:
- name: Stop and disable ceilometer agent notification service
tags: step2
diff --git a/docker/services/ceph-ansible/ceph-base.yaml b/docker/services/ceph-ansible/ceph-base.yaml
index bf2c86c4..b60f98c2 100644
--- a/docker/services/ceph-ansible/ceph-base.yaml
+++ b/docker/services/ceph-ansible/ceph-base.yaml
@@ -58,13 +58,17 @@ parameters:
type: string
description: List of ceph-ansible tags to skip
default: 'package-install,with_pkg'
+ CephConfigOverrides:
+ type: json
+ description: Extra config settings to dump into ceph.conf
+ default: {}
CephClusterFSID:
type: string
description: The Ceph cluster FSID. Must be a UUID.
CephPoolDefaultPgNum:
description: default pg_num to use for the RBD pools
type: number
- default: 32
+ default: 128
CephPools:
description: >
It can be used to override settings for one of the predefined pools, or to create
@@ -100,6 +104,14 @@ parameters:
CephClientUserName:
default: openstack
type: string
+ CephRgwClientName:
+ default: radosgw
+ type: string
+ CephRgwKey:
+ description: The cephx key for the radosgw client. Can be created
+ with ceph-authtool --gen-print-key.
+ type: string
+ hidden: true
CephPoolDefaultSize:
description: default minimum replication for RBD copies
type: number
@@ -115,10 +127,13 @@ parameters:
CephIPv6:
default: False
type: boolean
+ SwiftPassword:
+ description: The password for the swift service account
+ type: string
+ hidden: true
DockerCephDaemonImage:
description: image
type: string
- default: 'ceph/daemon:tag-build-master-jewel-centos-7'
conditions:
custom_registry_host:
@@ -167,7 +182,7 @@ outputs:
config_volume: ''
step_config: ''
docker_config: {}
- service_workflow_tasks:
+ workflow_tasks:
step2:
- name: ceph_base_ansible_workflow
workflow: { get_param: CephAnsibleWorkflowName }
@@ -241,16 +256,36 @@ outputs:
- - client
- {get_param: ManilaCephFSNativeCephFSAuthId}
key: {get_param: CephManilaClientKey}
- mon_cap: "allow r, allow command auth del, allow command auth caps, allow command auth get, allow command auth get-or-create"
+ mon_cap: 'allow r, allow command \\\"auth del\\\", allow command \\\"auth caps\\\", allow command \\\"auth get\\\", allow command \\\"auth get-or-create\\\"'
mds_cap: "allow *"
osd_cap: "allow rw"
mode: "0644"
+ - name:
+ list_join:
+ - '.'
+ - - client
+ - {get_param: CephRgwClientName}
+ key: {get_param: CephRgwKey}
+ mon_cap: "allow rw"
+ osd_cap: "allow rwx"
+ mode: "0644"
keys: *openstack_keys
pools: []
ceph_conf_overrides:
- global:
- osd_pool_default_size: {get_param: CephPoolDefaultSize}
- osd_pool_default_pg_num: {get_param: CephPoolDefaultPgNum}
+ map_merge:
+ - global:
+ osd_pool_default_size: {get_param: CephPoolDefaultSize}
+ osd_pool_default_pg_num: {get_param: CephPoolDefaultPgNum}
+ osd_pool_default_pgp_num: {get_param: CephPoolDefaultPgNum}
+ rgw_keystone_api_version: 3
+ rgw_keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ rgw_keystone_accepted_roles: 'Member, _member_, admin'
+ rgw_keystone_admin_domain: default
+ rgw_keystone_admin_project: service
+ rgw_keystone_admin_user: swift
+ rgw_keystone_admin_password: {get_param: SwiftPassword}
+ rgw_s3_auth_use_keystone: 'true'
+ - {get_param: CephConfigOverrides}
ntp_service_enabled: false
generate_fsid: false
ip_version:
diff --git a/docker/services/ceph-ansible/ceph-client.yaml b/docker/services/ceph-ansible/ceph-client.yaml
index 55d8d9da..0b782941 100644
--- a/docker/services/ceph-ansible/ceph-client.yaml
+++ b/docker/services/ceph-ansible/ceph-client.yaml
@@ -54,5 +54,5 @@ outputs:
config_volume: ''
step_config: ''
docker_config: {}
- service_workflow_tasks: {get_attr: [CephBase, role_data, service_workflow_tasks]}
+ workflow_tasks: {get_attr: [CephBase, role_data, workflow_tasks]}
config_settings: {}
diff --git a/docker/services/ceph-ansible/ceph-external.yaml b/docker/services/ceph-ansible/ceph-external.yaml
new file mode 100644
index 00000000..bb2fc20a
--- /dev/null
+++ b/docker/services/ceph-ansible/ceph-external.yaml
@@ -0,0 +1,66 @@
+heat_template_version: pike
+
+description: >
+ Ceph External service.
+
+parameters:
+ ServiceData:
+ default: {}
+ description: Dictionary packing service data
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ CephExternalMonHost:
+ default: ''
+ type: string
+ description: List of externally managed Ceph Mon Host IPs. Only used for external Ceph deployments.
+
+resources:
+ CephBase:
+ type: ./ceph-base.yaml
+ properties:
+ ServiceData: {get_param: ServiceData}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Ceph External service.
+ value:
+ service_name: ceph_client
+ upgrade_tasks: []
+ step_config: ''
+ puppet_config:
+ config_image: ''
+ config_volume: ''
+ step_config: ''
+ docker_config: {}
+ workflow_tasks: {get_attr: [CephBase, role_data, workflow_tasks]}
+ config_settings:
+ ceph_client_ansible_vars:
+ map_merge:
+ - {get_attr: [CephBase, role_data, config_settings, ceph_common_ansible_vars]}
+        - external_cluster_mon_ips: {get_param: CephExternalMonHost}
\ No newline at end of file
diff --git a/docker/services/ceph-ansible/ceph-mds.yaml b/docker/services/ceph-ansible/ceph-mds.yaml
index 4ef3a669..abdb3c3f 100644
--- a/docker/services/ceph-ansible/ceph-mds.yaml
+++ b/docker/services/ceph-ansible/ceph-mds.yaml
@@ -68,7 +68,7 @@ outputs:
config_volume: ''
step_config: ''
docker_config: {}
- service_workflow_tasks: {get_attr: [CephBase, role_data, service_workflow_tasks]}
+ workflow_tasks: {get_attr: [CephBase, role_data, workflow_tasks]}
config_settings:
map_merge:
- tripleo.ceph_mds.firewall_rules:
diff --git a/docker/services/ceph-ansible/ceph-mon.yaml b/docker/services/ceph-ansible/ceph-mon.yaml
index 90149d1e..45f939c2 100644
--- a/docker/services/ceph-ansible/ceph-mon.yaml
+++ b/docker/services/ceph-ansible/ceph-mon.yaml
@@ -71,7 +71,7 @@ outputs:
config_volume: ''
step_config: ''
docker_config: {}
- service_workflow_tasks: {get_attr: [CephBase, role_data, service_workflow_tasks]}
+ workflow_tasks: {get_attr: [CephBase, role_data, workflow_tasks]}
config_settings:
map_merge:
- tripleo.ceph_mon.firewall_rules:
diff --git a/docker/services/ceph-ansible/ceph-osd.yaml b/docker/services/ceph-ansible/ceph-osd.yaml
index 6e0f4a60..a441f5c9 100644
--- a/docker/services/ceph-ansible/ceph-osd.yaml
+++ b/docker/services/ceph-ansible/ceph-osd.yaml
@@ -38,6 +38,7 @@ parameters:
- /dev/vdb
journal_size: 512
journal_collocation: true
+ osd_scenario: collocated
resources:
CephBase:
@@ -62,7 +63,7 @@ outputs:
config_volume: ''
step_config: ''
docker_config: {}
- service_workflow_tasks: {get_attr: [CephBase, role_data, service_workflow_tasks]}
+ workflow_tasks: {get_attr: [CephBase, role_data, workflow_tasks]}
config_settings:
map_merge:
- tripleo.ceph_osd.firewall_rules:
@@ -72,4 +73,5 @@ outputs:
- ceph_osd_ansible_vars:
map_merge:
- {get_attr: [CephBase, role_data, config_settings, ceph_common_ansible_vars]}
+ - osd_objectstore: filestore
          - {get_param: CephAnsibleDisksConfig}
\ No newline at end of file
diff --git a/docker/services/ceph-ansible/ceph-rgw.yaml b/docker/services/ceph-ansible/ceph-rgw.yaml
new file mode 100644
index 00000000..4479fdbf
--- /dev/null
+++ b/docker/services/ceph-ansible/ceph-rgw.yaml
@@ -0,0 +1,87 @@
+heat_template_version: pike
+
+description: >
+ Ceph RadosGW service.
+
+parameters:
+ ServiceData:
+ default: {}
+ description: Dictionary packing service data
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ SwiftPassword:
+ description: The password for the swift service account
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+
+resources:
+ CephBase:
+ type: ./ceph-base.yaml
+ properties:
+ ServiceData: {get_param: ServiceData}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Ceph RadosGW service.
+ value:
+ service_name: ceph_rgw
+ upgrade_tasks: []
+ step_config: ''
+ puppet_config:
+ config_image: ''
+ config_volume: ''
+ step_config: ''
+ docker_config: {}
+ workflow_tasks: {get_attr: [CephBase, role_data, workflow_tasks]}
+ config_settings:
+ map_merge:
+ - tripleo.ceph_rgw.firewall_rules:
+ '122 ceph rgw':
+ dport: {get_param: [EndpointMap, CephRgwInternal, port]}
+ - ceph_rgw_ansible_vars:
+ map_merge:
+ - {get_attr: [CephBase, role_data, config_settings, ceph_common_ansible_vars]}
+ - radosgw_keystone: true
+ radosgw_keystone_ssl: false
+ radosgw_address_block: {get_param: [ServiceData, net_cidr_map, {get_param: [ServiceNetMap, CephRgwNetwork]}]}
+ radosgw_civetweb_port: {get_param: [EndpointMap, CephRgwInternal, port]}
+ service_config_settings:
+ keystone:
+ ceph::rgw::keystone::auth::public_url: {get_param: [EndpointMap, CephRgwPublic, uri]}
+ ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]}
+ ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]}
+ ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion}
+ ceph::rgw::keystone::auth::roles: [ 'admin', 'Member', '_member_' ]
+ ceph::rgw::keystone::auth::tenant: service
+ ceph::rgw::keystone::auth::user: swift
+ ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
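In the RGW template above, radosgw_address_block is resolved through a nested get_param: the role's CephRgwNetwork entry in ServiceNetMap selects a key in ServiceData's net_cidr_map. A sketch with assumed values, purely to illustrate the lookup:

    ServiceNetMap:
      CephRgwNetwork: storage            # assumed network assignment
    ServiceData:
      net_cidr_map:
        storage: 172.16.1.0/24           # assumed CIDR for that network
    # radosgw_address_block then resolves to 172.16.1.0/24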
diff --git a/docker/services/cinder-api.yaml b/docker/services/cinder-api.yaml
index 519b2328..06705309 100644
--- a/docker/services/cinder-api.yaml
+++ b/docker/services/cinder-api.yaml
@@ -111,6 +111,7 @@ outputs:
user: root
volumes:
- /var/log/containers/cinder:/var/log/cinder
+ - /var/log/containers/httpd/cinder-api:/var/log/httpd
command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
step_3:
cinder_api_db_sync:
@@ -125,6 +126,7 @@ outputs:
-
- /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
- /var/log/containers/cinder:/var/log/cinder
+ - /var/log/containers/httpd/cinder-api:/var/log/httpd
command:
- '/usr/bin/bootstrap_host_exec'
- 'cinder_api'
@@ -145,6 +147,7 @@ outputs:
- /var/lib/kolla/config_files/cinder_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/cinder/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/cinder:/var/log/cinder
+ - /var/log/containers/httpd/cinder-api:/var/log/httpd
-
if:
- internal_tls_enabled
@@ -163,6 +166,8 @@ outputs:
user: root
privileged: false
restart: always
+ healthcheck:
+ test: /bin/true
volumes:
list_concat:
- {get_attr: [ContainersCommon, volumes]}
@@ -170,6 +175,7 @@ outputs:
- /var/lib/kolla/config_files/cinder_api_cron.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/cinder/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/cinder:/var/log/cinder
+ - /var/log/containers/httpd/cinder-api:/var/log/httpd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
@@ -178,8 +184,11 @@ outputs:
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/cinder
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/cinder
+ - /var/log/containers/httpd/cinder-api
upgrade_tasks:
- name: Stop and disable cinder_api service
tags: step2
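The cinder-api change above also introduces the host_prep_tasks shape that repeats in most of the API services in this patch: the single log directory becomes a with_items loop over the service directory and a new per-service httpd log directory. The generic form, sketched with <service> as a placeholder:

    - name: create persistent logs directory
      file:
        path: "{{ item }}"
        state: directory
      with_items:
        - /var/log/containers/<service>
        - /var/log/containers/httpd/<service>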
diff --git a/docker/services/containers-common.yaml b/docker/services/containers-common.yaml
index 2c894da5..9f982f8b 100644
--- a/docker/services/containers-common.yaml
+++ b/docker/services/containers-common.yaml
@@ -64,6 +64,7 @@ outputs:
# Syslog socket
- /dev/log:/dev/log
- /etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro
+ - /sys/fs/selinux:/sys/fs/selinux
- if:
- internal_tls_enabled
- - list_join:
diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml
index 9b5c5b8f..5cf6f925 100644
--- a/docker/services/database/mongodb.yaml
+++ b/docker/services/database/mongodb.yaml
@@ -157,6 +157,11 @@ outputs:
metadata_settings:
get_attr: [MongodbPuppetBase, role_data, metadata_settings]
upgrade_tasks:
+ - name: Check for mongodb service
+ stat: path=/usr/lib/systemd/system/mongod.service
+ tags: common
+ register: mongod_service
- name: Stop and disable mongodb service
tags: step2
service: name=mongod state=stopped enabled=no
+ when: mongod_service.stat.exists
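The mongodb upgrade task now stats the mongod unit file first, so the stop/disable step is skipped on nodes that never had the package installed (for example, fresh containerized deployments). The same guard, sketched for a generic service:

    - name: Check for example service
      stat: path=/usr/lib/systemd/system/example.service
      tags: common
      register: example_service
    - name: Stop and disable example service
      tags: step2
      service: name=example state=stopped enabled=no
      when: example_service.stat.exists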
diff --git a/docker/services/database/redis.yaml b/docker/services/database/redis.yaml
index 980a8c6d..487b4c67 100644
--- a/docker/services/database/redis.yaml
+++ b/docker/services/database/redis.yaml
@@ -36,9 +36,19 @@ parameters:
default: {}
description: Parameters specific to the role
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
+ ContainersCommon:
+ type: ../containers-common.yaml
+
RedisBase:
type: ../../../puppet/services/database/redis.yaml
properties:
@@ -56,6 +66,8 @@ outputs:
map_merge:
- {get_attr: [RedisBase, role_data, config_settings]}
- redis::daemonize: false
+ tripleo::stunnel::manage_service: false
+ tripleo::stunnel::foreground: 'yes'
step_config: &step_config
get_attr: [RedisBase, role_data, step_config]
service_config_settings: {get_attr: [RedisBase, role_data, service_config_settings]}
@@ -80,31 +92,60 @@ outputs:
- path: /var/run/redis
owner: redis:redis
recurse: true
+ /var/lib/kolla/config_files/redis_tls_proxy.json:
+ command: stunnel /etc/stunnel/stunnel.conf
+ config_files:
+ - source: "/var/lib/kolla/config_files/src/*"
+ dest: "/"
+ merge: true
+ preserve_properties: true
docker_config:
step_1:
- redis_init_logs:
- start_order: 0
- detach: false
- image: &redis_image {get_param: DockerRedisImage}
- privileged: false
- user: root
- volumes:
- - /var/log/containers/redis:/var/log/redis
- command: ['/bin/bash', '-c', 'chown -R redis:redis /var/log/redis']
- redis:
- start_order: 1
- image: *redis_image
- net: host
- privileged: false
- restart: always
- volumes:
- - /run:/run
- - /var/lib/kolla/config_files/redis.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/puppet-generated/redis/:/var/lib/kolla/config_files/src:ro
- - /etc/localtime:/etc/localtime:ro
- - /var/log/containers/redis:/var/log/redis
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ map_merge:
+ - redis_init_logs:
+ start_order: 0
+ detach: false
+ image: &redis_image {get_param: DockerRedisImage}
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/redis:/var/log/redis
+ command: ['/bin/bash', '-c', 'chown -R redis:redis /var/log/redis']
+ - redis:
+ start_order: 1
+ image: *redis_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/redis.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/puppet-generated/redis/:/var/lib/kolla/config_files/src:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /var/log/containers/redis:/var/log/redis
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - if:
+ - internal_tls_enabled
+ - redis_tls_proxy:
+ start_order: 2
+ image: *redis_image
+ net: host
+ user: root
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/redis_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/puppet-generated/redis/:/var/lib/kolla/config_files/src:ro
+ - /etc/pki/tls/certs/redis.crt:/etc/pki/tls/certs/redis.crt:ro
+ - /etc/pki/tls/private/redis.key:/etc/pki/tls/private/redis.key:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - {}
+ metadata_settings:
+ get_attr: [RedisBase, role_data, metadata_settings]
host_prep_tasks:
- name: create persistent directories
file:
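The non-pacemaker redis template now wraps its step_1 containers in map_merge so the stunnel TLS proxy is only added when EnableInternalTLS is set. A condensed sketch of the pattern (image names are placeholders):

    docker_config:
      step_1:
        map_merge:
          - redis:                          # always started
              image: redis-image            # placeholder
          - if:
            - internal_tls_enabled          # condition defined above
            - redis_tls_proxy:              # only when internal TLS is enabled
                image: redis-image          # placeholder
            - {}                            # otherwise merge nothing extra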
diff --git a/docker/services/glance-api.yaml b/docker/services/glance-api.yaml
index df226b15..8f2bd604 100644
--- a/docker/services/glance-api.yaml
+++ b/docker/services/glance-api.yaml
@@ -118,6 +118,7 @@ outputs:
user: root
volumes:
- /var/log/containers/glance:/var/log/glance
+ - /var/log/containers/httpd/glance-api:/var/log/httpd
command: ['/bin/bash', '-c', 'chown -R glance:glance /var/log/glance']
step_3:
glance_api_db_sync:
@@ -133,6 +134,7 @@ outputs:
- /var/lib/kolla/config_files/glance_api.json:/var/lib/kolla/config_files/config.json
- /var/lib/config-data/puppet-generated/glance_api/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/glance:/var/log/glance
+ - /var/log/containers/httpd/glance-api:/var/log/httpd
- /etc/ceph:/var/lib/kolla/config_files/src-ceph:ro
-
if:
@@ -176,8 +178,11 @@ outputs:
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/glance
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/glance
+ - /var/log/containers/httpd/glance-api
- name: ensure ceph configurations exist
file:
path: /etc/ceph
diff --git a/docker/services/gnocchi-api.yaml b/docker/services/gnocchi-api.yaml
index 1443da40..47b3b811 100644
--- a/docker/services/gnocchi-api.yaml
+++ b/docker/services/gnocchi-api.yaml
@@ -104,7 +104,8 @@ outputs:
user: root
volumes:
- /var/log/containers/gnocchi:/var/log/gnocchi
- command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R gnocchi:gnocchi /var/log/gnocchi']
+ - /var/log/containers/httpd/gnocchi-api:/var/log/httpd
+ command: ['/bin/bash', '-c', 'chown -R gnocchi:gnocchi /var/log/gnocchi']
step_4:
gnocchi_db_sync:
image: *gnocchi_api_image
@@ -119,6 +120,7 @@ outputs:
- /var/lib/config-data/gnocchi/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro
- /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
- /var/log/containers/gnocchi:/var/log/gnocchi
+ - /var/log/containers/httpd/gnocchi-api:/var/log/httpd
- /etc/ceph:/etc/ceph:ro
command:
str_replace:
@@ -138,6 +140,7 @@ outputs:
- /var/lib/kolla/config_files/gnocchi_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/gnocchi/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/gnocchi:/var/log/gnocchi
+ - /var/log/containers/httpd/gnocchi-api:/var/log/httpd
- /etc/ceph:/var/lib/kolla/config_files/src-ceph:ro
-
if:
@@ -154,8 +157,11 @@ outputs:
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/gnocchi
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/gnocchi
+ - /var/log/containers/httpd/gnocchi-api
- name: ensure ceph configurations exist
file:
path: /etc/ceph
diff --git a/docker/services/gnocchi-metricd.yaml b/docker/services/gnocchi-metricd.yaml
index 5a6958a0..9a114458 100644
--- a/docker/services/gnocchi-metricd.yaml
+++ b/docker/services/gnocchi-metricd.yaml
@@ -90,7 +90,7 @@ outputs:
owner: gnocchi:gnocchi
recurse: true
docker_config:
- step_4:
+ step_5:
gnocchi_metricd:
image: {get_param: DockerGnocchiMetricdImage}
net: host
diff --git a/docker/services/gnocchi-statsd.yaml b/docker/services/gnocchi-statsd.yaml
index 2957312b..834d0055 100644
--- a/docker/services/gnocchi-statsd.yaml
+++ b/docker/services/gnocchi-statsd.yaml
@@ -90,7 +90,7 @@ outputs:
owner: gnocchi:gnocchi
recurse: true
docker_config:
- step_4:
+ step_5:
gnocchi_statsd:
image: {get_param: DockerGnocchiStatsdImage}
net: host
diff --git a/docker/services/haproxy.yaml b/docker/services/haproxy.yaml
index f0e2f71d..70e1f893 100644
--- a/docker/services/haproxy.yaml
+++ b/docker/services/haproxy.yaml
@@ -96,8 +96,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [HAProxyBase, role_data, config_settings]
- - tripleo::haproxy::haproxy_daemon: false
- tripleo::haproxy::haproxy_service_manage: false
+ - tripleo::haproxy::haproxy_service_manage: false
# NOTE(jaosorior): We disable the CRL since we have no way to restart haproxy
# when this is updated
tripleo::haproxy::crl_file: null
@@ -130,7 +129,7 @@ outputs:
- null
kolla_config:
/var/lib/kolla/config_files/haproxy.json:
- command: haproxy -f /etc/haproxy/haproxy.cfg
+ command: /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg
config_files:
- source: "/var/lib/kolla/config_files/src/*"
dest: "/"
diff --git a/docker/services/heat-api-cfn.yaml b/docker/services/heat-api-cfn.yaml
index 70612899..cfe11cd6 100644
--- a/docker/services/heat-api-cfn.yaml
+++ b/docker/services/heat-api-cfn.yaml
@@ -107,6 +107,7 @@ outputs:
- /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/heat_api_cfn/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/heat:/var/log/heat
+ - /var/log/containers/httpd/heat-api-cfn:/var/log/httpd
-
if:
- internal_tls_enabled
@@ -122,8 +123,11 @@ outputs:
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/heat
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/heat
+ - /var/log/containers/httpd/heat-api-cfn
upgrade_tasks:
- name: Check if heat_api_cfn is deployed
command: systemctl is-enabled openstack-heat-api-cfn
diff --git a/docker/services/heat-api.yaml b/docker/services/heat-api.yaml
index 54c7bedd..2bb588de 100644
--- a/docker/services/heat-api.yaml
+++ b/docker/services/heat-api.yaml
@@ -118,6 +118,7 @@ outputs:
- /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/heat_api/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/heat:/var/log/heat
+ - /var/log/containers/httpd/heat-api:/var/log/httpd
-
if:
- internal_tls_enabled
@@ -136,6 +137,8 @@ outputs:
user: root
privileged: false
restart: always
+ healthcheck:
+ test: /bin/true
volumes:
list_concat:
- {get_attr: [ContainersCommon, volumes]}
@@ -143,13 +146,17 @@ outputs:
- /var/lib/kolla/config_files/heat_api_cron.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/heat_api/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/heat:/var/log/heat
+ - /var/log/containers/httpd/heat-api:/var/log/httpd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/heat
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/heat
+ - /var/log/containers/httpd/heat-api
upgrade_tasks:
- name: Check if heat_api is deployed
command: systemctl is-enabled openstack-heat-api
diff --git a/docker/services/horizon.yaml b/docker/services/horizon.yaml
index f2f2b8dc..9a2c8bad 100644
--- a/docker/services/horizon.yaml
+++ b/docker/services/horizon.yaml
@@ -110,6 +110,7 @@ outputs:
command: ['/bin/bash', '-c', 'touch /var/log/horizon/horizon.log && chown -R apache:apache /var/log/horizon && chmod -R a+rx /etc/openstack-dashboard']
volumes:
- /var/log/containers/horizon:/var/log/horizon
+ - /var/log/containers/httpd/horizon:/var/log/httpd
- /var/lib/config-data/horizon/etc/:/etc/
step_3:
horizon:
@@ -124,6 +125,7 @@ outputs:
- /var/lib/kolla/config_files/horizon.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/horizon/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/horizon:/var/log/horizon
+ - /var/log/containers/httpd/horizon:/var/log/httpd
-
if:
- internal_tls_enabled
@@ -139,8 +141,11 @@ outputs:
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/horizon
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/horizon
+ - /var/log/containers/httpd/horizon
upgrade_tasks:
- name: Stop and disable horizon service (running under httpd)
tags: step2
diff --git a/docker/services/ironic-api.yaml b/docker/services/ironic-api.yaml
index 2a9735b5..38710f3b 100644
--- a/docker/services/ironic-api.yaml
+++ b/docker/services/ironic-api.yaml
@@ -97,6 +97,7 @@ outputs:
user: root
volumes:
- /var/log/containers/ironic:/var/log/ironic
+ - /var/log/containers/httpd/ironic-api:/var/log/httpd
command: ['/bin/bash', '-c', 'chown -R ironic:ironic /var/log/ironic']
step_3:
ironic_db_sync:
@@ -112,6 +113,7 @@ outputs:
-
- /var/lib/config-data/ironic_api/etc/ironic:/etc/ironic:ro
- /var/log/containers/ironic:/var/log/ironic
+ - /var/log/containers/httpd/ironic-api:/var/log/httpd
command: "/usr/bin/bootstrap_host_exec ironic_api su ironic -s /bin/bash -c 'ironic-dbsync --config-file /etc/ironic/ironic.conf'"
step_4:
ironic_api:
@@ -127,13 +129,17 @@ outputs:
- /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/ironic_api/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/ironic:/var/log/ironic
+ - /var/log/containers/httpd/ironic-api:/var/log/httpd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/ironic
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/ironic
+ - /var/log/containers/httpd/ironic-api
upgrade_tasks:
- name: Stop and disable ironic_api service
tags: step2
diff --git a/docker/services/ironic-pxe.yaml b/docker/services/ironic-pxe.yaml
index 48d2e1ee..878eef63 100644
--- a/docker/services/ironic-pxe.yaml
+++ b/docker/services/ironic-pxe.yaml
@@ -92,6 +92,7 @@ outputs:
- /var/lib/ironic:/var/lib/ironic/
- /dev/log:/dev/log
- /var/log/containers/ironic:/var/log/ironic
+ - /var/log/containers/httpd/ironic-pxe:/var/log/httpd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
ironic_pxe_http:
@@ -108,6 +109,7 @@ outputs:
- /var/lib/config-data/puppet-generated/ironic/:/var/lib/kolla/config_files/src:ro
- /var/lib/ironic:/var/lib/ironic/
- /var/log/containers/ironic:/var/log/ironic
+ - /var/log/containers/httpd/ironic-pxe:/var/log/httpd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
host_prep_tasks:
@@ -118,3 +120,4 @@ outputs:
with_items:
- /var/lib/ironic
- /var/log/containers/ironic
+ - /var/log/containers/httpd/ironic-pxe
diff --git a/docker/services/iscsid.yaml b/docker/services/iscsid.yaml
index 80519800..c34a59d5 100644
--- a/docker/services/iscsid.yaml
+++ b/docker/services/iscsid.yaml
@@ -109,7 +109,7 @@ outputs:
- name: Stop and disable iscsid service
tags: step2
service: name=iscsid state=stopped enabled=no
- when: stat_iscsid_service.stat.exists
+ when: (stat_iscsid_service.stat|default('')).exists|default(false)
- name: stat /lib/systemd/system/iscsid.socket
tags: step2
stat: path=/lib/systemd/system/iscsid.socket
@@ -117,4 +117,4 @@ outputs:
- name: Stop and disable iscsid.socket service
tags: step2
service: name=iscsid.socket state=stopped enabled=no
- when: stat_iscsid_socket.stat.exists
+ when: (stat_iscsid_socket.stat|default('')).exists|default(false)
diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml
index 4c2c1d16..8f4a2014 100644
--- a/docker/services/keystone.yaml
+++ b/docker/services/keystone.yaml
@@ -121,9 +121,10 @@ outputs:
keystone_init_log:
image: &keystone_image {get_param: DockerKeystoneImage}
user: root
- command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R keystone:keystone /var/log/keystone']
+ command: ['/bin/bash', '-c', 'chown -R keystone:keystone /var/log/keystone']
volumes:
- /var/log/containers/keystone:/var/log/keystone
+ - /var/log/containers/httpd/keystone:/var/log/httpd
step_3:
keystone_db_sync:
image: *keystone_image
@@ -138,6 +139,7 @@ outputs:
- /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/keystone/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/keystone:/var/log/keystone
+ - /var/log/containers/httpd/keystone:/var/log/httpd
-
if:
- internal_tls_enabled
@@ -175,6 +177,8 @@ outputs:
privileged: false
restart: always
command: ['/bin/bash', '-c', '/usr/local/bin/kolla_set_configs && /usr/sbin/crond -n']
+ healthcheck:
+ test: /bin/true
volumes:
list_concat:
- {get_attr: [ContainersCommon, volumes]}
@@ -182,6 +186,7 @@ outputs:
- /var/lib/kolla/config_files/keystone_cron.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/keystone/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/keystone:/var/log/keystone
+ - /var/log/containers/httpd/keystone:/var/log/httpd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
docker_puppet_tasks:
@@ -194,8 +199,11 @@ outputs:
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/keystone
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/keystone
+ - /var/log/containers/httpd/keystone
upgrade_tasks:
- name: Stop and disable keystone service (running under httpd)
tags: step2
diff --git a/docker/services/manila-api.yaml b/docker/services/manila-api.yaml
index 7b2dbfaf..a0e501ec 100644
--- a/docker/services/manila-api.yaml
+++ b/docker/services/manila-api.yaml
@@ -90,7 +90,8 @@ outputs:
user: root
volumes:
- /var/log/containers/manila:/var/log/manila
- command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R manila:manila /var/log/manila']
+ - /var/log/containers/httpd/manila-api:/var/log/httpd
+ command: ['/bin/bash', '-c', 'chown -R manila:manila /var/log/manila']
step_3:
manila_api_db_sync:
user: root
@@ -103,6 +104,7 @@ outputs:
-
- /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
- /var/log/containers/manila:/var/log/manila
+ - /var/log/containers/httpd/manila-api:/var/log/httpd
command: "/usr/bin/bootstrap_host_exec manila_api su manila -s /bin/bash -c '/usr/bin/manila-manage db sync'"
step_4:
manila_api:
@@ -116,13 +118,17 @@ outputs:
- /var/lib/kolla/config_files/manila_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/manila/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/manila:/var/log/manila
+ - /var/log/containers/httpd/manila-api:/var/log/httpd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
host_prep_tasks:
- name: Create persistent manila logs directory
file:
- path: /var/log/containers/manila
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/manila
+ - /var/log/containers/httpd/manila-api
upgrade_tasks:
- name: Stop and disable manila_api service
tags: step2
diff --git a/docker/services/neutron-api.yaml b/docker/services/neutron-api.yaml
index 85a07128..c028fc28 100644
--- a/docker/services/neutron-api.yaml
+++ b/docker/services/neutron-api.yaml
@@ -110,6 +110,7 @@ outputs:
user: root
volumes:
- /var/log/containers/neutron:/var/log/neutron
+ - /var/log/containers/httpd/neutron-api:/var/log/httpd
command: ['/bin/bash', '-c', 'chown -R neutron:neutron /var/log/neutron']
step_3:
neutron_db_sync:
@@ -126,6 +127,7 @@ outputs:
- /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro
- /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro
- /var/log/containers/neutron:/var/log/neutron
+ - /var/log/containers/httpd/neutron-api:/var/log/httpd
command: ['/usr/bin/bootstrap_host_exec', 'neutron_api', 'neutron-db-manage', 'upgrade', 'heads']
# FIXME: we should make config file permissions right
# and run as neutron user
@@ -144,6 +146,7 @@ outputs:
- /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/neutron/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/neutron:/var/log/neutron
+ - /var/log/containers/httpd/neutron-api:/var/log/httpd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- if:
@@ -167,8 +170,11 @@ outputs:
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/neutron
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/neutron
+ - /var/log/containers/httpd/neutron-api
upgrade_tasks:
- name: Check if neutron_server is deployed
command: systemctl is-enabled neutron-server
diff --git a/docker/services/neutron-sriov-agent.yaml b/docker/services/neutron-sriov-agent.yaml
new file mode 100644
index 00000000..a9914987
--- /dev/null
+++ b/docker/services/neutron-sriov-agent.yaml
@@ -0,0 +1,108 @@
+heat_template_version: pike
+
+description: >
+ OpenStack Neutron SR-IOV service
+
+parameters:
+ DockerNeutronSriovImage:
+ description: The container image to use for the Neutron SR-IOV agent
+ type: string
+ DockerNeutronConfigImage:
+ description: The container image to use for the neutron config_volume
+ type: string
+ ServiceData:
+ default: {}
+ description: Dictionary packing service data
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ NeutronSriovAgentBase:
+ type: ../../puppet/services/neutron-sriov-agent.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceData: {get_param: ServiceData}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for Neutron sriov service
+ value:
+ service_name: {get_attr: [NeutronSriovAgentBase, role_data, service_name]}
+ config_settings: {get_attr: [NeutronSriovAgentBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [NeutronSriovAgentBase, role_data, step_config]
+ puppet_config:
+ config_volume: neutron
+ puppet_tags: neutron_config,neutron_agent_sriov_numvfs,neutron_sriov_agent_config
+ step_config: *step_config
+ config_image: {get_param: DockerNeutronConfigImage}
+ kolla_config:
+ /var/lib/kolla/config_files/neutron_sriov_agent.json:
+ command: /usr/bin/neutron-sriov-nic-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/sriov_agent.ini --config-dir /etc/neutron/conf.d/common
+ config_files:
+ - source: "/var/lib/kolla/config_files/src/*"
+ dest: "/"
+ merge: true
+ preserve_properties: true
+ permissions:
+ - path: /var/log/neutron
+ owner: neutron:neutron
+ recurse: true
+ docker_config:
+ step_4:
+ neutron_sriov_agent:
+ image: {get_param: DockerNeutronSriovImage}
+ net: host
+ pid: host
+ privileged: true
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron_sriov_agent.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/puppet-generated/neutron/:/var/lib/kolla/config_files/src:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /var/log/containers/neutron:/var/log/neutron
+ - /sys/class/net:/sys/class/net:rw
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/neutron
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable neutron_sriov_agent service
+ tags: step2
+ service: name=neutron-sriov-nic-agent state=stopped enabled=no
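To use the new containerized SR-IOV agent, an environment would map the service onto this template. The registry key and parameter names below are assumed to match the existing puppet SR-IOV service; the NIC names are placeholders:

    resource_registry:
      OS::TripleO::Services::NeutronSriovAgent: ../docker/services/neutron-sriov-agent.yaml
    parameter_defaults:
      NeutronPhysicalDevMappings: "datacentre:ens2f0"   # placeholder mapping
      NeutronSriovNumVFs: "ens2f0:5"                    # placeholder VF count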
diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml
index 37c4da5b..9f1ae865 100644
--- a/docker/services/nova-api.yaml
+++ b/docker/services/nova-api.yaml
@@ -116,7 +116,8 @@ outputs:
user: root
volumes:
- /var/log/containers/nova:/var/log/nova
- command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R nova:nova /var/log/nova']
+ - /var/log/containers/httpd/nova-api:/var/log/httpd
+ command: ['/bin/bash', '-c', 'chown -R nova:nova /var/log/nova']
step_3:
nova_api_db_sync:
start_order: 0
@@ -131,6 +132,7 @@ outputs:
- /var/lib/config-data/nova/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro
- /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
- /var/log/containers/nova:/var/log/nova
+ - /var/log/containers/httpd/nova-api:/var/log/httpd
command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage api_db sync'"
# FIXME: we probably want to wait on the 'cell_v2 update' in order for this
# to be capable of upgrading a baremetal setup. This is to ensure the name
@@ -178,6 +180,7 @@ outputs:
- /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/nova/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/nova:/var/log/nova
+ - /var/log/containers/httpd/nova-api:/var/log/httpd
-
if:
- internal_tls_enabled
@@ -196,6 +199,8 @@ outputs:
user: root
privileged: false
restart: always
+ healthcheck:
+ test: /bin/true
volumes:
list_concat:
- {get_attr: [ContainersCommon, volumes]}
@@ -203,6 +208,7 @@ outputs:
- /var/lib/kolla/config_files/nova_api_cron.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/nova/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/nova:/var/log/nova
+ - /var/log/containers/httpd/nova-api:/var/log/httpd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
step_5:
@@ -213,14 +219,17 @@ outputs:
detach: false
volumes: *nova_api_bootstrap_volumes
user: root
- command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 discover_hosts'"
+ command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 discover_hosts --verbose'"
metadata_settings:
get_attr: [NovaApiBase, role_data, metadata_settings]
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/nova
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/nova
+ - /var/log/containers/httpd/nova-api
upgrade_tasks:
- name: Stop and disable nova_api service
tags: step2
diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml
index 39d1740c..bf7841be 100644
--- a/docker/services/nova-compute.yaml
+++ b/docker/services/nova-compute.yaml
@@ -41,6 +41,10 @@ parameters:
description: Port that dockerized nova migration target sshd service
binds to.
type: number
+ UpgradeLevelNovaCompute:
+ type: string
+ description: Nova Compute upgrade level
+ default: ''
resources:
@@ -142,6 +146,13 @@ outputs:
path: /etc/ceph
state: directory
upgrade_tasks:
+ - name: Set compute upgrade level to auto
+ tags: step1
+ ini_file:
+ str_replace:
+ template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
+ params:
+ LEVEL: {get_param: UpgradeLevelNovaCompute}
- name: Stop and disable nova-compute service
tags: step2
service: name=openstack-nova-compute state=stopped enabled=no
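The new step1 task writes the compute upgrade level with ini_file, where str_replace substitutes LEVEL from UpgradeLevelNovaCompute. Illustrative only: if the parameter were set to 'auto' (as the task name suggests; the default is empty), the rendered task would be:

    - name: Set compute upgrade level to auto
      tags: step1
      ini_file: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=auto"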
diff --git a/docker/services/nova-conductor.yaml b/docker/services/nova-conductor.yaml
index ae737056..17d137cc 100644
--- a/docker/services/nova-conductor.yaml
+++ b/docker/services/nova-conductor.yaml
@@ -36,7 +36,10 @@ parameters:
default: {}
description: Parameters specific to the role
type: json
-
+ UpgradeLevelNovaCompute:
+ type: string
+ description: Nova Compute upgrade level
+ default: ''
resources:
@@ -108,6 +111,13 @@ outputs:
path: /var/log/containers/nova
state: directory
upgrade_tasks:
+ - name: Set compute upgrade level to auto
+ tags: step1
+ ini_file:
+ str_replace:
+ template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
+ params:
+ LEVEL: {get_param: UpgradeLevelNovaCompute}
- name: Stop and disable nova_conductor service
tags: step2
service: name=openstack-nova-conductor state=stopped enabled=no
diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml
index 8f151cfe..d20c093d 100644
--- a/docker/services/nova-libvirt.yaml
+++ b/docker/services/nova-libvirt.yaml
@@ -204,6 +204,7 @@ outputs:
- /var/lib/libvirt:/var/lib/libvirt
- /var/log/libvirt/qemu:/var/log/libvirt/qemu:ro
- /var/log/containers/nova:/var/log/nova
+ - /var/lib/vhost_sockets:/var/lib/vhost_sockets
-
if:
- use_tls_for_live_migration
@@ -252,6 +253,30 @@ outputs:
- /etc/libvirt/qemu
- /var/lib/libvirt
- /var/log/containers/nova
+ # qemu user on host will be created by libvirt package install, ensure
+ # the qemu user is created with the same uid/gid as the libvirt package.
+ # These specific values are required since ovs is running on host.
+ # Once ovs with DPDK is containerized, we could modify this uid/gid
+ # to match with kolla config values.
+ - name: ensure qemu group is present on the host
+ group:
+ name: qemu
+ gid: 107
+ state: present
+ - name: ensure qemu user is present on the host
+ user:
+ name: qemu
+ uid: 107
+ group: qemu
+ state: present
+ shell: /sbin/nologin
+ comment: qemu user
+ - name: create directory for vhost-user sockets with qemu ownership
+ file:
+ path: /var/lib/vhost_sockets
+ state: directory
+ owner: qemu
+ group: qemu
- name: ensure ceph configurations exist
file:
path: /etc/ceph
diff --git a/docker/services/nova-placement.yaml b/docker/services/nova-placement.yaml
index 26d17560..d66a6fb8 100644
--- a/docker/services/nova-placement.yaml
+++ b/docker/services/nova-placement.yaml
@@ -111,6 +111,7 @@ outputs:
- /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/nova_placement/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/nova:/var/log/nova
+ - /var/log/containers/httpd/nova-placement:/var/log/httpd
-
if:
- internal_tls_enabled
@@ -128,8 +129,11 @@ outputs:
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/nova
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/nova
+ - /var/log/containers/httpd/nova-placement
upgrade_tasks:
- name: Stop and disable nova_placement service (running under httpd)
tags: step2
diff --git a/docker/services/octavia-api.yaml b/docker/services/octavia-api.yaml
index 86730ebc..da698991 100644
--- a/docker/services/octavia-api.yaml
+++ b/docker/services/octavia-api.yaml
@@ -111,6 +111,7 @@ outputs:
# configuration.
- /var/lib/config-data/puppet-generated/octavia/etc/octavia:/etc/octavia/
- /var/log/containers/octavia:/var/log/octavia
+ - /var/log/containers/httpd/octavia-api:/var/log/httpd
command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-api; chown -R octavia:octavia /etc/octavia/conf.d/octavia-api; chown -R octavia:octavia /var/log/octavia']
step_3:
octavia_db_sync:
@@ -126,6 +127,7 @@ outputs:
-
- /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
- /var/log/containers/octavia:/var/log/octavia
+ - /var/log/containers/httpd/octavia-api:/var/log/httpd
command: "/usr/bin/bootstrap_host_exec octavia_api su octavia -s /bin/bash -c '/usr/bin/octavia-db-manage upgrade head'"
step_4:
map_merge:
@@ -142,6 +144,7 @@ outputs:
- /var/lib/kolla/config_files/octavia_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/octavia/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/octavia:/var/log/octavia
+ - /var/log/containers/httpd/octavia-api:/var/log/httpd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- if:
@@ -166,8 +169,11 @@ outputs:
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/octavia
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/octavia
+ - /var/log/containers/httpd/octavia-api
upgrade_tasks:
- name: Stop and disable octavia_api service
tags: step2
diff --git a/docker/services/opendaylight-api.yaml b/docker/services/opendaylight-api.yaml
index 6a62f65e..2a6fcfe8 100644
--- a/docker/services/opendaylight-api.yaml
+++ b/docker/services/opendaylight-api.yaml
@@ -97,10 +97,21 @@ outputs:
-
- /var/lib/kolla/config_files/opendaylight_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/opendaylight/:/var/lib/kolla/config_files/src:ro
+ - /var/log/containers/opendaylight:/opt/opendaylight/data/log
+ - /var/lib/opendaylight/journal:/opt/opendaylight/journal
+ - /var/lib/opendaylight/snapshots:/opt/opendaylight/snapshots
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
-
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/opendaylight
+ - /var/lib/opendaylight/snapshots
+ - /var/lib/opendaylight/journal
upgrade_tasks:
- name: Stop and disable opendaylight_api service
tags: step2
- service: name=opendaylight state=stopped enabled=no
+ service: name=opendaylight state=stopped enabled=no
\ No newline at end of file
diff --git a/docker/services/pacemaker/cinder-backup.yaml b/docker/services/pacemaker/cinder-backup.yaml
index c2117c04..cdb8c1bc 100644
--- a/docker/services/pacemaker/cinder-backup.yaml
+++ b/docker/services/pacemaker/cinder-backup.yaml
@@ -188,6 +188,9 @@ outputs:
resource: openstack-cinder-backup
state: disable
wait_for_resource: true
+ register: output
+ retries: 5
+ until: output.rc == 0
when: is_bootstrap_node
- name: Delete the stopped openstack-cinder-backup cluster resource.
tags: step2
@@ -195,6 +198,9 @@ outputs:
resource: openstack-cinder-backup
state: delete
wait_for_resource: true
+ register: output
+ retries: 5
+ until: output.rc == 0
when: is_bootstrap_node
- name: Disable cinder_backup service
tags: step2
diff --git a/docker/services/pacemaker/cinder-volume.yaml b/docker/services/pacemaker/cinder-volume.yaml
index a4f69517..15c5e099 100644
--- a/docker/services/pacemaker/cinder-volume.yaml
+++ b/docker/services/pacemaker/cinder-volume.yaml
@@ -206,6 +206,9 @@ outputs:
resource: openstack-cinder-volume
state: disable
wait_for_resource: true
+ register: output
+ retries: 5
+ until: output.rc == 0
when: is_bootstrap_node
- name: Delete the stopped openstack-cinder-volume cluster resource.
tags: step2
@@ -213,6 +216,9 @@ outputs:
resource: openstack-cinder-volume
state: delete
wait_for_resource: true
+ register: output
+ retries: 5
+ until: output.rc == 0
when: is_bootstrap_node
- name: Disable cinder_volume service from boot
tags: step2
diff --git a/docker/services/pacemaker/database/mysql.yaml b/docker/services/pacemaker/database/mysql.yaml
index 3de1696d..9dace271 100644
--- a/docker/services/pacemaker/database/mysql.yaml
+++ b/docker/services/pacemaker/database/mysql.yaml
@@ -159,15 +159,17 @@ outputs:
detach: false
image: {get_param: DockerMysqlImage}
net: host
+ user: root
# Kolla bootstraps aren't idempotent, explicitly checking if bootstrap was done
command:
- 'bash'
- - '-ec'
+ - '-ecx'
-
list_join:
- "\n"
- - 'if [ -e /var/lib/mysql/mysql ]; then exit 0; fi'
- - 'kolla_start'
+ - 'echo -e "\n[mysqld]\nwsrep_provider=none" >> /etc/my.cnf'
+ - 'sudo -u mysql -E kolla_start'
- 'mysqld_safe --skip-networking --wsrep-on=OFF &'
- 'timeout ${DB_MAX_TIMEOUT} /bin/bash -c ''until mysqladmin -uroot -p"${DB_ROOT_PASSWORD}" ping 2>/dev/null; do sleep 1; done'''
- 'mysql -uroot -p"${DB_ROOT_PASSWORD}" -e "CREATE USER ''clustercheck''@''localhost'' IDENTIFIED BY ''${DB_CLUSTERCHECK_PASSWORD}'';"'
@@ -266,20 +268,34 @@ outputs:
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
+ - name: Check cluster resource status
+ tags: step2
+ pacemaker_resource:
+ resource: galera
+ state: master
+ check_mode: true
+ ignore_errors: true
+ register: galera_res
- name: Disable the galera cluster resource
tags: step2
pacemaker_resource:
resource: galera
state: disable
wait_for_resource: true
- when: is_bootstrap_node
+ register: output
+ retries: 5
+ until: output.rc == 0
+ when: is_bootstrap_node and galera_res|succeeded
- name: Delete the stopped galera cluster resource.
tags: step2
pacemaker_resource:
resource: galera
state: delete
wait_for_resource: true
- when: is_bootstrap_node
+ register: output
+ retries: 5
+ until: output.rc == 0
+ when: is_bootstrap_node and galera_res|succeeded
- name: Disable mysql service
tags: step2
service: name=mariadb enabled=no
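The galera teardown now probes the resource first (check_mode with ignore_errors) and retries each pacemaker_resource call, so a transient pcs failure or an already-removed resource no longer aborts the upgrade. The guard shape, with a placeholder resource name, which the redis, haproxy and rabbitmq templates below repeat:

    - name: Disable the cluster resource
      tags: step2
      pacemaker_resource:
        resource: example-resource        # placeholder
        state: disable
        wait_for_resource: true
      register: output
      retries: 5
      until: output.rc == 0
      when: is_bootstrap_node and example_res|succeeded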
diff --git a/docker/services/pacemaker/database/redis.yaml b/docker/services/pacemaker/database/redis.yaml
index 0b8aa046..4d26a084 100644
--- a/docker/services/pacemaker/database/redis.yaml
+++ b/docker/services/pacemaker/database/redis.yaml
@@ -36,9 +36,19 @@ parameters:
default: {}
description: Parameters specific to the role
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
+ ContainersCommon:
+ type: ../../containers-common.yaml
+
RedisBase:
type: ../../../../puppet/services/database/redis.yaml
properties:
@@ -74,6 +84,8 @@ outputs:
- 3124
- 6379
- 26379
+ tripleo::stunnel::manage_service: false
+ tripleo::stunnel::foreground: 'yes'
step_config: ""
service_config_settings: {get_attr: [RedisBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
@@ -109,6 +121,13 @@ outputs:
- path: /var/log/redis
owner: redis:redis
recurse: true
+ /var/lib/kolla/config_files/redis_tls_proxy.json:
+ command: stunnel /etc/stunnel/stunnel.conf
+ config_files:
+ - source: "/var/lib/kolla/config_files/src/*"
+ dest: "/"
+ merge: true
+ preserve_properties: true
docker_config:
step_1:
redis_image_tag:
@@ -134,32 +153,54 @@ outputs:
- /usr/bin:/usr/bin:ro
- /var/run/docker.sock:/var/run/docker.sock:rw
step_2:
- redis_init_bundle:
- start_order: 2
- detach: false
- net: host
- user: root
- config_volume: 'redis_init_bundle'
- command:
- - '/bin/bash'
- - '-c'
- - str_replace:
- template:
- list_join:
- - '; '
- - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
- - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
- params:
- TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
- CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::redis_bundle'
- image: *redis_config_image
- volumes:
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /etc/puppet:/tmp/puppet-etc:ro
- - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
- - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
- - /dev/shm:/dev/shm:rw
+ map_merge:
+ - redis_init_bundle:
+ start_order: 2
+ detach: false
+ net: host
+ user: root
+ config_volume: 'redis_init_bundle'
+ command:
+ - '/bin/bash'
+ - '-c'
+ - str_replace:
+ template:
+ list_join:
+ - '; '
+ - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
+ - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
+ params:
+ TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+ CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::redis_bundle'
+ image: *redis_config_image
+ volumes:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/puppet:/tmp/puppet-etc:ro
+ - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+ - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+ - /dev/shm:/dev/shm:rw
+ - if:
+ - internal_tls_enabled
+ - redis_tls_proxy:
+ start_order: 3
+ image: *redis_image_pcmklatest
+ net: host
+ user: root
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/redis_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/puppet-generated/redis/:/var/lib/kolla/config_files/src:ro
+ - /etc/pki/tls/certs/redis.crt:/etc/pki/tls/certs/redis.crt:ro
+ - /etc/pki/tls/private/redis.key:/etc/pki/tls/private/redis.key:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - {}
+ metadata_settings:
+ get_attr: [RedisBase, role_data, metadata_settings]
host_prep_tasks:
- name: create /var/run/redis
file:
@@ -181,20 +222,34 @@ outputs:
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
+ - name: Check cluster resource status
+ tags: step2
+ pacemaker_resource:
+ resource: {get_attr: [RedisBase, role_data, service_name]}
+ state: master
+ check_mode: true
+ ignore_errors: true
+ register: redis_res
- name: Disable the redis cluster resource
tags: step2
pacemaker_resource:
resource: {get_attr: [RedisBase, role_data, service_name]}
state: disable
wait_for_resource: true
- when: is_bootstrap_node
+ register: output
+ retries: 5
+ until: output.rc == 0
+ when: is_bootstrap_node and redis_res|succeeded
- name: Delete the stopped redis cluster resource.
tags: step2
pacemaker_resource:
resource: {get_attr: [RedisBase, role_data, service_name]}
state: delete
wait_for_resource: true
- when: is_bootstrap_node
+ register: output
+ retries: 5
+ until: output.rc == 0
+ when: is_bootstrap_node and redis_res|succeeded
- name: Disable redis service
tags: step2
service: name=redis enabled=no
diff --git a/docker/services/pacemaker/haproxy.yaml b/docker/services/pacemaker/haproxy.yaml
index 2e5c7424..2cc04e96 100644
--- a/docker/services/pacemaker/haproxy.yaml
+++ b/docker/services/pacemaker/haproxy.yaml
@@ -78,8 +78,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [HAProxyBase, role_data, config_settings]
- - tripleo::haproxy::haproxy_daemon: false
- haproxy_docker: true
+ - haproxy_docker: true
tripleo::profile::pacemaker::haproxy_bundle::haproxy_docker_image: &haproxy_image {get_param: DockerHAProxyImage}
# the list of directories that contain the certs to bind mount in the container
# bind-mounting the directories rather than all the cert, key and pem files ensures
@@ -88,6 +87,7 @@ outputs:
- get_param: InternalTLSCAFile
- get_param: HAProxyInternalTLSKeysDirectory
- get_param: HAProxyInternalTLSCertsDirectory
+ - get_param: DeployedSSLCertificatePath
tripleo::profile::pacemaker::haproxy_bundle::internal_certs_directory: {get_param: HAProxyInternalTLSCertsDirectory}
tripleo::profile::pacemaker::haproxy_bundle::internal_keys_directory: {get_param: HAProxyInternalTLSKeysDirectory}
# disable the use CRL file until we can restart the container when the file expires
@@ -119,7 +119,7 @@ outputs:
data: *tls_mapping
kolla_config:
/var/lib/kolla/config_files/haproxy.json:
- command: haproxy -f /etc/haproxy/haproxy.cfg
+ command: /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg
config_files:
- source: "/var/lib/kolla/config_files/src/*"
dest: "/"
@@ -223,17 +223,31 @@ outputs:
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
+ - name: Check cluster resource status
+ tags: step2
+ pacemaker_resource:
+ resource: {get_attr: [HAProxyBase, role_data, service_name]}
+ state: started
+ check_mode: true
+ ignore_errors: true
+ register: haproxy_res
- name: Disable the haproxy cluster resource.
tags: step2
pacemaker_resource:
resource: {get_attr: [HAProxyBase, role_data, service_name]}
state: disable
wait_for_resource: true
- when: is_bootstrap_node
+ register: output
+ retries: 5
+ until: output.rc == 0
+ when: is_bootstrap_node and haproxy_res|succeeded
- name: Delete the stopped haproxy cluster resource.
tags: step2
pacemaker_resource:
resource: {get_attr: [HAProxyBase, role_data, service_name]}
state: delete
wait_for_resource: true
- when: is_bootstrap_node
+ register: output
+ retries: 5
+ until: output.rc == 0
+ when: is_bootstrap_node and haproxy_res|succeeded
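With DeployedSSLCertificatePath added to the bind-mounted certificate directories, the public TLS bundle becomes visible inside the HAProxy bundle as well. A hedged sketch of the usual value, as set by the enable-tls environment (shown here as an assumption):

    parameter_defaults:
      DeployedSSLCertificatePath: /etc/pki/tls/private/overcloud_endpoint.pem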
diff --git a/docker/services/pacemaker/ovn-dbs.yaml b/docker/services/pacemaker/ovn-dbs.yaml
new file mode 100644
index 00000000..03c5a397
--- /dev/null
+++ b/docker/services/pacemaker/ovn-dbs.yaml
@@ -0,0 +1,140 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized OVN DBs service managed by pacemaker
+
+parameters:
+ DockerOvnDbsImage:
+ description: The container image to use for the OVN DBs
+ type: string
+ DockerOvnDbsConfigImage:
+ description: The container image to use for the OVN DBs config_volume
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceData:
+ default: {}
+ description: Dictionary packing service data
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ OVNNorthboundServerPort:
+ description: Port of the OVN Northbound DB server
+ type: number
+ default: 6641
+ OVNSouthboundServerPort:
+ description: Port of the OVN Southbound DB server
+ type: number
+ default: 6642
+
+resources:
+
+ ContainersCommon:
+ type: ./../containers-common.yaml
+
+ OVNDbsBase:
+ type: ../../../puppet/services/pacemaker/ovn-dbs.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceData: {get_param: ServiceData}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ OVNNorthboundServerPort: {get_param: OVNNorthboundServerPort}
+ OVNSouthboundServerPort: {get_param: OVNSouthboundServerPort}
+
+outputs:
+ role_data:
+ description: Role data for the OVN Dbs HA role.
+ value:
+ service_name: {get_attr: [OVNDbsBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [OVNDbsBase, role_data, config_settings]
+ - tripleo::profile::pacemaker::ovn_dbs_bundle::ovn_dbs_docker_image: {get_param: DockerOvnDbsImage}
+ - tripleo::profile::pacemaker::ovn_dbs_bundle::nb_db_port: {get_param: OVNNorthboundServerPort}
+ - tripleo::profile::pacemaker::ovn_dbs_bundle::sb_db_port: {get_param: OVNSouthboundServerPort}
+ step_config: ''
+ service_config_settings: {get_attr: [OVNDbsBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: 'ovn_dbs'
+ puppet_tags: 'exec'
+ step_config: ''
+ config_image: &ovn_dbs_config_image {get_param: DockerOvnDbsConfigImage}
+ kolla_config:
+ /var/lib/kolla/config_files/ovn_dbs.json:
+ command: /usr/sbin/pacemaker_remoted
+ config_files:
+ - dest: /etc/libqb/force-filesystem-sockets
+ source: /dev/null
+ owner: root
+ perm: '0644'
+ - source: "/var/lib/kolla/config_files/src/*"
+ dest: "/"
+ merge: true
+ preserve_properties: true
+ optional: true
+ docker_config:
+ step_3:
+ ovn_dbs_init_bundle:
+ start_order: 1
+ detach: false
+ net: host
+ user: root
+ config_volume: 'ovn_dbs_init_bundle'
+ command:
+ - '/bin/bash'
+ - '-c'
+ - str_replace:
+ template:
+ list_join:
+ - '; '
+ - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 3}' > /etc/puppet/hieradata/docker.json"
+ - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
+ params:
+ TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+ CONFIG:
+ list_join:
+ - ';'
+ - - 'include ::tripleo::profile::base::pacemaker'
+ - 'include ::tripleo::profile::pacemaker::ovn_dbs_bundle'
+ image: *ovn_dbs_config_image
+ volumes:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/puppet:/tmp/puppet-etc:ro
+ - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+ - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+ - /dev/shm:/dev/shm:rw
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/openvswitch
+ - /var/lib/openvswitch/ovn
+ upgrade_tasks:
+ - name: Stop and disable ovn-northd service
+ tags: step2
+ service: name=ovn-northd state=stopped enabled=no
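Enabling the pacemaker-managed OVN DBs would be done from an environment file; the registry key below is assumed from the existing OVN service naming, and the ports simply echo this template's defaults:

    resource_registry:
      OS::TripleO::Services::OVNDBs: ../docker/services/pacemaker/ovn-dbs.yaml
    parameter_defaults:
      OVNNorthboundServerPort: 6641
      OVNSouthboundServerPort: 6642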
diff --git a/docker/services/pacemaker/rabbitmq.yaml b/docker/services/pacemaker/rabbitmq.yaml
index ba1abaf9..7333689c 100644
--- a/docker/services/pacemaker/rabbitmq.yaml
+++ b/docker/services/pacemaker/rabbitmq.yaml
@@ -215,20 +215,34 @@ outputs:
- name: set is_bootstrap_node fact
tags: common
set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
+ - name: Check cluster resource status
+ tags: step2
+ pacemaker_resource:
+ resource: {get_attr: [RabbitmqBase, role_data, service_name]}
+ state: started
+ check_mode: true
+ ignore_errors: true
+ register: rabbitmq_res
- name: Disable the rabbitmq cluster resource.
tags: step2
pacemaker_resource:
resource: {get_attr: [RabbitmqBase, role_data, service_name]}
state: disable
wait_for_resource: true
- when: is_bootstrap_node
+ register: output
+ retries: 5
+ until: output.rc == 0
+ when: is_bootstrap_node and rabbitmq_res|succeeded
- name: Delete the stopped rabbitmq cluster resource.
tags: step2
pacemaker_resource:
resource: {get_attr: [RabbitmqBase, role_data, service_name]}
state: delete
wait_for_resource: true
- when: is_bootstrap_node
+ register: output
+ retries: 5
+ until: output.rc == 0
+ when: is_bootstrap_node and rabbitmq_res|succeeded
- name: Disable rabbitmq service
tags: step2
service: name=rabbitmq-server enabled=no
diff --git a/docker/services/panko-api.yaml b/docker/services/panko-api.yaml
index 626d9176..3edd9049 100644
--- a/docker/services/panko-api.yaml
+++ b/docker/services/panko-api.yaml
@@ -104,7 +104,8 @@ outputs:
user: root
volumes:
- /var/log/containers/panko:/var/log/panko
- command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R panko:panko /var/log/panko']
+ - /var/log/containers/httpd/panko-api:/var/log/httpd
+ command: ['/bin/bash', '-c', 'chown -R panko:panko /var/log/panko']
step_3:
panko_db_sync:
image: *panko_api_image
@@ -119,6 +120,7 @@ outputs:
- /var/lib/config-data/panko/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro
- /var/lib/config-data/panko/etc/panko:/etc/panko:ro
- /var/log/containers/panko:/var/log/panko
+ - /var/log/containers/httpd/panko-api:/var/log/httpd
command: "/usr/bin/bootstrap_host_exec panko_api su panko -s /bin/bash -c '/usr/bin/panko-dbsync'"
step_4:
panko_api:
@@ -134,6 +136,7 @@ outputs:
- /var/lib/kolla/config_files/panko_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/panko/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/panko:/var/log/panko
+ - /var/log/containers/httpd/panko-api:/var/log/httpd
-
if:
- internal_tls_enabled
@@ -149,7 +152,10 @@ outputs:
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/panko
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/panko
+ - /var/log/containers/httpd/panko-api
metadata_settings:
get_attr: [PankoApiPuppetBase, role_data, metadata_settings]
diff --git a/docker/services/swift-proxy.yaml b/docker/services/swift-proxy.yaml
index 374db250..86871210 100644
--- a/docker/services/swift-proxy.yaml
+++ b/docker/services/swift-proxy.yaml
@@ -111,6 +111,7 @@ outputs:
- /srv/node:/srv/node
- /dev:/dev
- /var/log/containers/swift:/var/log/swift
+ - /var/log/containers/httpd/swift-proxy:/var/log/httpd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- if:
@@ -138,6 +139,7 @@ outputs:
state: directory
with_items:
- /var/log/containers/swift
+ - /var/log/containers/httpd/swift-proxy
- /srv/node
upgrade_tasks:
- name: Stop and disable swift_proxy service
diff --git a/docker/services/swift-ringbuilder.yaml b/docker/services/swift-ringbuilder.yaml
index e4e2c7d2..2a44f703 100644
--- a/docker/services/swift-ringbuilder.yaml
+++ b/docker/services/swift-ringbuilder.yaml
@@ -7,6 +7,10 @@ parameters:
DockerSwiftConfigImage:
description: The container image to use for the swift config_volume
type: string
+ DockerSwiftRingbuilderConfigImage:
+ description: Fake parameter to bypass config_volume yaml validation
+ type: string
+ default: ''
ServiceData:
default: {}
description: Dictionary packing service data
@@ -92,9 +96,22 @@ outputs:
service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
puppet_config:
- config_volume: 'swift'
+ config_volume: 'swift_ringbuilder'
puppet_tags: exec,fetch_swift_ring_tarball,extract_swift_ring_tarball,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance,create_swift_ring_tarball,upload_swift_ring_tarball
step_config: *step_config
- config_image: {get_param: DockerSwiftConfigImage}
+ config_image: &swift_ringbuilder_image {get_param: DockerSwiftConfigImage}
kolla_config: {}
- docker_config: {}
+ docker_config:
+ step_3:
+ swift_copy_rings:
+ image: *swift_ringbuilder_image
+ user: root
+ detach: false
+ command:
+ # Use bash to run the cp command so that wildcards can be used
+ - '/bin/bash'
+ - '-c'
+ - 'cp -v -a -t /etc/swift /swift_ringbuilder/etc/swift/*.gz /swift_ringbuilder/etc/swift/*.builder /swift_ringbuilder/etc/swift/backups'
+ volumes:
+ - /var/lib/config-data/puppet-generated/swift/etc/swift:/etc/swift:rw
+ - /var/lib/config-data/swift_ringbuilder:/swift_ringbuilder:ro
diff --git a/docker/services/zaqar.yaml b/docker/services/zaqar.yaml
index b6fb4001..ab30ab5a 100644
--- a/docker/services/zaqar.yaml
+++ b/docker/services/zaqar.yaml
@@ -116,6 +116,7 @@ outputs:
user: root
volumes:
- /var/log/containers/zaqar:/var/log/zaqar
+ - /var/log/containers/httpd/zaqar:/var/log/httpd
command: ['/bin/bash', '-c', 'chown -R zaqar:zaqar /var/log/zaqar']
step_3:
zaqar_db_sync:
@@ -130,7 +131,8 @@ outputs:
-
- /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
- /var/log/containers/zaqar:/var/log/zaqar
- command: "/usr/bin/bootstrap_host_exec zaqar su zaqar -s /bin/bash -c 'zaqar-sql-db-manage upgrade head'"
+ - /var/log/containers/httpd/zaqar:/var/log/httpd
+ command: "/usr/bin/bootstrap_host_exec zaqar_api su zaqar -s /bin/bash -c 'zaqar-sql-db-manage upgrade head'"
- {}
- step_4:
zaqar:
@@ -148,6 +150,7 @@ outputs:
- /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/zaqar/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/zaqar:/var/log/zaqar
+ - /var/log/containers/httpd/zaqar:/var/log/httpd
-
if:
- internal_tls_enabled
@@ -172,13 +175,17 @@ outputs:
- /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/zaqar/:/var/lib/kolla/config_files/src:ro
- /var/log/containers/zaqar:/var/log/zaqar
+ - /var/log/containers/httpd/zaqar:/var/log/httpd
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
host_prep_tasks:
- name: create persistent logs directory
file:
- path: /var/log/containers/zaqar
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/zaqar
+ - /var/log/containers/httpd/zaqar
upgrade_tasks:
- name: Stop and disable zaqar service
tags: step2
diff --git a/environments/ceph-ansible/ceph-ansible-external.yaml b/environments/ceph-ansible/ceph-ansible-external.yaml
new file mode 100644
index 00000000..ed3bedbd
--- /dev/null
+++ b/environments/ceph-ansible/ceph-ansible-external.yaml
@@ -0,0 +1,30 @@
+resource_registry:
+ OS::TripleO::Services::CephExternal: ../../docker/services/ceph-ansible/ceph-external.yaml
+ OS::TripleO::Services::CephMon: OS::Heat::None
+ OS::TripleO::Services::CephClient: OS::Heat::None
+ OS::TripleO::Services::CephOSD: OS::Heat::None
+
+parameter_defaults:
+ # NOTE: These example parameters are required when using CephExternal
+ #CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
+ #CephClientKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
+ #CephExternalMonHost: '172.16.1.7, 172.16.1.8'
+
+ # the following parameters enable Ceph backends for Cinder, Glance, Gnocchi and Nova
+ NovaEnableRbdBackend: true
+ CinderEnableRbdBackend: true
+ CinderBackupBackend: ceph
+ GlanceBackend: rbd
+ GnocchiBackend: rbd
+ # If the Ceph pools which host VMs, Volumes and Images do not match these
+ # names OR the client keyring to use is not named 'openstack', edit the
+ # following as needed.
+ NovaRbdPoolName: vms
+ CinderRbdPoolName: volumes
+ CinderBackupRbdPoolName: backups
+ GlanceRbdPoolName: images
+ GnocchiRbdPoolName: metrics
+ CephClientUserName: openstack
+
+ # finally we disable the Cinder LVM backend
+ CinderEnableIscsiBackend: false
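In practice the commented-out parameters near the top of this file must be supplied for CephExternal to work; a minimal sketch of an additional per-deployment environment file, reusing the placeholder values from the comments above (replace them with the FSID, client keyring and monitor addresses of the actual external cluster):

parameter_defaults:
  CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
  CephClientKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
  CephExternalMonHost: '172.16.1.7, 172.16.1.8'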
diff --git a/environments/ceph-ansible/ceph-rgw.yaml b/environments/ceph-ansible/ceph-rgw.yaml
new file mode 100644
index 00000000..4b09a703
--- /dev/null
+++ b/environments/ceph-ansible/ceph-rgw.yaml
@@ -0,0 +1,5 @@
+resource_registry:
+ OS::TripleO::Services::CephRgw: ../../docker/services/ceph-ansible/ceph-rgw.yaml
+ OS::TripleO::Services::SwiftProxy: OS::Heat::None
+ OS::TripleO::Services::SwiftStorage: OS::Heat::None
+ OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
diff --git a/environments/cinder-dellemc-vmax-iscsi-config.yaml b/environments/cinder-dellemc-vmax-iscsi-config.yaml
new file mode 100644
index 00000000..3832dada
--- /dev/null
+++ b/environments/cinder-dellemc-vmax-iscsi-config.yaml
@@ -0,0 +1,9 @@
+# A Heat environment file which can be used to enable a
+# Cinder Dell EMC VMAX backend, configured via puppet
+resource_registry:
+ OS::TripleO::Services::CinderBackendDellEMCVMAXISCSI: ../puppet/services/cinder-backend-dellemc-vmax-iscsi.yaml
+
+parameter_defaults:
+ CinderEnableDellEMCVMAXISCSIBackend: true
+ CinderDellEMCVMAXISCSIBackendName: 'tripleo_dellemc_vmax_iscsi'
+ CinderDellEMCVMAXISCSIConfigFile: ''
diff --git a/environments/contrail/roles_data_contrail.yaml b/environments/contrail/roles_data_contrail.yaml
index 38b70e53..bd697160 100644
--- a/environments/contrail/roles_data_contrail.yaml
+++ b/environments/contrail/roles_data_contrail.yaml
@@ -87,8 +87,10 @@
- OS::TripleO::Services::ManilaBackendGeneric
- OS::TripleO::Services::ManilaBackendNetapp
- OS::TripleO::Services::ManilaBackendUnity
+ - OS::TripleO::Services::ManilaBackendIsilon
- OS::TripleO::Services::ManilaBackendCephFs
- OS::TripleO::Services::ManilaBackendVNX
+ - OS::TripleO::Services::ManilaBackendVMAX
- OS::TripleO::Services::ManilaShare
- OS::TripleO::Services::AodhApi
- OS::TripleO::Services::AodhEvaluator
diff --git a/environments/docker.yaml b/environments/docker.yaml
index dfa30b08..06e3d3ab 100644
--- a/environments/docker.yaml
+++ b/environments/docker.yaml
@@ -30,7 +30,6 @@ resource_registry:
OS::TripleO::Services::HAproxy: ../docker/services/haproxy.yaml
OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
- OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml
OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml
OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml
@@ -57,3 +56,7 @@ resource_registry:
# OS::TripleO::Services::CinderScheduler: ../docker/services/cinder-scheduler.yaml
# OS::TripleO::Services::CinderBackup: ../docker/services/cinder-backup.yaml
# OS::TripleO::Services::CinderVolume: ../docker/services/cinder-volume.yaml
+ #
+ # If SR-IOV is enabled on the compute nodes, it will need the SR-IOV
+ # host configuration.
+ OS::TripleO::Services::NeutronSriovHostConfig: OS::Heat::None
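A minimal sketch of the override an SR-IOV deployment would layer on top of docker.yaml, assuming it mirrors the environments/services-docker/neutron-sriov.yaml file introduced later in this change (paths shown relative to the environments/ directory):

resource_registry:
  OS::TripleO::Services::NeutronSriovAgent: ../docker/services/neutron-sriov-agent.yaml
  OS::TripleO::Services::NeutronSriovHostConfig: ../puppet/services/neutron-sriov-host-config.yaml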
diff --git a/environments/enable-internal-tls.yaml b/environments/enable-internal-tls.yaml
index 2fdecb4f..e85185d6 100644
--- a/environments/enable-internal-tls.yaml
+++ b/environments/enable-internal-tls.yaml
@@ -1,3 +1,7 @@
+# ********************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/enable-internal-tls.yaml
+# instead.
+# ********************************************************************************
# A Heat environment file which can be used to enable a
# a TLS for in the internal network via certmonger
parameter_defaults:
diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml
index 81044170..e674dc64 100644
--- a/environments/hyperconverged-ceph.yaml
+++ b/environments/hyperconverged-ceph.yaml
@@ -29,6 +29,7 @@ parameter_defaults:
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::Tuned
- OS::TripleO::Services::NeutronSriovAgent
+ - OS::TripleO::Services::NeutronSriovHostConfig
- OS::TripleO::Services::OpenDaylightOvs
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
diff --git a/environments/manila-cephfsnative-config-docker.yaml b/environments/manila-cephfsnative-config-docker.yaml
new file mode 100644
index 00000000..8c822918
--- /dev/null
+++ b/environments/manila-cephfsnative-config-docker.yaml
@@ -0,0 +1,20 @@
+# EXPERIMENTAL: The configuration enabled by this environment
+# is not considered production-ready.
+#
+# A Heat environment file which can be used to enable a
+# Manila CephFS Native driver backend.
+resource_registry:
+ OS::TripleO::Services::ManilaApi: ../docker/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../docker/services/manila-scheduler.yaml
+ # Only manila-share is pacemaker managed:
+ OS::TripleO::Services::ManilaShare: ../docker/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml
+
+
+parameter_defaults:
+ ManilaCephFSNativeBackendName: cephfsnative
+ ManilaCephFSNativeDriverHandlesShareServers: false
+ ManilaCephFSNativeCephFSConfPath: '/etc/ceph/ceph.conf'
+ ManilaCephFSNativeCephFSAuthId: 'manila'
+ ManilaCephFSNativeCephFSClusterName: 'ceph'
+ ManilaCephFSNativeCephFSEnableSnapshots: false
diff --git a/environments/manila-isilon-config.yaml b/environments/manila-isilon-config.yaml
new file mode 100644
index 00000000..809900c8
--- /dev/null
+++ b/environments/manila-isilon-config.yaml
@@ -0,0 +1,17 @@
+# This environment file enables Manila with the Isilon backend.
+resource_registry:
+ OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ # Only manila-share is pacemaker managed:
+ OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendIsilon: ../puppet/services/manila-backend-isilon.yaml
+
+parameter_defaults:
+ ManilaIsilonBackendName: tripleo_isilon
+ ManilaIsilonDriverHandlesShareServers: true
+ ManilaIsilonNasLogin: ''
+ ManilaIsilonNasPassword: ''
+ ManilaIsilonNasServer: ''
+ ManilaIsilonNasRootDir: ''
+ ManilaIsilonNasServerPort: 8080
+ ManilaIsilonNasServerSecure: ''
diff --git a/environments/manila-netapp-config-docker.yaml b/environments/manila-netapp-config-docker.yaml
new file mode 100644
index 00000000..6db6dff4
--- /dev/null
+++ b/environments/manila-netapp-config-docker.yaml
@@ -0,0 +1,32 @@
+# EXPERIMENTAL: The configuration enabled by this environment
+# is not considered production-ready.
+#
+# This environment file enables Manila with the Netapp backend.
+resource_registry:
+ OS::TripleO::Services::ManilaApi: ../docker/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../docker/services/manila-scheduler.yaml
+ # Only manila-share is pacemaker managed:
+ OS::TripleO::Services::ManilaShare: ../docker/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml
+
+parameter_defaults:
+ ManilaNetappBackendName: tripleo_netapp
+ ManilaNetappDriverHandlesShareServers: true
+ ManilaNetappLogin: ''
+ ManilaNetappPassword: ''
+ ManilaNetappServerHostname: ''
+ ManilaNetappTransportType: 'http'
+ ManilaNetappStorageFamily: 'ontap_cluster'
+ ManilaNetappServerPort: 80
+ ManilaNetappVolumeNameTemplate: 'share_%(share_id)s'
+ ManilaNetappVserver: ''
+ ManilaNetappVserverNameTemplate: 'os_%s'
+ ManilaNetappLifNameTemplate: 'os_%(net_allocation_id)s'
+ ManilaNetappAggrNameSearchPattern: '(.*)'
+ ManilaNetappRootVolumeAggr: ''
+ ManilaNetappRootVolume: 'root'
+ ManilaNetappPortNameSearchPattern: '(.*)'
+ ManilaNetappTraceFlags: ''
+ ManilaNetappEnabledShareProtocols: 'nfs3, nfs4.0'
+ ManilaNetappVolumeSnapshotReservePercent: 5
+ ManilaNetappSnapmirrorQuiesceTimeout: 3600
diff --git a/environments/manila-vmax-config.yaml b/environments/manila-vmax-config.yaml
new file mode 100644
index 00000000..8c9946ca
--- /dev/null
+++ b/environments/manila-vmax-config.yaml
@@ -0,0 +1,19 @@
+# This environment file enables Manila with the VMAX backend.
+resource_registry:
+ OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ # Only manila-share is pacemaker managed:
+ OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendVMAX: ../puppet/services/manila-backend-vmax.yaml
+
+parameter_defaults:
+ ManilaVMAXBackendName: tripleo_manila_vmax
+ ManilaVMAXDriverHandlesShareServers: true
+ ManilaVMAXNasLogin: ''
+ ManilaVMAXNasPassword: ''
+ ManilaVMAXNasServer: ''
+ ManilaVMAXServerContainer: ''
+ ManilaVMAXShareDataPools: ''
+ ManilaVMAXEthernetPorts: ''
+
+
diff --git a/environments/network-environment-v6.yaml b/environments/network-environment-v6.yaml
new file mode 100644
index 00000000..1617f897
--- /dev/null
+++ b/environments/network-environment-v6.yaml
@@ -0,0 +1,60 @@
+# This file is an example of an environment file for defining the isolated
+# networks and related parameters.
+resource_registry:
+ # Network Interface templates to use (these files must exist)
+ OS::TripleO::BlockStorage::Net::SoftwareConfig:
+ ../network/config/single-nic-vlans/cinder-storage.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig:
+ ../network/config/single-nic-vlans/compute.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig:
+ ../network/config/single-nic-vlans/controller.yaml
+ OS::TripleO::ObjectStorage::Net::SoftwareConfig:
+ ../network/config/single-nic-vlans/swift-storage.yaml
+ OS::TripleO::CephStorage::Net::SoftwareConfig:
+ ../network/config/single-nic-vlans/ceph-storage.yaml
+
+parameter_defaults:
+ # This section is where deployment-specific configuration is done
+ # CIDR subnet mask length for provisioning network
+ ControlPlaneSubnetCidr: '24'
+ # Gateway router for the provisioning network (or Undercloud IP)
+ ControlPlaneDefaultRoute: 192.168.24.254
+ EC2MetadataIp: 192.168.24.1 # Generally the IP of the Undercloud
+ # Customize the IP subnets to match the local environment
+ InternalApiNetCidr: 'fd00:fd00:fd00:2000::/64'
+ StorageNetCidr: 'fd00:fd00:fd00:3000::/64'
+ StorageMgmtNetCidr: 'fd00:fd00:fd00:4000::/64'
+ # Tenant tunneling network is IPv4 until IPv6 is fully supported
+ TenantNetCidr: 172.16.0.0/24
+ ExternalNetCidr: '2001:db8:fd00:1000::/64'
+ # Customize the VLAN IDs to match the local environment
+ InternalApiNetworkVlanID: 20
+ StorageNetworkVlanID: 30
+ StorageMgmtNetworkVlanID: 40
+ TenantNetworkVlanID: 50
+ ExternalNetworkVlanID: 10
+ # Customize the IP ranges on each network to use for static IPs and VIPs
+ InternalApiAllocationPools: [{'start': 'fd00:fd00:fd00:2000::10', 'end': 'fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe'}]
+ StorageAllocationPools: [{'start': 'fd00:fd00:fd00:3000::10', 'end': 'fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe'}]
+ StorageMgmtAllocationPools: [{'start': 'fd00:fd00:fd00:4000::10', 'end': 'fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe'}]
+ TenantAllocationPools: [{'start': '172.16.0.10', 'end': '172.16.0.200'}]
+ # Leave room if the external network is also used for floating IPs
+ ExternalAllocationPools: [{'start': '2001:db8:fd00:1000::10', 'end': '2001:db8:fd00:1000:ffff:ffff:ffff:fffe'}]
+ # Gateway router for the external network
+ ExternalInterfaceDefaultRoute: '2001:db8:fd00:1000::1'
+ # Uncomment if using the Management Network (see network-management-v6.yaml)
+ # ManagementNetCidr: 'fd00:fd00:fd00:6000::/64'
+ # ManagementAllocationPools: [{'start': 'fd00:fd00:fd00:6000::10', 'end': 'fd00:fd00:fd00:6000:ffff:ffff:ffff:fffe'}]
+ # Use either this parameter or ControlPlaneDefaultRoute in the NIC templates
+ # ManagementInterfaceDefaultRoute: 'fd00:fd00:fd00:6000::1'
+ # Define the DNS servers (maximum 2) for the overcloud nodes
+ DnsServers: ["8.8.8.8","8.8.4.4"]
+ # List of Neutron network types for tenant networks (will be used in order)
+ NeutronNetworkType: 'vxlan,vlan'
+ # The tunnel type for the tenant network (vxlan or gre). Set to '' to disable tunneling.
+ NeutronTunnelTypes: 'vxlan'
+ # Neutron VLAN ranges per network, for example 'datacentre:1:499,tenant:500:1000':
+ NeutronNetworkVLANRanges: 'datacentre:1:1000'
+ # Customize bonding options, e.g. "mode=4 lacp_rate=1 updelay=1000 miimon=100"
+ # for Linux bonds w/LACP, or "bond_mode=active-backup" for OVS active/backup.
+ BondInterfaceOvsOptions: "bond_mode=active-backup"
diff --git a/environments/network-isolation-no-tunneling.j2.yaml b/environments/network-isolation-no-tunneling.j2.yaml
new file mode 100644
index 00000000..6bf00f1e
--- /dev/null
+++ b/environments/network-isolation-no-tunneling.j2.yaml
@@ -0,0 +1,34 @@
+# ******************************************************************************
+# DEPRECATED: Adjust the networks used by custom roles by editing the role file
+# in the roles/ directory, or disable a network entirely by setting
+# "enabled: false" for it in network_data.yaml.
+# ******************************************************************************
+# Enable the creation of Neutron networks for isolated Overcloud
+# traffic and configure each role to assign ports (related
+# to that role) on these networks. This version of the environment
+# has no dedicated VLAN for tunneling, for deployments that use
+# VLAN mode, flat provider networks, etc.
+resource_registry:
+ # networks as defined in network_data.yaml, except for tenant net
+ {%- for network in networks if network.enabled|default(true) and network.name != 'Tenant' %}
+ OS::TripleO::Network::{{network.name}}: ../network/{{network.name_lower|default(network.name.lower())}}.yaml
+ {%- endfor %}
+ OS::TripleO::Network::Tenant: OS::Heat::None
+
+ # Port assignments for the VIPs
+ {%- for network in networks if network.vip and network.name != 'Tenant' %}
+ OS::TripleO::Network::Ports::{{network.name}}VipPort: ../network/ports/{{network.name_lower|default(network.name.lower())}}.yaml
+ {%- endfor %}
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+ # Port assignments for each role are determined by the role definition.
+{%- for role in roles %}
+ # Port assignments for the {{role.name}} role.
+ {%- for network in networks %}
+ {%- if network.name in role.networks|default([]) and network.enabled|default(true) and network.name != 'Tenant'%}
+ OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: ../network/ports/{{network.name_lower|default(network.name.lower())}}.yaml
+ {%- elif network.enabled|default(true) %}
+ OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: ../network/ports/noop.yaml
+ {%- endif %}
+ {%- endfor %}
+{% endfor %}
diff --git a/environments/network-isolation-no-tunneling.yaml b/environments/network-isolation-no-tunneling.yaml
deleted file mode 100644
index ff1d7887..00000000
--- a/environments/network-isolation-no-tunneling.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-# Enable the creation of Neutron networks for isolated Overcloud
-# traffic and configure each role to assign ports (related
-# to that role) on these networks. This version of the environment
-# has no dedicated VLAN for tunneling, for deployments that use
-# VLAN mode, flat provider networks, etc.
-resource_registry:
- OS::TripleO::Network::External: ../network/external.yaml
- OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
- OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml
- OS::TripleO::Network::Storage: ../network/storage.yaml
- OS::TripleO::Network::Tenant: ../network/noop.yaml
- # Management network is optional and disabled by default.
- # To enable it, include environments/network-management.yaml
- #OS::TripleO::Network::Management: ../network/management.yaml
-
- # Port assignments for the VIPs
- OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
- OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
- OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
- OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
-
- # Port assignments for the controller role
- OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
- OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml
- OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::Controller::Ports::TenantPort: ../network/ports/noop.yaml
- #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
-
- # Port assignments for the compute role
- OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
- OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
- OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
- OS::TripleO::Compute::Ports::TenantPort: ../network/ports/noop.yaml
- #OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
-
- # Port assignments for the ceph storage role
- OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
- #OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
- # Port assignments for the swift storage role
- OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
- OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
- #OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
- # Port assignments for the block storage role
- OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
- OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
- OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
- #OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
diff --git a/environments/network-isolation.j2.yaml b/environments/network-isolation.j2.yaml
index 1b792afd..2db1a828 100644
--- a/environments/network-isolation.j2.yaml
+++ b/environments/network-isolation.j2.yaml
@@ -22,9 +22,6 @@ resource_registry:
{%- endfor %}
OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
-
- OS::TripleO::{{primary_role_name}}::Ports::RedisVipPort: ../network/ports/vip.yaml
-
{%- for role in roles %}
# Port assignments for the {{role.name}}
{%- for network in networks %}
diff --git a/environments/neutron-nuage-config.yaml b/environments/neutron-nuage-config.yaml
index ce64311b..fb47770f 100644
--- a/environments/neutron-nuage-config.yaml
+++ b/environments/neutron-nuage-config.yaml
@@ -28,6 +28,8 @@ parameter_defaults:
NeutronTunnelIdRanges: ''
NeutronNetworkVLANRanges: ''
NeutronVniRanges: ''
+ NovaPatchConfigMonkeyPatch: false
+ NovaPatchConfigMonkeyPatchModules: ''
NovaOVSBridge: 'default_bridge'
NeutronMetadataProxySharedSecret: 'default'
InstanceNameTemplate: 'inst-%08x'
diff --git a/environments/neutron-opendaylight-dpdk.yaml b/environments/neutron-opendaylight-dpdk.yaml
index d675252d..0d598980 100644
--- a/environments/neutron-opendaylight-dpdk.yaml
+++ b/environments/neutron-opendaylight-dpdk.yaml
@@ -9,9 +9,11 @@ resource_registry:
parameter_defaults:
NeutronEnableForceMetadata: true
+ NeutronPluginExtensions: 'port_security'
NeutronMechanismDrivers: 'opendaylight_v2'
- NeutronServicePlugins: 'odl-router_v2'
+ NeutronServicePlugins: 'odl-router_v2,trunk'
NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+ OpenDaylightSNATMechanism: 'controller'
ComputeOvsDpdkParameters:
OvsEnableDpdk: True
diff --git a/environments/neutron-opendaylight-sriov.yaml b/environments/neutron-opendaylight-sriov.yaml
index 5c0a0350..3a212ed3 100644
--- a/environments/neutron-opendaylight-sriov.yaml
+++ b/environments/neutron-opendaylight-sriov.yaml
@@ -11,6 +11,7 @@ resource_registry:
parameter_defaults:
NeutronEnableForceMetadata: true
+ NeutronPluginExtensions: 'port_security'
NeutronMechanismDrivers: ['sriovnicswitch','opendaylight_v2']
NeutronServicePlugins: 'odl-router_v2,trunk'
diff --git a/environments/neutron-opendaylight.yaml b/environments/neutron-opendaylight.yaml
index 4644725d..e9b1ac11 100644
--- a/environments/neutron-opendaylight.yaml
+++ b/environments/neutron-opendaylight.yaml
@@ -12,3 +12,4 @@ parameter_defaults:
NeutronEnableForceMetadata: true
NeutronMechanismDrivers: 'opendaylight_v2'
NeutronServicePlugins: 'odl-router_v2,trunk'
+ NeutronPluginExtensions: 'port_security'
diff --git a/environments/neutron-sriov.yaml b/environments/neutron-sriov.yaml
index 591e2260..26c28d28 100755
--- a/environments/neutron-sriov.yaml
+++ b/environments/neutron-sriov.yaml
@@ -4,16 +4,5 @@ resource_registry:
parameter_defaults:
NeutronMechanismDrivers: ['sriovnicswitch', 'openvswitch']
-
- # Add PciPassthroughFilter to the scheduler default filters
- #NovaSchedulerDefaultFilters: ['RetryFilter','AvailabilityZoneFilter','RamFilter','ComputeFilter','ComputeCapabilitiesFilter','ImagePropertiesFilter','ServerGroupAntiAffinityFilter','ServerGroupAffinityFilter','PciPassthroughFilter']
- #NovaSchedulerAvailableFilters: ["nova.scheduler.filters.all_filters","nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter"]
-
- #NeutronPhysicalDevMappings: "datacentre:ens20f2"
-
- # Number of VFs that needs to be configured for a physical interface
- #NeutronSriovNumVFs: "ens20f2:5"
-
- #NovaPCIPassthrough:
- # - devname: "ens20f2"
- # physical_network: "datacentre"
+ NovaSchedulerDefaultFilters: ['RetryFilter','AvailabilityZoneFilter','RamFilter','ComputeFilter','ComputeCapabilitiesFilter','ImagePropertiesFilter','ServerGroupAntiAffinityFilter','ServerGroupAffinityFilter','PciPassthroughFilter']
+ NovaSchedulerAvailableFilters: ["nova.scheduler.filters.all_filters","nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter"]
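The per-NIC settings removed above still have to be provided somewhere for SR-IOV to function; a hedged sketch of a deployment-specific environment file that reuses the example values from the deleted comments:

parameter_defaults:
  # Map the physical network to the SR-IOV capable interface
  NeutronPhysicalDevMappings: "datacentre:ens20f2"
  # Number of VFs to configure on that interface
  NeutronSriovNumVFs: "ens20f2:5"
  NovaPCIPassthrough:
    - devname: "ens20f2"
      physical_network: "datacentre"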
diff --git a/environments/puppet-ceph-external.yaml b/environments/puppet-ceph-external.yaml
index 7718b821..65bbc3e4 100644
--- a/environments/puppet-ceph-external.yaml
+++ b/environments/puppet-ceph-external.yaml
@@ -27,6 +27,7 @@ parameter_defaults:
# following as needed.
NovaRbdPoolName: vms
CinderRbdPoolName: volumes
+ CinderBackupRbdPoolName: backups
GlanceRbdPoolName: images
GnocchiRbdPoolName: metrics
CephClientUserName: openstack
diff --git a/environments/services-docker/neutron-opendaylight.yaml b/environments/services-docker/neutron-opendaylight.yaml
index b749cb69..873957ae 100644
--- a/environments/services-docker/neutron-opendaylight.yaml
+++ b/environments/services-docker/neutron-opendaylight.yaml
@@ -10,7 +10,6 @@ resource_registry:
parameter_defaults:
NeutronEnableForceMetadata: true
+ NeutronPluginExtensions: 'port_security'
NeutronMechanismDrivers: 'opendaylight_v2'
NeutronServicePlugins: 'odl-router_v2,trunk'
- DockerNeutronApiImage: 'centos-binary-neutron-server-opendaylight:latest'
- DockerNeutronConfigImage: 'centos-binary-neutron-server-opendaylight:latest'
diff --git a/environments/services-docker/neutron-ovn-ha.yaml b/environments/services-docker/neutron-ovn-ha.yaml
new file mode 100644
index 00000000..7d3c1d19
--- /dev/null
+++ b/environments/services-docker/neutron-ovn-ha.yaml
@@ -0,0 +1,28 @@
+# A Heat environment that can be used to deploy OVN services with HA OVN DB servers managed by Pacemaker.
+resource_registry:
+ OS::TripleO::Docker::NeutronMl2PluginBase: ../../puppet/services/neutron-plugin-ml2-ovn.yaml
+ OS::TripleO::Services::OVNController: ../../docker/services/ovn-controller.yaml
+ OS::TripleO::Services::OVNDBs: ../../docker/services/pacemaker/ovn-dbs.yaml
+# Disabling Neutron services that overlap with OVN
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+
+
+parameter_defaults:
+ NeutronMechanismDrivers: ovn
+ OVNVifType: ovs
+ OVNNeutronSyncMode: log
+ OVNQosDriver: ovn-qos
+ OVNTunnelEncapType: geneve
+ NeutronEnableDHCPAgent: false
+ NeutronTypeDrivers: 'geneve,vxlan,vlan,flat'
+ NeutronNetworkType: 'geneve'
+ NeutronServicePlugins: 'qos,ovn-router'
+ NeutronVniRanges: ['1:65536', ]
+ # TODO (numans) - This is temporary and needs to be handled in tripleo-common
+ DockerNeutronApiImage: 'tripleoupstream/centos-binary-neutron-server-ovn:latest'
+ DockerNeutronConfigImage: 'tripleoupstream/centos-binary-neutron-server-ovn:latest'
diff --git a/environments/services-docker/neutron-sriov.yaml b/environments/services-docker/neutron-sriov.yaml
new file mode 100644
index 00000000..91725868
--- /dev/null
+++ b/environments/services-docker/neutron-sriov.yaml
@@ -0,0 +1,12 @@
+# EXPERIMENTAL: The configuration enabled by this environment is not considered
+# production-ready.
+#
+# A Heat environment that can be used to enable SR-IOV support in neutron.
+resource_registry:
+ OS::TripleO::Services::NeutronSriovAgent: ../../docker/services/neutron-sriov-agent.yaml
+ OS::TripleO::Services::NeutronSriovHostConfig: ../../puppet/services/neutron-sriov-host-config.yaml
+
+parameter_defaults:
+ NeutronMechanismDrivers: ['sriovnicswitch','openvswitch']
+ NovaSchedulerDefaultFilters: ['RetryFilter','AvailabilityZoneFilter','RamFilter','ComputeFilter','ComputeCapabilitiesFilter','ImagePropertiesFilter','ServerGroupAntiAffinityFilter','ServerGroupAffinityFilter','PciPassthroughFilter']
+ NovaSchedulerAvailableFilters: ["nova.scheduler.filters.all_filters","nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter"]
diff --git a/environments/services/neutron-lbaasv2.yaml b/environments/services/neutron-lbaasv2.yaml
index 385bb2fe..ca42d20d 100644
--- a/environments/services/neutron-lbaasv2.yaml
+++ b/environments/services/neutron-lbaasv2.yaml
@@ -8,7 +8,7 @@
# - OVS: neutron.agent.linux.interface.OVSInterfaceDriver
# - LinuxBridges: neutron.agent.linux.interface.BridgeInterfaceDriver
resource_registry:
- OS::TripleO::Services::NeutronLbaasv2Agent: ../puppet/services/neutron-lbaas.yaml
+ OS::TripleO::Services::NeutronLbaasv2Agent: ../../puppet/services/neutron-lbaas.yaml
parameter_defaults:
NeutronLbaasInterfaceDriver: "neutron.agent.linux.interface.OVSInterfaceDriver"
diff --git a/environments/ssl/enable-internal-tls.yaml b/environments/ssl/enable-internal-tls.yaml
new file mode 100644
index 00000000..287ed19f
--- /dev/null
+++ b/environments/ssl/enable-internal-tls.yaml
@@ -0,0 +1,36 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable SSL on OpenStack Internal Endpoints
+# description: |
+# A Heat environment file which can be used to enable TLS for the internal
+# network via certmonger
+parameter_defaults:
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ #
+ # Type: boolean
+ EnableInternalTLS: True
+
+ # Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host.
+ # Type: string
+ RabbitClientUseSSL: True
+
+ # Extra properties or metadata passed to Nova for the created nodes in the overcloud. It's accessible via the Nova metadata API.
+ # Type: json
+ ServerMetadata:
+ ipa_enroll: True
+
+ # *********************
+ # End static parameters
+ # *********************
+resource_registry:
+ OS::TripleO::ServiceServerMetadataHook: ../extraconfig/nova_metadata/krb-service-principals.yaml
+ OS::TripleO::Services::CertmongerUser: ../puppet/services/certmonger-user.yaml
+ OS::TripleO::Services::HAProxyInternalTLS: ../puppet/services/haproxy-internal-tls-certmonger.yaml
+ OS::TripleO::Services::TLSProxyBase: ../puppet/services/apache.yaml
diff --git a/environments/ssl/enable-tls.yaml b/environments/ssl/enable-tls.yaml
index c8ed2bd2..e5ef8563 100644
--- a/environments/ssl/enable-tls.yaml
+++ b/environments/ssl/enable-tls.yaml
@@ -11,7 +11,6 @@
# must also be used.
parameter_defaults:
# The content of the SSL certificate (without Key) in PEM format.
- # Mandatory. This parameter must be set by the user.
# Type: string
SSLCertificate: |
The contents of your certificate go here
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index 367f50d7..eb004070 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -383,3 +383,65 @@ worfklow. Exiting."
exit 1
fi
}
+
+# This function tries to resolve an RPM dependency issue that can arise when
+# updating ceph packages on nodes that do not run the ceph-osd service. These
+# nodes do not require the ceph-osd package, and updates will fail if the
+# ceph-osd package cannot be updated because it's not available in any enabled
+# repo. The dependency issue is resolved by removing the ceph-osd package from
+# nodes that don't require it.
+#
+# No change is made to nodes that use the ceph-osd service (e.g. ceph storage
+# nodes, and hyperconverged nodes running ceph-osd and compute services). The
+# ceph-osd package is left in place, and the currently enabled repos will be
+# used to update all ceph packages.
+function yum_pre_update {
+ echo "Checking for ceph-osd dependency issues"
+
+ # No need to proceed if the ceph-osd package isn't installed
+ if ! rpm -q ceph-osd >/dev/null 2>&1; then
+ echo "ceph-osd package is not installed"
+ return
+ fi
+
+ # Do not proceed if there's any sign that the ceph-osd package is in use:
+ # - Are there OSD entries in /var/lib/ceph/osd?
+ # - Are any ceph-osd processes running?
+ # - Are there any ceph data disks (as identified by 'ceph-disk')?
+ if [ -n "$(ls -A /var/lib/ceph/osd 2>/dev/null)" ]; then
+ echo "ceph-osd package is required (there are OSD entries in /var/lib/ceph/osd)"
+ return
+ fi
+
+ if [ "$(pgrep -xc ceph-osd)" != "0" ]; then
+ echo "ceph-osd package is required (there are ceph-osd processes running)"
+ return
+ fi
+
+ if ceph-disk list |& grep -q "ceph data"; then
+ echo "ceph-osd package is required (ceph data disks detected)"
+ return
+ fi
+
+ # Get a list of all ceph packages available from the currently enabled
+ # repos. Use "--showduplicates" to ensure the list includes installed
+ # packages that happen to be up to date.
+ local ceph_pkgs="$(yum list available --showduplicates 'ceph-*' |& awk '/^ceph/ {print $1}' | sort -u)"
+
+ # No need to proceed if no ceph packages are available from the currently
+ # enabled repos.
+ if [ -z "$ceph_pkgs" ]; then
+ echo "ceph packages are not available from any enabled repo"
+ return
+ fi
+
+ # No need to proceed if the ceph-osd package *is* available
+ if [[ $ceph_pkgs =~ ceph-osd ]]; then
+ echo "ceph-osd package is available from an enabled repo"
+ return
+ fi
+
+ echo "ceph-osd package is not required, but is preventing updates to other ceph packages"
+ echo "Removing ceph-osd package to allow updates to other ceph packages"
+ yum -y remove ceph-osd
+}
diff --git a/extraconfig/tasks/tripleo_upgrade_node.sh b/extraconfig/tasks/tripleo_upgrade_node.sh
index baf838e4..95de99be 100644
--- a/extraconfig/tasks/tripleo_upgrade_node.sh
+++ b/extraconfig/tasks/tripleo_upgrade_node.sh
@@ -26,52 +26,8 @@ fi
DEBUG="true"
SCRIPT_NAME=$(basename $0)
$(declare -f log_debug)
-$(declare -f manage_systemd_service)
-$(declare -f systemctl_swift)
-$(declare -f special_case_ovs_upgrade_if_needed)
-# pin nova messaging +-1 for the nova-compute service
-if [[ -n \$NOVA_COMPUTE ]]; then
- crudini --set /etc/nova/nova.conf upgrade_levels compute auto
-fi
-
-special_case_ovs_upgrade_if_needed
-
-if [[ -n \$SWIFT_STORAGE ]]; then
- systemctl_swift stop
-fi
-
-yum -y update
-
-if [[ -n \$SWIFT_STORAGE ]]; then
- systemctl_swift start
-fi
-# Due to bug#1640177 we need to restart compute agent
-if [[ -n \$NOVA_COMPUTE ]]; then
- log_debug "Restarting openstack ceilometer agent compute"
- systemctl restart openstack-ceilometer-compute
- yum install -y openstack-nova-migration
- # https://bugs.launchpad.net/tripleo/+bug/1707926 stop&disable libvirtd
- log_debug "Stop and disable libvirtd service for upgrade to containers"
- systemctl stop libvirtd
- systemctl disable libvirtd
- log_debug "Stop and disable openstack-nova-compute for upgrade to containers"
- systemctl stop openstack-nova-compute
- systemctl disable openstack-nova-compute
-fi
-
-# Apply puppet manifest to converge just right after the ${ROLE} upgrade
-$(declare -f run_puppet)
-for step in 1 2 3 4 5 6; do
- log_debug "Running puppet step \$step for ${ROLE}"
- if ! run_puppet /root/${ROLE}_puppet_config.pp ${ROLE} \${step}; then
- log_debug "Puppet failure at step \${step}"
- exit 1
- fi
- log_debug "Completed puppet step \$step"
-done
-
-log_debug "TripleO upgrade run completed."
+log_debug "$UPGRADE_SCRIPT has completed - moving onto ansible playbooks"
ENDOFCAT
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index a2a04e8e..c0c92a60 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -85,6 +85,9 @@ fi
# special case https://bugs.launchpad.net/tripleo/+bug/1635205 +bug/1669714
special_case_ovs_upgrade_if_needed
+# Resolve any RPM dependency issues before attempting the update
+yum_pre_update
+
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Pacemaker running, stopping cluster node and doing full package update"
node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
diff --git a/j2_excludes.yaml b/j2_excludes.yaml
index 74fb3bb1..71d3d6e8 100644
--- a/j2_excludes.yaml
+++ b/j2_excludes.yaml
@@ -5,9 +5,4 @@
# name:
# - puppet/cephstorage-role.yaml
name:
- - network/internal_api_v6.yaml
- - network/external_v6.yaml
- - network/storage_v6.yaml
- - network/storage_mgmt_v6.yaml
- - network/tenant_v6.yaml
- - network/management_v6.yaml
+ - None
diff --git a/network/external_v6.yaml b/network/external_v6.yaml
deleted file mode 100644
index 9d1c3d00..00000000
--- a/network/external_v6.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-heat_template_version: pike
-
-description: >
- External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc.
-
-parameters:
- # the defaults here work for static IP assignment (IPAM) only
- ExternalNetCidr:
- # OpenStack uses the EUI-64 address format, which requires a /64 prefix
- default: '2001:db8:fd00:1000::/64'
- description: Cidr for the external network.
- type: string
- ExternalNetValueSpecs:
- default: {'provider:physical_network': 'external', 'provider:network_type': 'flat'}
- description: Value specs for the external network.
- type: json
- ExternalNetAdminStateUp:
- default: false
- description: The admin state of the network.
- type: boolean
- ExternalNetShared:
- default: false
- description: Whether this network is shared across all tenants.
- type: boolean
- ExternalNetName:
- default: external
- description: The name of the external network.
- type: string
- ExternalSubnetName:
- default: external_subnet
- description: The name of the external subnet in Neutron.
- type: string
- ExternalAllocationPools:
- default: [{'start': '2001:db8:fd00:1000::10', 'end': '2001:db8:fd00:1000:ffff:ffff:ffff:fffe'}]
- description: Ip allocation pool range for the external network.
- type: json
- IPv6AddressMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 address mode
- type: string
- IPv6RAMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 router advertisement mode
- type: string
- ExternalInterfaceDefaultRoute:
- default: '2001:db8:fd00:1000::1'
- description: default route for the external network
- type: string
-
-resources:
- ExternalNetwork:
- type: OS::Neutron::Net
- properties:
- admin_state_up: {get_param: ExternalNetAdminStateUp}
- name: {get_param: ExternalNetName}
- shared: {get_param: ExternalNetShared}
- value_specs: {get_param: ExternalNetValueSpecs}
-
- ExternalSubnet:
- type: OS::Neutron::Subnet
- properties:
- ip_version: 6
- ipv6_address_mode: {get_param: IPv6AddressMode}
- ipv6_ra_mode: {get_param: IPv6RAMode}
- cidr: {get_param: ExternalNetCidr}
- name: {get_param: ExternalSubnetName}
- network: {get_resource: ExternalNetwork}
- allocation_pools: {get_param: ExternalAllocationPools}
- gateway_ip: {get_param: ExternalInterfaceDefaultRoute}
-
-outputs:
- OS::stack_id:
- description: Neutron external network
- value: {get_resource: ExternalNetwork}
- subnet_cidr:
- value: {get_attr: [ExternalSubnet, cidr]}
diff --git a/network/internal_api_v6.yaml b/network/internal_api_v6.yaml
deleted file mode 100644
index 6a0912e2..00000000
--- a/network/internal_api_v6.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-heat_template_version: pike
-
-description: >
- Internal API network. Used for most APIs, Database, RPC.
-
-parameters:
- # the defaults here work for static IP assignment (IPAM) only
- InternalApiNetCidr:
- # OpenStack uses the EUI-64 address format, which requires a /64 prefix
- default: 'fd00:fd00:fd00:2000::/64'
- description: Cidr for the internal_api network.
- type: string
- InternalApiNetValueSpecs:
- default: {'provider:physical_network': 'internal_api', 'provider:network_type': 'flat'}
- description: Value specs for the internal_api network.
- type: json
- InternalApiNetAdminStateUp:
- default: false
- description: The admin state of the network.
- type: boolean
- InternalApiNetShared:
- default: false
- description: Whether this network is shared across all tenants.
- type: boolean
- InternalApiNetName:
- default: internal_api
- description: The name of the internal_api network.
- type: string
- InternalApiSubnetName:
- default: internal_api_subnet
- description: The name of the internal_api subnet in Neutron.
- type: string
- InternalApiAllocationPools:
- default: [{'start': 'fd00:fd00:fd00:2000::10', 'end': 'fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe'}]
- description: Ip allocation pool range for the internal_api network.
- type: json
- IPv6AddressMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 address mode
- type: string
- IPv6RAMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 router advertisement mode
- type: string
-
-resources:
- InternalApiNetwork:
- type: OS::Neutron::Net
- properties:
- admin_state_up: {get_param: InternalApiNetAdminStateUp}
- name: {get_param: InternalApiNetName}
- shared: {get_param: InternalApiNetShared}
- value_specs: {get_param: InternalApiNetValueSpecs}
-
- InternalApiSubnet:
- type: OS::Neutron::Subnet
- properties:
- ip_version: 6
- ipv6_address_mode: {get_param: IPv6AddressMode}
- ipv6_ra_mode: {get_param: IPv6RAMode}
- cidr: {get_param: InternalApiNetCidr}
- name: {get_param: InternalApiSubnetName}
- network: {get_resource: InternalApiNetwork}
- allocation_pools: {get_param: InternalApiAllocationPools}
- gateway_ip: null
-
-outputs:
- OS::stack_id:
- description: Neutron internal network
- value: {get_resource: InternalApiNetwork}
- subnet_cidr:
- value: {get_attr: [InternalApiSubnet, cidr]}
diff --git a/network/management_v6.yaml b/network/management_v6.yaml
deleted file mode 100644
index 2eb8c876..00000000
--- a/network/management_v6.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-heat_template_version: pike
-
-description: >
- Management network. System administration, SSH, DNS, NTP, etc. This network
- would usually be the default gateway for the non-controller nodes.
-
-parameters:
- # the defaults here work for static IP assignment (IPAM) only
- ManagementNetCidr:
- default: 'fd00:fd00:fd00:6000::/64'
- description: Cidr for the management network.
- type: string
- ManagementNetValueSpecs:
- default: {'provider:physical_network': 'management', 'provider:network_type': 'flat'}
- description: Value specs for the management network.
- type: json
- ManagementNetAdminStateUp:
- default: false
- description: The admin state of the network.
- type: boolean
- ManagementNetShared:
- default: false
- description: Whether this network is shared across all tenants.
- type: boolean
- ManagementNetName:
- default: management
- description: The name of the management network.
- type: string
- ManagementSubnetName:
- default: management_subnet
- description: The name of the management subnet in Neutron.
- type: string
- ManagementAllocationPools:
- default: [{'start': 'fd00:fd00:fd00:6000::10', 'end': 'fd00:fd00:fd00:6000:ffff:ffff:ffff:fffe'}]
- description: Ip allocation pool range for the management network.
- type: json
- IPv6AddressMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 address mode
- type: string
- IPv6RAMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 router advertisement mode
- type: string
-
-resources:
- ManagementNetwork:
- type: OS::Neutron::Net
- properties:
- admin_state_up: {get_param: ManagementNetAdminStateUp}
- name: {get_param: ManagementNetName}
- shared: {get_param: ManagementNetShared}
- value_specs: {get_param: ManagementNetValueSpecs}
-
- ManagementSubnet:
- type: OS::Neutron::Subnet
- properties:
- ip_version: 6
- ipv6_address_mode: {get_param: IPv6AddressMode}
- ipv6_ra_mode: {get_param: IPv6RAMode}
- cidr: {get_param: ManagementNetCidr}
- name: {get_param: ManagementSubnetName}
- network: {get_resource: ManagementNetwork}
- allocation_pools: {get_param: ManagementAllocationPools}
-
-outputs:
- OS::stack_id:
- description: Neutron management network
- value: {get_resource: ManagementNetwork}
- subnet_cidr:
- value: {get_attr: [ManagementSubnet, cidr]}
diff --git a/network/network.j2 b/network/network.j2
new file mode 100644
index 00000000..61a5b57c
--- /dev/null
+++ b/network/network.j2
@@ -0,0 +1,105 @@
+heat_template_version: pike
+
+description: >
+ {{network.name}} network definition (automatically generated).
+
+parameters:
+ # the defaults here work for static IP assignment (IPAM) only
+ {{network.name}}NetCidr:
+{%- if network.ipv6 or ipv6_override %}
+ default: "{{network.ipv6_subnet|default(network.ip_subnet|default(""))}}"
+{%- else %}
+ default: "{{network.ip_subnet|default("")}}"
+{%- endif %}
+ description: Cidr for the {{network.name_lower}} network.
+ type: string
+ {{network.name}}NetValueSpecs:
+ default: {'provider:physical_network': '{{network.name_lower}}', 'provider:network_type': 'flat'}
+ description: Value specs for the {{network.name_lower}} network.
+ type: json
+{%- if not ":" in network.ip_subnet and not network.ipv6 and not ipv6_override %}
+ {{network.name}}NetEnableDHCP:
+ default: false
+ description: Whether to enable DHCP on the associated subnet (IPv4 only).
+ type: boolean
+{%- endif %}
+ {{network.name}}NetAdminStateUp:
+ default: false
+ description: The admin state of the network.
+ type: boolean
+ {{network.name}}NetShared:
+ default: false
+ description: Whether this network is shared across all tenants.
+ type: boolean
+ {{network.name}}NetName:
+ default: {{network.name_lower}}
+ description: The name of the {{network.name_lower}} network.
+ type: string
+ {{network.name}}SubnetName:
+ default: {{network.name_lower}}_subnet
+ description: The name of the {{network.name_lower}} subnet in Neutron.
+ type: string
+ {{network.name}}AllocationPools:
+{%- if ":" in network.ip_subnet or network.ipv6 or ipv6_override %}
+ default: {{network.ipv6_allocation_pools|default(network.allocation_pools|default([]))}}
+{%- else %}
+ default: {{network.allocation_pools|default([])}}
+{%- endif %}
+ description: Ip allocation pool range for the {{network.name_lower}} network.
+ type: json
+{%- if ":" in network.ip_subnet or network.ipv6 or ipv6_override %}
+ IPv6AddressMode:
+ default: dhcpv6-stateful
+ description: Neutron subnet IPv6 address mode
+ type: string
+ IPv6RAMode:
+ default: dhcpv6-stateful
+ description: Neutron subnet IPv6 router advertisement mode
+ type: string
+{%- endif %}
+ {{network.name}}InterfaceDefaultRoute:
+{%- if network.ipv6 or ipv6_override %}
+ default: "{{network.gateway_ipv6|default(network.gateway_ip|default(''))}}"
+{%- else %}
+ default: "{{network.gateway_ip|default('')}}"
+{%- endif %}
+ description: default route for the {{network.name_lower}} network
+ type: string
+{%- if network.vlan %}
+ {{network.name}}NetworkVlanID:
+ default: {{network.vlan}}
+ description: Vlan ID for the {{network.name}} network traffic.
+ type: number
+{%- endif %}
+
+resources:
+ {{network.name}}Network:
+ type: OS::Neutron::Net
+ properties:
+ admin_state_up: {get_param: {{network.name}}NetAdminStateUp}
+ name: {get_param: {{network.name}}NetName}
+ shared: {get_param: {{network.name}}NetShared}
+ value_specs: {get_param: {{network.name}}NetValueSpecs}
+
+ {{network.name}}Subnet:
+ type: OS::Neutron::Subnet
+ properties:
+{%- if ":" in network.ip_subnet or network.ipv6 or ipv6_override %}
+ ip_version: 6
+ ipv6_address_mode: {get_param: IPv6AddressMode}
+ ipv6_ra_mode: {get_param: IPv6RAMode}
+{%- else %}
+ enable_dhcp: {get_param: {{network.name}}NetEnableDHCP}
+{%- endif %}
+ cidr: {get_param: {{network.name}}NetCidr}
+ name: {get_param: {{network.name}}SubnetName}
+ network: {get_resource: {{network.name}}Network}
+ allocation_pools: {get_param: {{network.name}}AllocationPools}
+ gateway_ip: {get_param: {{network.name}}InterfaceDefaultRoute}
+
+outputs:
+ OS::stack_id:
+ description: {{network.name_lower}} network
+ value: {get_resource: {{network.name}}Network}
+ subnet_cidr:
+ value: {get_attr: [{{network.name}}Subnet, cidr]}
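For reference, a sketch of the kind of network_data.yaml entry the template above consumes; the keys are the ones the Jinja code reads (name, name_lower, enabled, vip, vlan, ip_subnet, ipv6_subnet and the allocation pools). The VLAN ID and IPv6 values are borrowed from the InternalApi defaults elsewhere in this change, while the IPv4 subnet and pool are placeholders:

- name: InternalApi
  name_lower: internal_api
  enabled: true
  vip: true
  vlan: 20
  ip_subnet: '172.16.2.0/24'
  allocation_pools: [{'start': '172.16.2.4', 'end': '172.16.2.250'}]
  ipv6_subnet: 'fd00:fd00:fd00:2000::/64'
  ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:2000::10', 'end': 'fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe'}]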
diff --git a/network/network.network.j2.yaml b/network/network.network.j2.yaml
index 29d58cd5..d9ff6169 100644
--- a/network/network.network.j2.yaml
+++ b/network/network.network.j2.yaml
@@ -1,91 +1 @@
-heat_template_version: pike
-
-description: >
- {{network.name}} network definition (automatically generated).
-
-parameters:
- # the defaults here work for static IP assignment (IPAM) only
- {{network.name}}NetCidr:
- default: {{network.ip_subnet|default("")}}
- description: Cidr for the {{network.name_lower}} network.
- type: string
- {{network.name}}NetValueSpecs:
- default: {'provider:physical_network': '{{network.name_lower}}', 'provider:network_type': 'flat'}
- description: Value specs for the {{network.name_lower}} network.
- type: json
- {{network.name}}NetAdminStateUp:
- default: false
- description: The admin state of the network.
- type: boolean
- {{network.name}}NetEnableDHCP:
- default: false
- description: Whether to enable DHCP on the associated subnet.
- type: boolean
- {{network.name}}NetShared:
- default: false
- description: Whether this network is shared across all tenants.
- type: boolean
- {{network.name}}NetName:
- default: {{network.name_lower}}
- description: The name of the {{network.name_lower}} network.
- type: string
- {{network.name}}SubnetName:
- default: {{network.name_lower}}_subnet
- description: The name of the {{network.name_lower}} subnet in Neutron.
- type: string
- {{network.name}}AllocationPools:
- default: {{network.allocation_pools|default([])}}
- description: Ip allocation pool range for the {{network.name_lower}} network.
- type: json
- {{network.name}}InterfaceDefaultRoute:
- default: {{network.gateway_ip|default('""')}}
- description: default route for the {{network.name_lower}} network
- type: string
-{%- if network.vlan %}
- {{network.name}}NetworkVlanID:
- default: {{network.vlan}}
- description: Vlan ID for the {{network.name}} network traffic.
- type: number
-{%- endif %}
-{%- if network.ipv6 %}
- IPv6AddressMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 address mode
- type: string
- IPv6RAMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 router advertisement mode
- type: string
-{%- endif %}
-
-resources:
- {{network.name}}Network:
- type: OS::Neutron::Net
- properties:
- admin_state_up: {get_param: {{network.name}}NetAdminStateUp}
- name: {get_param: {{network.name}}NetName}
- shared: {get_param: {{network.name}}NetShared}
- value_specs: {get_param: {{network.name}}NetValueSpecs}
-
- {{network.name}}Subnet:
- type: OS::Neutron::Subnet
- properties:
- cidr: {get_param: {{network.name}}NetCidr}
- name: {get_param: {{network.name}}SubnetName}
- network: {get_resource: {{network.name}}Network}
- allocation_pools: {get_param: {{network.name}}AllocationPools}
- gateway_ip: {get_param: {{network.name}}InterfaceDefaultRoute}
-{%- if network.ipv6 %}
- ip_version: 6
- ipv6_address_mode: {get_param: IPv6AddressMode}
- ipv6_ra_mode: {get_param: IPv6RAMode}
-{%- else %}
- enable_dhcp: {get_param: {{network.name}}NetEnableDHCP}
-{%- endif %}
-
-outputs:
- OS::stack_id:
- description: {{network.name_lower}} network
- value: {get_resource: {{network.name}}Network}
- subnet_cidr:
- value: {get_attr: [{{network.name}}Subnet, cidr]}
+{% include 'network.j2' %}
diff --git a/network/network_v6.network.j2.yaml b/network/network_v6.network.j2.yaml
new file mode 100644
index 00000000..809d145c
--- /dev/null
+++ b/network/network_v6.network.j2.yaml
@@ -0,0 +1,2 @@
+{% set ipv6_override = true -%}
+{% include 'network.j2' %}
diff --git a/network/storage_mgmt_v6.yaml b/network/storage_mgmt_v6.yaml
deleted file mode 100644
index 7ed4c92e..00000000
--- a/network/storage_mgmt_v6.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-heat_template_version: pike
-
-description: >
- Storage management network. Storage replication, etc.
-
-parameters:
- # the defaults here work for static IP assignment (IPAM) only
- StorageMgmtNetCidr:
- # OpenStack uses the EUI-64 address format, which requires a /64 prefix
- default: 'fd00:fd00:fd00:4000::/64'
- description: Cidr for the storage_mgmt network.
- type: string
- StorageMgmtNetValueSpecs:
- default: {'provider:physical_network': 'storage_mgmt', 'provider:network_type': 'flat'}
- description: Value specs for the storage_mgmt network.
- type: json
- StorageMgmtNetAdminStateUp:
- default: false
- description: The admin state of the network.
- type: boolean
- StorageMgmtNetShared:
- default: false
- description: Whether this network is shared across all tenants.
- type: boolean
- StorageMgmtNetName:
- default: storage_mgmt
- description: The name of the storage_mgmt network.
- type: string
- StorageMgmtSubnetName:
- default: storage_mgmt_subnet
- description: The name of the storage_mgmt subnet in Neutron.
- type: string
- StorageMgmtAllocationPools:
- default: [{'start': 'fd00:fd00:fd00:4000::10', 'end': 'fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe'}]
- description: Ip allocation pool range for the storage_mgmt network.
- type: json
- IPv6AddressMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 address mode
- type: string
- IPv6RAMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 router advertisement mode
- type: string
-
-resources:
- StorageMgmtNetwork:
- type: OS::Neutron::Net
- properties:
- admin_state_up: {get_param: StorageMgmtNetAdminStateUp}
- name: {get_param: StorageMgmtNetName}
- shared: {get_param: StorageMgmtNetShared}
- value_specs: {get_param: StorageMgmtNetValueSpecs}
-
- StorageMgmtSubnet:
- type: OS::Neutron::Subnet
- properties:
- ip_version: 6
- ipv6_address_mode: {get_param: IPv6AddressMode}
- ipv6_ra_mode: {get_param: IPv6RAMode}
- cidr: {get_param: StorageMgmtNetCidr}
- name: {get_param: StorageMgmtSubnetName}
- network: {get_resource: StorageMgmtNetwork}
- allocation_pools: {get_param: StorageMgmtAllocationPools}
- gateway_ip: null
-
-outputs:
- OS::stack_id:
- description: Neutron storage management network
- value: {get_resource: StorageMgmtNetwork}
- subnet_cidr:
- value: {get_attr: [StorageMgmtSubnet, cidr]}
diff --git a/network/storage_v6.yaml b/network/storage_v6.yaml
deleted file mode 100644
index 51edd4b3..00000000
--- a/network/storage_v6.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-heat_template_version: pike
-
-description: >
- Storage network.
-
-parameters:
- # the defaults here work for static IP assignment (IPAM) only
- StorageNetCidr:
- # OpenStack uses the EUI-64 address format, which requires a /64 prefix
- default: 'fd00:fd00:fd00:3000::/64'
- description: Cidr for the storage network.
- type: string
- StorageNetValueSpecs:
- default: {'provider:physical_network': 'storage', 'provider:network_type': 'flat'}
- description: Value specs for the storage network.
- type: json
- StorageNetAdminStateUp:
- default: false
- description: The admin state of the network.
- type: boolean
- StorageNetShared:
- default: false
- description: Whether this network is shared across all tenants.
- type: boolean
- StorageNetName:
- default: storage
- description: The name of the storage network.
- type: string
- StorageSubnetName:
- default: storage_subnet
- description: The name of the storage subnet in Neutron.
- type: string
- StorageAllocationPools:
- default: [{'start': 'fd00:fd00:fd00:3000::10', 'end': 'fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe'}]
- description: Ip allocation pool range for the storage network.
- type: json
- IPv6AddressMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 address mode
- type: string
- IPv6RAMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 router advertisement mode
- type: string
-
-resources:
- StorageNetwork:
- type: OS::Neutron::Net
- properties:
- admin_state_up: {get_param: StorageNetAdminStateUp}
- name: {get_param: StorageNetName}
- shared: {get_param: StorageNetShared}
- value_specs: {get_param: StorageNetValueSpecs}
-
- StorageSubnet:
- type: OS::Neutron::Subnet
- properties:
- ip_version: 6
- ipv6_address_mode: {get_param: IPv6AddressMode}
- ipv6_ra_mode: {get_param: IPv6RAMode}
- cidr: {get_param: StorageNetCidr}
- name: {get_param: StorageSubnetName}
- network: {get_resource: StorageNetwork}
- allocation_pools: {get_param: StorageAllocationPools}
- gateway_ip: null
-
-outputs:
- OS::stack_id:
- description: Neutron storage network
- value: {get_resource: StorageNetwork}
- subnet_cidr:
- value: {get_attr: [StorageSubnet, cidr]}
diff --git a/network/tenant_v6.yaml b/network/tenant_v6.yaml
deleted file mode 100644
index 9f139cb1..00000000
--- a/network/tenant_v6.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-heat_template_version: pike
-
-description: >
- Tenant IPv6 network.
-
-parameters:
- # the defaults here work for static IP assignment (IPAM) only
- TenantNetCidr:
- # OpenStack uses the EUI-64 address format, which requires a /64 prefix
- default: 'fd00:fd00:fd00:5000::/64'
- description: Cidr for the tenant network.
- type: string
- TenantNetValueSpecs:
- default: {'provider:physical_network': 'tenant', 'provider:network_type': 'flat'}
- description: Value specs for the tenant network.
- type: json
- TenantNetAdminStateUp:
- default: false
- description: The admin state of the network.
- type: boolean
- TenantNetShared:
- default: false
- description: Whether this network is shared across all tenants.
- type: boolean
- TenantNetName:
- default: tenant
- description: The name of the tenant network.
- type: string
- TenantSubnetName:
- default: tenant_subnet
- description: The name of the tenant subnet in Neutron.
- type: string
- TenantAllocationPools:
- default: [{'start': 'fd00:fd00:fd00:5000::10', 'end': 'fd00:fd00:fd00:5000:ffff:ffff:ffff:fffe'}]
- description: Ip allocation pool range for the tenant network.
- type: json
- IPv6AddressMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 address mode
- type: string
- IPv6RAMode:
- default: dhcpv6-stateful
- description: Neutron subnet IPv6 router advertisement mode
- type: string
-
-resources:
- TenantNetwork:
- type: OS::Neutron::Net
- properties:
- admin_state_up: {get_param: TenantNetAdminStateUp}
- name: {get_param: TenantNetName}
- shared: {get_param: TenantNetShared}
- value_specs: {get_param: TenantNetValueSpecs}
-
- TenantSubnet:
- type: OS::Neutron::Subnet
- properties:
- ip_version: 6
- ipv6_address_mode: {get_param: IPv6AddressMode}
- ipv6_ra_mode: {get_param: IPv6RAMode}
- cidr: {get_param: TenantNetCidr}
- name: {get_param: TenantSubnetName}
- network: {get_resource: TenantNetwork}
- allocation_pools: {get_param: TenantAllocationPools}
- gateway_ip: null
-
-outputs:
- OS::stack_id:
- description: Neutron tenant network
- value: {get_resource: TenantNetwork}
- subnet_cidr:
- value: {get_attr: [TenantSubnet, cidr]}
diff --git a/network_data.yaml b/network_data.yaml
index 6ad37dfe..bce82cb2 100644
--- a/network_data.yaml
+++ b/network_data.yaml
@@ -6,19 +6,22 @@
# name_lower: lowercase version of name used for filenames
# (optional, defaults to name.lower())
# enabled: Is the network enabled (optional, defaults to true)
-# ipv6: Does this network use IPv6 IPs? (optional, defaults to false)
-# (optional, may use parameter defaults in environment to set)
# vlan: vlan for the network (optional)
# vip: Enable creation of a virtual IP on this network
-# [TODO] (dsneddon@redhat.com) - Enable dynamic creation of VIP ports,
-# to support VIPs on non-default networks.
-# See https://bugs.launchpad.net/tripleo/+bug/1667104
-# ip_subnet: IP/CIDR, e.g. '192.168.24.0/24' (optional, may use parameter defaults)
-# allocation_pools: IP range list e.g. [{'start':'10.0.0.4', 'end':'10.0.0.250}]
+# ip_subnet: IP/CIDR, e.g. '192.168.24.0/24' or '2001:db8:fd00:1000::/64'
+# (optional, may use parameter defaults instead)
+# allocation_pools: IP range list e.g. [{'start':'10.0.0.4', 'end':'10.0.0.250'}]
# gateway_ip: gateway for the network (optional, may use parameter defaults)
-# NOTE: IP-related values set parameter defaults in templates, may be overridden.
-# compat_name: for existing stack you may need to override the default transformation
-# for the resource's name.
+# ipv6_subnet: Optional, sets a default IPv6 subnet if IPv4 is already defined.
+# ipv6_allocation_pools: Set default IPv6 allocation pools if IPv4 allocation pools
+# are already defined.
+# gateway_ipv6: Set an IPv6 gateway if an IPv4 gateway is already defined.
+# ipv6: If ip_subnet is not defined, this specifies that the network is IPv6-only.
+# NOTE: IP-related values set parameter defaults in templates and may be overridden,
+# either by operators or e.g. in environments/network-isolation-v6.yaml, where we
+# set some default IPv6 addresses.
+# compat_name: for an existing stack you may need to override the default
+# transformation for the resource's name.
#
# Example:
# - name Example
@@ -27,8 +30,17 @@
# allocation_pools: [{'start': '10.0.2.4', 'end': '10.0.2.250'}]
# gateway_ip: '10.0.2.254'
#
-# TODO (dsneddon) remove existing templates from j2_excludes.yaml
-# and generate all templates dynamically.
+# To support backward compatibility, two versions of the network definitions will
+# be created, network/<network>.yaml and network/<network>_v6.yaml. Only
+# one of these files may be used in the deployment at a time, since the
+# parameters used for configuration are the same in both files. In the
+# future, this behavior may be changed to create only one file for custom
+# networks. You may specify IPv6 addresses for ip_subnet, allocation_pools,
+# and gateway_ip if no IPv4 addresses are used for a custom network, or set
+# ipv6: true, and the network/<network>.yaml file will be configured as IPv6.
+#
+# For configuring both IPv4 and IPv6 on the same interface, use two separate
+# networks, and then assign both IPs in the custom NIC configuration templates.
- name: External
vip: true
@@ -36,31 +48,45 @@
ip_subnet: '10.0.0.0/24'
allocation_pools: [{'start': '10.0.0.4', 'end': '10.0.0.250'}]
gateway_ip: '10.0.0.1'
+ ipv6_subnet: '2001:db8:fd00:1000::/64'
+ ipv6_allocation_pools: [{'start': '2001:db8:fd00:1000::10', 'end': '2001:db8:fd00:1000:ffff:ffff:ffff:fffe'}]
+ gateway_ipv6: '2001:db8:fd00:1000::1'
- name: InternalApi
name_lower: internal_api
vip: true
ip_subnet: '172.16.2.0/24'
allocation_pools: [{'start': '172.16.2.4', 'end': '172.16.2.250'}]
+ ipv6_subnet: 'fd00:fd00:fd00:2000::/64'
+ ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:2000::10', 'end': 'fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe'}]
compat_name: Internal
- name: Storage
vip: true
name_lower: storage
ip_subnet: '172.16.1.0/24'
allocation_pools: [{'start': '172.16.1.4', 'end': '172.16.1.250'}]
+ ipv6_subnet: 'fd00:fd00:fd00:3000::/64'
+ ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:3000::10', 'end': 'fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe'}]
- name: StorageMgmt
name_lower: storage_mgmt
vip: true
ip_subnet: '172.16.3.0/24'
allocation_pools: [{'start': '172.16.3.4', 'end': '172.16.3.250'}]
+ ipv6_subnet: 'fd00:fd00:fd00:4000::/64'
+ ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:4000::10', 'end': 'fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe'}]
- name: Tenant
vip: false # Tenant network does not use VIPs
name_lower: tenant
ip_subnet: '172.16.0.0/24'
allocation_pools: [{'start': '172.16.0.4', 'end': '172.16.0.250'}]
+ ipv6_subnet: 'fd00:fd00:fd00:5000::/64'
+ ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:5000::10', 'end': 'fd00:fd00:fd00:5000:ffff:ffff:ffff:fffe'}]
- name: Management
- # Management network is disabled by default
- enabled: false
+ # Management network is enabled by default for backwards-compatibility, but
+ # is not included in any roles by default. Add to role definitions to use.
+ enabled: true
vip: false # Management network does not use VIPs
name_lower: management
ip_subnet: '10.0.1.0/24'
allocation_pools: [{'start': '10.0.1.4', 'end': '10.0.1.250'}]
+ ipv6_subnet: 'fd00:fd00:fd00:6000::/64'
+ ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:6000::10', 'end': 'fd00:fd00:fd00:6000:ffff:ffff:ffff:fffe'}]
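Given the header comments above, a custom network defined only with IPv6 addressing needs either IPv6 values in the regular keys or the ipv6 flag. A hedged example entry (the StorageNFS name and addresses are illustrative and not part of this change):

   - name: StorageNFS
     name_lower: storage_nfs
     vip: true
     ipv6: true
     ip_subnet: 'fd00:fd00:fd00:7000::/64'
     allocation_pools: [{'start': 'fd00:fd00:fd00:7000::10', 'end': 'fd00:fd00:fd00:7000:ffff:ffff:ffff:fffe'}]
     gateway_ip: 'fd00:fd00:fd00:7000::1'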
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 0fa0a9fe..e5ae5279 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -169,6 +169,7 @@ resource_registry:
OS::TripleO::Services::Pacemaker: OS::Heat::None
OS::TripleO::Services::PacemakerRemote: OS::Heat::None
OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronSriovHostConfig: OS::Heat::None
OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml
OS::TripleO::Services::Qdr: OS::Heat::None
OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml
@@ -231,8 +232,10 @@ resource_registry:
OS::TripleO::Services::ManilaScheduler: OS::Heat::None
OS::TripleO::Services::ManilaShare: OS::Heat::None
OS::TripleO::Services::ManilaBackendGeneric: OS::Heat::None
+ OS::TripleO::Services::ManilaBackendIsilon: OS::Heat::None
OS::TripleO::Services::ManilaBackendNetapp: OS::Heat::None
OS::TripleO::Services::ManilaBackendUnity: OS::Heat::None
+ OS::TripleO::Services::ManilaBackendVMAX: OS::Heat::None
OS::TripleO::Services::ManilaBackendCephFs: OS::Heat::None
OS::TripleO::Services::ManilaBackendVNX: OS::Heat::None
OS::TripleO::Services::ComputeNeutronL3Agent: OS::Heat::None
@@ -267,6 +270,7 @@ resource_registry:
OS::TripleO::Services::CinderBackendDellPs: OS::Heat::None
OS::TripleO::Services::CinderBackendDellSc: OS::Heat::None
OS::TripleO::Services::CinderBackendDellEMCUnity: OS::Heat::None
+ OS::TripleO::Services::CinderBackendDellEMCVMAXISCSI: OS::Heat::None
OS::TripleO::Services::CinderBackendNetApp: OS::Heat::None
OS::TripleO::Services::CinderBackendScaleIO: OS::Heat::None
OS::TripleO::Services::CinderBackendVRTSHyperScale: OS::Heat::None
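All of the new registry entries above default to OS::Heat::None, so each service stays disabled until an environment file remaps it. For example, a deployment environment could enable the SR-IOV host configuration service added later in this change (path relative to the environments directory):

   resource_registry:
     OS::TripleO::Services::NeutronSriovHostConfig: ../puppet/services/neutron-sriov-host-config.yaml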
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index 2e398671..367ac5b6 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -70,17 +70,14 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
-{%- endfor %}
- controllerExtraConfig:
- default: {}
- description: |
- DEPRECATED use ControllerExtraConfig instead
- type: json
- NovaComputeExtraConfig:
+{%- if role.deprecated_param_extraconfig is defined %}
+ {{role.deprecated_param_extraconfig}}:
default: {}
description: |
- DEPRECATED use ComputeExtraConfig instead
+ DEPRECATED use {{role.name}}ExtraConfig instead
type: json
+{%- endif %}
+{%- endfor %}
NeutronControlPlaneID:
default: 'ctlplane'
type: string
@@ -188,15 +185,16 @@ parameters:
doing an update which requires removal of specific resources.
Example format ComputeRemovalPolicies: [{'resource_list': ['0']}]
-{% if role.name != 'Compute' %}
{{role.name}}SchedulerHints:
+ type: json
description: Optional scheduler hints to pass to nova
-{% else %}
- NovaComputeSchedulerHints:
- description: DEPRECATED - use ComputeSchedulerHints instead
-{% endif %}
+ default: {}
+{%- if role.deprecated_param_scheduler_hints is defined %}
+ {{role.deprecated_param_scheduler_hints}}:
type: json
+ description: DEPRECATED - use {{role.name}}SchedulerHints instead
default: {}
+{%- endif %}
{{role.name}}Parameters:
type: json
@@ -229,6 +227,24 @@ parameters:
description: >
List of server hostnames to blacklist from any triggered deployments.
+{% for role in roles %}
+{%- if role.deprecated_param_scheduler_hints is defined or role.deprecated_param_extraconfig is defined %}
+{%- if not parameter_groups_defined|default(false) %}
+parameter_groups:
+- label: deprecated
+ description: Do not use deprecated params, they will be removed.
+ parameters:
+{%- set parameter_groups_defined = true %}
+{%- endif %}
+{%- endif %}
+{%- if role.deprecated_param_scheduler_hints is defined %}
+ - {{role.deprecated_param_scheduler_hints}}
+{%- endif %}
+{%- if role.deprecated_param_extraconfig is defined %}
+ - {{role.deprecated_param_extraconfig}}
+{%- endif %}
+{%- endfor %}
+
conditions:
add_vips_to_etc_hosts: {equals : [{get_param: AddVipsToEtcHosts}, True]}
@@ -407,17 +423,10 @@ resources:
map_merge:
- get_attr: [{{role.name}}ServiceConfigSettings, value]
- get_param: ExtraConfig
- {%- if role.name == 'Controller' %}
- - map_merge:
- - get_param: controllerExtraConfig
- - get_param: {{role.name}}ExtraConfig
- {%- elif role.name == 'Compute' %}
- - map_merge:
- - get_param: NovaComputeExtraConfig
- - get_param: {{role.name}}ExtraConfig
- {%- else %}
+{%- if role.deprecated_param_extraconfig is defined %}
+ - get_param: {{role.deprecated_param_extraconfig}}
+{%- endif %}
- get_param: {{role.name}}ExtraConfig
- {%- endif %}
# Filter any null/None service_names which may be present due to mapping
# of services to OS::Heat::None
@@ -525,14 +534,19 @@ resources:
params:
'%stackname%': {get_param: 'OS::stack_name'}
NodeIndex: '%index%'
- {% if role.name != 'Compute' %}
- {{role.name}}SchedulerHints: {get_param: {{role.name}}SchedulerHints}
- {% else %}
- NovaComputeSchedulerHints: {get_param: NovaComputeSchedulerHints}
- {% endif %}
+ # Note, SchedulerHints must be defined here, not only in the
+ # nested template, as it can contain %index%
+ {{role.name}}SchedulerHints:
+ map_merge:
+{%- if role.deprecated_param_scheduler_hints is defined %}
+ - {get_param: {{role.deprecated_param_scheduler_hints}}}
+{%- endif %}
+ - {get_param: {{role.name}}SchedulerHints}
ServiceConfigSettings: {get_attr: [{{role.name}}ServiceConfigSettings, value]}
ServiceNames: {get_attr: [{{role.name}}ServiceNames, value]}
MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChainRoleData, value, monitoring_subscriptions]}
+ LoggingSources: {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_sources]}
+ LoggingGroups: {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_groups]}
ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_metadata_settings]}
DeploymentServerBlacklistDict: {get_attr: [DeploymentServerBlacklistDict, value]}
RoleParameters: {get_param: {{role.name}}Parameters}
@@ -624,24 +638,6 @@ resources:
{% for role in roles %}
- {get_attr: [{{role.name}}ServiceNames, value]}
{% endfor %}
- logging_groups:
- yaql:
- expression: >
- $.data.groups.flatten()
- data:
- groups:
-{% for role in roles %}
- - {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_groups]}
-{% endfor %}
- logging_sources:
- yaql:
- expression: >
- $.data.sources.flatten()
- data:
- sources:
-{% for role in roles %}
- - {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_sources]}
-{% endfor %}
controller_ips: {get_attr: [{{primary_role_name}}, ip_address]}
controller_names: {get_attr: [{{primary_role_name}}, hostname]}
service_ips:
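The loops above key off optional deprecated_param_* properties in the role definitions, so the legacy parameter names are only generated for roles that still declare them. A sketch of the corresponding roles_data.yaml entry, consistent with the NovaCompute* parameters removed above (the full role definition is abbreviated and partly assumed):

   - name: Compute
     description: |
       Basic Compute Node role
     deprecated_param_extraconfig: 'NovaComputeExtraConfig'
     deprecated_param_scheduler_hints: 'NovaComputeSchedulerHints'
     ServicesDefault:
       - OS::TripleO::Services::NovaCompute
       - OS::TripleO::Services::NovaLibvirt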
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 3044fe39..37c1d4e5 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -16,10 +16,6 @@ parameters:
type: comma_delimited_list
controller_ips:
type: comma_delimited_list
- logging_groups:
- type: json
- logging_sources:
- type: json
service_ips:
type: json
service_node_names:
@@ -113,8 +109,6 @@ resources:
bootstrap_nodeid_ip: {get_input: bootstrap_nodeid_ip}
all_nodes:
map_merge:
- - tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: logging_sources}
- - tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: logging_groups}
- enabled_services:
yaql:
expression: $.data.distinct()
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 5453e65c..15da1773 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -105,6 +105,11 @@ parameters:
description: DEPRECATED - use {{role.name}}IPs instead
type: json
{%- endif %}
+ {{role.name}}NetworkDeploymentActions:
+ type: comma_delimited_list
+ description: >
+ Heat action when to apply network configuration changes
+ default: []
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -148,12 +153,6 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
-{%- if role.deprecated_param_scheduler_hints is defined %}
- {{role.deprecated_param_scheduler_hints}}:
- type: json
- description: DEPRECATED - use {{role.name}}SchedulerHints instead
- default: {}
-{%- endif %}
NodeIndex:
type: number
default: 0
@@ -239,7 +238,7 @@ parameter_groups:
description: Do not use deprecated params, they will be removed.
parameters:
{%- for property in role %}
-{%- if property.startswith('deprecated_param_') %}
+{%- if property.startswith('deprecated_param_') and not role[property].endswith('SchedulerHints') %}
- {{role[property]}}
{%- endif %}
{%- endfor %}
@@ -271,6 +270,11 @@ conditions:
- {get_param: {{role.deprecated_param_flavor}}}
- {{default_flavor_name}}
{%- endif %}
+ role_network_deployment_actions_exists:
+ not:
+ equals:
+ - {get_param: {{role.name}}NetworkDeploymentActions}
+ - []
resources:
{{server_resource_name}}:
@@ -317,12 +321,7 @@ resources:
{%- endif %}
- {get_param: {{role.name}}ServerMetadata}
- {get_param: ServiceMetadataSettings}
- scheduler_hints:
- map_merge:
-{%- if role.deprecated_param_scheduler_hints is defined %}
- - {get_param: {{role.deprecated_param_scheduler_hints}}}
-{%- endif %}
- - {get_param: {{role.name}}SchedulerHints}
+ scheduler_hints: {get_param: {{role.name}}SchedulerHints}
deployment_swift_data:
if:
- deployment_swift_data_map_unset
@@ -501,7 +500,10 @@ resources:
actions:
if:
- server_not_blacklisted
- - {get_param: NetworkDeploymentActions}
+ - if:
+ - role_network_deployment_actions_exists
+ - {get_param: {{role.name}}NetworkDeploymentActions}
+ - {get_param: NetworkDeploymentActions}
- []
{{server_resource_name}}UpgradeInitConfig:
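With the role_network_deployment_actions_exists condition above, a non-empty per-role list takes precedence over the global NetworkDeploymentActions. A hedged operator-side sketch:

   parameter_defaults:
     # global default: apply NIC configuration on stack create only
     NetworkDeploymentActions: ['CREATE']
     # hypothetical override: also re-apply it on update for Compute nodes
     ComputeNetworkDeploymentActions: ['CREATE', 'UPDATE']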
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index a593d55e..38e2a280 100644
--- a/puppet/services/README.rst
+++ b/puppet/services/README.rst
@@ -99,12 +99,12 @@ It is also possible to use Mistral actions or workflows together with
a deployment step, these are executed before the main configuration run.
To describe actions or workflows from within a service use:
- * service_workflow_tasks: One or more workflow task properties
+ * workflow_tasks: One or more workflow task properties
which expects a map where the key is the step and the value a list of
 dictionaries, each describing a workflow task, for example::
- service_workflow_tasks:
+ workflow_tasks:
step2:
- name: echo
action: std.echo output=Hello
diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml
index f84edde0..f0493f0e 100644
--- a/puppet/services/aodh-api.yaml
+++ b/puppet/services/aodh-api.yaml
@@ -116,12 +116,9 @@ outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- yaql:
- expression: $.data.apache_upgrade + $.data.aodh_api_upgrade
- data:
- apache_upgrade:
- get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
- aodh_api_upgrade:
- - name: Stop aodh_api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
+ list_concat:
+ - get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ -
+ - name: Stop aodh_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
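The same yaql-to-list_concat conversion repeats throughout the services below; list_concat simply concatenates its argument lists in order, so the Apache tasks still run ahead of the service-specific ones. A minimal standalone sketch of the intrinsic (illustrative values):

   heat_template_version: pike
   outputs:
     combined_tasks:
       # resolves to all three items, preserving order
       value:
         list_concat:
           - ['apache task 1', 'apache task 2']
           - ['stop aodh_api under httpd']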
diff --git a/puppet/services/barbican-api.yaml b/puppet/services/barbican-api.yaml
index a894dbdf..974c2538 100644
--- a/puppet/services/barbican-api.yaml
+++ b/puppet/services/barbican-api.yaml
@@ -186,22 +186,19 @@ outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- yaql:
- expression: $.data.apache_upgrade + $.data.barbican_api_upgrade
- data:
- apache_upgrade:
- get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
- barbican_api_upgrade:
- - name: Check if barbican_api is deployed
- command: systemctl is-enabled openstack-barbican-api
- tags: common
- ignore_errors: True
- register: barbican_api_enabled
- - name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running"
- shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b'
- when: barbican_api_enabled.rc == 0
- tags: step0,validation
- - name: Install openstack-barbican-api package if it was disabled
- tags: step3
- yum: name=openstack-barbican-api state=latest
- when: barbican_api_enabled.rc != 0
+ list_concat:
+ - get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ -
+ - name: Check if barbican_api is deployed
+ command: systemctl is-enabled openstack-barbican-api
+ tags: common
+ ignore_errors: True
+ register: barbican_api_enabled
+ - name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running"
+ shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b'
+ when: barbican_api_enabled.rc == 0
+ tags: step0,validation
+ - name: Install openstack-barbican-api package if it was disabled
+ tags: step3
+ yum: name=openstack-barbican-api state=latest
+ when: barbican_api_enabled.rc != 0
diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml
index aba303fb..1076c043 100644
--- a/puppet/services/ceilometer-api.yaml
+++ b/puppet/services/ceilometer-api.yaml
@@ -118,12 +118,9 @@ outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- yaql:
- expression: $.data.apache_upgrade + $.data.ceilometer_api_upgrade
- data:
- apache_upgrade:
- get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
- ceilometer_api_upgrade:
- - name: Stop ceilometer_api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
+ list_concat:
+ - get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ -
+ - name: Stop ceilometer_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
diff --git a/puppet/services/ceph-base.yaml b/puppet/services/ceph-base.yaml
index f6573f6c..8debf8c7 100644
--- a/puppet/services/ceph-base.yaml
+++ b/puppet/services/ceph-base.yaml
@@ -99,7 +99,6 @@ outputs:
ceph::params::packages:
- ceph-base
- ceph-mon
- - ceph-osd
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
@@ -152,3 +151,9 @@ outputs:
list_join: ['.', ['client', {get_param: CephClientUserName}]]
MANILA_CLIENT_KEY:
list_join: ['.', ['client', {get_param: ManilaCephFSNativeCephFSAuthId}]]
+ service_config_settings:
+ ceph_osd:
+ ceph::params::packages:
+ - ceph-base
+ - ceph-mon
+ - ceph-osd
diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml
index fbfe532a..193c6ba3 100644
--- a/puppet/services/cinder-api.yaml
+++ b/puppet/services/cinder-api.yaml
@@ -184,31 +184,28 @@ outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- yaql:
- expression: $.data.apache_upgrade + $.data.cinder_api_upgrade
- data:
- apache_upgrade:
- get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
- cinder_api_upgrade:
- - name: Check if cinder_api is deployed
- command: systemctl is-enabled openstack-cinder-api
- tags: common
- ignore_errors: True
- register: cinder_api_enabled
- - name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running"
- shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b'
- when: cinder_api_enabled.rc == 0
- tags: step0,validation
- - name: check for cinder running under apache (post upgrade)
- tags: step1
- shell: "httpd -t -D DUMP_VHOSTS | grep -q cinder"
- register: cinder_apache
- ignore_errors: true
- - name: Stop cinder_api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
- when: cinder_apache.rc == 0
- - name: Stop and disable cinder_api service (pre-upgrade not under httpd)
- tags: step1
- when: cinder_api_enabled.rc == 0
- service: name=openstack-cinder-api state=stopped enabled=no
+ list_concat:
+ - get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ -
+ - name: Check if cinder_api is deployed
+ command: systemctl is-enabled openstack-cinder-api
+ tags: common
+ ignore_errors: True
+ register: cinder_api_enabled
+ - name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running"
+ shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b'
+ when: cinder_api_enabled.rc == 0
+ tags: step0,validation
+ - name: check for cinder running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q cinder"
+ register: cinder_apache
+ ignore_errors: true
+ - name: Stop cinder_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: cinder_apache.rc == 0
+ - name: Stop and disable cinder_api service (pre-upgrade not under httpd)
+ tags: step1
+ when: cinder_api_enabled.rc == 0
+ service: name=openstack-cinder-api state=stopped enabled=no
diff --git a/puppet/services/cinder-backend-dellemc-vmax-iscsi.yaml b/puppet/services/cinder-backend-dellemc-vmax-iscsi.yaml
new file mode 100644
index 00000000..1a3beab5
--- /dev/null
+++ b/puppet/services/cinder-backend-dellemc-vmax-iscsi.yaml
@@ -0,0 +1,65 @@
+# Copyright (c) 2016-2017 Dell Inc, or its subsidiaries.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+heat_template_version: pike
+
+description: >
+ OpenStack Cinder Dell EMC VMAX iSCSI backend
+
+parameters:
+ CinderEnableDellEMCVMAXISCSIBackend:
+ type: boolean
+ default: true
+ CinderDellEMCVMAXISCSIBackendName:
+ type: string
+ default: 'tripleo_dellemc_vmax_iscsi'
+ CinderDellEMCVMAXISCSIConfigFile:
+ type: string
+ ServiceData:
+ default: {}
+ description: Dictionary packing service data
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Dell EMC VMAX iSCSI backend.
+ value:
+ service_name: cinder_backend_dellemc_vmax_iscsi
+ config_settings:
+ tripleo::profile::base::cinder::volume::cinder_enable_dellemc_vmax_iscsi_backend: {get_param: CinderEnableDellEMCVMAXISCSIBackend}
+ cinder::backend::dell_emc_vmax_iscsi::volume_backend_name: {get_param: CinderDellEMCVMAXISCSIBackendName}
+ cinder::backend::dell_emc_vmax_iscsi::cinder_emc_config_file: {get_param: CinderDellEMCVMAXISCSIConfigFile}
+ step_config: |
+ include ::tripleo::profile::base::cinder::volume
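CinderDellEMCVMAXISCSIConfigFile has no default, so an environment that enables this backend must provide it. A hedged sketch of such an environment (the config file path is an example only):

   resource_registry:
     OS::TripleO::Services::CinderBackendDellEMCVMAXISCSI: ../puppet/services/cinder-backend-dellemc-vmax-iscsi.yaml
   parameter_defaults:
     CinderEnableDellEMCVMAXISCSIBackend: true
     CinderDellEMCVMAXISCSIConfigFile: '/etc/cinder/cinder_dell_emc_config.xml'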
diff --git a/puppet/services/database/redis-base.yaml b/puppet/services/database/redis-base.yaml
index 2a6a89e9..8436062a 100644
--- a/puppet/services/database/redis-base.yaml
+++ b/puppet/services/database/redis-base.yaml
@@ -38,6 +38,12 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
outputs:
role_data:
@@ -53,10 +59,20 @@ outputs:
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- redis::bind: {get_param: [ServiceNetMap, RedisNetwork]}
+ # Bind to localhost if internal TLS is enabled, since we put a TLS
+ # proxy in front.
+ redis::bind:
+ if:
+ - use_tls_proxy
+ - 'localhost'
+ - {get_param: [ServiceNetMap, RedisNetwork]}
redis::port: 6379
redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
- redis::sentinel::sentinel_bind: {get_param: [ServiceNetMap, RedisNetwork]}
+ redis::sentinel::sentinel_bind:
+ if:
+ - use_tls_proxy
+ - 'localhost'
+ - {get_param: [ServiceNetMap, RedisNetwork]}
redis::ulimit: {get_param: RedisFDLimit}
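The bind switch above uses a top-level conditions entry plus the if intrinsic, the same pattern the redis.yaml change below repeats for the proxy settings. A minimal standalone illustration of that pattern (not tied to this repository):

   heat_template_version: pike
   parameters:
     EnableInternalTLS:
       type: boolean
       default: false
   conditions:
     use_tls_proxy: {equals: [{get_param: EnableInternalTLS}, true]}
   outputs:
     bind_address:
       value:
         if:
           - use_tls_proxy
           - 'localhost'
           - '0.0.0.0'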
diff --git a/puppet/services/database/redis.yaml b/puppet/services/database/redis.yaml
index bdcc4fcd..810e467e 100644
--- a/puppet/services/database/redis.yaml
+++ b/puppet/services/database/redis.yaml
@@ -30,8 +30,15 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
resources:
+
RedisBase:
type: ./redis-base.yaml
properties:
@@ -41,6 +48,7 @@ resources:
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
role_data:
@@ -55,8 +63,41 @@ outputs:
dport:
- 6379
- 26379
+ tripleo::profile::base::database::redis::tls_proxy_bind_ip:
+ get_param: [ServiceNetMap, RedisNetwork]
+ tripleo::profile::base::database::redis::tls_proxy_fqdn:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, RedisNetwork]}
+ tripleo::profile::base::database::redis::tls_proxy_port: 6379
+ - if:
+ - use_tls_proxy
+ - redis_certificate_specs:
+ service_certificate: '/etc/pki/tls/certs/redis.crt'
+ service_key: '/etc/pki/tls/private/redis.key'
+ hostname:
+ str_replace:
+ template: "%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, RedisNetwork]}
+ principal:
+ str_replace:
+ template: "redis/%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, RedisNetwork]}
+ - {}
step_config: |
include ::tripleo::profile::base::database::redis
+ metadata_settings:
+ if:
+ - use_tls_proxy
+ -
+ - service: redis
+ network: {get_param: [ServiceNetMap, RedisNetwork]}
+ type: vip
+ - null
upgrade_tasks:
- name: Check if redis is deployed
command: systemctl is-enabled redis
diff --git a/puppet/services/disabled/mongodb-disabled.yaml b/puppet/services/disabled/mongodb-disabled.yaml
index 9e58103c..0c6e2bbb 100644
--- a/puppet/services/disabled/mongodb-disabled.yaml
+++ b/puppet/services/disabled/mongodb-disabled.yaml
@@ -37,6 +37,11 @@ outputs:
value:
service_name: mongodb_disabled
upgrade_tasks:
+ - name: Check for mongodb service
+ stat: path=/usr/lib/systemd/system/mongod.service
+ tags: common
+ register: mongod_service
- name: Stop and disable mongodb service on upgrade
tags: step1
service: name=mongod state=stopped enabled=no
+ when: mongod_service.stat.exists
diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml
index cd7ab692..0f8f352a 100644
--- a/puppet/services/gnocchi-api.yaml
+++ b/puppet/services/gnocchi-api.yaml
@@ -151,12 +151,9 @@ outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- yaql:
- expression: $.data.apache_upgrade + $.data.gnocchi_api_upgrade
- data:
- apache_upgrade:
- get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
- gnocchi_api_upgrade:
- - name: Stop gnocchi_api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
+ list_concat:
+ - get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ -
+ - name: Stop gnocchi_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
diff --git a/puppet/services/haproxy-public-tls-certmonger.yaml b/puppet/services/haproxy-public-tls-certmonger.yaml
index 14d171dc..cdfc41cf 100644
--- a/puppet/services/haproxy-public-tls-certmonger.yaml
+++ b/puppet/services/haproxy-public-tls-certmonger.yaml
@@ -36,6 +36,11 @@ parameters:
HAProxyInternalTLSKeysDirectory:
default: '/etc/pki/tls/private/haproxy'
type: string
+ DeployedSSLCertificatePath:
+ default: '/etc/pki/tls/private/overcloud_endpoint.pem'
+ description: >
+ The filepath of the certificate as it will be stored in the controller.
+ type: string
outputs:
role_data:
@@ -44,22 +49,14 @@ outputs:
service_name: haproxy_public_tls_certmonger
config_settings:
generate_service_certificates: true
- tripleo::haproxy::service_certificate:
- list_join:
- - ''
- - - {get_param: HAProxyInternalTLSCertsDirectory}
- - '/overcloud-haproxy-external.pem'
+ tripleo::haproxy::service_certificate: {get_param: DeployedSSLCertificatePath}
tripleo::certmonger::haproxy_dirs::certificate_dir:
get_param: HAProxyInternalTLSCertsDirectory
tripleo::certmonger::haproxy_dirs::key_dir:
get_param: HAProxyInternalTLSKeysDirectory
certificates_specs:
haproxy-external:
- service_pem:
- list_join:
- - ''
- - - {get_param: HAProxyInternalTLSCertsDirectory}
- - '/overcloud-haproxy-external.pem'
+ service_pem: {get_param: DeployedSSLCertificatePath}
service_certificate:
list_join:
- ''
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index 218ba740..6301314a 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -400,12 +400,9 @@ outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- yaql:
- expression: $.data.apache_upgrade + $.data.keystone_upgrade
- data:
- apache_upgrade:
- get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
- keystone_upgrade:
- - name: Stop keystone service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
+ list_concat:
+ - get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ -
+ - name: Stop keystone service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
diff --git a/puppet/services/manila-backend-isilon.yaml b/puppet/services/manila-backend-isilon.yaml
new file mode 100644
index 00000000..6d8a1fb6
--- /dev/null
+++ b/puppet/services/manila-backend-isilon.yaml
@@ -0,0 +1,72 @@
+heat_template_version: pike
+
+description: >
+ OpenStack Manila Isilon backend.
+
+parameters:
+ ManilaIsilonDriverHandlesShareServers:
+ type: string
+ default: true
+ ManilaIsilonBackendName:
+ type: string
+ default: tripleo_isilon
+ ManilaIsilonNasLogin:
+ type: string
+ default: ''
+ ManilaIsilonNasPassword:
+ type: string
+ default: ''
+ ManilaIsilonNasServer:
+ type: string
+ default: ''
+ ManilaIsilonNasRootDir:
+ type: string
+ default: ''
+ ManilaIsilonNasServerPort:
+ type: number
+ default: 8080
+ ManilaIsilonNasServerSecure:
+ type: string
+ default: ''
+ ServiceData:
+ default: {}
+ description: Dictionary packing service data
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+
+outputs:
+ role_data:
+ description: Role data for the Manila Isilon backend.
+ value:
+ service_name: manila_backend_isilon
+ config_settings:
+ manila::backend::dellemc_isilon::title: {get_param: ManilaIsilonBackendName}
+ manila::backend::dellemc_isilon::emc_nas_login: {get_param: ManilaIsilonNasLogin}
+ manila::backend::dellemc_isilon::driver_handles_share_servers: {get_param: ManilaIsilonDriverHandlesShareServers}
+ manila::backend::dellemc_isilon::emc_nas_password: {get_param: ManilaIsilonNasPassword}
+ manila::backend::dellemc_isilon::emc_nas_server: {get_param: ManilaIsilonNasServer}
+ manila::backend::dellemc_isilon::emc_nas_root_dir: {get_param: ManilaIsilonNasRootDir}
+ manila::backend::dellemc_isilon::emc_nas_server_port: {get_param: ManilaIsilonNasServerPort}
+ manila::backend::dellemc_isilon::emc_nas_server_secure: {get_param: ManilaIsilonNasServerSecure}
+ step_config:
diff --git a/puppet/services/manila-backend-vmax.yaml b/puppet/services/manila-backend-vmax.yaml
new file mode 100644
index 00000000..cdd32f5d
--- /dev/null
+++ b/puppet/services/manila-backend-vmax.yaml
@@ -0,0 +1,74 @@
+heat_template_version: pike
+
+description: >
+ OpenStack Manila VMAX backend.
+
+parameters:
+ ManilaVMAXDriverHandlesShareServers:
+ type: string
+ default: false
+ ManilaVMAXBackendName:
+ type: string
+ default: tripleo_manila_vmax
+ ManilaVMAXNasLogin:
+ type: string
+ default: ''
+ ManilaVMAXNasPassword:
+ type: string
+ default: ''
+ ManilaVMAXNasServer:
+ type: string
+ default: ''
+ ManilaVMAXServerContainer:
+ type: string
+ default: ''
+ ManilaVMAXShareDataPools:
+ type: string
+ default: ''
+ ManilaVMAXEthernetPorts:
+ type: string
+ default: ''
+ ServiceData:
+ default: {}
+ description: Dictionary packing service data
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+
+outputs:
+ role_data:
+ description: Role data for the Manila VMAX backend.
+ value:
+ service_name: manila_backend_vmax
+ config_settings:
+ manila::backend::dellemc_vmax::title: {get_param: ManilaVMAXBackendName}
+ manila::backend::dellemc_vmax::emc_nas_login: {get_param: ManilaVMAXNasLogin}
+ manila::backend::dellemc_vmax::driver_handles_share_servers: {get_param: ManilaVMAXDriverHandlesShareServers}
+ manila::backend::dellemc_vmax::emc_nas_password: {get_param: ManilaVMAXNasPassword}
+ manila::backend::dellemc_vmax::emc_nas_server: {get_param: ManilaVMAXNasServer}
+ manila::backend::dellemc_vmax::emc_share_backend: 'vmax'
+ manila::backend::dellemc_vmax::vmax_server_container: {get_param: ManilaVMAXServerContainer}
+ manila::backend::dellemc_vmax::vmax_share_data_pools: {get_param: ManilaVMAXShareDataPools}
+ manila::backend::dellemc_vmax::vmax_ethernet_ports: {get_param: ManilaVMAXEthernetPorts}
+ step_config:
+
diff --git a/puppet/services/manila-scheduler.yaml b/puppet/services/manila-scheduler.yaml
index 7d43f685..364a1a3d 100644
--- a/puppet/services/manila-scheduler.yaml
+++ b/puppet/services/manila-scheduler.yaml
@@ -70,7 +70,7 @@ outputs:
manila::compute::nova::nova_admin_password: {get_param: NovaPassword}
manila::compute::nova::nova_admin_tenant_name: 'service'
manila::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
- manila::network::neutron::neutron_admin_auth_url: {get_param: [EndpointMap, NeutronAdmin, uri]}
+ manila::network::neutron::neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
manila::network::neutron::neutron_admin_password: {get_param: NeutronPassword}
step_config: |
include ::tripleo::profile::base::manila::scheduler
diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml
index b6980045..9e493c3e 100644
--- a/puppet/services/neutron-base.yaml
+++ b/puppet/services/neutron-base.yaml
@@ -30,6 +30,10 @@ parameters:
type: number
default: 0
description: The number of neutron dhcp agents to schedule per network
+ DhcpAgentNotification:
+ default: true
+ description: Whether or not to enable DHCP agent notifications.
+ type: boolean
NeutronDnsDomain:
type: string
default: openstacklocal
@@ -133,6 +137,7 @@ outputs:
- {get_param: NeutronDebug }
neutron::purge_config: {get_param: EnableConfigPurge}
neutron::allow_overlapping_ips: true
+ neutron::dhcp_agent_notification: {get_param: DhcpAgentNotification}
neutron::dns_domain: {get_param: NeutronDnsDomain}
neutron::rabbit_heartbeat_timeout_threshold: 60
neutron::host: '%{::fqdn}'
diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml
index 7894f78b..a2f82a58 100644
--- a/puppet/services/neutron-ovs-agent.yaml
+++ b/puppet/services/neutron-ovs-agent.yaml
@@ -140,22 +140,19 @@ outputs:
step_config: |
include ::tripleo::profile::base::neutron::ovs
upgrade_tasks:
- yaql:
- expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
- data:
- ovs_upgrade:
- get_attr: [Ovs, role_data, upgrade_tasks]
- neutron_ovs_upgrade:
- - name: Check if neutron_ovs_agent is deployed
- command: systemctl is-enabled neutron-openvswitch-agent
- tags: common
- ignore_errors: True
- register: neutron_ovs_agent_enabled
- - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
- shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
- when: neutron_ovs_agent_enabled.rc == 0
- tags: step0,validation
- - name: Stop neutron_ovs_agent service
- tags: step1
- when: neutron_ovs_agent_enabled.rc == 0
- service: name=neutron-openvswitch-agent state=stopped
+ list_concat:
+ - get_attr: [Ovs, role_data, upgrade_tasks]
+ -
+ - name: Check if neutron_ovs_agent is deployed
+ command: systemctl is-enabled neutron-openvswitch-agent
+ tags: common
+ ignore_errors: True
+ register: neutron_ovs_agent_enabled
+ - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
+ when: neutron_ovs_agent_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop neutron_ovs_agent service
+ tags: step1
+ when: neutron_ovs_agent_enabled.rc == 0
+ service: name=neutron-openvswitch-agent state=stopped
diff --git a/puppet/services/neutron-plugin-ml2-nuage.yaml b/puppet/services/neutron-plugin-ml2-nuage.yaml
index a7dc2e8b..4cd541cc 100644
--- a/puppet/services/neutron-plugin-ml2-nuage.yaml
+++ b/puppet/services/neutron-plugin-ml2-nuage.yaml
@@ -67,6 +67,16 @@ parameters:
type: boolean
default: false
+ NovaPatchConfigMonkeyPatch:
+ description: Apply monkey patching or not
+ type: boolean
+ default: false
+
+ NovaPatchConfigMonkeyPatchModules:
+ description: List of modules/decorators to monkey patch
+ type: comma_delimited_list
+ default: ''
+
resources:
NeutronML2Base:
@@ -95,5 +105,7 @@ outputs:
neutron::plugins::ml2::nuage::nuage_base_uri_version: {get_param: NeutronNuageBaseURIVersion}
neutron::plugins::ml2::nuage::nuage_cms_id: {get_param: NeutronNuageCMSId}
nova::api::use_forwarded_for: {get_param: UseForwardedFor}
+ nova::patch::config::monkey_patch: {get_param: NovaPatchConfigMonkeyPatch}
+ nova::patch::config::monkey_patch_modules: {get_param: NovaPatchConfigMonkeyPatchModules}
step_config: |
include tripleo::profile::base::neutron::plugins::ml2
diff --git a/puppet/services/neutron-plugin-nsx.yaml b/puppet/services/neutron-plugin-nsx.yaml
index 2774b03e..ad0fc7f8 100644
--- a/puppet/services/neutron-plugin-nsx.yaml
+++ b/puppet/services/neutron-plugin-nsx.yaml
@@ -49,7 +49,7 @@ parameters:
NativeDhcpMetadata:
default: True
description: This is the flag to indicate if using native DHCP/Metadata or not.
- type: string
+ type: boolean
DhcpProfileUuid:
description: This is the UUID of the NSX DHCP Profile that will be used to enable
native DHCP service.
@@ -65,14 +65,14 @@ outputs:
value:
service_name: neutron_plugin_nsx
config_settings:
- neutron::plugins::nsx_v3::default_overlay_tz: {get_param: DefaultOverlayTz}
- neutron::plugins::nsx_v3::default_tier0_router: {get_param: DefaultTier0Router}
- neutron::plugins::nsx_v3::nsx_api_managers: {get_param: NsxApiManagers}
- neutron::plugins::nsx_v3::nsx_api_user: {get_param: NsxApiUser}
- neutron::plugins::nsx_v3::nsx_api_password: {get_param: NsxApiPassword}
- neutron::plugins::nsx_v3::native_dhcp_metadata: {get_param: NativeDhcpMetadata}
- neutron::plugins::nsx_v3::dhcp_profile_uuid: {get_param: DhcpProfileUuid}
- neutron::plugins::nsx_v3::metadata_proxy_uuid: {get_param: MetadataProxyUuid}
+ neutron::plugins::nsx::default_overlay_tz: {get_param: DefaultOverlayTz}
+ neutron::plugins::nsx::default_tier0_router: {get_param: DefaultTier0Router}
+ neutron::plugins::nsx::nsx_api_managers: {get_param: NsxApiManagers}
+ neutron::plugins::nsx::nsx_api_user: {get_param: NsxApiUser}
+ neutron::plugins::nsx::nsx_api_password: {get_param: NsxApiPassword}
+ neutron::plugins::nsx::native_dhcp_metadata: {get_param: NativeDhcpMetadata}
+ neutron::plugins::nsx::dhcp_profile_uuid: {get_param: DhcpProfileUuid}
+ neutron::plugins::nsx::metadata_proxy_uuid: {get_param: MetadataProxyUuid}
step_config: |
- include tripleo::profile::base::neutron::plugins::nsx_v3
+ include tripleo::profile::base::neutron::plugins::nsx
diff --git a/puppet/services/neutron-sriov-agent.yaml b/puppet/services/neutron-sriov-agent.yaml
index 3c18209c..5c52ecfc 100644
--- a/puppet/services/neutron-sriov-agent.yaml
+++ b/puppet/services/neutron-sriov-agent.yaml
@@ -99,3 +99,17 @@ outputs:
- get_attr: [RoleParametersValue, value]
step_config: |
include ::tripleo::profile::base::neutron::sriov
+ upgrade_tasks:
+ - name: Check if neutron_sriov_agent is deployed
+ command: systemctl is-enabled neutron-sriov-nic-agent
+ tags: common
+ ignore_errors: True
+ register: neutron_sriov_nic_agent_enabled
+ - name: "PreUpgrade step0,validation: Check service neutron-server is running"
+ shell: /usr/bin/systemctl show 'neutron-sriov-nic-agent' --property ActiveState | grep '\bactive\b'
+ when: neutron_sriov_nic_agent_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop neutron_sriov_nic_agent service
+ tags: step1
+ when: neutron_sriov_nic_agent_enabled.rc == 0
+ service: name=neutron-sriov-nic-agent state=stopped
diff --git a/puppet/services/neutron-sriov-host-config.yaml b/puppet/services/neutron-sriov-host-config.yaml
new file mode 100644
index 00000000..987b96f9
--- /dev/null
+++ b/puppet/services/neutron-sriov-host-config.yaml
@@ -0,0 +1,78 @@
+heat_template_version: pike
+
+description: >
+ OpenStack Neutron SR-IOV host configuration
+
+parameters:
+ ServiceData:
+ default: {}
+ description: Dictionary packing service data
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: >
+ Mapping of service_name -> network name. Typically set via
+ parameter_defaults in the resource registry. This mapping overrides those
+ in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NeutronSriovNumVFs:
+ description: >
+ Provide the list of VFs to be reserved for each SR-IOV interface.
+ Format "<interface_name1>:<numvfs1>,<interface_name2>:<numvfs2>"
+ Example "eth1:4096,eth2:128"
+ type: comma_delimited_list
+ default: ""
+
+resources:
+
+ NeutronBase:
+ type: ./neutron-base.yaml
+ properties:
+ ServiceData: {get_param: ServiceData}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+ # Merging role-specific parameters (RoleParameters) with the default parameters.
+ # RoleParameters will have the precedence over the default parameters.
+ RoleParametersValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_replace:
+ - map_replace:
+ - tripleo::host::sriov::number_of_vfs: NeutronSriovNumVFs
+ - values: {get_param: [RoleParameters]}
+ - values:
+ NeutronSriovNumVFs: {get_param: NeutronSriovNumVFs}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron SR-IOV host configuration service.
+ value:
+ service_name: neutron_sriov_host_config
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ - get_attr: [RoleParametersValue, value]
+ step_config: |
+ include ::tripleo::host::sriov
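The nested map_replace above is what lets a role-specific value override the global one: the inner call substitutes the placeholder with a value found in RoleParameters (when present), and the outer call resolves any placeholder that is still left against the global parameter. A small worked example of the intrinsic with made-up values:

   # map_replace with a values: argument replaces matching values in the map
   map_replace:
     - {tripleo::host::sriov::number_of_vfs: NeutronSriovNumVFs}
     - values: {NeutronSriovNumVFs: ['eth1:10', 'eth2:4']}
   # result: {tripleo::host::sriov::number_of_vfs: ['eth1:10', 'eth2:4']}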
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index b413fb12..a4a3ca2b 100644
--- a/puppet/services/nova-api.yaml
+++ b/puppet/services/nova-api.yaml
@@ -199,88 +199,85 @@ outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- yaql:
- expression: $.data.apache_upgrade + $.data.nova_api_upgrade
- data:
- apache_upgrade:
- get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
- nova_api_upgrade:
- - name: get bootstrap nodeid
- tags: common
- command: hiera bootstrap_nodeid
- register: bootstrap_node
- - name: set is_bootstrap_node fact
- tags: common
- set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- - name: Extra migration for nova tripleo/+bug/1656791
- tags: step0,pre-upgrade
- when: is_bootstrap_node
- command: nova-manage db online_data_migrations
- - name: Stop and disable nova_api service (pre-upgrade not under httpd)
- tags: step2
- service: name=openstack-nova-api state=stopped enabled=no
- - name: Create puppet manifest to set transport_url in nova.conf
- tags: step5
- when: is_bootstrap_node
- copy:
- dest: /root/nova-api_upgrade_manifest.pp
- mode: 0600
- content: >
- $transport_url = os_transport_url({
- 'transport' => hiera('messaging_service_name', 'rabbit'),
- 'hosts' => any2array(hiera('rabbitmq_node_names', undef)),
- 'port' => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
- 'username' => hiera('nova::rabbit_userid', 'guest'),
- 'password' => hiera('nova::rabbit_password'),
- 'ssl' => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
- })
- oslo::messaging::default { 'nova_config':
- transport_url => $transport_url
- }
- - name: Run puppet apply to set tranport_url in nova.conf
- tags: step5
- when: is_bootstrap_node
- command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
- register: puppet_apply_nova_api_upgrade
- failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
- changed_when: puppet_apply_nova_api_upgrade.rc == 2
- - name: Setup cell_v2 (map cell0)
- tags: step5
- when: is_bootstrap_node
- shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
- - name: Setup cell_v2 (create default cell)
- tags: step5
- when: is_bootstrap_node
- # (owalsh) puppet-nova expects the cell name 'default'
- # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
- shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
- register: nova_api_create_cell
- failed_when: nova_api_create_cell.rc not in [0,2]
- changed_when: nova_api_create_cell.rc == 0
- - name: Setup cell_v2 (sync nova/cell DB)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage db sync
- async: {get_param: NovaDbSyncTimeout}
- poll: 10
- - name: Setup cell_v2 (get cell uuid)
- tags: step5
- when: is_bootstrap_node
- shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
- register: nova_api_cell_uuid
- - name: Setup cell_v2 (migrate hosts)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
- - name: Setup cell_v2 (migrate instances)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
- - name: Sync nova_api DB
- tags: step5
- command: nova-manage api_db sync
- when: is_bootstrap_node
- - name: Online data migration for nova
- tags: step5
- when: is_bootstrap_node
- command: nova-manage db online_data_migrations
+ list_concat:
+ - get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ -
+ - name: get bootstrap nodeid
+ tags: common
+ command: hiera bootstrap_nodeid
+ register: bootstrap_node
+ - name: set is_bootstrap_node fact
+ tags: common
+ set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
+ - name: Extra migration for nova tripleo/+bug/1656791
+ tags: step0,pre-upgrade
+ when: is_bootstrap_node
+ command: nova-manage db online_data_migrations
+ - name: Stop and disable nova_api service (pre-upgrade not under httpd)
+ tags: step2
+ service: name=openstack-nova-api state=stopped enabled=no
+ - name: Create puppet manifest to set transport_url in nova.conf
+ tags: step5
+ when: is_bootstrap_node
+ copy:
+ dest: /root/nova-api_upgrade_manifest.pp
+ mode: 0600
+ content: >
+ $transport_url = os_transport_url({
+ 'transport' => hiera('messaging_service_name', 'rabbit'),
+ 'hosts' => any2array(hiera('rabbitmq_node_names', undef)),
+ 'port' => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
+ 'username' => hiera('nova::rabbit_userid', 'guest'),
+ 'password' => hiera('nova::rabbit_password'),
+ 'ssl' => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
+ })
+ oslo::messaging::default { 'nova_config':
+ transport_url => $transport_url
+ }
+ - name: Run puppet apply to set transport_url in nova.conf
+ tags: step5
+ when: is_bootstrap_node
+ command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+ register: puppet_apply_nova_api_upgrade
+ failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
+ changed_when: puppet_apply_nova_api_upgrade.rc == 2
+ - name: Setup cell_v2 (map cell0)
+ tags: step5
+ when: is_bootstrap_node
+ shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
+ - name: Setup cell_v2 (create default cell)
+ tags: step5
+ when: is_bootstrap_node
+ # (owalsh) puppet-nova expects the cell name 'default'
+ # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
+ shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
+ register: nova_api_create_cell
+ failed_when: nova_api_create_cell.rc not in [0,2]
+ changed_when: nova_api_create_cell.rc == 0
+ - name: Setup cell_v2 (sync nova/cell DB)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage db sync
+ async: {get_param: NovaDbSyncTimeout}
+ poll: 10
+ - name: Setup cell_v2 (get cell uuid)
+ tags: step5
+ when: is_bootstrap_node
+ shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
+ register: nova_api_cell_uuid
+ - name: Setup cell_v2 (migrate hosts)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
+ - name: Setup cell_v2 (migrate instances)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
+ - name: Sync nova_api DB
+ tags: step5
+ command: nova-manage api_db sync
+ when: is_bootstrap_node
+ - name: Online data migration for nova
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage db online_data_migrations
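
The hunk above replaces the previous yaql-based merge with Heat's list_concat intrinsic, which concatenates the upgrade_tasks inherited from ApacheServiceBase with the service-specific task list. A minimal sketch of the pattern, with an illustrative base service and task (list_concat requires a sufficiently recent heat_template_version, pike or newer):

      upgrade_tasks:
        list_concat:
          - get_attr: [ExampleBaseService, role_data, upgrade_tasks]
          -
            - name: stop example service before the upgrade
              tags: step1
              service: name=example-service state=stopped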
diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml
index 472dbcce..1feb62b2 100644
--- a/puppet/services/opendaylight-api.yaml
+++ b/puppet/services/opendaylight-api.yaml
@@ -62,6 +62,14 @@ parameters:
description: Whether to manage the OpenDaylight repository
type: boolean
default: false
+ OpenDaylightSNATMechanism:
+ description: SNAT mechanism to be used
+ default: 'conntrack'
+ type: string
+ constraints:
+ - allowed_values:
+ - conntrack
+ - controller
outputs:
role_data:
@@ -84,6 +92,8 @@ outputs:
- 6640
- 6653
- 2550
+ - 8185
+ opendaylight::snat_mechanism: {get_param: OpenDaylightSNATMechanism}
step_config: |
include tripleo::profile::base::neutron::opendaylight
upgrade_tasks:
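
The new OpenDaylightSNATMechanism parameter defaults to 'conntrack' and only accepts the two listed values. A minimal sketch of overriding it from a deployment environment file (the file name is hypothetical):

      # odl-snat-controller.yaml (illustrative environment file)
      parameter_defaults:
        OpenDaylightSNATMechanism: controller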
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index 139ab7c7..c1cec4ff 100644
--- a/puppet/services/opendaylight-ovs.yaml
+++ b/puppet/services/opendaylight-ovs.yaml
@@ -141,22 +141,19 @@ outputs:
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
upgrade_tasks:
- yaql:
- expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade
- data:
- ovs_upgrade:
- get_attr: [Ovs, role_data, upgrade_tasks]
- opendaylight_upgrade:
- - name: Check if openvswitch is deployed
- command: systemctl is-enabled openvswitch
- tags: common
- ignore_errors: True
- register: openvswitch_enabled
- - name: "PreUpgrade step0,validation: Check service openvswitch is running"
- shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b'
- when: openvswitch_enabled.rc == 0
- tags: step0,validation
- - name: Stop openvswitch service
- tags: step1
- when: openvswitch_enabled.rc == 0
- service: name=openvswitch state=stopped
+ list_concat:
+ - get_attr: [Ovs, role_data, upgrade_tasks]
+ -
+ - name: Check if openvswitch is deployed
+ command: systemctl is-enabled openvswitch
+ tags: common
+ ignore_errors: True
+ register: openvswitch_enabled
+ - name: "PreUpgrade step0,validation: Check service openvswitch is running"
+ shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b'
+ when: openvswitch_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop openvswitch service
+ tags: step1
+ when: openvswitch_enabled.rc == 0
+ service: name=openvswitch state=stopped
diff --git a/puppet/services/openvswitch.yaml b/puppet/services/openvswitch.yaml
index d8061d4b..6479d7f9 100644
--- a/puppet/services/openvswitch.yaml
+++ b/puppet/services/openvswitch.yaml
@@ -179,6 +179,6 @@ outputs:
with_items:
- "{{ovs_list_of_rpms.stdout_lines}}"
tags: step2
- when: "'2.5.0-14' in '{{ovs_version.stdout}}'
+ when: "'2.5.0-14' in ovs_version.stdout|default('')
or
- ovs_packaging_issue|succeeded"
+ ovs_packaging_issue|default(false)|succeeded"
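
The change above guards the conditional against undefined values: if the tasks registering ovs_version or ovs_packaging_issue did not run (e.g. they were skipped on a node), default() supplies a harmless fallback instead of failing the evaluation of the when clause. A minimal sketch of the same pattern, with illustrative task and variable names:

      - name: example task that may be skipped on some nodes
        command: rpm -q example-package
        register: example_result
        when: false
      - name: later task guarded with default()
        debug: msg="example-package found"
        when: "'example' in example_result.stdout|default('')"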
diff --git a/puppet/services/pacemaker/cinder-volume.yaml b/puppet/services/pacemaker/cinder-volume.yaml
index f4675875..cbbf2eaf 100644
--- a/puppet/services/pacemaker/cinder-volume.yaml
+++ b/puppet/services/pacemaker/cinder-volume.yaml
@@ -66,17 +66,9 @@ outputs:
resource: openstack-cinder-volume
state: disable
wait_for_resource: true
- - name: get bootstrap nodeid
- tags: step5
- command: hiera bootstrap_nodeid
- register: bootstrap_node
- - block:
- - name: Sync cinder DB
- tags: step5
- command: cinder-manage db sync
- - name: Start cinder_volume service (pacemaker)
- tags: step5
- pacemaker_resource:
- resource: openstack-cinder-volume
- state: enable
- when: bootstrap_node.stdout == ansible_hostname
+ post_upgrade_tasks:
+ - name: Start cinder_volume service (pacemaker)
+ tags: step1
+ pacemaker_resource:
+ resource: openstack-cinder-volume
+ state: enable
diff --git a/puppet/services/pacemaker/database/redis.yaml b/puppet/services/pacemaker/database/redis.yaml
index 66eb4b2a..e466f304 100644
--- a/puppet/services/pacemaker/database/redis.yaml
+++ b/puppet/services/pacemaker/database/redis.yaml
@@ -53,5 +53,16 @@ outputs:
- redis::service_manage: false
redis::notify_service: false
redis::managed_by_cluster_manager: true
+ tripleo::profile::pacemaker::database::redis::tls_proxy_bind_ip:
+ get_param: [ServiceNetMap, RedisNetwork]
+ tripleo::profile::pacemaker::database::redis::tls_proxy_fqdn:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, RedisNetwork]}
+ tripleo::profile::pacemaker::database::redis::tls_proxy_port: 6379
step_config: |
include ::tripleo::profile::pacemaker::database::redis
+ metadata_settings:
+ get_attr: [RedisBase, role_data, metadata_settings]
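
Assuming the default ServiceNetMap maps RedisNetwork to internal_api, the str_replace above renders the FQDN setting into a hiera interpolation that each node resolves locally, roughly:

      tripleo::profile::pacemaker::database::redis::tls_proxy_fqdn: "%{hiera('fqdn_internal_api')}"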
diff --git a/puppet/services/panko-api.yaml b/puppet/services/panko-api.yaml
index 74d3f27c..892ad1c1 100644
--- a/puppet/services/panko-api.yaml
+++ b/puppet/services/panko-api.yaml
@@ -112,27 +112,24 @@ outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- yaql:
- expression: $.data.apache_upgrade + $.data.panko_api_upgrade
- data:
- apache_upgrade:
- get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
- panko_api_upgrade:
- - name: Check if httpd is deployed
- command: systemctl is-enabled httpd
- tags: common
- ignore_errors: True
- register: httpd_enabled
- - name: "PreUpgrade step0,validation: Check if httpd is running"
- shell: >
- /usr/bin/systemctl show 'httpd' --property ActiveState |
- grep '\bactive\b'
- when: httpd_enabled.rc == 0
- tags: step0,validation
- - name: Stop panko-api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
- when: httpd_enabled.rc == 0
- - name: Install openstack-panko-api package if it was not installed
- tags: step3
- yum: name=openstack-panko-api state=latest
+ list_concat:
+ - get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ -
+ - name: Check if httpd is deployed
+ command: systemctl is-enabled httpd
+ tags: common
+ ignore_errors: True
+ register: httpd_enabled
+ - name: "PreUpgrade step0,validation: Check if httpd is running"
+ shell: >
+ /usr/bin/systemctl show 'httpd' --property ActiveState |
+ grep '\bactive\b'
+ when: httpd_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop panko-api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: httpd_enabled.rc == 0
+ - name: Install openstack-panko-api package if it was not installed
+ tags: step3
+ yum: name=openstack-panko-api state=latest
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index ba3a0984..a1a60201 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -122,6 +122,7 @@ outputs:
rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
rabbitmq::ssl: {get_param: EnableInternalTLS}
+ rabbitmq::ssl_erl_dist: {get_param: EnableInternalTLS}
rabbitmq::ssl_port: 5672
rabbitmq::ssl_depth: 1
rabbitmq::ssl_only: {get_param: EnableInternalTLS}
diff --git a/puppet/services/tacker.yaml b/puppet/services/tacker.yaml
index 541a2eb6..251d8092 100644
--- a/puppet/services/tacker.yaml
+++ b/puppet/services/tacker.yaml
@@ -114,6 +114,7 @@ outputs:
tacker::keystone::authtoken::project_name: 'service'
tacker::keystone::authtoken::user_domain_name: 'Default'
tacker::keystone::authtoken::project_domain_name: 'Default'
+ tacker::keystone::authtoken::password: {get_param: TackerPassword}
tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
diff --git a/puppet/services/zaqar-api.yaml b/puppet/services/zaqar-api.yaml
index 82d105ef..71f90534 100644
--- a/puppet/services/zaqar-api.yaml
+++ b/puppet/services/zaqar-api.yaml
@@ -98,6 +98,7 @@ outputs:
zaqar::keystone::authtoken::project_name: 'service'
zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
zaqar::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ zaqar::keystone::trust::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
zaqar::debug:
if:
- service_debug_unset
@@ -164,6 +165,8 @@ outputs:
zaqar::keystone::auth_websocket::internal_url: {get_param: [EndpointMap, ZaqarWebSocketInternal, uri]}
zaqar::keystone::auth_websocket::region: {get_param: KeystoneRegion}
zaqar::keystone::auth_websocket::tenant: 'service'
+ zaqar::keystone::trust::password: {get_param: ZaqarPassword}
+ zaqar::keystone::trust::user_domain_name: 'Default'
-
if:
- zaqar_management_store_sqlalchemy
@@ -181,37 +184,34 @@ outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- yaql:
- expression: $.data.apache_upgrade + $.data.zaqar_upgrade
- data:
- apache_upgrade:
- get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
- zaqar_upgrade:
- - name: Check if zaqar is deployed
- command: systemctl is-enabled openstack-zaqar
- tags: common
- ignore_errors: True
- register: zaqar_enabled
- - name: "PreUpgrade step0,validation: Check if openstack-zaqar is running"
- shell: >
- /usr/bin/systemctl show 'openstack-zaqar' --property ActiveState |
- grep '\bactive\b'
- when: zaqar_enabled.rc == 0
- tags: step0,validation
- - name: Check for zaqar running under apache (post upgrade)
- tags: step1
- shell: "httpd -t -D DUMP_VHOSTS | grep -q zaqar_wsgi"
- register: zaqar_apache
- ignore_errors: true
- - name: Stop zaqar service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
- when: zaqar_apache.rc == 0
- - name: Stop and disable zaqar service (pre-upgrade not under httpd)
- tags: step1
- when: zaqar_enabled.rc == 0
- service: name=openstack-zaqar state=stopped enabled=no
- - name: Install openstack-zaqar package if it was disabled
- tags: step3
- yum: name=openstack-zaqar state=latest
- when: zaqar_enabled.rc != 0
+ list_concat:
+ - get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ -
+ - name: Check if zaqar is deployed
+ command: systemctl is-enabled openstack-zaqar
+ tags: common
+ ignore_errors: True
+ register: zaqar_enabled
+ - name: "PreUpgrade step0,validation: Check if openstack-zaqar is running"
+ shell: >
+ /usr/bin/systemctl show 'openstack-zaqar' --property ActiveState |
+ grep '\bactive\b'
+ when: zaqar_enabled.rc == 0
+ tags: step0,validation
+ - name: Check for zaqar running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q zaqar_wsgi"
+ register: zaqar_apache
+ ignore_errors: true
+ - name: Stop zaqar service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: zaqar_apache.rc == 0
+ - name: Stop and disable zaqar service (pre-upgrade not under httpd)
+ tags: step1
+ when: zaqar_enabled.rc == 0
+ service: name=openstack-zaqar state=stopped enabled=no
+ - name: Install openstack-zaqar package if it was disabled
+ tags: step3
+ yum: name=openstack-zaqar state=latest
+ when: zaqar_enabled.rc != 0
diff --git a/releasenotes/notes/adds-post_upgrade_tasks-eba0656012c861a1.yaml b/releasenotes/notes/adds-post_upgrade_tasks-eba0656012c861a1.yaml
new file mode 100644
index 00000000..bdce1348
--- /dev/null
+++ b/releasenotes/notes/adds-post_upgrade_tasks-eba0656012c861a1.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+ - |
+    This adds post_upgrade_tasks, Ansible tasks that can be added to any
+    service manifest (currently, pacemaker/cinder-volume for bug 1706951).
+
+    These are similar to the existing upgrade_tasks in format; however, they
+    are executed *after* the docker/puppet config, so the order is
+    upgrade_tasks, deployment steps (docker/puppet), then post_upgrade_tasks.
+
+    Like the upgrade_tasks, these are serialised and you can use 'tags'
+    from 'step0' to 'step6' (more can be added if needed).
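
As the pacemaker/cinder-volume change earlier in this diff illustrates, a service opts in by emitting a post_upgrade_tasks list in its role_data output. A minimal sketch with an illustrative service name:

      outputs:
        role_data:
          value:
            service_name: example_service
            post_upgrade_tasks:
              - name: restart example service after the upgrade steps complete
                tags: step1
                service: name=example-service state=restarted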
diff --git a/releasenotes/notes/configuring-snat-in-opendaylight-d5ed4d62275e1876.yaml b/releasenotes/notes/configuring-snat-in-opendaylight-d5ed4d62275e1876.yaml
new file mode 100644
index 00000000..31564e09
--- /dev/null
+++ b/releasenotes/notes/configuring-snat-in-opendaylight-d5ed4d62275e1876.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+    Configure OpenDaylight SNAT to use the conntrack mechanism with OVS and the
+    controller-based mechanism with OVS-DPDK.
diff --git a/releasenotes/notes/containerized-services-logs-0dc652513870f46d.yaml b/releasenotes/notes/containerized-services-logs-0dc652513870f46d.yaml
new file mode 100644
index 00000000..5ce8b7df
--- /dev/null
+++ b/releasenotes/notes/containerized-services-logs-0dc652513870f46d.yaml
@@ -0,0 +1,11 @@
+---
+upgrade:
+ - |
+    Logs for containerized services can be found under updated paths.
+    Pacemaker-managed resources write logs to `/var/log/pacemaker/bundles/*`.
+    OpenStack services managed by the Docker daemon bind-mount their log files
+    to the `/var/log/containers/<foo>/*` sub-directories. Services running
+    under Apache2 WSGI use the `/var/log/containers/httpd/<foo-api>/*`
+    destinations. Additional tools or commands that log to syslog end up
+    placing log records in the host's journal (journalctl) and `/var/log/messages`.
+
diff --git a/releasenotes/notes/fix-missing-tacker-password-c2ce555cdd52c102.yaml b/releasenotes/notes/fix-missing-tacker-password-c2ce555cdd52c102.yaml
new file mode 100644
index 00000000..7d8d3dd1
--- /dev/null
+++ b/releasenotes/notes/fix-missing-tacker-password-c2ce555cdd52c102.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fixes missing Keystone authtoken password for Tacker.
diff --git a/releasenotes/notes/fix-neutron_admin_auth_url-c88224251d8eb807.yaml b/releasenotes/notes/fix-neutron_admin_auth_url-c88224251d8eb807.yaml
new file mode 100644
index 00000000..fa4074b1
--- /dev/null
+++ b/releasenotes/notes/fix-neutron_admin_auth_url-c88224251d8eb807.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - The "neutron_admin_auth_url" is now properly set using the KeystoneInternal
+    endpoint rather than the NeutronAdmin endpoint.
diff --git a/releasenotes/notes/fix-odl-websocket-firewall-9e2f78ebaa39313f.yaml b/releasenotes/notes/fix-odl-websocket-firewall-9e2f78ebaa39313f.yaml
new file mode 100644
index 00000000..63919dad
--- /dev/null
+++ b/releasenotes/notes/fix-odl-websocket-firewall-9e2f78ebaa39313f.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+    Fixes a bug where neutron port status was not updated in OpenDaylight
+    deployments because the firewall blocked the websocket port used to send
+    the update (port 8185).
diff --git a/releasenotes/notes/ipv6_defaults-7dbb62113f4e5084.yaml b/releasenotes/notes/ipv6_defaults-7dbb62113f4e5084.yaml
new file mode 100644
index 00000000..bbc67563
--- /dev/null
+++ b/releasenotes/notes/ipv6_defaults-7dbb62113f4e5084.yaml
@@ -0,0 +1,10 @@
+---
+other:
+ - |
+    Network templates are now rendered with jinja2 based on network_data.yaml.
+    The only required parameter for each network is the name; optional params
+    will populate the defaults in the network template. Network templates
+    will be generated for both IPv4 and IPv6 versions of the networks; setting
+    ipv6: true on a network will generate only IPv6 templates. An example of
+    overriding default IP addresses for IPv6 has been added in
+    environments/network-environment-v6.yaml.
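
A minimal sketch of a network_data.yaml entry under this behaviour (the network name is illustrative; options left unset fall back to the template defaults):

      - name: ExampleNet
        ipv6: true   # generate only the IPv6 version of this network's templates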
diff --git a/releasenotes/notes/isilon_manila_e9677898724a11e7.yaml b/releasenotes/notes/isilon_manila_e9677898724a11e7.yaml
new file mode 100644
index 00000000..8eb50b8f
--- /dev/null
+++ b/releasenotes/notes/isilon_manila_e9677898724a11e7.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+    Add support for the Dell EMC Isilon manila driver
diff --git a/releasenotes/notes/odl-qos-48b70c804755e3a5.yaml b/releasenotes/notes/odl-qos-48b70c804755e3a5.yaml
new file mode 100644
index 00000000..380ef7ff
--- /dev/null
+++ b/releasenotes/notes/odl-qos-48b70c804755e3a5.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - Disables QoS with OpenDaylight until officially
+ supported.
diff --git a/releasenotes/notes/ovn-container-support-3ab333fff6e90dc4.yaml b/releasenotes/notes/ovn-container-support-3ab333fff6e90dc4.yaml
index 25fd2fbe..6da35473 100644
--- a/releasenotes/notes/ovn-container-support-3ab333fff6e90dc4.yaml
+++ b/releasenotes/notes/ovn-container-support-3ab333fff6e90dc4.yaml
@@ -2,3 +2,4 @@
features:
- Support containerized ovn-controller
- Support containerized OVN Dbs without HA
+ - Support containerized OVN DBs with HA
diff --git a/releasenotes/notes/vmax_cinder_a6672898724a11e7.yaml b/releasenotes/notes/vmax_cinder_a6672898724a11e7.yaml
new file mode 100644
index 00000000..e6eb7497
--- /dev/null
+++ b/releasenotes/notes/vmax_cinder_a6672898724a11e7.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+    Add support for the Dell EMC VMAX iSCSI cinder driver
diff --git a/releasenotes/notes/vmax_manila_2967789872aa11e8.yaml b/releasenotes/notes/vmax_manila_2967789872aa11e8.yaml
new file mode 100644
index 00000000..9f2a7a64
--- /dev/null
+++ b/releasenotes/notes/vmax_manila_2967789872aa11e8.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+    Add support for the Dell EMC VMAX Manila driver
diff --git a/releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml b/releasenotes/notes/workflow_tasks-4da5830821b7154b.yaml
index cf99ec5d..cf99ec5d 100644
--- a/releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml
+++ b/releasenotes/notes/workflow_tasks-4da5830821b7154b.yaml
diff --git a/roles/Compute.yaml b/roles/Compute.yaml
index 9d2c8189..892bed8f 100644
--- a/roles/Compute.yaml
+++ b/roles/Compute.yaml
@@ -39,6 +39,7 @@
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronLinuxbridgeAgent
- OS::TripleO::Services::NeutronSriovAgent
+ - OS::TripleO::Services::NeutronSriovHostConfig
- OS::TripleO::Services::NeutronVppAgent
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
diff --git a/roles/ComputeHCI.yaml b/roles/ComputeHCI.yaml
index 0216b04a..4632dff2 100644
--- a/roles/ComputeHCI.yaml
+++ b/roles/ComputeHCI.yaml
@@ -30,6 +30,7 @@
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronLinuxbridgeAgent
- OS::TripleO::Services::NeutronSriovAgent
+ - OS::TripleO::Services::NeutronSriovHostConfig
- OS::TripleO::Services::NeutronVppAgent
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
diff --git a/roles/Controller.yaml b/roles/Controller.yaml
index 8f5e0cc8..88e4dfed 100644
--- a/roles/Controller.yaml
+++ b/roles/Controller.yaml
@@ -47,6 +47,7 @@
- OS::TripleO::Services::CinderBackendDellPs
- OS::TripleO::Services::CinderBackendDellSc
- OS::TripleO::Services::CinderBackendDellEMCUnity
+ - OS::TripleO::Services::CinderBackendDellEMCVMAXISCSI
- OS::TripleO::Services::CinderBackendNetApp
- OS::TripleO::Services::CinderBackendScaleIO
- OS::TripleO::Services::CinderBackendVRTSHyperScale
@@ -81,9 +82,11 @@
- OS::TripleO::Services::ManilaApi
- OS::TripleO::Services::ManilaBackendCephFs
- OS::TripleO::Services::ManilaBackendGeneric
+ - OS::TripleO::Services::ManilaBackendIsilon
- OS::TripleO::Services::ManilaBackendNetapp
- OS::TripleO::Services::ManilaBackendUnity
- OS::TripleO::Services::ManilaBackendVNX
+ - OS::TripleO::Services::ManilaBackendVMAX
- OS::TripleO::Services::ManilaScheduler
- OS::TripleO::Services::ManilaShare
- OS::TripleO::Services::Memcached
diff --git a/roles/ControllerOpenstack.yaml b/roles/ControllerOpenstack.yaml
index c2130011..2f86d2d2 100644
--- a/roles/ControllerOpenstack.yaml
+++ b/roles/ControllerOpenstack.yaml
@@ -42,7 +42,6 @@
- OS::TripleO::Services::CinderVolume
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Congress
- - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::Etcd
@@ -66,14 +65,20 @@
- OS::TripleO::Services::ManilaApi
- OS::TripleO::Services::ManilaBackendCephFs
- OS::TripleO::Services::ManilaBackendGeneric
+ - OS::TripleO::Services::ManilaBackendIsilon
- OS::TripleO::Services::ManilaBackendNetapp
- OS::TripleO::Services::ManilaBackendUnity
- OS::TripleO::Services::ManilaBackendVNX
+ - OS::TripleO::Services::ManilaBackendVMAX
- OS::TripleO::Services::ManilaScheduler
- OS::TripleO::Services::ManilaShare
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::MongoDb
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronApi
+ - OS::TripleO::Services::NeutronBgpVpnApi
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronL2gwApi
- OS::TripleO::Services::NovaApi
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::NovaConsoleauth
diff --git a/roles/Database.yaml b/roles/Database.yaml
index 689b1617..e5c6b4d2 100644
--- a/roles/Database.yaml
+++ b/roles/Database.yaml
@@ -12,6 +12,7 @@
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Collectd
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::Kernel
diff --git a/roles/Networker.yaml b/roles/Networker.yaml
index ac30c2fd..afd3b101 100644
--- a/roles/Networker.yaml
+++ b/roles/Networker.yaml
@@ -3,10 +3,11 @@
###############################################################################
- name: Networker
description: |
- Standalone networking role to run Neutron services their own. Includes
+    Standalone networking role to run Neutron agents on their own. Includes
Pacemaker integration via PacemakerRemote
networks:
- InternalApi
+ - Tenant
HostnameFormatDefault: '%stackname%-networker-%index%'
ServicesDefault:
- OS::TripleO::Services::AuditD
@@ -17,12 +18,8 @@
- OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQLClient
- - OS::TripleO::Services::NeutronApi
- - OS::TripleO::Services::NeutronBgpVpnApi
- - OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL2gwAgent
- - OS::TripleO::Services::NeutronL2gwApi
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronLbaasv2Agent
- OS::TripleO::Services::NeutronMetadataAgent
diff --git a/roles_data.yaml b/roles_data.yaml
index dcb66af3..a1adc5ee 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -50,6 +50,7 @@
- OS::TripleO::Services::CinderBackendDellPs
- OS::TripleO::Services::CinderBackendDellSc
- OS::TripleO::Services::CinderBackendDellEMCUnity
+ - OS::TripleO::Services::CinderBackendDellEMCVMAXISCSI
- OS::TripleO::Services::CinderBackendNetApp
- OS::TripleO::Services::CinderBackendScaleIO
- OS::TripleO::Services::CinderBackendVRTSHyperScale
@@ -84,9 +85,11 @@
- OS::TripleO::Services::ManilaApi
- OS::TripleO::Services::ManilaBackendCephFs
- OS::TripleO::Services::ManilaBackendGeneric
+ - OS::TripleO::Services::ManilaBackendIsilon
- OS::TripleO::Services::ManilaBackendNetapp
- OS::TripleO::Services::ManilaBackendUnity
- OS::TripleO::Services::ManilaBackendVNX
+ - OS::TripleO::Services::ManilaBackendVMAX
- OS::TripleO::Services::ManilaScheduler
- OS::TripleO::Services::ManilaShare
- OS::TripleO::Services::Memcached
@@ -186,6 +189,7 @@
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronLinuxbridgeAgent
- OS::TripleO::Services::NeutronSriovAgent
+ - OS::TripleO::Services::NeutronSriovHostConfig
- OS::TripleO::Services::NeutronVppAgent
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
diff --git a/sample-env-generator/ssl.yaml b/sample-env-generator/ssl.yaml
index 6963e842..43a1afc1 100644
--- a/sample-env-generator/ssl.yaml
+++ b/sample-env-generator/ssl.yaml
@@ -22,6 +22,40 @@ environments:
The contents of the private key go here
resource_registry:
OS::TripleO::NodeTLSData: ../../puppet/extraconfig/tls/tls-cert-inject.yaml
+ -
+ name: ssl/enable-internal-tls
+ title: Enable SSL on OpenStack Internal Endpoints
+ description: |
+ A Heat environment file which can be used to enable TLS for the internal
+ network via certmonger
+ files:
+ puppet/all-nodes-config.yaml:
+ parameters:
+ - EnableInternalTLS
+ puppet/services/nova-base.yaml:
+ parameters:
+ - RabbitClientUseSSL
+ overcloud.yaml:
+ parameters:
+ - ServerMetadata
+ static:
+ - EnableInternalTLS
+ - RabbitClientUseSSL
+ - ServerMetadata
+ sample_values:
+ EnableInternalTLS: True
+ RabbitClientUseSSL: True
+ ServerMetadata: |-2
+
+ ipa_enroll: True
+ resource_registry:
+ OS::TripleO::Services::CertmongerUser: ../puppet/services/certmonger-user.yaml
+ OS::TripleO::Services::HAProxyInternalTLS: ../puppet/services/haproxy-internal-tls-certmonger.yaml
+ # We use apache as a TLS proxy
+ OS::TripleO::Services::TLSProxyBase: ../puppet/services/apache.yaml
+ # Creates nova metadata that will create the extra service principals per
+ # node.
+ OS::TripleO::ServiceServerMetadataHook: ../extraconfig/nova_metadata/krb-service-principals.yaml
- name: ssl/inject-trust-anchor
title: Inject SSL Trust Anchor on Overcloud Nodes
description: |
@@ -91,39 +125,6 @@ environments:
CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
CongressPublic: {protocol: 'https', port: '13789', host: 'IP_ADDRESS'}
- ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
- ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
- ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
- ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
- ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
- ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
- ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
- ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
- ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
- ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
- ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
- ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
- ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
- ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
- ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
- ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
- ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
- ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
- ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
- ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'IP_ADDRESS'}
@@ -225,39 +226,6 @@ environments:
CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
- ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
- ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
- ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
- ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
- ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
- ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
- ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
- ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
- ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
- ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
- ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
- ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
- ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
- ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
- ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
- ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
- ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
- ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
- ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
- ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
@@ -359,39 +327,6 @@ environments:
CongressAdmin: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
CongressInternal: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
- ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
- ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
- ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
- host: 'IP_ADDRESS'}
- ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
- ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
- ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
- ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
- ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
- ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
- ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
- ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
- ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
- ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
- ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
- ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
- ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
- ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
- ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
- ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
- ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
- ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
Ec2ApiAdmin: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
Ec2ApiInternal: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
diff --git a/test-requirements.txt b/test-requirements.txt
index 1b60459c..6ba8e178 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,7 +1,7 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-openstackdocstheme>=1.11.0 # Apache-2.0
+openstackdocstheme>=1.16.0 # Apache-2.0
PyYAML>=3.10.0 # MIT
Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
six>=1.9.0 # MIT
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index a096d69a..f7a45d7b 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -31,12 +31,13 @@ envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
'tls-endpoints-public-ip.yaml',
'tls-everywhere-endpoints-dns.yaml']
ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
-OPTIONAL_SECTIONS = ['service_workflow_tasks']
+OPTIONAL_SECTIONS = ['workflow_tasks']
REQUIRED_DOCKER_SECTIONS = ['service_name', 'docker_config', 'puppet_config',
'config_settings', 'step_config']
OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
- 'service_config_settings', 'host_prep_tasks',
- 'metadata_settings', 'kolla_config']
+ 'post_upgrade_tasks', 'service_config_settings',
+ 'host_prep_tasks', 'metadata_settings',
+ 'kolla_config']
REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
'config_image']
OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = [ 'puppet_tags', 'volumes' ]
@@ -87,6 +88,8 @@ PARAMETER_DEFINITION_EXCLUSIONS = {'ManagementNetCidr': ['default'],
'OVNSouthboundServerPort': ['description'],
'ExternalInterfaceDefaultRoute':
['description', 'default'],
+ 'ManagementInterfaceDefaultRoute':
+ ['description', 'default'],
'IPPool': ['description'],
'SSLCertificate': ['description',
'default',
@@ -105,7 +108,7 @@ PARAMETER_DEFINITION_EXCLUSIONS = {'ManagementNetCidr': ['default'],
'ControllerExtraConfig': ['description'],
'NovaComputeExtraConfig': ['description'],
'controllerExtraConfig': ['description'],
- 'DockerSwiftConfigImage': ['default'],
+ 'DockerSwiftConfigImage': ['default']
}
PREFERRED_CAMEL_CASE = {
diff --git a/tripleo_heat_templates/environment_generator.py b/tripleo_heat_templates/environment_generator.py
index f1469390..e13690dd 100755
--- a/tripleo_heat_templates/environment_generator.py
+++ b/tripleo_heat_templates/environment_generator.py
@@ -159,7 +159,7 @@ def _generate_environment(input_env, parent_env=None):
for line in env_desc.splitlines():
env_file.write(u'# %s\n' % line)
- if parameter_defaults:
+ if parameter_defaults or static_defaults:
env_file.write(u'parameter_defaults:\n')
for name, value in sorted(parameter_defaults.items()):
write_sample_entry(env_file, name, value)