-rw-r--r--  README.rst | 6
-rw-r--r--  all-nodes-validation.yaml | 6
-rw-r--r--  bindep.txt | 2
-rw-r--r--  capabilities-map.yaml | 28
-rw-r--r--  ci/environments/multinode-3nodes.yaml | 2
-rw-r--r--  ci/environments/multinode.yaml | 16
-rw-r--r--  ci/environments/multinode_major_upgrade.yaml | 19
-rw-r--r--  ci/environments/scenario001-multinode.yaml | 29
-rw-r--r--  ci/environments/scenario002-multinode.yaml | 15
-rw-r--r--  ci/environments/scenario003-multinode.yaml | 13
-rw-r--r--  ci/environments/scenario004-multinode.yaml | 40
-rw-r--r--  ci/pingtests/scenario004-multinode.yaml | 12
-rw-r--r--  deployed-server/deployed-server-bootstrap-centos.sh | 3
-rw-r--r--  deployed-server/deployed-server-bootstrap-rhel.sh | 3
-rw-r--r--  deployed-server/deployed-server-roles-data.yaml | 6
-rw-r--r--  docker/copy-etc.sh | 3
-rw-r--r--  docker/create-config-dir.sh | 6
-rwxr-xr-x  docker/docker-puppet.py | 262
-rw-r--r--  docker/docker-steps.j2 | 350
-rwxr-xr-x  docker/docker-toool | 189
-rwxr-xr-x  docker/firstboot/setup_docker_host.sh | 8
-rw-r--r--  docker/firstboot/setup_docker_host.yaml (renamed from docker/firstboot/install_docker_agents.yaml) | 13
-rwxr-xr-x  docker/firstboot/start_docker_agents.sh | 69
-rw-r--r--  docker/post-upgrade.j2.yaml | 4
-rw-r--r--  docker/post.j2.yaml | 212
-rw-r--r--  docker/services/README.rst | 151
-rw-r--r--  docker/services/aodh-api.yaml | 123
-rw-r--r--  docker/services/aodh-evaluator.yaml | 84
-rw-r--r--  docker/services/aodh-listener.yaml | 84
-rw-r--r--  docker/services/aodh-notifier.yaml | 84
-rw-r--r--  docker/services/database/mongodb.yaml | 115
-rw-r--r--  docker/services/database/mysql.yaml | 150
-rw-r--r--  docker/services/glance-api.yaml | 102
-rw-r--r--  docker/services/gnocchi-api.yaml | 118
-rw-r--r--  docker/services/gnocchi-metricd.yaml | 78
-rw-r--r--  docker/services/gnocchi-statsd.yaml | 78
-rw-r--r--  docker/services/heat-api-cfn.yaml | 96
-rw-r--r--  docker/services/heat-api.yaml | 96
-rw-r--r--  docker/services/heat-engine.yaml | 98
-rw-r--r--  docker/services/ironic-api.yaml | 105
-rw-r--r--  docker/services/ironic-conductor.yaml | 145
-rw-r--r--  docker/services/ironic-pxe.yaml | 137
-rw-r--r--  docker/services/keystone.yaml | 172
-rw-r--r--  docker/services/memcached.yaml | 75
-rw-r--r--  docker/services/mistral-api.yaml | 121
-rw-r--r--  docker/services/mistral-engine.yaml | 94
-rw-r--r--  docker/services/mistral-executor.yaml | 97
-rw-r--r--  docker/services/neutron-api.yaml | 111
-rw-r--r--  docker/services/neutron-dhcp.yaml | 99
-rw-r--r--  docker/services/neutron-l3.yaml | 90
-rw-r--r--  docker/services/neutron-ovs-agent.yaml | 46
-rw-r--r--  docker/services/neutron-plugin-ml2.yaml | 60
-rw-r--r--  docker/services/nova-api.yaml | 151
-rw-r--r--  docker/services/nova-compute.yaml | 56
-rw-r--r--  docker/services/nova-conductor.yaml | 91
-rw-r--r--  docker/services/nova-ironic.yaml | 94
-rw-r--r--  docker/services/nova-libvirt.yaml | 66
-rw-r--r--  docker/services/nova-metadata.yaml | 50
-rw-r--r--  docker/services/nova-placement.yaml | 110
-rw-r--r--  docker/services/nova-scheduler.yaml | 90
-rw-r--r--  docker/services/panko-api.yaml | 119
-rw-r--r--  docker/services/rabbitmq.yaml | 130
-rw-r--r--  docker/services/services.yaml | 24
-rw-r--r--  docker/services/swift-proxy.yaml | 87
-rw-r--r--  docker/services/swift-ringbuilder.yaml | 82
-rw-r--r--  docker/services/swift-storage.yaml | 334
-rw-r--r--  docker/services/zaqar.yaml | 106
-rw-r--r--  environments/cadf.yaml | 2
-rw-r--r--  environments/contrail/roles_data_contrail.yaml | 9
-rw-r--r--  environments/deployed-server-pacemaker-environment.yaml | 4
-rw-r--r--  environments/docker.yaml | 57
-rw-r--r--  environments/enable-internal-tls.yaml | 5
-rw-r--r--  environments/hyperconverged-ceph.yaml | 6
-rw-r--r--  environments/low-memory-usage.yaml | 4
-rw-r--r--  environments/major-upgrade-composable-steps-docker.yaml | 10
-rw-r--r--  environments/major-upgrade-composable-steps.yaml | 11
-rw-r--r--  environments/major-upgrade-converge-docker.yaml | 7
-rw-r--r--  environments/major-upgrade-converge.yaml | 1
-rw-r--r--  environments/net-bond-with-vlans-no-external.yaml | 4
-rw-r--r--  environments/net-bond-with-vlans-v6.yaml | 6
-rw-r--r--  environments/net-bond-with-vlans.yaml | 6
-rw-r--r--  environments/net-single-nic-linux-bridge-with-vlans.yaml | 6
-rw-r--r--  environments/net-single-nic-with-vlans-no-external.yaml | 4
-rw-r--r--  environments/net-single-nic-with-vlans-v6.yaml | 6
-rw-r--r--  environments/net-single-nic-with-vlans.yaml | 6
-rw-r--r--  environments/network-environment.yaml | 2
-rw-r--r--  environments/neutron-bgpvpn.yaml | 16
-rw-r--r--  environments/neutron-opendaylight.yaml | 2
-rw-r--r--  environments/services-docker/ironic.yaml | 5
-rw-r--r--  environments/services-docker/mistral.yaml | 4
-rw-r--r--  environments/services-docker/zaqar.yaml | 2
-rw-r--r--  environments/services/panko.yaml | 2
-rw-r--r--  environments/services/vpp.yaml | 9
-rw-r--r--  environments/undercloud.yaml | 1
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml | 4
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml | 16
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration | 130
-rw-r--r--  extraconfig/tasks/aodh_data_migration.sh | 19
-rw-r--r--  extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml | 62
-rwxr-xr-x  extraconfig/tasks/major_upgrade_check.sh | 109
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh | 36
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh | 176
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh | 68
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh | 17
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh | 8
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh | 15
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml | 179
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh | 200
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml | 25
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp | 103
-rwxr-xr-x  extraconfig/tasks/run_puppet.sh | 27
-rw-r--r--  extraconfig/tasks/swift-ring-deploy.yaml | 31
-rw-r--r--  extraconfig/tasks/swift-ring-update.yaml | 42
-rw-r--r--  extraconfig/tasks/tripleo_upgrade_node.sh | 33
-rwxr-xr-x  extraconfig/tasks/yum_update.sh | 19
-rw-r--r--  firstboot/os-net-config-mappings.yaml | 51
-rw-r--r--  network/ports/net_ip_list_map.yaml | 65
-rw-r--r--  network/ports/net_ip_map.yaml | 155
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml | 19
-rw-r--r--  overcloud.j2.yaml | 56
-rw-r--r--  plan-environment.yaml | 5
-rw-r--r--  puppet/all-nodes-config.yaml | 44
-rw-r--r--  puppet/blockstorage-role.yaml | 10
-rw-r--r--  puppet/cephstorage-role.yaml | 10
-rw-r--r--  puppet/compute-role.yaml | 10
-rw-r--r--  puppet/controller-role.yaml | 10
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml | 65
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml | 47
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml | 13
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml | 29
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml | 55
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml | 23
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml | 81
-rw-r--r--  puppet/major_upgrade_steps.j2.yaml | 1
-rw-r--r--  puppet/objectstorage-role.yaml | 10
-rw-r--r--  puppet/puppet-steps.j2 | 36
-rw-r--r--  puppet/role.role.j2.yaml | 11
-rw-r--r--  puppet/services/README.rst | 42
-rw-r--r--  puppet/services/aodh-api.yaml | 2
-rw-r--r--  puppet/services/aodh-base.yaml | 7
-rw-r--r--  puppet/services/aodh-evaluator.yaml | 9
-rw-r--r--  puppet/services/aodh-listener.yaml | 9
-rw-r--r--  puppet/services/aodh-notifier.yaml | 9
-rw-r--r--  puppet/services/apache-internal-tls-certmonger.yaml | 6
-rw-r--r--  puppet/services/apache.yaml | 6
-rw-r--r--  puppet/services/auditd.yaml | 16
-rw-r--r--  puppet/services/barbican-api.yaml | 19
-rw-r--r--  puppet/services/ceilometer-agent-central.yaml | 12
-rw-r--r--  puppet/services/ceilometer-agent-compute.yaml | 12
-rw-r--r--  puppet/services/ceilometer-agent-notification.yaml | 9
-rw-r--r--  puppet/services/ceilometer-api.yaml | 2
-rw-r--r--  puppet/services/ceilometer-base.yaml | 9
-rw-r--r--  puppet/services/ceilometer-collector.yaml | 9
-rw-r--r--  puppet/services/ceph-mon.yaml | 43
-rw-r--r--  puppet/services/ceph-osd.yaml | 50
-rw-r--r--  puppet/services/ceph-rgw.yaml | 13
-rw-r--r--  puppet/services/certmonger-user.yaml | 28
-rw-r--r--  puppet/services/cinder-api.yaml | 24
-rw-r--r--  puppet/services/cinder-backend-scaleio.yaml | 2
-rw-r--r--  puppet/services/cinder-base.yaml | 3
-rw-r--r--  puppet/services/cinder-scheduler.yaml | 9
-rw-r--r--  puppet/services/cinder-volume.yaml | 15
-rw-r--r--  puppet/services/congress.yaml | 39
-rw-r--r--  puppet/services/database/mysql-client.yaml | 34
-rw-r--r--  puppet/services/database/mysql.yaml | 6
-rw-r--r--  puppet/services/database/redis-base.yaml | 1
-rw-r--r--  puppet/services/disabled/glance-registry.yaml | 2
-rw-r--r--  puppet/services/docker.yaml | 43
-rw-r--r--  puppet/services/ec2-api.yaml | 28
-rw-r--r--  puppet/services/etcd.yaml | 19
-rw-r--r--  puppet/services/glance-api.yaml | 126
-rw-r--r--  puppet/services/glance-base.yaml | 126
-rw-r--r--  puppet/services/gnocchi-api.yaml | 10
-rw-r--r--  puppet/services/gnocchi-base.yaml | 10
-rw-r--r--  puppet/services/gnocchi-metricd.yaml | 9
-rw-r--r--  puppet/services/gnocchi-statsd.yaml | 9
-rw-r--r--  puppet/services/haproxy.yaml | 10
-rw-r--r--  puppet/services/heat-api-cfn.yaml | 64
-rw-r--r--  puppet/services/heat-api-cloudwatch.yaml | 66
-rw-r--r--  puppet/services/heat-api.yaml | 64
-rw-r--r--  puppet/services/heat-base.yaml | 9
-rw-r--r--  puppet/services/heat-engine.yaml | 14
-rw-r--r--  puppet/services/horizon.yaml | 19
-rw-r--r--  puppet/services/ironic-api.yaml | 6
-rw-r--r--  puppet/services/ironic-base.yaml | 3
-rw-r--r--  puppet/services/ironic-conductor.yaml | 45
-rw-r--r--  puppet/services/kernel.yaml | 1
-rw-r--r--  puppet/services/keystone.yaml | 10
-rw-r--r--  puppet/services/logging/fluentd-client.yaml | 13
-rw-r--r--  puppet/services/manila-api.yaml | 4
-rw-r--r--  puppet/services/manila-base.yaml | 3
-rw-r--r--  puppet/services/metrics/collectd.yaml | 13
-rw-r--r--  puppet/services/mistral-api.yaml | 19
-rw-r--r--  puppet/services/mistral-base.yaml | 3
-rw-r--r--  puppet/services/mistral-engine.yaml | 19
-rw-r--r--  puppet/services/mistral-executor.yaml | 19
-rw-r--r--  puppet/services/monitoring/sensu-base.yaml | 15
-rw-r--r--  puppet/services/monitoring/sensu-client.yaml | 13
-rw-r--r--  puppet/services/neutron-api.yaml | 27
-rw-r--r--  puppet/services/neutron-base.yaml | 39
-rw-r--r--  puppet/services/neutron-bgpvpn-api.yaml | 34
-rw-r--r--  puppet/services/neutron-dhcp.yaml | 9
-rw-r--r--  puppet/services/neutron-l3-compute-dvr.yaml | 25
-rw-r--r--  puppet/services/neutron-l3.yaml | 22
-rw-r--r--  puppet/services/neutron-metadata.yaml | 9
-rw-r--r--  puppet/services/neutron-ovs-agent.yaml | 9
-rw-r--r--  puppet/services/neutron-ovs-dpdk-agent.yaml | 5
-rw-r--r--  puppet/services/neutron-plugin-plumgrid.yaml | 3
-rw-r--r--  puppet/services/nova-api.yaml | 7
-rw-r--r--  puppet/services/nova-base.yaml | 3
-rw-r--r--  puppet/services/nova-compute.yaml | 2
-rw-r--r--  puppet/services/nova-conductor.yaml | 7
-rw-r--r--  puppet/services/nova-consoleauth.yaml | 2
-rw-r--r--  puppet/services/nova-ironic.yaml | 2
-rw-r--r--  puppet/services/nova-placement.yaml | 11
-rw-r--r--  puppet/services/nova-scheduler.yaml | 5
-rw-r--r--  puppet/services/nova-vnc-proxy.yaml | 2
-rw-r--r--  puppet/services/octavia-api.yaml | 3
-rw-r--r--  puppet/services/octavia-base.yaml | 6
-rw-r--r--  puppet/services/opendaylight-api.yaml | 31
-rw-r--r--  puppet/services/opendaylight-ovs.yaml | 20
-rw-r--r--  puppet/services/pacemaker.yaml | 8
-rw-r--r--  puppet/services/pacemaker/rabbitmq.yaml | 31
-rw-r--r--  puppet/services/panko-api.yaml | 19
-rw-r--r--  puppet/services/panko-base.yaml | 7
-rw-r--r--  puppet/services/rabbitmq-internal-tls-certmonger.yaml | 47
-rw-r--r--  puppet/services/rabbitmq.yaml | 116
-rw-r--r--  puppet/services/sahara-api.yaml | 2
-rw-r--r--  puppet/services/sahara-base.yaml | 11
-rw-r--r--  puppet/services/sahara-engine.yaml | 5
-rw-r--r--  puppet/services/services.yaml | 8
-rw-r--r--  puppet/services/snmp.yaml | 2
-rw-r--r--  puppet/services/sshd.yaml | 2
-rw-r--r--  puppet/services/swift-proxy.yaml | 7
-rw-r--r--  puppet/services/swift-storage.yaml | 2
-rw-r--r--  puppet/services/tacker.yaml | 37
-rw-r--r--  puppet/services/vpp.yaml | 57
-rw-r--r--  puppet/services/zaqar.yaml | 20
-rw-r--r--  releasenotes/notes/6.0.0-b52a14a71fc62788.yaml | 2
-rw-r--r--  releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml | 6
-rw-r--r--  releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml | 3
-rw-r--r--  releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml | 5
-rw-r--r--  releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml | 6
-rw-r--r--  releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml | 8
-rw-r--r--  releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml | 10
-rw-r--r--  releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml | 6
-rw-r--r--  releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml | 6
-rw-r--r--  releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml | 5
-rw-r--r--  releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml | 4
-rw-r--r--  releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml | 4
-rw-r--r--  releasenotes/notes/ha-by-default-55326e699ee8602c.yaml | 5
-rw-r--r--  releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml | 6
-rw-r--r--  releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml | 11
-rw-r--r--  releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml | 4
-rw-r--r--  releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml | 9
-rw-r--r--  releasenotes/notes/vpp-84d35e51ff62a58c.yaml | 6
-rw-r--r--  roles_data.yaml | 18
-rw-r--r--  roles_data_undercloud.yaml | 9
-rwxr-xr-x  tools/yaml-validate.py | 88
-rw-r--r--  validation-scripts/all-nodes.sh | 18
260 files changed, 7935 insertions, 2264 deletions
diff --git a/README.rst b/README.rst
index b0b7ceb0..e2b59c59 100644
--- a/README.rst
+++ b/README.rst
@@ -66,7 +66,7 @@ and should be executed according to the following table:
+================+=============+=============+=============+=============+=================+
| keystone | X | X | X | X | X |
+----------------+-------------+-------------+-------------+-------------+-----------------+
-| glance | file | swift | file | file | swift |
+| glance | rbd | swift | file | swift + rbd | swift |
+----------------+-------------+-------------+-------------+-------------+-----------------+
| cinder | rbd | iscsi | | | iscsi |
+----------------+-------------+-------------+-------------+-------------+-----------------+
@@ -124,3 +124,7 @@ and should be executed according to the following table:
+----------------+-------------+-------------+-------------+-------------+-----------------+
| congress | X | | | | |
+----------------+-------------+-------------+-------------+-------------+-----------------+
+| cephmds | | | | X | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| manila | | | | X | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
diff --git a/all-nodes-validation.yaml b/all-nodes-validation.yaml
index 65d01d0f..eea3e40a 100644
--- a/all-nodes-validation.yaml
+++ b/all-nodes-validation.yaml
@@ -10,6 +10,10 @@ parameters:
default: ''
description: A string containing a space separated list of IP addresses used to ping test each available network interface.
type: string
+ ValidateFqdn:
+ default: false
+ description: Optional validation to ensure FQDN as set by Nova matches the name set in /etc/hosts.
+ type: boolean
resources:
AllNodesValidationsImpl:
@@ -19,6 +23,8 @@ resources:
inputs:
- name: ping_test_ips
default: {get_param: PingTestIps}
+ - name: validate_fqdn
+ default: {get_param: ValidateFqdn}
config: {get_file: ./validation-scripts/all-nodes.sh}
outputs:
diff --git a/bindep.txt b/bindep.txt
new file mode 100644
index 00000000..4f9b4254
--- /dev/null
+++ b/bindep.txt
@@ -0,0 +1,2 @@
+# This is a cross-platform list tracking distribution packages needed by tests;
+# see http://docs.openstack.org/infra/bindep/ for additional information.
diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index cc22ff92..83b3ac40 100644
--- a/capabilities-map.yaml
+++ b/capabilities-map.yaml
@@ -308,6 +308,11 @@ topics:
description: >
Enable various Neutron plugins and backends
environments:
+ - file: environments/neutron-bgpvpn.yaml
+ title: Neutron BGPVPN Service Plugin
+ description: Enables Neutron BGPVPN Service Plugin
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- file: environments/neutron-ml2-bigswitch.yaml
title: BigSwitch Extensions
description: >
@@ -335,21 +340,11 @@ topics:
description: Enables Neutron Nuage backend on the controller
requires:
- overcloud-resource-registry-puppet.yaml
- - file: environments/neutron-opencontrail.yaml
- title: OpenContrail Extensions
- description: Enables OpenContrail extensions
- requires:
- - overcloud-resource-registry-puppet.yaml
- file: environments/neutron-opendaylight.yaml
title: OpenDaylight
description: Enables OpenDaylight
requires:
- overcloud-resource-registry-puppet.yaml
- - file: environments/neutron-opendaylight-l3.yaml
- title: OpenDaylight with L3 DVR
- description: Enables OpenDaylight with L3 DVR
- requires:
- - overcloud-resource-registry-puppet.yaml
- file: environments/neutron-ovs-dpdk.yaml
title: DPDK with OVS
description: Deploy DPDK with OVS
@@ -544,14 +539,6 @@ topics:
description:
requires:
- overcloud-resource-registry-puppet.yaml
- - title: Manage Firewall
- description:
- environments:
- - file: environments/manage-firewall.yaml
- title: Manage Firewall
- description:
- requires:
- - overcloud-resource-registry-puppet.yaml
- title: Operational Tools
description:
@@ -600,3 +587,8 @@ topics:
description:
requires:
- overcloud-resource-registry-puppet.yaml
+ - title: Keystone CADF auditing
+ description: Enable CADF notifications in Keystone for auditing
+ environments:
+ - file: environments/cadf.yaml
+ title: Keystone CADF auditing
diff --git a/ci/environments/multinode-3nodes.yaml b/ci/environments/multinode-3nodes.yaml
index f35a0804..d6e2376a 100644
--- a/ci/environments/multinode-3nodes.yaml
+++ b/ci/environments/multinode-3nodes.yaml
@@ -55,6 +55,7 @@
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::MySQLClient
- name: Controller
CountDefault: 1
@@ -65,6 +66,7 @@
- OS::TripleO::Services::Core
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::RabbitMQ
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Keepalived
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
index 212f6a23..c946ec8a 100644
--- a/ci/environments/multinode.yaml
+++ b/ci/environments/multinode.yaml
@@ -1,9 +1,20 @@
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml
+ OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
+ OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
+ OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
+ OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
+ OS::TripleO::Services::Keepalived: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephOSD
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
@@ -15,6 +26,7 @@ parameter_defaults:
- OS::TripleO::Services::HeatApiCloudwatch
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
@@ -44,5 +56,9 @@ parameter_defaults:
nova::compute::libvirt::libvirt_virt_type: qemu
# Required for Centos 7.3 and Qemu 2.6.0
nova::compute::libvirt::libvirt_cpu_mode: 'none'
+ #NOTE(gfidente): not great but we need this to deploy on ext4
+ #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
+ ceph::profile::params::osd_max_object_name_len: 256
+ ceph::profile::params::osd_max_object_namespace_len: 64
SwiftCeilometerPipelineEnabled: False
Debug: True
diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml
index 4859c23a..2251cc0c 100644
--- a/ci/environments/multinode_major_upgrade.yaml
+++ b/ci/environments/multinode_major_upgrade.yaml
@@ -1,6 +1,15 @@
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml
+ OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
+ OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
+ OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
+ OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
+ OS::TripleO::Services::Keepalived: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
ControllerServices:
@@ -28,6 +37,7 @@ parameter_defaults:
- OS::TripleO::Services::SaharaApi
- OS::TripleO::Services::SaharaEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::RabbitMQ
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Keepalived
@@ -36,6 +46,15 @@ parameter_defaults:
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::Horizon
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml
index 72e25704..2203665a 100644
--- a/ci/environments/scenario001-multinode.yaml
+++ b/ci/environments/scenario001-multinode.yaml
@@ -1,13 +1,23 @@
resource_registry:
- OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
- OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
- OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml
- OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml
- OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml
- OS::TripleO::Services::PankoApi: /usr/share/openstack-tripleo-heat-templates/puppet/services/panko-api.yaml
- OS::TripleO::Services::Collectd: /usr/share/openstack-tripleo-heat-templates/puppet/services/metrics/collectd.yaml
- OS::TripleO::Services::Tacker: /usr/share/openstack-tripleo-heat-templates/puppet/services/tacker.yaml
- OS::TripleO::Services::Congress: /usr/share/openstack-tripleo-heat-templates/puppet/services/congress.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
+ OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
+ OS::TripleO::Services::CephClient: ../../puppet/services/ceph-client.yaml
+ OS::TripleO::Services::Collectd: ../../puppet/services/metrics/collectd.yaml
+ OS::TripleO::Services::Tacker: ../../puppet/services/tacker.yaml
+ OS::TripleO::Services::Congress: ../../puppet/services/congress.yaml
+ OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml
+ OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
+ OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
+ OS::TripleO::Services::Redis: ../../puppet/services/pacemaker/database/redis.yaml
+ OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
+ OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml
+ OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
+ OS::TripleO::Services::Keepalived: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
ControllerServices:
@@ -19,6 +29,7 @@ parameter_defaults:
- OS::TripleO::Services::HeatApiCloudwatch
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
diff --git a/ci/environments/scenario002-multinode.yaml b/ci/environments/scenario002-multinode.yaml
index bf4721e2..cbcfa9b3 100644
--- a/ci/environments/scenario002-multinode.yaml
+++ b/ci/environments/scenario002-multinode.yaml
@@ -1,9 +1,19 @@
resource_registry:
- OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
- OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Services::BarbicanApi: ../../puppet/services/barbican-api.yaml
OS::TripleO::Services::Zaqar: ../../puppet/services/zaqar.yaml
OS::TripleO::Services::Ec2Api: ../../puppet/services/ec2-api.yaml
+ OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml
+ OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
+ OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
+ OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
+ OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml
+ OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
+ OS::TripleO::Services::Keepalived: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
ControllerServices:
@@ -15,6 +25,7 @@ parameter_defaults:
- OS::TripleO::Services::HeatApiCloudwatch
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
diff --git a/ci/environments/scenario003-multinode.yaml b/ci/environments/scenario003-multinode.yaml
index 9167010c..6e926f74 100644
--- a/ci/environments/scenario003-multinode.yaml
+++ b/ci/environments/scenario003-multinode.yaml
@@ -1,11 +1,19 @@
resource_registry:
- OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
- OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Services::SaharaApi: ../../puppet/services/sahara-api.yaml
OS::TripleO::Services::SaharaEngine: ../../puppet/services/sahara-engine.yaml
OS::TripleO::Services::MistralApi: ../../puppet/services/mistral-api.yaml
OS::TripleO::Services::MistralEngine: ../../puppet/services/mistral-engine.yaml
OS::TripleO::Services::MistralExecutor: ../../puppet/services/mistral-executor.yaml
+ OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml
+ OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
+ OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
+ OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
+ OS::TripleO::Services::Keepalived: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
ControllerServices:
@@ -17,6 +25,7 @@ parameter_defaults:
- OS::TripleO::Services::HeatApiCloudwatch
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml
index 87b10ca1..dc05ab4e 100644
--- a/ci/environments/scenario004-multinode.yaml
+++ b/ci/environments/scenario004-multinode.yaml
@@ -1,16 +1,35 @@
resource_registry:
- OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
- OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
- OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml
- OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml
- OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml
- OS::TripleO::Services::CephRgw: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-rgw.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ OS::TripleO::Services::CephMds: ../../puppet/services/ceph-mds.yaml
+ OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
+ OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
+ OS::TripleO::Services::CephRgw: ../../puppet/services/ceph-rgw.yaml
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
+ OS::TripleO::Services::ManilaApi: ../../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaShare: ../../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml
+ # These enable Pacemaker
+ OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
+ OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml
+ OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
+ OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
+ OS::TripleO::Services::Redis: ../../puppet/services/pacemaker/database/redis.yaml
+ OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
+ OS::TripleO::Services::Keepalived: OS::Heat::None
+
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::CephMds
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::CephRgw
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
@@ -19,6 +38,7 @@ parameter_defaults:
- OS::TripleO::Services::HeatApiCloudwatch
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
@@ -28,6 +48,10 @@ parameter_defaults:
- OS::TripleO::Services::RabbitMQ
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::ManilaApi
+ - OS::TripleO::Services::ManilaScheduler
+ - OS::TripleO::Services::ManilaBackendCephFs
+ - OS::TripleO::Services::ManilaShare
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::NovaConductor
@@ -40,10 +64,6 @@ parameter_defaults:
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- - OS::TripleO::Services::CephMon
- - OS::TripleO::Services::CephOSD
- - OS::TripleO::Services::CephClient
- - OS::TripleO::Services::CephRgw
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
ControllerExtraConfig:
diff --git a/ci/pingtests/scenario004-multinode.yaml b/ci/pingtests/scenario004-multinode.yaml
index a188fd1c..ebdfea14 100644
--- a/ci/pingtests/scenario004-multinode.yaml
+++ b/ci/pingtests/scenario004-multinode.yaml
@@ -118,6 +118,18 @@ resources:
ram: 512
vcpus: 1
+ manila_share_type:
+ type: OS::Manila::ShareType
+ properties:
+ name: default
+ driver_handles_share_servers: false
+
+ manila_share:
+ type: OS::Manila::Share
+ properties:
+ share_protocol: CEPHFS
+ size: 1
+
outputs:
server1_private_ip:
description: IP address of server1 in private network
diff --git a/deployed-server/deployed-server-bootstrap-centos.sh b/deployed-server/deployed-server-bootstrap-centos.sh
index 7266ca57..c86e771c 100644
--- a/deployed-server/deployed-server-bootstrap-centos.sh
+++ b/deployed-server/deployed-server-bootstrap-centos.sh
@@ -8,7 +8,8 @@ yum install -y \
openstack-puppet-modules \
os-net-config \
openvswitch \
- python-heat-agent*
+ python-heat-agent* \
+ openstack-selinux
ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
diff --git a/deployed-server/deployed-server-bootstrap-rhel.sh b/deployed-server/deployed-server-bootstrap-rhel.sh
index 36ff0077..10b4999b 100644
--- a/deployed-server/deployed-server-bootstrap-rhel.sh
+++ b/deployed-server/deployed-server-bootstrap-rhel.sh
@@ -8,6 +8,7 @@ yum install -y \
openstack-puppet-modules \
os-net-config \
openvswitch \
- python-heat-agent*
+ python-heat-agent* \
+ openstack-selinux
ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
diff --git a/deployed-server/deployed-server-roles-data.yaml b/deployed-server/deployed-server-roles-data.yaml
index 9795a00f..084c2f8f 100644
--- a/deployed-server/deployed-server-roles-data.yaml
+++ b/deployed-server/deployed-server-roles-data.yaml
@@ -26,6 +26,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::CephRgw
@@ -41,6 +42,7 @@
- OS::TripleO::Services::HeatApiCloudwatch
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
@@ -108,6 +110,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
@@ -132,6 +135,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -146,6 +150,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
@@ -161,6 +166,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
diff --git a/docker/copy-etc.sh b/docker/copy-etc.sh
deleted file mode 100644
index 1a6cd520..00000000
--- a/docker/copy-etc.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-echo "Copying agent container /etc to /var/lib/etc-data"
-cp -a /etc/* /var/lib/etc-data/
diff --git a/docker/create-config-dir.sh b/docker/create-config-dir.sh
new file mode 100644
index 00000000..1be1a56f
--- /dev/null
+++ b/docker/create-config-dir.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+# This is where we stack puppet configuration (for now)...
+mkdir -p /var/lib/config-data
+
+# This is where the docker-puppet configs end up
+mkdir -p /var/lib/docker-puppet
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
new file mode 100755
index 00000000..8f95208f
--- /dev/null
+++ b/docker/docker-puppet.py
@@ -0,0 +1,262 @@
+#!/usr/bin/env python
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Tool to run puppet inside of the given docker container image.
+# Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON
+# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
+# that can be used to generate config files or run ad-hoc puppet modules
+# inside of a container.
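+#
+# A single entry in that JSON array could look like the following
+# (illustrative only; the exact tags, manifest and image name depend on the
+# service definition that generated it):
+#
+#   ["keystone",
+#    "keystone_config",
+#    "include ::tripleo::profile::base::keystone",
+#    "tripleoupstream/centos-binary-keystone:latest",
+#    []]
+#
+# The script is driven through environment variables rather than arguments,
+# e.g. CONFIG, NET_HOST, NO_ARCHIVE and STEP as used further below.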
+
+import json
+import os
+import subprocess
+import sys
+import tempfile
+import multiprocessing
+
+
+# this is to match what we do in deployed-server
+def short_hostname():
+ subproc = subprocess.Popen(['hostname', '-s'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ cmd_stdout, cmd_stderr = subproc.communicate()
+ return cmd_stdout.rstrip()
+
+
+def pull_image(name):
+ print('Pulling image: %s' % name)
+ subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ cmd_stdout, cmd_stderr = subproc.communicate()
+ print(cmd_stdout)
+ print(cmd_stderr)
+
+
+def rm_container(name):
+ if os.environ.get('SHOW_DIFF', None):
+ print('Diffing container: %s' % name)
+ subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ cmd_stdout, cmd_stderr = subproc.communicate()
+ print(cmd_stdout)
+ print(cmd_stderr)
+
+ print('Removing container: %s' % name)
+ subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ cmd_stdout, cmd_stderr = subproc.communicate()
+ print(cmd_stdout)
+ print(cmd_stderr)
+
+process_count = int(os.environ.get('PROCESS_COUNT',
+ multiprocessing.cpu_count()))
+
+config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
+print('docker-puppet')
+print('CONFIG: %s' % config_file)
+with open(config_file) as f:
+ json_data = json.load(f)
+
+# To save time we support configuring 'shared' services at the same
+# time. For example configuring all of the heat services
+# in a single container pass makes sense and will save some time.
+# To support this we merge shared settings together here.
+#
+# We key off of config_volume as this should be the same for a
+# given group of services. We are also now specifying the container
+# in which the services should be configured. This should match
+# in all instances where the volume name is also the same.
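+#
+# For example, two (hypothetical) services that both declare the 'nova'
+# config_volume end up as one entry whose puppet_tags and manifests are
+# concatenated, so their configuration is generated in a single container run.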
+
+configs = {}
+
+for service in (json_data or []):
+ if service is None:
+ continue
+ if isinstance(service, dict):
+ service = [
+ service.get('config_volume'),
+ service.get('puppet_tags'),
+ service.get('step_config'),
+ service.get('config_image'),
+ service.get('volumes', []),
+ ]
+
+ config_volume = service[0] or ''
+ puppet_tags = service[1] or ''
+ manifest = service[2] or ''
+ config_image = service[3] or ''
+ volumes = service[4] if len(service) > 4 else []
+
+ if not manifest or not config_image:
+ continue
+
+ print('---------')
+ print('config_volume %s' % config_volume)
+ print('puppet_tags %s' % puppet_tags)
+ print('manifest %s' % manifest)
+ print('config_image %s' % config_image)
+ print('volumes %s' % volumes)
+ # We key off of config volume for all configs.
+ if config_volume in configs:
+ # Append puppet tags and manifest.
+ print("Existing service, appending puppet tags and manifest\n")
+ if puppet_tags:
+ configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
+ puppet_tags)
+ if manifest:
+ configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2],
+ manifest)
+ if configs[config_volume][3] != config_image:
+ print("WARNING: Config containers do not match even though"
+ " shared volumes are the same!\n")
+ else:
+ print("Adding new service\n")
+ configs[config_volume] = service
+
+print('Service compilation completed.\n')
+
+def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)):
+
+ print('---------')
+ print('config_volume %s' % config_volume)
+ print('puppet_tags %s' % puppet_tags)
+ print('manifest %s' % manifest)
+ print('config_image %s' % config_image)
+ print('volumes %s' % volumes)
+ hostname = short_hostname()
+ sh_script = '/var/lib/docker-puppet/docker-puppet-%s.sh' % config_volume
+
+ with open(sh_script, 'w') as script_file:
+ os.chmod(script_file.name, 0755)
+ script_file.write("""#!/bin/bash
+ set -ex
+ mkdir -p /etc/puppet
+ cp -a /tmp/puppet-etc/* /etc/puppet
+ rm -Rf /etc/puppet/ssl # not in use and causes permission errors
+ echo '{"step": %(step)s}' > /etc/puppet/hieradata/docker.json
+ TAGS=""
+ if [ -n "%(puppet_tags)s" ]; then
+ TAGS='--tags "%(puppet_tags)s"'
+ fi
+ FACTER_hostname=%(hostname)s FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
+
+ # Disables archiving
+ if [ -z "%(no_archive)s" ]; then
+ rm -Rf /var/lib/config-data/%(name)s
+
+ # copying etc should be enough for most services
+ mkdir -p /var/lib/config-data/%(name)s/etc
+ cp -a /etc/* /var/lib/config-data/%(name)s/etc/
+
+ if [ -d /root/ ]; then
+ cp -a /root/ /var/lib/config-data/%(name)s/root/
+ fi
+ if [ -d /var/lib/ironic/tftpboot/ ]; then
+ mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/
+ cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/tftpboot/
+ fi
+ if [ -d /var/lib/ironic/httpboot/ ]; then
+ mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/
+ cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/httpboot/
+ fi
+
+        # apache services may have files placed in /var/www/
+ if [ -d /var/www/ ]; then
+ mkdir -p /var/lib/config-data/%(name)s/var/www
+ cp -a /var/www/* /var/lib/config-data/%(name)s/var/www/
+ fi
+ fi
+ """ % {'puppet_tags': puppet_tags, 'name': config_volume,
+ 'hostname': hostname,
+ 'no_archive': os.environ.get('NO_ARCHIVE', ''),
+ 'step': os.environ.get('STEP', '6')})
+
+ with tempfile.NamedTemporaryFile() as tmp_man:
+ with open(tmp_man.name, 'w') as man_file:
+ man_file.write('include ::tripleo::packages\n')
+ man_file.write(manifest)
+
+ rm_container('docker-puppet-%s' % config_volume)
+ pull_image(config_image)
+
+ dcmd = ['/usr/bin/docker', 'run',
+ '--user', 'root',
+ '--name', 'docker-puppet-%s' % config_volume,
+ '--volume', '%s:/etc/config.pp:ro' % tmp_man.name,
+ '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',
+ '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
+ '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw',
+ '--volume', 'tripleo_logs:/var/log/tripleo/',
+ '--volume', '%s:%s:rw' % (sh_script, sh_script) ]
+
+ for volume in volumes:
+ if volume:
+ dcmd.extend(['--volume', volume])
+
+ dcmd.extend(['--entrypoint', sh_script])
+
+ env = {}
+ # NOTE(flaper87): Always copy the DOCKER_* environment variables as
+ # they contain the access data for the docker daemon.
+ for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
+ env[k] = os.environ.get(k)
+
+ if os.environ.get('NET_HOST', 'false') == 'true':
+ print('NET_HOST enabled')
+ dcmd.extend(['--net', 'host', '--volume',
+ '/etc/hosts:/etc/hosts:ro'])
+ dcmd.append(config_image)
+
+ subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, env=env)
+ cmd_stdout, cmd_stderr = subproc.communicate()
+ print(cmd_stdout)
+ print(cmd_stderr)
+ if subproc.returncode != 0:
+ print('Failed running docker-puppet.py for %s' % config_volume)
+ rm_container('docker-puppet-%s' % config_volume)
+ return subproc.returncode
+
+# Holds all the information for each process to consume.
+# Instead of starting them all linearly we run them using a process
+# pool. This creates a list of arguments for the above function
+# to consume.
+process_map = []
+
+for config_volume in configs:
+
+ service = configs[config_volume]
+ puppet_tags = service[1] or ''
+ manifest = service[2] or ''
+ config_image = service[3] or ''
+ volumes = service[4] if len(service) > 4 else []
+
+ if puppet_tags:
+ puppet_tags = "file,file_line,concat,%s" % puppet_tags
+ else:
+ puppet_tags = "file,file_line,concat"
+
+ process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])
+
+for p in process_map:
+ print '--\n%s' % p
+
+# Fire off processes to perform each configuration. Defaults
+# to the number of CPUs on the system.
+p = multiprocessing.Pool(process_count)
+p.map(mp_puppet_config, process_map)
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
new file mode 100644
index 00000000..301d838f
--- /dev/null
+++ b/docker/docker-steps.j2
@@ -0,0 +1,350 @@
+# certain initialization steps (run in a container) will occur
+# on the first role listed in the roles file
+{% set primary_role_name = roles[0].name -%}
+
+heat_template_version: ocata
+
+description: >
+ Post-deploy configuration steps via puppet for all roles,
+ as defined in ../roles_data.yaml
+
+parameters:
+ servers:
+ type: json
+ description: Mapping of Role name e.g Controller to a list of servers
+ role_data:
+ type: json
+ description: Mapping of Role name e.g Controller to the per-role data
+ DeployIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting this to a unique value will re-run any deployment tasks which
+ perform configuration on a Heat stack-update.
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+ # These utility tasks use docker-puppet.py to execute tasks via puppet
+ # We only execute these on the first node in the primary role
+ {{primary_role_name}}DockerPuppetTasks:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ yaql:
+ expression:
+ dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1]))
+ data:
+ docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]}
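+  # The yaql expression above merges the per-service docker_puppet_tasks
+  # maps into a single dict keyed by step, roughly (illustrative):
+  #   [{step_1: taskA}, null, {step_1: taskB}] -> {step_1: [taskA, taskB]}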
+
+# BEGIN primary_role_name docker-puppet-tasks (run only on a single node)
+{% for step in range(1, 6) %}
+
+ {{primary_role_name}}DockerPuppetJsonConfig{{step}}:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json:
+ {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']}
+
+ {{primary_role_name}}DockerPuppetJsonDeployment{{step}}:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ server: {get_param: [servers, {{primary_role_name}}, '0']}
+ config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}}
+
+ {{primary_role_name}}DockerPuppetTasksConfig{{step}}:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: docker-puppet.py}
+ inputs:
+ - name: CONFIG
+ - name: NET_HOST
+ - name: NO_ARCHIVE
+ - name: STEP
+
+ {{primary_role_name}}DockerPuppetTasksDeployment{{step}}:
+ type: OS::Heat::SoftwareDeployment
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step}}
+ - {{dep.name}}ContainersDeployment_Step{{step}}
+ {% endfor %}
+ - {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
+ properties:
+ name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
+ server: {get_param: [servers, {{primary_role_name}}, '0']}
+ config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}}
+ input_values:
+ CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
+ NET_HOST: 'true'
+ NO_ARCHIVE: 'true'
+ STEP: {{step}}
+
+{% endfor %}
+# END primary_role_name docker-puppet-tasks
+
+{% for role in roles %}
+ # Post deployment steps for all roles
+ # A single config is re-applied with an incrementing step number
+ # {{role.name}} Role steps
+ {{role.name}}ArtifactsConfig:
+ type: ../puppet/deploy-artifacts.yaml
+
+ {{role.name}}ArtifactsDeploy:
+ type: OS::Heat::StructuredDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ArtifactsConfig}
+
+ {{role.name}}PreConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PreConfig
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}CreateConfigDir:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: create-config-dir.sh}
+
+ {{role.name}}CreateConfigDirDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}CreateConfigDir}
+
+ {{role.name}}HostPrepAnsible:
+ type: OS::Heat::Value
+ properties:
+ value:
+ str_replace:
+ template: CONFIG
+ params:
+ CONFIG:
+ - hosts: localhost
+ connection: local
+ tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
+
+ {{role.name}}HostPrepConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: ansible
+ options:
+ modulepath: /usr/share/ansible-modules
+ config: {get_attr: [{{role.name}}HostPrepAnsible, value]}
+
+ {{role.name}}HostPrepDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}HostPrepConfig}
+
+ # this creates a JSON config file for our docker-puppet.py script
+ {{role.name}}GenPuppetConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ /var/lib/docker-puppet/docker-puppet.json:
+ {get_param: [role_data, {{role.name}}, puppet_config]}
+
+ {{role.name}}GenPuppetDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}GenPuppetConfig}
+
+ {{role.name}}GenerateConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: docker-puppet.py}
+
+ {{role.name}}GenerateConfigDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment, {{role.name}}HostPrepDeployment]
+ properties:
+ name: {{role.name}}GenerateConfigDeployment
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}GenerateConfig}
+
+ {{role.name}}PuppetStepConfig:
+ type: OS::Heat::Value
+ properties:
+ type: string
+ value:
+ yaql:
+ expression:
+ # select 'step_config' only from services that do not have a docker_config
+ $.data.service_names.zip($.data.step_config, $.data.docker_config).where($[2] = null).where($[1] != null).select($[1]).join("\n")
+ data:
+ service_names: {get_param: [role_data, {{role.name}}, service_names]}
+ step_config: {get_param: [role_data, {{role.name}}, step_config]}
+ docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
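+  # e.g. (illustrative): with service_names [a, b], step_config [Sa, Sb] and
+  # docker_config [null, Db], only Sa is kept and joined into the baremetal
+  # puppet manifest; b is configured via its docker_config instead.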
+
+ {{role.name}}DockerConfig:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ yaql:
+ expression:
+ # select 'docker_config' only from services that have it
+ $.data.service_names.zip($.data.docker_config).where($[1] != null).select($[1]).reduce($1.mergeWith($2), {})
+ data:
+ service_names: {get_param: [role_data, {{role.name}}, service_names]}
+ docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
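+  # e.g. (illustrative): [{step_1: {c1: ...}}, null, {step_2: {c2: ...}}]
+  # is merged into a single map {step_1: {c1: ...}, step_2: {c2: ...}}.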
+
+ # Here we are dumping all the docker container startup configuration data
+ # so that we can have access to how they are started outside of heat
+ # and docker-cmd. This lets us create command line tools to start and
+ # test these containers.
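+  # (The docker-toool script added in this change reads this same file,
+  # /var/lib/docker-container-startup-configs.json, to reproduce those
+  # container command lines outside of Heat.)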
+ {{role.name}}DockerConfigJsonStartupData:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ /var/lib/docker-container-startup-configs.json:
+ {get_attr: [{{role.name}}DockerConfig, value]}
+
+ {{role.name}}DockerConfigJsonStartupDataDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: {{role.name}}DockerConfigJsonStartupData}
+ servers: {get_param: [servers, {{role.name}}]}
+
+ {{role.name}}KollaJsonConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ {get_param: [role_data, {{role.name}}, kolla_config]}
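+  # Each kolla_config entry maps a file path to a Kolla-style config.json
+  # body (the command to run plus any config files to copy), e.g.
+  # (illustrative only):
+  #   /var/lib/kolla/config_files/heat_engine.json:
+  #     command: /usr/bin/heat-engine --config-file /etc/heat/heat.conf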
+
+ {{role.name}}KollaJsonDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ name: {{role.name}}KollaJsonDeployment
+ config: {get_resource: {{role.name}}KollaJsonConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+
+ # BEGIN BAREMETAL CONFIG STEPS
+
+ {% if role.name == 'Controller' %}
+ ControllerPrePuppet:
+ type: OS::TripleO::Tasks::ControllerPrePuppet
+ properties:
+ servers: {get_param: [servers, Controller]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+ {% endif %}
+
+ {{role.name}}Config:
+ type: OS::TripleO::{{role.name}}Config
+ properties:
+ StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]}
+
+ {% for step in range(1, 6) %}
+
+ {{role.name}}Deployment_Step{{step}}:
+ type: OS::Heat::StructuredDeploymentGroup
+ {% if step == 1 %}
+ depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ {% else %}
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ - {{dep.name}}ContainersDeployment_Step{{step -1}}
+ {% endfor %}
+ - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
+ {% endif %}
+ properties:
+ name: {{role.name}}Deployment_Step{{step}}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: {{step}}
+ update_identifier: {get_param: DeployIdentifier}
+
+ {% endfor %}
+ # END BAREMETAL CONFIG STEPS
+
+ # BEGIN CONTAINER CONFIG STEPS
+ {% for step in range(1, 6) %}
+
+ {{role.name}}ContainersConfig_Step{{step}}:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: docker-cmd
+ config:
+ {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]}
+
+ {{role.name}}ContainersDeployment_Step{{step}}:
+ type: OS::Heat::StructuredDeploymentGroup
+ {% if step == 1 %}
+ depends_on:
+ - {{role.name}}PreConfig
+ - {{role.name}}KollaJsonDeployment
+ - {{role.name}}GenPuppetDeployment
+ - {{role.name}}GenerateConfigDeployment
+ {% else %}
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}ContainersDeployment_Step{{step -1}}
+ - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
+ {% endif %}
+ properties:
+ name: {{role.name}}ContainersDeployment_Step{{step}}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}}
+
+ {% endfor %}
+ # END CONTAINER CONFIG STEPS
+
+ {{role.name}}PostConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PostConfig
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ - {{primary_role_name}}DockerPuppetTasksDeployment5
+ {% endfor %}
+ properties:
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ # Note, this should come last, so use depends_on to ensure
+ # this is created after any other resources.
+ {{role.name}}ExtraConfigPost:
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}PostConfig
+ {% endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+
+ {% if role.name == 'Controller' %}
+ ControllerPostPuppet:
+ depends_on:
+ - ControllerExtraConfigPost
+ type: OS::TripleO::Tasks::ControllerPostPuppet
+ properties:
+ servers: {get_param: [servers, Controller]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+ {% endif %}
+
+{% endfor %}
diff --git a/docker/docker-toool b/docker/docker-toool
new file mode 100755
index 00000000..36aba4a7
--- /dev/null
+++ b/docker/docker-toool
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import os
+import shutil
+import sys
+import json
+
+docker_cmd = '/bin/docker'
+
+# Tool to start docker containers as configured via
+# tripleo-heat-templates.
+#
+# This tool reads data from a json file generated from heat when the
+# TripleO stack is run. All the configuration data used to start the
+# containerized services is in this file.
+#
+# By default this tool lists all the containers that are started and
+# their start order.
+#
+# If you wish to see the command line used to start a given container,
+# specify it by name using the --container argument. --run can then be
+# used with this to actually execute docker to run the container.
+#
+# Other options listed allow you to modify this command line for
+# debugging purposes. For example:
+#
+# docker-toool -c swift-proxy -r -e /bin/bash -u root -i -n test
+#
+# will run the swift proxy container as user root, executing /bin/bash,
+# named 'test', and will run interactively (e.g. with -ti).
+
+
+def parse_opts(argv):
+ parser = argparse.ArgumentParser("Tool to start docker containers via "
+ "TripleO configurations")
+ parser.add_argument('-f', '--config',
+ help="""File to use as docker startup configuration data.""",
+ default='/var/lib/docker-container-startup-configs.json')
+ parser.add_argument('-r', '--run',
+ action='store_true',
+ help="""Run the container as specified with --container.""",
+ default=False)
+ parser.add_argument('-e', '--command',
+ help="""Override the command used to run the container.""",
+ default='')
+ parser.add_argument('-c', '--container',
+ help="""Specify a container to run or show the command for.""",
+ default='')
+ parser.add_argument('-u', '--user',
+ help="""User to run container as.""",
+ default='')
+ parser.add_argument('-n', '--name',
+ help="""Name of container.""",
+ default='')
+ parser.add_argument('-i', '--interactive',
+ action='store_true',
+ help="""Start docker container interactively (-ti).""",
+ default=False)
+ opts = parser.parse_args(argv[1:])
+
+ return opts
+
+def docker_arg_map(key, value):
+ value = str(value).encode('ascii', 'ignore')
+ return {
+ 'environment': "--env=%s" % value,
+ # 'image': value,
+ 'net': "--net=%s" % value,
+ 'pid': "--pid=%s" % value,
+ 'privileged': "--privileged=%s" % value.lower(),
+ #'restart': "--restart=%s" % "false",
+ 'user': "--user=%s" % value,
+ 'volumes': "--volume=%s" % value,
+ 'volumes_from': "--volumes-from=%s" % value,
+ }.get(key, None)
+
+def run_docker_container(opts, container_name):
+ container_found = False
+
+ with open(opts.config) as f:
+ json_data = json.load(f)
+
+ for step in (json_data or []):
+ if step is None:
+ continue
+ for container in (json_data[step] or []):
+ if container == container_name:
+ print('container found: %s' % container)
+ container_found = True
+ # A few positional arguments:
+ command = ''
+ image = ''
+
+ cmd = [
+ docker_cmd,
+ 'run',
+ '--name',
+ opts.name or container
+ ]
+ for container_data in (json_data[step][container] or []):
+ if container_data == "environment":
+ for env in (json_data[step][container][container_data] or []):
+ arg = docker_arg_map("environment", env)
+ if arg:
+ cmd.append(arg)
+ elif container_data == "volumes":
+ for volume in (json_data[step][container][container_data] or []):
+ arg = docker_arg_map("volumes", volume)
+ if arg:
+ cmd.append(arg)
+ elif container_data == "volumes_from":
+ for volume in (json_data[step][container][container_data] or []):
+ arg = docker_arg_map("volumes_from", volume)
+ if arg:
+ cmd.append(arg)
+ elif container_data == 'command':
+ command = json_data[step][container][container_data]
+ elif container_data == 'image':
+ image = json_data[step][container][container_data]
+ else:
+ # Only add a restart if we're not interactive
+ if container_data == 'restart':
+ if opts.interactive:
+ continue
+ if container_data == 'user':
+ if opts.user:
+ continue
+ arg = docker_arg_map(container_data,
+ json_data[step][container][container_data])
+ if arg:
+ cmd.append(arg)
+
+ if opts.user:
+ cmd.append('--user')
+ cmd.append(opts.user)
+ if opts.interactive:
+ cmd.append('-ti')
+ # May as well remove it when we're done too
+ cmd.append('--rm')
+ cmd.append(image)
+ if opts.command:
+ cmd.append(opts.command)
+ elif command:
+     # command in the startup config JSON may be a string or a list
+     cmd.extend(command if isinstance(command, list) else command.split())
+
+ print ' '.join(cmd)
+
+ if opts.run:
+ os.execl(docker_cmd, *cmd)
+
+ if not container_found:
+ print("Container '%s' not found!" % container_name)
+
+def list_docker_containers(opts):
+ print opts
+ with open(opts.config) as f:
+ json_data = json.load(f)
+
+ for step in (json_data or []):
+ if step is None:
+ continue
+ print step
+ for container in (json_data[step] or []):
+ print('\tcontainer: %s' % container)
+ for container_data in (json_data[step][container] or []):
+ #print('\t\tcontainer_data: %s' % container_data)
+ if container_data == "start_order":
+ print('\t\tstart_order: %s' % json_data[step][container][container_data])
+
+opts = parse_opts(sys.argv)
+
+if opts.container:
+ run_docker_container(opts, opts.container)
+else:
+ list_docker_containers(opts)
+
diff --git a/docker/firstboot/setup_docker_host.sh b/docker/firstboot/setup_docker_host.sh
new file mode 100755
index 00000000..8b4c6a03
--- /dev/null
+++ b/docker/firstboot/setup_docker_host.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -eux
+# This file contains setup steps that can't be or have not yet been moved to
+# puppet
+
+# Disable libvirtd since it conflicts with nova_libvirt container
+/usr/bin/systemctl disable libvirtd.service
+/usr/bin/systemctl stop libvirtd.service
diff --git a/docker/firstboot/install_docker_agents.yaml b/docker/firstboot/setup_docker_host.yaml
index 41a87406..2f258987 100644
--- a/docker/firstboot/install_docker_agents.yaml
+++ b/docker/firstboot/setup_docker_host.yaml
@@ -1,9 +1,6 @@
heat_template_version: ocata
parameters:
- DockerAgentImage:
- type: string
- default: heat-docker-agents
DockerNamespace:
type: string
default: tripleoupstream
@@ -17,22 +14,18 @@ resources:
type: OS::Heat::MultipartMime
properties:
parts:
- - config: {get_resource: install_docker_agents}
+ - config: {get_resource: setup_docker_host}
- install_docker_agents:
+ setup_docker_host:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
str_replace:
params:
- $agent_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerAgentImage} ]
$docker_registry: {get_param: DockerNamespace}
$docker_namespace_is_registry: {get_param: DockerNamespaceIsRegistry}
- template: {get_file: ./start_docker_agents.sh}
+ template: {get_file: ./setup_docker_host.sh}
outputs:
OS::stack_id:
diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh
deleted file mode 100755
index 1c5cc18d..00000000
--- a/docker/firstboot/start_docker_agents.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/bash
-set -eux
-
-# TODO remove this when built image includes docker
-if [ ! -f "/usr/bin/docker" ]; then
- yum -y install docker
-fi
-
-# Local docker registry 1.8
-# NOTE(mandre) $docker_namespace_is_registry is not a bash variable but is
-# a place holder for text replacement done via heat
-if [ "$docker_namespace_is_registry" = "True" ]; then
- /usr/bin/systemctl stop docker.service
- # if namespace is used with local registry, trim all namespacing
- trim_var=$docker_registry
- registry_host="${trim_var%%/*}"
- /bin/sed -i -r "s/^[# ]*INSECURE_REGISTRY *=.+$/INSECURE_REGISTRY='--insecure-registry $registry_host'/" /etc/sysconfig/docker
-fi
-
-mkdir -p /var/lib/etc-data/json-config #FIXME: this should be a docker data container
-
-# NOTE(flaper87): Heat Agent required mounts
-AGENT_COMMAND_MOUNTS="\
--v /var/lib/etc-data:/var/lib/etc-data \
--v /run:/run \
--v /etc/hosts:/etc/hosts \
--v /etc:/host/etc \
--v /var/lib/dhclient:/var/lib/dhclient \
--v /var/lib/cloud:/var/lib/cloud \
--v /var/lib/heat-cfntools:/var/lib/heat-cfntools \
--v /var/lib/os-collect-config:/var/lib/os-collect-config \
--v /var/lib/os-apply-config-deployments:/var/lib/os-apply-config-deployments \
--v /var/lib/heat-config:/var/lib/heat-config \
--v /etc/sysconfig/docker:/etc/sysconfig/docker \
--v /etc/sysconfig/network-scripts:/etc/sysconfig/network-scripts \
--v /usr/lib64/libseccomp.so.2:/usr/lib64/libseccomp.so.2 \
--v /usr/bin/docker:/usr/bin/docker \
--v /usr/bin/docker-current:/usr/bin/docker-current \
--v /var/lib/os-collect-config:/var/lib/os-collect-config"
-
-# heat-docker-agents service
-cat <<EOF > /etc/systemd/system/heat-docker-agents.service
-[Unit]
-Description=Heat Docker Agent Container
-After=docker.service
-Requires=docker.service
-Before=os-collect-config.service
-Conflicts=os-collect-config.service
-
-[Service]
-User=root
-Restart=always
-ExecStartPre=-/usr/bin/docker rm -f heat-agents
-ExecStart=/usr/bin/docker run --name heat-agents --privileged --net=host \
- $AGENT_COMMAND_MOUNTS \
- --entrypoint=/usr/bin/os-collect-config $agent_image
-ExecStop=/usr/bin/docker stop heat-agents
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-# enable and start heat-docker-agents
-/usr/bin/systemctl enable heat-docker-agents.service
-/usr/bin/systemctl start --no-block heat-docker-agents.service
-
-# Disable libvirtd
-/usr/bin/systemctl disable libvirtd.service
-/usr/bin/systemctl stop libvirtd.service
diff --git a/docker/post-upgrade.j2.yaml b/docker/post-upgrade.j2.yaml
new file mode 100644
index 00000000..4477f868
--- /dev/null
+++ b/docker/post-upgrade.j2.yaml
@@ -0,0 +1,4 @@
+# Note the include here is the same as post.j2.yaml but the data used at
+# the time of rendering is different if any roles disable upgrades
+{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% include 'docker-steps.j2' %}
diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml
index dfa8ac2e..fd956215 100644
--- a/docker/post.j2.yaml
+++ b/docker/post.j2.yaml
@@ -1,211 +1 @@
-heat_template_version: ocata
-
-description: >
- Post-deploy configuration steps via puppet for all roles,
- as defined in ../roles_data.yaml
-
-parameters:
- servers:
- type: json
- description: Mapping of Role name e.g Controller to a list of servers
- role_data:
- type: json
- description: Mapping of Role name e.g Controller to the per-role data
-
- DeployIdentifier:
- default: ''
- type: string
- description: >
- Setting this to a unique value will re-run any deployment tasks which
- perform configuration on a Heat stack-update.
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
-{% for role in roles %}
- # Post deployment steps for all roles
- # A single config is re-applied with an incrementing step number
- # {{role.name}} Role steps
- {{role.name}}ArtifactsConfig:
- type: ../puppet/deploy-artifacts.yaml
-
- {{role.name}}ArtifactsDeploy:
- type: OS::Heat::StructuredDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}ArtifactsConfig}
-
- {{role.name}}PreConfig:
- type: OS::TripleO::Tasks::{{role.name}}PreConfig
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}Config:
- type: OS::TripleO::{{role.name}}Config
- properties:
- StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
- {% if role.name.lower() == 'compute' %}
- PuppetTags: {get_param: [role_data, {{role.name}}, puppet_tags]}
- {% endif %}
-
- # Step through a series of configuration steps
- {{role.name}}Deployment_Step1:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- properties:
- name: {{role.name}}Deployment_Step1
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: 1
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}Deployment_Step2:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step1
- {% endfor %}
- properties:
- name: {{role.name}}Deployment_Step2
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}Deployment_Step3:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step2
- {% endfor %}
- properties:
- name: {{role.name}}Deployment_Step3
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: 3
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}Deployment_Step4:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step3
- {% endfor %}
- properties:
- name: {{role.name}}Deployment_Step4
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: 4
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}Deployment_Step5:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step4
- {% endfor %}
- properties:
- name: {{role.name}}Deployment_Step5
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: 5
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}PostConfig:
- type: OS::TripleO::Tasks::{{role.name}}PostConfig
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step5
- {% endfor %}
- properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- {{role.name}}ExtraConfigPost:
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}PostConfig
- {% endfor %}
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: [servers, {{role.name}}]}
-
- {% if role.name.lower() == 'compute' %}
- CopyEtcConfig:
- type: OS::Heat::SoftwareConfig
- depends_on: {{role.name}}PostConfig
- properties:
- group: script
- outputs:
- - name: result
- config: {get_file: ../docker/copy-etc.sh}
-
- CopyEtcDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- name: CopyEtcDeployment
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: CopyEtcConfig}
-
- {{role.name}}KollaJsonConfig:
- type: OS::Heat::StructuredConfig
- depends_on: CopyEtcDeployment
- properties:
- group: json-file
- config:
- {get_param: [role_data, {{role.name}}, kolla_config]}
-
- {{role.name}}KollaJsonDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- name: {{role.name}}KollaJsonDeployment
- config: {get_resource: {{role.name}}KollaJsonConfig}
- servers: {get_param: [servers, {{role.name}}]}
-
- {{role.name}}ContainersConfig_Step1:
- type: OS::Heat::StructuredConfig
- depends_on: {{role.name}}KollaJsonDeployment
- properties:
- group: docker-cmd
- config:
- {get_param: [role_data, {{role.name}}, docker_config, step_1]}
-
- {{role.name}}ContainersConfig_Step2:
- type: OS::Heat::StructuredConfig
- depends_on: {{role.name}}KollaJsonDeployment
- properties:
- group: docker-cmd
- config:
- {get_param: [role_data, {{role.name}}, docker_config, step_2]}
-
- {{role.name}}ContainersDeployment_Step1:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- properties:
- name: {{role.name}}ContainersDeployment_Step1
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}ContainersConfig_Step1}
-
- {{role.name}}ContainersDeployment_Step2:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: {{role.name}}ContainersDeployment_Step1
- properties:
- name: {{role.name}}ContainersDeployment_Step2
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}ContainersConfig_Step2}
- {% endif %}
-{% endfor %}
+{% include 'docker-steps.j2' %}
diff --git a/docker/services/README.rst b/docker/services/README.rst
index 60719bfc..465e4abe 100644
--- a/docker/services/README.rst
+++ b/docker/services/README.rst
@@ -1,65 +1,126 @@
-========
-services
-========
+===============
+Docker Services
+===============
-A TripleO nested stack Heat template that encapsulates generic configuration
-data to configure a specific service. This generally includes everything
-needed to configure the service excluding the local bind ports which
-are still managed in the per-node role templates directly (controller.yaml,
-compute.yaml, etc.). All other (global) service settings go into
-the puppet/service templates.
+TripleO docker services are currently built on top of the puppet services.
+To do this, each of the docker services includes the output of the
+t-h-t puppet/services templates where appropriate.
-Input Parameters
-----------------
+In general, global docker-specific service settings should reside in these
+templates (templates in the docker/services directory). The required and
+optional items are specified in the docker settings section below.
-Each service may define its own input parameters and defaults.
-Operators will use the parameter_defaults section of any Heat
-environment to set per service parameters.
+If you are adding a config setting that applies to both docker and
+baremetal, that setting should (so long as we use puppet) go into the
+puppet/services templates themselves.
-Config Settings
----------------
+Building Kolla Images
+---------------------
+
+TripleO currently relies on Kolla docker containers. Kolla supports container
+customization and we are making use of this feature within TripleO to inject
+puppet (our configuration tool of choice) into the Kolla base images. The
+undercloud nova-scheduler also requires openstack-tripleo-common to
+provide custom filters.
+
+To build Kolla images for TripleO, adjust your kolla config [*]_ to build your
+centos base image with puppet, using the example below:
+
+.. code-block:: shell
+
+   $ cat template-overrides.j2
+   {% extends parent_template %}
+   {% set base_centos_binary_packages_append = ['puppet'] %}
+   {% set nova_scheduler_packages_append = ['openstack-tripleo-common'] %}
-Each service may define a config_settings output variable which returns
-Hiera settings to be configured.
+   kolla-build --base centos --template-override template-overrides.j2
-Steps
------
+..
+.. [*] See the
+ `override file <https://github.com/openstack/tripleo-common/blob/master/contrib/tripleo_kolla_template_overrides.j2>`_
+ which can be used to build Kolla packages that work with TripleO, and an
+   `example build script <https://github.com/dprince/undercloud_containers/blob/master/build_kolla.sh>`_.
+
+Docker settings
+---------------
Each service may define an output variable which returns a puppet manifest
snippet that will run at each of the following steps. Earlier manifests
are re-asserted when applying later ones. A minimal sketch of these outputs
follows the list below.
- * config_settings: Custom hiera settings for this service. These are
- used to generate configs.
+  * config_settings: This setting is generally inherited from the
+    puppet/services templates and only needs to be appended
+    to on occasion if docker-specific config settings are required.
+
+ * step_config: This setting controls the manifest that is used to
+ create docker config files via puppet. The puppet tags below are
+ used along with this manifest to generate a config directory for
+ this container.
* kolla_config: Contains YAML that represents how to map config files
into the kolla container. This config file is typically mapped into
the container itself at the /var/lib/kolla/config_files/config.json
location and drives how kolla's external config mechanisms work.
- * step_config: A puppet manifest that is used to step through the deployment
- sequence. Each sequence is given a "step" (via hiera('step') that provides
- information for when puppet classes should activate themselves.
-
- * docker_compose:
-
- * container_name:
-
- * volumes:
+  * docker_config: Data that is passed to the docker-cmd hook to configure
+    a container, or group of containers, at each step. See the available
+    steps below and the related docker-cmd hook documentation in the
+    heat-agents project.
+
+  * puppet_config: This section is a nested set of key/value pairs
+    that drives the creation of config files using puppet.
+    Required parameters include:
+
+ * puppet_tags: Puppet resource tag names that are used to generate config
+ files with puppet. Only the named config resources are used to generate
+ a config file. Any service that specifies tags will have the default
+ tags of 'file,concat,file_line' appended to the setting.
+ Example: keystone_config
+
+ * config_volume: The name of the volume (directory) where config files
+ will be generated for this service. Use this as the location to
+ bind mount into the running Kolla container for configuration.
+
+ * config_image: The name of the docker image that will be used for
+ generating configuration files. This is often the same container
+ that the runtime service uses. Some services share a common set of
+ config files which are generated in a common base container.
+
+ * step_config: This setting controls the manifest that is used to
+ create docker config files via puppet. The puppet tags below are
+ used along with this manifest to generate a config directory for
+ this container.
+
+ * docker_puppet_tasks: This section provides data to drive the
+ docker-puppet.py tool directly. The task is executed only once
+ within the cluster (not on each node) and is useful for several
+ puppet snippets we require for initialization of things like
+ keystone endpoints, database users, etc. See docker-puppet.py
+ for formatting.
+
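+To make the relationship between these settings concrete, the snippet below
+is a minimal sketch of the docker-specific portion of a service's role_data
+output. The service name, image and paths are placeholders rather than an
+existing template; see the aodh-api.yaml added in this change for a complete,
+real example.
+
+.. code-block:: yaml
+
+   outputs:
+     role_data:
+       value:
+         # puppet_config drives config file generation via docker-puppet.py
+         puppet_config:
+           config_volume: example
+           puppet_tags: example_config
+           step_config: 'include ::tripleo::profile::base::example'
+           config_image: tripleoupstream/centos-binary-example:latest
+         # kolla_config maps the generated files into the runtime container
+         kolla_config:
+           /var/lib/kolla/config_files/example.json:
+             command: /usr/bin/example-api
+             config_files:
+               - dest: /etc/example/example.conf
+                 owner: example
+                 perm: '0640'
+                 source: /var/lib/kolla/config_files/src/etc/example/example.conf
+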
+Docker steps
+------------
+Similar to baremetal, docker containers are brought up in a stepwise manner.
+The current architecture supports bringing up baremetal services alongside
+containers. For each step, the baremetal puppet manifests are executed first,
+and then any docker containers are brought up. A sketch of how a service maps
+its containers onto these steps follows the list below.
Steps correlate to the following:
- 1) Service configuration generation with puppet.
-
- 2) Early Openstack Service setup (database init?)
-
- 3) Early containerized networking services startup (OVS)
-
- 4) Network configuration
-
- 5) General OpenStack Services
-
- 6) Service activation (Pacemaker)
-
- 7) Fencing (Pacemaker)
-
+ Pre) Container config files are generated per hiera settings.
+ 1) Load Balancer configuration baremetal
+ a) step 1 baremetal
+ b) step 1 containers
+ 2) Core Services (Database/Rabbit/NTP/etc.)
+ a) step 2 baremetal
+ b) step 2 containers
+ 3) Early OpenStack Service setup (Ringbuilder, etc.)
+ a) step 3 baremetal
+ b) step 3 containers
+ 4) General OpenStack Services
+ a) step 4 baremetal
+ b) step 4 containers
+ c) Keystone containers post initialization (tenant,service,endpoint creation)
+ 5) Service activation (Pacemaker)
+ a) step 5 baremetal
+ b) step 5 containers
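+
+As a sketch of how a service attaches its containers to these steps, the
+docker_config below runs a hypothetical database sync at step 3 and the
+service itself at step 4, mirroring the pattern used by the aodh and glance
+templates in this change (the example_* names and volume paths are
+illustrative only):
+
+.. code-block:: yaml
+
+   docker_config:
+     step_3:
+       example_db_sync:
+         image: tripleoupstream/centos-binary-example:latest
+         net: host
+         detach: false
+         volumes:
+           - /var/lib/config-data/example/etc/example/:/etc/example:ro
+         command: /usr/bin/example-dbsync
+     step_4:
+       example_api:
+         image: tripleoupstream/centos-binary-example:latest
+         net: host
+         restart: always
+         volumes:
+           - /var/lib/kolla/config_files/example.json:/var/lib/kolla/config_files/config.json:ro
+           - /var/lib/config-data/example/:/var/lib/kolla/config_files/src:ro
+         environment:
+           - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS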
diff --git a/docker/services/aodh-api.yaml b/docker/services/aodh-api.yaml
new file mode 100644
index 00000000..ca410d6d
--- /dev/null
+++ b/docker/services/aodh-api.yaml
@@ -0,0 +1,123 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Aodh API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhApiImage:
+ description: image
+ default: 'centos-binary-aodh-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ AodhApiPuppetBase:
+ type: ../../puppet/services/aodh-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Aodh API role.
+ value:
+ service_name: {get_attr: [AodhApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhApiPuppetBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [AodhApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_api_paste_ini,aodh_config
+ step_config: *step_config
+ config_image: &aodh_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ config_files:
+ - dest: /etc/aodh/aodh.conf
+ owner: aodh
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/aodh/aodh.conf
+ - dest: /etc/httpd/conf.d/10-aodh_wsgi.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-aodh_wsgi.conf
+ - dest: /etc/httpd/conf/httpd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
+ - dest: /etc/httpd/conf/ports.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
+ - dest: /var/www/cgi-bin/aodh/app
+ owner: aodh
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/www/cgi-bin/aodh/app
+ docker_config:
+ step_3:
+ aodh-init-log:
+ start_order: 0
+ image: *aodh_image
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/aodh && chown aodh:aodh /var/log/aodh']
+ volumes:
+ - logs:/var/log
+ aodh_db_sync:
+ start_order: 1
+ image: *aodh_image
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log
+ command: /usr/bin/aodh-dbsync
+ step_4:
+ aodh-api:
+ image: *aodh_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/aodh-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/aodh/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable aodh service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped enabled=no
diff --git a/docker/services/aodh-evaluator.yaml b/docker/services/aodh-evaluator.yaml
new file mode 100644
index 00000000..d3c8c595
--- /dev/null
+++ b/docker/services/aodh-evaluator.yaml
@@ -0,0 +1,84 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Aodh Evaluator service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhEvaluatorImage:
+ description: image
+ default: 'centos-binary-aodh-evaluator:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ AodhEvaluatorBase:
+ type: ../../puppet/services/aodh-evaluator.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Aodh Evaluator role.
+ value:
+ service_name: {get_attr: [AodhEvaluatorBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhEvaluatorBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [AodhEvaluatorBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhEvaluatorBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_config
+ step_config: *step_config
+ config_image: &aodh_evaluator_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhEvaluatorImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-evaluator.json:
+ command: /usr/bin/aodh-evaluator
+ config_files:
+ - dest: /etc/aodh/aodh.conf
+ owner: aodh
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/aodh/aodh.conf
+ docker_config:
+ step_4:
+ aodh_evaluator:
+ image: *aodh_evaluator_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/aodh-evaluator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable openstack-aodh-evaluator service
+ tags: step2
+ service: name=openstack-aodh-evaluator.service state=stopped enabled=no
diff --git a/docker/services/aodh-listener.yaml b/docker/services/aodh-listener.yaml
new file mode 100644
index 00000000..7aa9618d
--- /dev/null
+++ b/docker/services/aodh-listener.yaml
@@ -0,0 +1,84 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Aodh Listener service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhListenerImage:
+ description: image
+ default: 'centos-binary-aodh-listener:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ AodhListenerBase:
+ type: ../../puppet/services/aodh-listener.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Aodh Listener role.
+ value:
+ service_name: {get_attr: [AodhListenerBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhListenerBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [AodhListenerBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhListenerBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_config
+ step_config: *step_config
+ config_image: &aodh_listener_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhListenerImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-listener.json:
+ command: /usr/bin/aodh-listener
+ config_files:
+ - dest: /etc/aodh/aodh.conf
+ owner: aodh
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/aodh/aodh.conf
+ docker_config:
+ step_4:
+ aodh_listener:
+ image: *aodh_listener_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/aodh-listener.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable openstack-aodh-listener service
+ tags: step2
+ service: name=openstack-aodh-listener.service state=stopped enabled=no
diff --git a/docker/services/aodh-notifier.yaml b/docker/services/aodh-notifier.yaml
new file mode 100644
index 00000000..f525d6bd
--- /dev/null
+++ b/docker/services/aodh-notifier.yaml
@@ -0,0 +1,84 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Aodh Notifier service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhNotifierImage:
+ description: image
+ default: 'centos-binary-aodh-notifier:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ AodhNotifierBase:
+ type: ../../puppet/services/aodh-notifier.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Aodh Notifier role.
+ value:
+ service_name: {get_attr: [AodhNotifierBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhNotifierBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [AodhNotifierBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhNotifierBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_config
+ step_config: *step_config
+ config_image: &aodh_notifier_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhNotifierImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-notifier.json:
+ command: /usr/bin/aodh-notifier
+ config_files:
+ - dest: /etc/aodh/aodh.conf
+ owner: aodh
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/aodh/aodh.conf
+ docker_config:
+ step_4:
+ aodh_notifier:
+ image: *aodh_notifier_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/aodh-notifier.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable openstack-aodh-notifier service
+ tags: step2
+ service: name=openstack-aodh-notifier.service state=stopped enabled=no
diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml
new file mode 100644
index 00000000..15795828
--- /dev/null
+++ b/docker/services/database/mongodb.yaml
@@ -0,0 +1,115 @@
+heat_template_version: ocata
+
+description: >
+ MongoDB service deployment using puppet and docker
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMongodbImage:
+ description: image
+ default: 'centos-binary-mongodb:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ MongodbPuppetBase:
+ type: ../../../puppet/services/database/mongodb.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Containerized MongoDB service using composable services.
+ value:
+ service_name: {get_attr: [MongodbPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [MongodbPuppetBase, role_data, config_settings]
+ - mongodb::server::fork: false
+ step_config: &step_config
+ list_join:
+ - "\n"
+ - - "['Mongodb_database', 'Mongodb_user', 'Mongodb_replset'].each |String $val| { noop_resource($val) }"
+ - {get_attr: [MongodbPuppetBase, role_data, step_config]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: mongodb
+ puppet_tags: file # set this even though file is the default
+ step_config: *step_config
+ config_image: &mongodb_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/mongodb.json:
+ command: /usr/bin/mongod --unixSocketPrefix=/var/run/mongodb --config /etc/mongod.conf run
+ config_files:
+ - dest: /etc/mongod.conf
+ source: /var/lib/kolla/config_files/src/etc/mongod.conf
+ owner: mongodb
+ perm: '0600'
+ - dest: /etc/mongos.conf
+ source: /var/lib/kolla/config_files/src/etc/mongos.conf
+ owner: mongodb
+ perm: '0600'
+ docker_config:
+ step_2:
+ mongodb_data_ownership:
+ start_order: 0
+ image: *mongodb_image
+ net: host
+ user: root
+ command: ['chown', '-R', 'mongodb:', '/var/lib/mongodb']
+ volumes:
+ - /var/lib/mongodb:/var/lib/mongodb
+ mongodb:
+ start_order: 1
+ image: *mongodb_image
+ net: host
+ privileged: false
+ volumes: &mongodb_volumes
+ - /var/lib/kolla/config_files/mongodb.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/config-data/mongodb/:/var/lib/kolla/config_files/src:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log/kolla
+ - /var/lib/mongodb:/var/lib/mongodb
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ docker_puppet_tasks:
+ # MongoDB database initialization occurs only on a single node
+ step_2:
+ config_volume: 'mongodb_init_tasks'
+ puppet_tags: 'mongodb_database,mongodb_user,mongodb_replset'
+ step_config: 'include ::tripleo::profile::base::database::mongodb'
+ config_image: *mongodb_image
+ volumes:
+ - /var/lib/mongodb:/var/lib/mongodb
+ - logs:/var/log/kolla:ro
+ host_prep_tasks:
+ - name: create /var/lib/mongodb
+ file:
+ path: /var/lib/mongodb
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable mongodb service
+ tags: step2
+ service: name=mongod state=stopped enabled=no
diff --git a/docker/services/database/mysql.yaml b/docker/services/database/mysql.yaml
new file mode 100644
index 00000000..0ffd0336
--- /dev/null
+++ b/docker/services/database/mysql.yaml
@@ -0,0 +1,150 @@
+heat_template_version: ocata
+
+description: >
+ MySQL service deployment using puppet
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMysqlImage:
+ description: image
+ default: 'centos-binary-mariadb:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ MysqlRootPassword:
+ type: string
+ hidden: true
+ default: ''
+
+resources:
+
+ MysqlPuppetBase:
+ type: ../../../puppet/services/database/mysql.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Containerized MySQL service using composable services.
+ value:
+ service_name: {get_attr: [MysqlPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - {get_attr: [MysqlPuppetBase, role_data, config_settings]}
+ # Set PID file to what kolla mariadb bootstrap script expects
+ - tripleo::profile::base::database::mysql::mysql_server_options:
+ mysqld:
+ pid-file: /var/lib/mysql/mariadb.pid
+ mysqld_safe:
+ pid-file: /var/lib/mysql/mariadb.pid
+ step_config: &step_config
+ list_join:
+ - "\n"
+ - - "['Mysql_datadir', 'Mysql_user', 'Mysql_database', 'Mysql_grant', 'Mysql_plugin'].each |String $val| { noop_resource($val) }"
+ - {get_attr: [MysqlPuppetBase, role_data, step_config]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: mysql
+ puppet_tags: file # set this even though file is the default
+ step_config: *step_config
+ config_image: &mysql_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/mysql.json:
+ command: /usr/bin/mysqld_safe
+ config_files:
+ - dest: /etc/mysql/my.cnf
+ source: /var/lib/kolla/config_files/src/etc/my.cnf
+ owner: mysql
+ perm: '0644'
+ - dest: /etc/my.cnf.d/galera.cnf
+ source: /var/lib/kolla/config_files/src/etc/my.cnf.d/galera.cnf
+ owner: mysql
+ perm: '0644'
+ docker_config:
+ step_2:
+ mysql_data_ownership:
+ start_order: 0
+ detach: false
+ image: *mysql_image
+ net: host
+ user: root
+ # Kolla only does a non-recursive chown, so chown recursively here
+ command: ['chown', '-R', 'mysql:', '/var/lib/mysql']
+ volumes:
+ - /var/lib/mysql:/var/lib/mysql
+ mysql_bootstrap:
+ start_order: 1
+ detach: false
+ image: *mysql_image
+ net: host
+ # Kolla bootstraps aren't idempotent, explicitly checking if bootstrap was done
+ command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start']
+ volumes: &mysql_volumes
+ - /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/config-data/mysql/:/var/lib/kolla/config_files/src:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /var/lib/mysql:/var/lib/mysql
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - KOLLA_BOOTSTRAP=True
+ # NOTE(mandre) skip wsrep cluster status check
+ - KOLLA_KUBERNETES=True
+ -
+ list_join:
+ - '='
+ - - 'DB_ROOT_PASSWORD'
+ -
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: MysqlRootPassword}
+ - {get_param: [DefaultPasswords, mysql_root_password]}
+ mysql:
+ start_order: 2
+ image: *mysql_image
+ restart: always
+ net: host
+ volumes: *mysql_volumes
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ docker_puppet_tasks:
+ # MySQL database initialization occurs only on a single node
+ step_2:
+ config_volume: 'mysql_init_tasks'
+ puppet_tags: 'mysql_database,mysql_grant,mysql_user'
+ step_config: 'include ::tripleo::profile::base::database::mysql'
+ config_image: *mysql_image
+ volumes:
+ - "/var/lib/mysql:/var/lib/mysql/:ro"
+ - "/var/lib/config-data/mysql/root:/root:ro" #provides .my.cnf
+ host_prep_tasks:
+ - name: create /var/lib/mysql
+ file:
+ path: /var/lib/mysql
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable mysql service
+ tags: step2
+ service: name=mariadb state=stopped enabled=no
diff --git a/docker/services/glance-api.yaml b/docker/services/glance-api.yaml
new file mode 100644
index 00000000..77e4aa01
--- /dev/null
+++ b/docker/services/glance-api.yaml
@@ -0,0 +1,102 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Glance service configured with Puppet
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGlanceApiImage:
+ description: image
+ default: 'centos-binary-glance-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ GlanceApiPuppetBase:
+ type: ../../puppet/services/glance-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Glance API role.
+ value:
+ service_name: {get_attr: [GlanceApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [GlanceApiPuppetBase, role_data, config_settings]
+ - glance::api::sync_db: false
+ step_config: &step_config
+ get_attr: [GlanceApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [GlanceApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: glance_api
+ puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config
+ step_config: *step_config
+ config_image: &glance_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/glance-api.json:
+ command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf
+ config_files:
+ - dest: /etc/glance/glance-api.conf
+ owner: glance
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/glance/glance-api.conf
+ - dest: /etc/glance/glance-swift.conf
+ owner: glance
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/glance/glance-swift.conf
+ docker_config:
+ step_3:
+ glance_api_db_sync:
+ image: *glance_image
+ net: host
+ privileged: false
+ detach: false
+ volumes: &glance_volumes
+ - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json
+ - /etc/localtime:/etc/localtime:ro
+ - /lib/modules:/lib/modules:ro
+ - /var/lib/config-data/glance_api/:/var/lib/kolla/config_files/src:ro
+ - /run:/run
+ - /dev:/dev
+ - /etc/hosts:/etc/hosts:ro
+ environment:
+ - KOLLA_BOOTSTRAP=True
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_4:
+ glance_api:
+ image: *glance_image
+ net: host
+ privileged: false
+ restart: always
+ volumes: *glance_volumes
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable glance_api service
+ tags: step2
+ service: name=openstack-glance-api state=stopped enabled=no
diff --git a/docker/services/gnocchi-api.yaml b/docker/services/gnocchi-api.yaml
new file mode 100644
index 00000000..a64d1507
--- /dev/null
+++ b/docker/services/gnocchi-api.yaml
@@ -0,0 +1,118 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Gnocchi API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGnocchiApiImage:
+ description: image
+ default: 'centos-binary-gnocchi-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ GnocchiApiPuppetBase:
+ type: ../../puppet/services/gnocchi-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Gnocchi API role.
+ value:
+ service_name: {get_attr: [GnocchiApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [GnocchiApiPuppetBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [GnocchiApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [GnocchiApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: gnocchi
+ puppet_tags: gnocchi_api_paste_ini,gnocchi_config
+ step_config: *step_config
+ config_image: &gnocchi_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/gnocchi-api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ config_files:
+ - dest: /etc/gnocchi/gnocchi.conf
+ owner: gnocchi
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/gnocchi/gnocchi.conf
+ - dest: /etc/httpd/conf.d/10-gnocchi_wsgi.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-gnocchi_wsgi.conf
+ - dest: /etc/httpd/conf/httpd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
+ - dest: /etc/httpd/conf/ports.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
+ - dest: /var/www/cgi-bin/gnocchi/app
+ owner: gnocchi
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/www/cgi-bin/gnocchi/app
+ docker_config:
+ step_3:
+ gnocchi-init-log:
+ start_order: 0
+ image: *gnocchi_image
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/gnocchi && chown gnocchi:gnocchi /var/log/gnocchi']
+ volumes:
+ - logs:/var/log
+ gnocchi_db_sync:
+ start_order: 1
+ image: *gnocchi_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log
+ command: ["/usr/bin/gnocchi-upgrade", "--skip-storage"]
+ step_4:
+ gnocchi-api:
+ image: *gnocchi_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/gnocchi-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/gnocchi/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/gnocchi-metricd.yaml b/docker/services/gnocchi-metricd.yaml
new file mode 100644
index 00000000..6437e942
--- /dev/null
+++ b/docker/services/gnocchi-metricd.yaml
@@ -0,0 +1,78 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Gnocchi Metricd service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGnocchiMetricdImage:
+ description: image
+ default: 'centos-binary-gnocchi-metricd:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ GnocchiMetricdBase:
+ type: ../../puppet/services/gnocchi-metricd.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Gnocchi Metricd role.
+ value:
+ service_name: {get_attr: [GnocchiMetricdBase, role_data, service_name]}
+ config_settings: {get_attr: [GnocchiMetricdBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [GnocchiMetricdBase, role_data, step_config]
+ service_config_settings: {get_attr: [GnocchiMetricdBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: gnocchi
+ puppet_tags: gnocchi_config
+ step_config: *step_config
+ config_image: &gnocchi_metricd_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiMetricdImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/gnocchi-metricd.json:
+ command: /usr/bin/gnocchi-metricd
+ config_files:
+ - dest: /etc/gnocchi/gnocchi.conf
+ owner: gnocchi
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/gnocchi/gnocchi.conf
+ docker_config:
+ step_4:
+ gnocchi_metricd:
+ image: *gnocchi_metricd_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/gnocchi-metricd.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/gnocchi-statsd.yaml b/docker/services/gnocchi-statsd.yaml
new file mode 100644
index 00000000..32c16521
--- /dev/null
+++ b/docker/services/gnocchi-statsd.yaml
@@ -0,0 +1,78 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Gnocchi Statsd service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGnocchiStatsdImage:
+ description: image
+ default: 'centos-binary-gnocchi-statsd:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ GnocchiStatsdBase:
+ type: ../../puppet/services/gnocchi-statsd.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Gnocchi Statsd role.
+ value:
+ service_name: {get_attr: [GnocchiStatsdBase, role_data, service_name]}
+ config_settings: {get_attr: [GnocchiStatsdBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [GnocchiStatsdBase, role_data, step_config]
+ service_config_settings: {get_attr: [GnocchiStatsdBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: gnocchi
+ puppet_tags: gnocchi_config
+ step_config: *step_config
+ config_image: &gnocchi_statsd_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiStatsdImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/gnocchi-statsd.json:
+ command: /usr/bin/gnocchi-statsd
+ config_files:
+ - dest: /etc/gnocchi/gnocchi.conf
+ owner: gnocchi
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/gnocchi/gnocchi.conf
+ docker_config:
+ step_4:
+ gnocchi_statsd:
+ image: *gnocchi_statsd_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/gnocchi-statsd.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/heat-api-cfn.yaml b/docker/services/heat-api-cfn.yaml
new file mode 100644
index 00000000..85ad9212
--- /dev/null
+++ b/docker/services/heat-api-cfn.yaml
@@ -0,0 +1,96 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Heat API CFN service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerHeatApiCfnImage:
+ description: image
+ default: 'centos-binary-heat-api-cfn:latest'
+ type: string
+ # we configure all heat services in the same heat engine container
+ DockerHeatConfigImage:
+ description: image
+ default: 'centos-binary-heat-engine:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ HeatBase:
+ type: ../../puppet/services/heat-api-cfn.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Heat API CFN role.
+ value:
+ service_name: {get_attr: [HeatBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [HeatBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [HeatBase, role_data, step_config]
+ service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: heat
+ puppet_tags: heat_config,file,concat,file_line
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/heat_api_cfn.json:
+ command: /usr/bin/heat-api-cfn --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+ config_files:
+ - dest: /etc/heat/heat.conf
+ owner: heat
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+ docker_config:
+ step_4:
+ heat_api_cfn:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnImage} ]
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /dev:/dev
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable heat_api_cfn service
+ tags: step2
+ service: name=openstack-heat-api-cfn state=stopped enabled=no
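Reviewer note: the image and config_image values are plain list_join concatenations, so with the defaults they resolve to names like tripleoupstream/centos-binary-heat-api-cfn:latest. Pulling from a local registry is just a parameter override; the values below are hypothetical:

    parameter_defaults:
      DockerNamespace: 192.168.24.1:8787/tripleoupstream
      DockerHeatApiCfnImage: centos-binary-heat-api-cfn:latest
      DockerHeatConfigImage: centos-binary-heat-engine:latest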
diff --git a/docker/services/heat-api.yaml b/docker/services/heat-api.yaml
new file mode 100644
index 00000000..12884f56
--- /dev/null
+++ b/docker/services/heat-api.yaml
@@ -0,0 +1,96 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Heat API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerHeatApiImage:
+ description: image
+ default: 'centos-binary-heat-api:latest'
+ type: string
+ # we configure all heat services in the same heat engine container
+ DockerHeatConfigImage:
+ description: image
+ default: 'centos-binary-heat-engine:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ HeatBase:
+ type: ../../puppet/services/heat-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Heat API role.
+ value:
+ service_name: {get_attr: [HeatBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [HeatBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [HeatBase, role_data, step_config]
+ service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: heat
+ puppet_tags: heat_config,file,concat,file_line
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/heat_api.json:
+ command: /usr/bin/heat-api --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+ config_files:
+ - dest: /etc/heat/heat.conf
+ owner: heat
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+ docker_config:
+ step_4:
+ heat_api:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiImage} ]
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /dev:/dev
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable heat_api service
+ tags: step2
+ service: name=openstack-heat-api state=stopped enabled=no
diff --git a/docker/services/heat-engine.yaml b/docker/services/heat-engine.yaml
new file mode 100644
index 00000000..85a00b1d
--- /dev/null
+++ b/docker/services/heat-engine.yaml
@@ -0,0 +1,98 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Heat Engine service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerHeatEngineImage:
+ description: image
+ default: 'centos-binary-heat-engine:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ HeatBase:
+ type: ../../puppet/services/heat-engine.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Heat Engine role.
+ value:
+ service_name: {get_attr: [HeatBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [HeatBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [HeatBase, role_data, step_config]
+ service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: heat
+ puppet_tags: heat_config,file,concat,file_line
+ step_config: *step_config
+ config_image: &heat_engine_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/heat_engine.json:
+ command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+ config_files:
+ - dest: /etc/heat/heat.conf
+ owner: heat
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+ docker_config:
+ step_3:
+ heat_engine_db_sync:
+ image: *heat_engine_image
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ - /var/lib/config-data/heat/etc/heat:/etc/heat:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ command: ['heat-manage', 'db_sync']
+ step_4:
+ heat_engine:
+ image: *heat_engine_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable heat_engine service
+ tags: step2
+ service: name=openstack-heat-engine state=stopped enabled=no
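Reviewer note: these templates only take effect when mapped over the puppet service templates in the resource registry (an environments/docker.yaml style environment file). Illustrative excerpt; the keys shown are the usual service aliases and may not match the environment file byte for byte:

    resource_registry:
      OS::TripleO::Services::HeatApi: ../docker/services/heat-api.yaml
      OS::TripleO::Services::HeatApiCfn: ../docker/services/heat-api-cfn.yaml
      OS::TripleO::Services::HeatEngine: ../docker/services/heat-engine.yaml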
diff --git a/docker/services/ironic-api.yaml b/docker/services/ironic-api.yaml
new file mode 100644
index 00000000..5ae82d46
--- /dev/null
+++ b/docker/services/ironic-api.yaml
@@ -0,0 +1,105 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Ironic API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerIronicApiImage:
+ description: image
+ default: 'centos-binary-ironic-api:latest'
+ type: string
+ DockerIronicConfigImage:
+ description: image
+ default: 'centos-binary-ironic-pxe:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ IronicApiBase:
+ type: ../../puppet/services/ironic-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Ironic API role.
+ value:
+ service_name: {get_attr: [IronicApiBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [IronicApiBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [IronicApiBase, role_data, step_config]
+ service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ironic
+ puppet_tags: ironic_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ironic_api.json:
+ command: /usr/bin/ironic-api
+ config_files:
+ - dest: /etc/ironic/ironic.conf
+ owner: ironic
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
+ docker_config:
+ step_3:
+ ironic_db_sync:
+ image: &ironic_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ]
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ - /var/lib/config-data/ironic/etc/:/etc/:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ command: ['ironic-dbsync', '--config-file', '/etc/ironic/ironic.conf']
+ step_4:
+ ironic_api:
+ start_order: 10
+ image: *ironic_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable ironic_api service
+ tags: step2
+ service: name=openstack-ironic-api state=stopped enabled=no
diff --git a/docker/services/ironic-conductor.yaml b/docker/services/ironic-conductor.yaml
new file mode 100644
index 00000000..678b8c27
--- /dev/null
+++ b/docker/services/ironic-conductor.yaml
@@ -0,0 +1,145 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Ironic Conductor service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerIronicConductorImage:
+ description: image
+ default: 'centos-binary-ironic-conductor:latest'
+ type: string
+ DockerIronicConfigImage:
+ description: image
+ default: 'centos-binary-ironic-pxe:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ IronicConductorBase:
+ type: ../../puppet/services/ironic-conductor.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Ironic Conductor role.
+ value:
+ service_name: {get_attr: [IronicConductorBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [IronicConductorBase, role_data, config_settings]
+ # to avoid hard linking errors we store these on the same
+ # volume/device as the ironic master_path
+ # https://github.com/docker/docker/issues/7457
+ - ironic::drivers::pxe::tftp_root: /var/lib/ironic/tftpboot
+ - ironic::drivers::pxe::tftp_master_path: /var/lib/ironic/tftpboot/master_images
+ - ironic::pxe::tftp_root: /var/lib/ironic/tftpboot
+ - ironic::pxe::http_root: /var/lib/ironic/httpboot
+ - ironic::conductor::http_root: /var/lib/ironic/httpboot
+ step_config: &step_config
+ get_attr: [IronicConductorBase, role_data, step_config]
+ service_config_settings: {get_attr: [IronicConductorBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ironic
+ puppet_tags: ironic_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ironic_conductor.json:
+ command: /usr/bin/ironic-conductor
+ config_files:
+ - dest: /etc/ironic/ironic.conf
+ owner: ironic
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
+ permissions:
+ - path: /var/lib/ironic/httpboot
+ owner: ironic:ironic
+ recurse: true
+ - path: /var/lib/ironic/tftpboot
+ owner: ironic:ironic
+ recurse: true
+ docker_config:
+ step_4:
+ ironic_conductor:
+ start_order: 80
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ]
+ net: host
+ privileged: true
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/ironic_conductor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /lib/modules:/lib/modules:ro
+ - /sys:/sys
+ - /dev:/dev
+ - /run:/run #shared?
+ - /var/lib/ironic:/var/lib/ironic
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create ironic persistent data directory
+ file:
+ path: /var/lib/ironic
+ state: directory
+ - name: stat /httpboot
+ stat: path=/httpboot
+ register: stat_httpboot
+ - name: stat /tftpboot
+ stat: path=/tftpboot
+ register: stat_tftpboot
+ - name: stat /var/lib/ironic/httpboot
+ stat: path=/var/lib/ironic/httpboot
+ register: stat_ironic_httpboot
+ - name: stat /var/lib/ironic/tftpboot
+ stat: path=/var/lib/ironic/tftpboot
+ register: stat_ironic_tftpboot
+ # cannot use the 'copy' module: with 'remote_src' it doesn't support recursion
+ - name: migrate /httpboot to containerized (if applicable)
+ command: /bin/cp -R /httpboot /var/lib/ironic/httpboot
+ when: stat_httpboot.stat.exists and not stat_ironic_httpboot.stat.exists
+ - name: migrate /tftpboot to containerized (if applicable)
+ command: /bin/cp -R /tftpboot /var/lib/ironic/tftpboot
+ when: stat_tftpboot.stat.exists and not stat_ironic_tftpboot.stat.exists
+ # Even if there was nothing to copy from original locations,
+ # we need to create the dirs before starting the containers
+ - name: ensure ironic pxe directories exist
+ file:
+ path: /var/lib/ironic/{{ item }}
+ state: directory
+ with_items:
+ - httpboot
+ - tftpboot
+ upgrade_tasks:
+ - name: Stop and disable ironic_conductor service
+ tags: step2
+ service: name=openstack-ironic-conductor state=stopped enabled=no
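Reviewer note on the 'cannot use the copy module' comment in host_prep_tasks: the Ansible-native variant would look like the task below, but with remote_src the copy module (in the Ansible releases targeted here) does not recurse into directories, hence the plain /bin/cp. Sketch only, not part of the patch:

    - name: migrate /httpboot to containerized (rejected variant)
      copy:
        src: /httpboot/
        dest: /var/lib/ironic/httpboot/
        remote_src: yes
      when: stat_httpboot.stat.exists and not stat_ironic_httpboot.stat.exists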
diff --git a/docker/services/ironic-pxe.yaml b/docker/services/ironic-pxe.yaml
new file mode 100644
index 00000000..c6607094
--- /dev/null
+++ b/docker/services/ironic-pxe.yaml
@@ -0,0 +1,137 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Ironic PXE service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerIronicPxeImage:
+ description: image
+ default: 'centos-binary-ironic-pxe:latest'
+ type: string
+ DockerIronicConfigImage:
+ description: image
+ default: 'centos-binary-ironic-pxe:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the Ironic PXE role.
+ value:
+ service_name: ironic_pxe
+ config_settings: {}
+ step_config: &step_config ''
+ service_config_settings: {}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ironic
+ puppet_tags: ironic_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ironic_pxe_http.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ config_files:
+ - dest: /etc/ironic/ironic.conf
+ owner: ironic
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
+ - dest: /etc/httpd/conf.d/10-ipxe_vhost.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-ipxe_vhost.conf
+ - dest: /etc/httpd/conf/httpd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
+ - dest: /etc/httpd/conf/ports.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
+ /var/lib/kolla/config_files/ironic_pxe_tftp.json:
+ command: /usr/sbin/in.tftpd --foreground --user root --address 0.0.0.0:69 --map-file /var/lib/ironic/tftpboot/map-file /var/lib/ironic/tftpboot
+ config_files:
+ - dest: /etc/ironic/ironic.conf
+ owner: ironic
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
+ - dest: /var/lib/ironic/tftpboot/chain.c32
+ owner: ironic
+ perm: '0744'
+ source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/chain.c32
+ - dest: /var/lib/ironic/tftpboot/pxelinux.0
+ owner: ironic
+ perm: '0744'
+ source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/pxelinux.0
+ - dest: /var/lib/ironic/tftpboot/ipxe.efi
+ owner: ironic
+ perm: '0744'
+ source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/ipxe.efi
+ - dest: /var/lib/ironic/tftpboot/undionly.kpxe
+ owner: ironic
+ perm: '0744'
+ source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/undionly.kpxe
+ - dest: /var/lib/ironic/tftpboot/map-file
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/map-file
+ docker_config:
+ step_4:
+ ironic_pxe_tftp:
+ start_order: 90
+ image: &ironic_pxe_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicPxeImage} ]
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/ironic_pxe_tftp.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /dev/log:/dev/log
+ - /var/lib/ironic:/var/lib/ironic/
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ ironic_pxe_http:
+ start_order: 91
+ image: *ironic_pxe_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/ironic/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /var/lib/ironic:/var/lib/ironic/
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create ironic persistent data directory
+ file:
+ path: /var/lib/ironic
+ state: directory
diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml
new file mode 100644
index 00000000..e50315ba
--- /dev/null
+++ b/docker/services/keystone.yaml
@@ -0,0 +1,172 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Keystone service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerKeystoneImage:
+ description: image
+ default: 'centos-binary-keystone:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ AdminPassword:
+ description: The password for the keystone admin account, used for monitoring, querying neutron etc.
+ type: string
+ hidden: true
+ KeystoneTokenProvider:
+ description: The keystone token format
+ type: string
+ default: 'fernet'
+ constraints:
+ - allowed_values: ['uuid', 'fernet']
+
+resources:
+
+ KeystoneBase:
+ type: ../../puppet/services/keystone.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+conditions:
+ keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]}
+
+outputs:
+ role_data:
+ description: Role data for the Keystone API role.
+ value:
+ service_name: {get_attr: [KeystoneBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [KeystoneBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ list_join:
+ - "\n"
+ - - "['Keystone_user', 'Keystone_endpoint', 'Keystone_domain', 'Keystone_tenant', 'Keystone_user_role', 'Keystone_role', 'Keystone_service'].each |String $val| { noop_resource($val) }"
+ - {get_attr: [KeystoneBase, role_data, step_config]}
+ service_config_settings: {get_attr: [KeystoneBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: keystone
+ puppet_tags: keystone_config
+ step_config: *step_config
+ config_image: &keystone_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/keystone.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ config_files:
+ - dest: /etc/keystone/keystone.conf
+ owner: keystone
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/keystone/keystone.conf
+ - dest: /etc/keystone/credential-keys/0
+ owner: keystone
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/0
+ - dest: /etc/keystone/credential-keys/1
+ owner: keystone
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/1
+ - dest: /etc/httpd/conf.d/10-keystone_wsgi_admin.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_admin.conf
+ - dest: /etc/httpd/conf.d/10-keystone_wsgi_main.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_main.conf
+ - dest: /etc/httpd/conf/httpd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
+ - dest: /etc/httpd/conf/ports.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
+ - dest: /var/www/cgi-bin/keystone/keystone-admin
+ owner: keystone
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-admin
+ - dest: /var/www/cgi-bin/keystone/keystone-public
+ owner: keystone
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-public
+ docker_config:
+ step_3:
+ keystone-init-log:
+ start_order: 0
+ image: *keystone_image
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/keystone && chown keystone:keystone /var/log/keystone']
+ volumes:
+ - logs:/var/log
+ keystone_db_sync:
+ start_order: 1
+ image: *keystone_image
+ net: host
+ privileged: false
+ detach: false
+ volumes: &keystone_volumes
+ - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/keystone/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/keystone/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log
+ -
+ if:
+ - keystone_fernet_tokens
+ - /var/lib/config-data/keystone/etc/keystone/fernet-keys:/etc/keystone/fernet-keys:ro
+ - ''
+ environment:
+ - KOLLA_BOOTSTRAP=True
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ keystone:
+ start_order: 1
+ image: *keystone_image
+ net: host
+ privileged: false
+ restart: always
+ volumes: *keystone_volumes
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ keystone_bootstrap:
+ start_order: 2
+ action: exec
+ command:
+ [ 'keystone', 'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ]
+ docker_puppet_tasks:
+ # Keystone endpoint creation occurs only on a single node
+ step_3:
+ config_volume: 'keystone_init_tasks'
+ puppet_tags: 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain'
+ step_config: 'include ::tripleo::profile::base::keystone'
+ config_image: *keystone_image
+ upgrade_tasks:
+ - name: Stop and disable keystone service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped enabled=no
+ metadata_settings:
+ get_attr: [KeystoneBase, role_data, metadata_settings]
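Reviewer note: the bare 'if:' entry in the keystone volumes list resolves per the keystone_fernet_tokens condition, so the mounted set differs by token provider (illustrative expansion of that one entry):

    # KeystoneTokenProvider == 'fernet' (default)
    - /var/lib/config-data/keystone/etc/keystone/fernet-keys:/etc/keystone/fernet-keys:ro
    # KeystoneTokenProvider == 'uuid'
    - ''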
diff --git a/docker/services/memcached.yaml b/docker/services/memcached.yaml
new file mode 100644
index 00000000..a78be3c8
--- /dev/null
+++ b/docker/services/memcached.yaml
@@ -0,0 +1,75 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Memcached services
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMemcachedImage:
+ description: image
+ default: 'centos-binary-memcached:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ MemcachedBase:
+ type: ../../puppet/services/memcached.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Memcached API role.
+ value:
+ service_name: {get_attr: [MemcachedBase, role_data, service_name]}
+ config_settings: {get_attr: [MemcachedBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [MemcachedBase, role_data, step_config]
+ service_config_settings: {get_attr: [MemcachedBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: 'memcached'
+ puppet_tags: 'file'
+ step_config: *step_config
+ config_image: &memcached_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
+ kolla_config: {}
+ docker_config:
+ step_1:
+ memcached:
+ image: *memcached_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS']
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable memcached service
+ tags: step2
+ service: name=memcached state=stopped enabled=no
diff --git a/docker/services/mistral-api.yaml b/docker/services/mistral-api.yaml
new file mode 100644
index 00000000..4dd3b74c
--- /dev/null
+++ b/docker/services/mistral-api.yaml
@@ -0,0 +1,121 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Mistral API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMistralApiImage:
+ description: image
+ default: 'centos-binary-mistral-api:latest'
+ type: string
+ DockerMistralConfigImage:
+ description: image
+ default: 'centos-binary-mistral-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ MistralApiBase:
+ type: ../../puppet/services/mistral-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Mistral API role.
+ value:
+ service_name: {get_attr: [MistralApiBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [MistralApiBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [MistralApiBase, role_data, step_config]
+ service_config_settings: {get_attr: [MistralApiBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: mistral
+ puppet_tags: mistral_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/mistral_api.json:
+ command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api
+ config_files:
+ - dest: /etc/mistral/mistral.conf
+ owner: mistral
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+ docker_config:
+ step_3:
+ mistral_db_sync:
+ start_order: 1
+ image: &mistral_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ]
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ - /var/lib/config-data/mistral/etc/:/etc/:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'upgrade', 'head']
+ mistral_db_populate:
+ start_order: 2
+ image: *mistral_image
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ - /var/lib/config-data/mistral/etc/:/etc/:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ # NOTE(dprince): this requires that we install openstack-tripleo-common into
+ # the Mistral API image so that we get tripleo* actions
+ command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'populate']
+ step_4:
+ mistral_api:
+ start_order: 15
+ image: *mistral_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/mistral_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable mistral_api service
+ tags: step2
+ service: name=openstack-mistral-api state=stopped enabled=no
diff --git a/docker/services/mistral-engine.yaml b/docker/services/mistral-engine.yaml
new file mode 100644
index 00000000..db2721bd
--- /dev/null
+++ b/docker/services/mistral-engine.yaml
@@ -0,0 +1,94 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Mistral Engine service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMistralEngineImage:
+ description: image
+ default: 'centos-binary-mistral-engine:latest'
+ type: string
+ DockerMistralConfigImage:
+ description: image
+ default: 'centos-binary-mistral-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ MistralBase:
+ type: ../../puppet/services/mistral-engine.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Mistral Engine role.
+ value:
+ service_name: {get_attr: [MistralBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [MistralBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [MistralBase, role_data, step_config]
+ service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: mistral
+ puppet_tags: mistral_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/mistral_engine.json:
+ command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/engine.log --server=engine
+ config_files:
+ - dest: /etc/mistral/mistral.conf
+ owner: mistral
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+ docker_config:
+ step_4:
+ mistral_engine:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ]
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/mistral_engine.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable mistral_engine service
+ tags: step2
+ service: name=openstack-mistral-engine state=stopped enabled=no
+
diff --git a/docker/services/mistral-executor.yaml b/docker/services/mistral-executor.yaml
new file mode 100644
index 00000000..d68830ed
--- /dev/null
+++ b/docker/services/mistral-executor.yaml
@@ -0,0 +1,97 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Mistral Executor service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMistralExecutorImage:
+ description: image
+ default: 'centos-binary-mistral-executor:latest'
+ type: string
+ DockerMistralConfigImage:
+ description: image
+ default: 'centos-binary-mistral-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ MistralBase:
+ type: ../../puppet/services/mistral-executor.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Mistral Executor role.
+ value:
+ service_name: {get_attr: [MistralBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [MistralBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [MistralBase, role_data, step_config]
+ service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: mistral
+ puppet_tags: mistral_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/mistral_executor.json:
+ command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/executor.log --server=executor
+ config_files:
+ - dest: /etc/mistral/mistral.conf
+ owner: mistral
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+ docker_config:
+ step_4:
+ mistral_executor:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ]
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/mistral_executor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ # FIXME: this is required in order for Nova cells
+ # initialization workflows on the Undercloud. Need to
+ # exclude this on the overcloud for security reasons.
+ - /var/lib/config-data/nova/etc/nova:/etc/nova:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable mistral_executor service
+ tags: step2
+ service: name=openstack-mistral-executor state=stopped enabled=no
diff --git a/docker/services/neutron-api.yaml b/docker/services/neutron-api.yaml
new file mode 100644
index 00000000..ed03de6c
--- /dev/null
+++ b/docker/services/neutron-api.yaml
@@ -0,0 +1,111 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Neutron API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNeutronApiImage:
+ description: image
+ default: 'centos-binary-neutron-server:latest'
+ type: string
+ # we configure all neutron services with the same neutron config image
+ DockerNeutronConfigImage:
+ description: image
+ default: 'centos-binary-neutron-openvswitch-agent:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ NeutronBase:
+ type: ../../puppet/services/neutron-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron API role.
+ value:
+ service_name: {get_attr: [NeutronBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [NeutronBase, role_data, step_config]
+ service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: neutron
+ puppet_tags: neutron_config,neutron_api_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/neutron_api.json:
+ command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini
+ config_files:
+ - dest: /etc/neutron/neutron.conf
+ owner: neutron
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
+ - dest: /etc/neutron/plugin.ini
+ owner: neutron
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini
+ docker_config:
+ step_3:
+ neutron_db_sync:
+ image: &neutron_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
+ net: host
+ privileged: false
+ detach: false
+ # FIXME: we should make config file permissions right
+ # and run as neutron user
+ user: root
+ volumes:
+ - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro
+ - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ command: ['neutron-db-manage', 'upgrade', 'heads']
+ step_4:
+ neutron_api:
+ image: *neutron_api_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable neutron_api service
+ tags: step2
+ service: name=neutron-server state=stopped enabled=no
diff --git a/docker/services/neutron-dhcp.yaml b/docker/services/neutron-dhcp.yaml
new file mode 100644
index 00000000..9be13ad3
--- /dev/null
+++ b/docker/services/neutron-dhcp.yaml
@@ -0,0 +1,99 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Neutron DHCP service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNeutronDHCPImage:
+ description: image
+ default: 'centos-binary-neutron-dhcp-agent:latest'
+ type: string
+ # we configure all neutron services with the same neutron config image
+ DockerNeutronConfigImage:
+ description: image
+ default: 'centos-binary-neutron-openvswitch-agent:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ NeutronBase:
+ type: ../../puppet/services/neutron-dhcp.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron DHCP role.
+ value:
+ service_name: {get_attr: [NeutronBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [NeutronBase, role_data, step_config]
+ service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: neutron
+ puppet_tags: neutron_config,neutron_dhcp_agent_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/neutron_dhcp.json:
+ command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log
+ config_files:
+ - dest: /etc/neutron/neutron.conf
+ owner: neutron
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
+ - dest: /etc/neutron/dhcp_agent.ini
+ owner: neutron
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/neutron/dhcp_agent.ini
+ docker_config:
+ step_4:
+ neutron_dhcp:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronDHCPImage} ]
+ net: host
+ pid: host
+ privileged: true
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /lib/modules:/lib/modules:ro
+ - /run/:/run
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable neutron_dhcp service
+ tags: step2
+ service: name=neutron-dhcp-agent state=stopped enabled=no
diff --git a/docker/services/neutron-l3.yaml b/docker/services/neutron-l3.yaml
new file mode 100644
index 00000000..db4fa863
--- /dev/null
+++ b/docker/services/neutron-l3.yaml
@@ -0,0 +1,90 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Neutron L3 agent
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNeutronL3AgentImage:
+ description: image
+ default: 'centos-binary-neutron-l3-agent:latest'
+ type: string
+ # we configure all neutron services with the same neutron config image
+ DockerNeutronConfigImage:
+ description: image
+ default: 'centos-binary-neutron-openvswitch-agent:latest'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+ NeutronL3Base:
+ type: ../../puppet/services/neutron-l3.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for Neutron L3 agent
+ value:
+ service_name: {get_attr: [NeutronL3Base, role_data, service_name]}
+ config_settings: {get_attr: [NeutronL3Base, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [NeutronL3Base, role_data, step_config]
+ puppet_config:
+ puppet_tags: neutron_config,neutron_l3_agent_config
+ config_volume: neutron
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/neutron-l3-agent.json:
+ command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
+ config_files:
+ - dest: /etc/neutron/neutron.conf
+ owner: neutron
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
+ - dest: /etc/neutron/l3_agent.ini
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/neutron/l3_agent.ini
+ docker_config:
+ step_4:
+ neutronl3agent:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronL3AgentImage} ]
+ net: host
+ pid: host
+ privileged: true
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/neutron-l3-agent.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron:/var/lib/kolla/config_files/src:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/neutron-ovs-agent.yaml b/docker/services/neutron-ovs-agent.yaml
index 0a061f6c..4102693b 100644
--- a/docker/services/neutron-ovs-agent.yaml
+++ b/docker/services/neutron-ovs-agent.yaml
@@ -10,7 +10,7 @@ parameters:
type: string
DockerOpenvswitchImage:
description: image
- default: 'centos-binary-neutron-openvswitch-agent'
+ default: 'centos-binary-neutron-openvswitch-agent:latest'
type: string
ServiceNetMap:
default: {}
@@ -32,53 +32,59 @@ resources:
NeutronOvsAgentBase:
type: ../../puppet/services/neutron-ovs-agent.yaml
properties:
+ EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
description: Role data for Neutron openvswitch service
value:
+ service_name: {get_attr: [NeutronOvsAgentBase, role_data, service_name]}
config_settings: {get_attr: [NeutronOvsAgentBase, role_data, config_settings]}
- step_config: {get_attr: [NeutronOvsAgentBase, role_data, step_config]}
- puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2
+ step_config: &step_config
+ get_attr: [NeutronOvsAgentBase, role_data, step_config]
+ puppet_config:
+ config_volume: neutron
+ puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2
+ step_config: *step_config
+ config_image: &neutron_ovs_agent_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
kolla_config:
- /var/lib/etc-data/json-config/neutron-openvswitch-agent.json:
- command: /usr/bin/neutron-openvswitch-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
+ /var/lib/kolla/config_files/neutron-openvswitch-agent.json:
+ command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
config_files:
- dest: /etc/neutron/neutron.conf
owner: neutron
perm: '0600'
- source: /var/lib/kolla/config_files/neutron.conf
+ source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
- dest: /etc/neutron/plugins/ml2/openvswitch_agent.ini
owner: neutron
perm: '0600'
- source: /var/lib/kolla/config_files/openvswitch_agent.ini
+ source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/openvswitch_agent.ini
- dest: /etc/neutron/plugins/ml2/ml2_conf.ini
owner: neutron
perm: '0600'
- source: /var/lib/kolla/config_files/ml2_conf.ini
+ source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini
docker_config:
- step_1:
+ step_4:
neutronovsagent:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
+ image: *neutron_ovs_agent_image
net: host
pid: host
privileged: true
restart: always
volumes:
- - /var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json
- - /var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro
- - /var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro
- - /var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro
+ - /var/lib/kolla/config_files/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron:/var/lib/kolla/config_files/src:ro
- /etc/localtime:/etc/localtime:ro
- /lib/modules:/lib/modules:ro
- /run:/run
- - logs:/var/log/kolla/
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- step_2: {}
+ upgrade_tasks:
+ - name: Stop and disable neutron_ovs_agent service
+ tags: step2
+ service: name=neutron-openvswitch-agent state=stopped enabled=no
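Reviewer note: the updated volume layout replaces the old per-file bind mounts with a single src tree; for neutron.conf the indirection works out as follows (illustrative, per the volumes and config_files above):

    # on the host, written into the neutron config volume by the puppet_config step
    /var/lib/config-data/neutron/etc/neutron/neutron.conf
    # visible inside the container through the src bind mount
    /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
    # copied to its final destination by kolla_start at container start
    /etc/neutron/neutron.conf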
diff --git a/docker/services/neutron-plugin-ml2.yaml b/docker/services/neutron-plugin-ml2.yaml
new file mode 100644
index 00000000..34864d3a
--- /dev/null
+++ b/docker/services/neutron-plugin-ml2.yaml
@@ -0,0 +1,60 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Neutron ML2 Plugin configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNeutronConfigImage:
+ description: image
+ default: 'centos-binary-neutron-openvswitch-agent:latest'
+ type: string
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ NeutronBase:
+ type: ../../puppet/services/neutron-plugin-ml2.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron ML2 Plugin role.
+ value:
+ service_name: {get_attr: [NeutronBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [NeutronBase, role_data, step_config]
+ service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: 'neutron'
+ puppet_tags: ''
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ kolla_config: {}
+ docker_config: {}
diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml
new file mode 100644
index 00000000..e5c78d6c
--- /dev/null
+++ b/docker/services/nova-api.yaml
@@ -0,0 +1,151 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Nova API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaApiImage:
+ description: image
+ default: 'centos-binary-nova-api:latest'
+ type: string
+ DockerNovaConfigImage:
+ description: image
+ default: 'centos-binary-nova-base:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ NovaApiBase:
+ type: ../../puppet/services/nova-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Nova API role.
+ value:
+ service_name: {get_attr: [NovaApiBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [NovaApiBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [NovaApiBase, role_data, step_config]
+ service_config_settings: {get_attr: [NovaApiBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: nova
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_api.json:
+ command: /usr/bin/nova-api
+ config_files:
+ - dest: /etc/nova/nova.conf
+ owner: nova
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ docker_config:
+ step_3:
+ nova_api_db_sync:
+ start_order: 1
+ image: &nova_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ]
+ net: host
+ detach: false
+ volumes: &nova_api_volumes
+ - /var/lib/config-data/nova/etc/:/etc/:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ command: ['/usr/bin/nova-manage', 'api_db', 'sync']
+ # FIXME: we probably want to wait on the 'cell_v2 update' in order for this
+ # to be capable of upgrading a baremetal setup. This is to ensure the name
+ # of the cell is 'default'
+ nova_api_map_cell0:
+ start_order: 2
+ image: *nova_api_image
+ net: host
+ detach: false
+ volumes: *nova_api_volumes
+ command:
+ - '/usr/bin/nova-manage'
+ - 'cell_v2'
+ - 'map_cell0'
+ nova_api_create_default_cell:
+ start_order: 3
+ image: *nova_api_image
+ net: host
+ detach: false
+ volumes: *nova_api_volumes
+ # NOTE: allowing the exit code 2 is a dirty way of making
+ # this idempotent (if the resource already exists a conflict
+ # is raised)
+ exit_codes: [0,2]
+ command:
+ - '/usr/bin/nova-manage'
+ - 'cell_v2'
+ - 'create_cell'
+ - '--name="default"'
+ nova_db_sync:
+ start_order: 4
+ image: *nova_api_image
+ net: host
+ detach: false
+ volumes: *nova_api_volumes
+ command: ['/usr/bin/nova-manage', 'db', 'sync']
+ step_4:
+ nova_api:
+ start_order: 2
+ image: *nova_api_image
+ net: host
+ user: nova
+ privileged: true
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_5:
+ nova_api_discover_hosts:
+ start_order: 1
+ image: *nova_api_image
+ net: host
+ detach: false
+ volumes: *nova_api_volumes
+ command:
+ - '/usr/bin/nova-manage'
+ - 'cell_v2'
+ - 'discover_hosts'
+ upgrade_tasks:
+ - name: Stop and disable nova_api service
+ tags: step2
+ service: name=openstack-nova-api state=stopped enabled=no
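The start_order values in step_3 and step_5 above amount to the following cells v2 bootstrap sequence (a summary sketch of the commands the one-shot containers run, not an additional template):

    step_3:
      - nova-manage api_db sync                           # start_order 1
      - nova-manage cell_v2 map_cell0                     # start_order 2
      - nova-manage cell_v2 create_cell --name="default"  # start_order 3, exit code 2 tolerated
      - nova-manage db sync                               # start_order 4
    step_5:
      - nova-manage cell_v2 discover_hosts                # after nova_api is running (step_4)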
diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml
index e765609e..957eed7f 100644
--- a/docker/services/nova-compute.yaml
+++ b/docker/services/nova-compute.yaml
@@ -10,7 +10,7 @@ parameters:
type: string
DockerNovaComputeImage:
description: image
- default: 'centos-binary-nova-compute'
+ default: 'centos-binary-nova-compute:latest'
type: string
ServiceNetMap:
default: {}
@@ -29,53 +29,69 @@ parameters:
resources:
+
NovaComputeBase:
type: ../../puppet/services/nova-compute.yaml
properties:
EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
outputs:
role_data:
description: Role data for the Nova Compute service.
value:
+ service_name: {get_attr: [NovaComputeBase, role_data, service_name]}
config_settings: {get_attr: [NovaComputeBase, role_data, config_settings]}
- step_config: {get_attr: [NovaComputeBase, role_data, step_config]}
- puppet_tags: nova_config,nova_paste_api_ini
+ step_config: &step_config
+ get_attr: [NovaComputeBase, role_data, step_config]
+ puppet_config:
+ config_volume: nova_libvirt
+ puppet_tags: nova_config,nova_paste_api_ini
+ step_config: *step_config
+ config_image: &nova_compute_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
kolla_config:
- /var/lib/etc-data/json-config/nova-compute.json:
+ /var/lib/kolla/config_files/nova-compute.json:
command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
config_files:
- dest: /etc/nova/nova.conf
owner: nova
perm: '0600'
- source: /var/lib/kolla/config_files/nova.conf
+ source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
- dest: /etc/nova/rootwrap.conf
owner: nova
perm: '0600'
- source: /var/lib/kolla/config_files/rootwrap.conf
+ source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf
docker_config:
- step_1:
+ # FIXME: run discover hosts here
+ step_4:
novacompute:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+ image: *nova_compute_image
net: host
privileged: true
user: root
restart: always
volumes:
- - /var/lib/etc-data/json-config/nova-compute.json:/var/lib/kolla/config_files/config.json
- - /var/lib/etc-data/nova/nova.conf:/var/lib/kolla/config_files/nova.conf:ro
- - /var/lib/etc-data/nova/rootwrap.conf:/var/lib/kolla/config_files/rootwrap.conf:ro
+ - /var/lib/kolla/config_files/nova-compute.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_libvirt:/var/lib/kolla/config_files/src:ro
+ - /dev:/dev
+ - /etc/iscsi:/etc/iscsi
- /etc/localtime:/etc/localtime:ro
- /lib/modules:/lib/modules:ro
- /run:/run
- - /dev:/dev
- - logs:/var/log/kolla/
- - /etc/iscsi:/etc/iscsi
- - libvirtd:/var/lib/libvirt
- - nova_compute:/var/lib/nova/
+ - /var/lib/nova:/var/lib/nova
+ - /var/lib/libvirt:/var/lib/libvirt
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- step_2: {}
+ host_prep_tasks:
+ - name: create /var/lib/libvirt
+ file:
+ path: /var/lib/libvirt
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable nova-compute service
+ tags: step2
+ service: name=nova-compute state=stopped enabled=no
diff --git a/docker/services/nova-conductor.yaml b/docker/services/nova-conductor.yaml
new file mode 100644
index 00000000..f85cf546
--- /dev/null
+++ b/docker/services/nova-conductor.yaml
@@ -0,0 +1,91 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Nova Conductor service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaConductorImage:
+ description: image
+ default: 'centos-binary-nova-conductor:latest'
+ type: string
+ DockerNovaConfigImage:
+ description: image
+ default: 'centos-binary-nova-base:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ NovaConductorBase:
+ type: ../../puppet/services/nova-conductor.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Conductor service.
+ value:
+ service_name: {get_attr: [NovaConductorBase, role_data, service_name]}
+ config_settings: {get_attr: [NovaConductorBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [NovaConductorBase, role_data, step_config]
+ service_config_settings: {get_attr: [NovaConductorBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: nova
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_conductor.json:
+ command: /usr/bin/nova-conductor
+ config_files:
+ - dest: /etc/nova/nova.conf
+ owner: nova
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ docker_config:
+ step_4:
+ nova_conductor:
+ image: &nova_conductor_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ]
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable nova_conductor service
+ tags: step2
+ service: name=openstack-nova-conductor state=stopped enabled=no
diff --git a/docker/services/nova-ironic.yaml b/docker/services/nova-ironic.yaml
new file mode 100644
index 00000000..3d849f59
--- /dev/null
+++ b/docker/services/nova-ironic.yaml
@@ -0,0 +1,94 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Nova Ironic Compute service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaComputeImage:
+ description: image
+ default: 'centos-binary-nova-compute-ironic:latest'
+ type: string
+ DockerNovaConfigImage:
+ description: image
+ default: 'centos-binary-nova-base:latest'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+
+ NovaIronicBase:
+ type: ../../puppet/services/nova-ironic.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Compute service.
+ value:
+ service_name: {get_attr: [NovaIronicBase, role_data, service_name]}
+ config_settings: {get_attr: [NovaIronicBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [NovaIronicBase, role_data, step_config]
+ puppet_config:
+ config_volume: nova
+ puppet_tags: nova_config,nova_paste_api_ini
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_ironic.json:
+ command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
+ config_files:
+ - dest: /etc/nova/nova.conf
+ owner: nova
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ - dest: /etc/nova/rootwrap.conf
+ owner: nova
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf
+ docker_config:
+ step_5:
+ novacompute:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+ net: host
+ privileged: true
+ user: root
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/nova_ironic.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova:/var/lib/kolla/config_files/src:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /dev:/dev
+ - /etc/iscsi:/etc/iscsi
+ - /var/lib/nova/:/var/lib/nova
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable nova-compute service
+ tags: step2
+ service: name=nova-compute state=stopped enabled=no
diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml
index 004d624a..480bb80e 100644
--- a/docker/services/nova-libvirt.yaml
+++ b/docker/services/nova-libvirt.yaml
@@ -10,7 +10,13 @@ parameters:
type: string
DockerLibvirtImage:
description: image
- default: 'centos-binary-libvirt'
+ default: 'centos-binary-nova-libvirt:latest'
+ type: string
+ # we configure libvirt via the nova-compute container due to coupling
+ # in the puppet modules
+ DockerNovaConfigImage:
+ description: image
+ default: 'centos-binary-nova-compute:latest'
type: string
ServiceNetMap:
default: {}
@@ -33,50 +39,68 @@ resources:
type: ../../puppet/services/nova-libvirt.yaml
properties:
EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
outputs:
role_data:
description: Role data for the Libvirt service.
value:
+ service_name: {get_attr: [NovaLibvirtBase, role_data, service_name]}
config_settings: {get_attr: [NovaLibvirtBase, role_data, config_settings]}
- step_config: {get_attr: [NovaLibvirtBase, role_data, step_config]}
- puppet_tags: nova_config
+ step_config: &step_config
+ get_attr: [NovaLibvirtBase, role_data, step_config]
+ puppet_config:
+ config_volume: nova_libvirt
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
- /var/lib/etc-data/json-config/nova-libvirt.json:
+ /var/lib/kolla/config_files/nova-libvirt.json:
command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
config_files:
- dest: /etc/libvirt/libvirtd.conf
owner: root
perm: '0644'
- source: /var/lib/kolla/config_files/libvirtd.conf
+ source: /var/lib/kolla/config_files/src/etc/libvirt/libvirtd.conf
docker_config:
- step_1:
+ step_3:
nova_libvirt:
image:
list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
net: host
pid: host
privileged: true
restart: always
volumes:
- - /var/lib/etc-data/json-config/nova-libvirt.json:/var/lib/kolla/config_files/config.json
- - /var/lib/etc-data/libvirt/libvirtd.conf:/var/lib/kolla/config_files/libvirtd.conf
- # NOTE(mandre) Ideally the qemu.conf file is mounted in
- # /var/lib/kolla/config_files and copied to the right place but
- # copy-json.py doesn't allow us to do that without appending the
- # file as an additional config on the CLI
- - /var/lib/etc-data/libvirt/qemu.conf:/etc/libvirt/qemu.conf:ro
+ - /var/lib/kolla/config_files/nova-libvirt.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_libvirt:/var/lib/kolla/config_files/src:ro
+ - /dev:/dev
- /etc/localtime:/etc/localtime:ro
- /lib/modules:/lib/modules:ro
- /run:/run
- - /dev:/dev
- /sys/fs/cgroup:/sys/fs/cgroup
- - logs:/var/log/kolla/
- - libvirtd:/var/lib/libvirt
- - nova_compute:/var/lib/nova/
- - nova_libvirt_qemu:/etc/libvirt/qemu
+ - /var/lib/nova:/var/lib/nova
+ # Needed to use host's virtlogd
+ - /var/run/libvirt:/var/run/libvirt
+ - /var/lib/libvirt:/var/lib/libvirt
+ - /etc/libvirt/qemu:/etc/libvirt/qemu
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- step_2: {}
+ host_prep_tasks:
+ - name: create libvirt persistent data directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /etc/libvirt/qemu
+ - /var/lib/libvirt
+ upgrade_tasks:
+ - name: Stop and disable libvirtd service
+ tags: step2
+ service: name=libvirtd state=stopped enabled=no
diff --git a/docker/services/nova-metadata.yaml b/docker/services/nova-metadata.yaml
new file mode 100644
index 00000000..b452c61b
--- /dev/null
+++ b/docker/services/nova-metadata.yaml
@@ -0,0 +1,50 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Nova Metadata service
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ NovaMetadataBase:
+ type: ../../puppet/services/nova-metadata.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Metadata service.
+ value:
+ service_name: {get_attr: [NovaMetadataBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [NovaMetadataBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [NovaMetadataBase, role_data, step_config]
+ service_config_settings: {get_attr: [NovaMetadataBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ''
+ puppet_tags: ''
+ step_config: *step_config
+ config_image: ''
+ kolla_config: {}
+ docker_config: {}
diff --git a/docker/services/nova-placement.yaml b/docker/services/nova-placement.yaml
new file mode 100644
index 00000000..e49839b5
--- /dev/null
+++ b/docker/services/nova-placement.yaml
@@ -0,0 +1,110 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Nova Placement API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaPlacementImage:
+ description: image
+ default: 'centos-binary-nova-placement-api'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ NovaPlacementBase:
+ type: ../../puppet/services/nova-placement.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Placement API role.
+ value:
+ service_name: {get_attr: [NovaPlacementBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [NovaPlacementBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [NovaPlacementBase, role_data, step_config]
+ service_config_settings: {get_attr: [NovaPlacementBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: nova_placement
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image: &nova_placement_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_placement.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ config_files:
+ - dest: /etc/nova/nova.conf
+ owner: nova
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ - dest: /etc/httpd/conf.d/10-placement_wsgi.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-placement_wsgi.conf
+ # puppet generates a stubbed out version of the stock one so we
+ # copy it in to overwrite the existing one
+ - dest: /etc/httpd/conf.d/00-nova-placement-api.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/00-nova-placement-api.conf
+ - dest: /etc/httpd/conf/httpd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
+ - dest: /etc/httpd/conf/ports.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
+ - dest: /var/www/cgi-bin/nova/nova-placement-api
+ owner: nova
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/www/cgi-bin/nova/nova-placement-api
+ docker_config:
+ # start this early so it is up before computes start reporting
+ step_3:
+ nova_placement:
+ start_order: 1
+ image: *nova_placement_image
+ net: host
+ user: root
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_placement/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/nova_placement/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable nova_placement service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped enabled=no
diff --git a/docker/services/nova-scheduler.yaml b/docker/services/nova-scheduler.yaml
new file mode 100644
index 00000000..de1199e1
--- /dev/null
+++ b/docker/services/nova-scheduler.yaml
@@ -0,0 +1,90 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Nova Scheduler service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaSchedulerImage:
+ description: image
+ default: 'centos-binary-nova-scheduler:latest'
+ type: string
+ DockerNovaConfigImage:
+ description: image
+ default: 'centos-binary-nova-base:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ NovaSchedulerBase:
+ type: ../../puppet/services/nova-scheduler.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Scheduler service.
+ value:
+ service_name: {get_attr: [NovaSchedulerBase, role_data, service_name]}
+ config_settings: {get_attr: [NovaSchedulerBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [NovaSchedulerBase, role_data, step_config]
+ service_config_settings: {get_attr: [NovaSchedulerBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: nova
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_scheduler.json:
+ command: /usr/bin/nova-scheduler
+ config_files:
+ - dest: /etc/nova/nova.conf
+ owner: nova
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ docker_config:
+ step_4:
+ nova_scheduler:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaSchedulerImage} ]
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable nova_scheduler service
+ tags: step2
+ service: name=openstack-nova-scheduler state=stopped enabled=no
diff --git a/docker/services/panko-api.yaml b/docker/services/panko-api.yaml
new file mode 100644
index 00000000..32efc5d7
--- /dev/null
+++ b/docker/services/panko-api.yaml
@@ -0,0 +1,119 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Panko service configured with docker
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerPankoApiImage:
+ description: image
+ default: 'centos-binary-panko-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ PankoApiPuppetBase:
+ type: ../../puppet/services/panko-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Panko API role.
+ value:
+ service_name: {get_attr: [PankoApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [PankoApiPuppetBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [PankoApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [PankoApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: panko
+ puppet_tags: panko_api_paste_ini,panko_config
+ step_config: *step_config
+ config_image: &panko_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerPankoApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/panko-api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ config_files:
+ - dest: /etc/httpd/conf.d/10-panko_wsgi.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-panko_wsgi.conf
+ - dest: /etc/httpd/conf/httpd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
+ - dest: /etc/httpd/conf/ports.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
+ - dest: /etc/panko/panko.conf
+ owner: panko
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/panko/panko.conf
+ - dest: /var/www/cgi-bin/panko/app
+ owner: panko
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/www/cgi-bin/panko/app
+ docker_config:
+ step_3:
+ panko-init-log:
+ start_order: 0
+ image: *panko_image
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/panko && chown panko:panko /var/log/panko']
+ volumes:
+ - logs:/var/log
+ panko_db_sync:
+ start_order: 1
+ image: *panko_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ - /var/lib/config-data/panko/etc/panko:/etc/panko:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log
+ command: /usr/bin/panko-dbsync
+ step_4:
+ panko_api:
+ start_order: 2
+ image: *panko_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/panko-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/panko/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/panko/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/rabbitmq.yaml b/docker/services/rabbitmq.yaml
new file mode 100644
index 00000000..341ec3de
--- /dev/null
+++ b/docker/services/rabbitmq.yaml
@@ -0,0 +1,130 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Rabbitmq service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerRabbitmqImage:
+ description: image
+ default: 'centos-binary-rabbitmq:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RabbitCookie:
+ type: string
+ default: ''
+ hidden: true
+
+resources:
+
+ RabbitmqBase:
+ type: ../../puppet/services/rabbitmq.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Rabbitmq API role.
+ value:
+ service_name: {get_attr: [RabbitmqBase, role_data, service_name]}
+ config_settings: {get_attr: [RabbitmqBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [RabbitmqBase, role_data, step_config]
+ service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: rabbitmq
+ puppet_tags: file
+ step_config: *step_config
+ config_image: &rabbitmq_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/rabbitmq.json:
+ command: /usr/lib/rabbitmq/bin/rabbitmq-server
+ config_files:
+ - dest: /etc/rabbitmq/rabbitmq.config
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq.config
+ - dest: /etc/rabbitmq/enabled_plugins
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/rabbitmq/enabled_plugins
+ - dest: /etc/rabbitmq/rabbitmq-env.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq-env.conf
+ - dest: /etc/rabbitmq/rabbitmqadmin.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmqadmin.conf
+ docker_config:
+ step_1:
+ rabbitmq_bootstrap:
+ start_order: 0
+ image: *rabbitmq_image
+ net: host
+ privileged: false
+ volumes:
+ - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /var/lib/rabbitmq:/var/lib/rabbitmq
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - KOLLA_BOOTSTRAP=True
+ -
+ list_join:
+ - '='
+ - - 'RABBITMQ_CLUSTER_COOKIE'
+ -
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: RabbitCookie}
+ - {get_param: [DefaultPasswords, rabbit_cookie]}
+ rabbitmq:
+ start_order: 1
+ image: *rabbitmq_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /var/lib/rabbitmq:/var/lib/rabbitmq
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /var/lib/rabbitmq
+ file:
+ path: /var/lib/rabbitmq
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable rabbitmq service
+ tags: step2
+ service: name=rabbitmq-server state=stopped enabled=no
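The yaql expression feeding RABBITMQ_CLUSTER_COOKIE simply selects the first non-empty value between the RabbitCookie parameter and the generated default. A sketch of the evaluation with hypothetical inputs:

    # input data
    passwords:
      - ''                   # RabbitCookie, left unset
      - 'generated-cookie'   # DefaultPasswords.rabbit_cookie
    # $.data.passwords.where($ != '').first()  ->  'generated-cookie'
    # resulting container environment entry
    - RABBITMQ_CLUSTER_COOKIE=generated-cookie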
diff --git a/docker/services/services.yaml b/docker/services/services.yaml
index 8c31107f..84c56b5b 100644
--- a/docker/services/services.yaml
+++ b/docker/services/services.yaml
@@ -66,10 +66,26 @@ outputs:
global_config_settings:
{get_attr: [PuppetServices, role_data, global_config_settings]}
step_config:
- {get_attr: [PuppetServices, role_data, step_config]}
- puppet_tags: {list_join: [",", {get_attr: [ServiceChain, role_data, puppet_tags]}]}
+ {get_attr: [ServiceChain, role_data, step_config]}
+ puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]}
kolla_config:
map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
docker_config:
- step_1: {map_merge: {get_attr: [ServiceChain, role_data, docker_config, step_1]}}
- step_2: {map_merge: {get_attr: [ServiceChain, role_data, docker_config, step_2]}}
+ {get_attr: [ServiceChain, role_data, docker_config]}
+ docker_puppet_tasks:
+ {get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
+ host_prep_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks
+ expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+ upgrade_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
+ expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+ upgrade_batch_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
+ expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
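Each of the three aggregations selects the per-service lists, drops nulls, flattens, and de-duplicates. For example, if two services emit the identical upgrade task mentioned in the comment, distinct() keeps a single copy (hypothetical role_data values):

    # $.data: one role_data map per service
    - upgrade_tasks:
        - name: Update all packages
          tags: step3
          yum: name=* state=latest
    - upgrade_tasks:
        - name: Update all packages
          tags: step3
          yum: name=* state=latest
    - {}        # services without upgrade_tasks are filtered by where($ != null)
    # after flatten().distinct(), a single copy remains
    - name: Update all packages
      tags: step3
      yum: name=* state=latest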
diff --git a/docker/services/swift-proxy.yaml b/docker/services/swift-proxy.yaml
new file mode 100644
index 00000000..0d7cd7b9
--- /dev/null
+++ b/docker/services/swift-proxy.yaml
@@ -0,0 +1,87 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized swift proxy service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerSwiftProxyImage:
+ description: image
+ default: 'centos-binary-swift-proxy-server:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ SwiftProxyBase:
+ type: ../../puppet/services/swift-proxy.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the swift proxy.
+ value:
+ service_name: {get_attr: [SwiftProxyBase, role_data, service_name]}
+ config_settings: {get_attr: [SwiftProxyBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [SwiftProxyBase, role_data, step_config]
+ service_config_settings: {get_attr: [SwiftProxyBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: swift
+ puppet_tags: swift_proxy_config
+ step_config: *step_config
+ config_image: &swift_proxy_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/swift_proxy.json:
+ command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf
+ docker_config:
+ step_4:
+ swift_proxy:
+ image: *swift_proxy_image
+ net: host
+ user: swift
+ restart: always
+ # I'm mounting /etc/swift as rw. Are the rings written to at all during runtime?
+ volumes:
+ - /var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /srv/node
+ file:
+ path: /srv/node
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable swift_proxy service
+ tags: step2
+ service: name=openstack-swift-proxy state=stopped enabled=no
diff --git a/docker/services/swift-ringbuilder.yaml b/docker/services/swift-ringbuilder.yaml
new file mode 100644
index 00000000..21102505
--- /dev/null
+++ b/docker/services/swift-ringbuilder.yaml
@@ -0,0 +1,82 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Swift Ringbuilder
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerSwiftProxyImage:
+ description: image
+ default: 'centos-binary-swift-proxy-server:latest'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ SwiftMinPartHours:
+ type: number
+ default: 1
+ description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance.
+ SwiftPartPower:
+ default: 10
+ description: Partition Power to use when building Swift rings
+ type: number
+ SwiftRingBuild:
+ default: true
+ description: Whether to manage Swift rings or not
+ type: boolean
+ SwiftReplicas:
+ type: number
+ default: 3
+ description: How many replicas to use in the swift rings.
+ SwiftRawDisks:
+ default: {}
+ description: 'A hash of additional raw devices to use as Swift backend (e.g. {sdb: {}})'
+ type: json
+ SwiftUseLocalDir:
+ default: true
+ description: 'Use a local directory for Swift storage services when building rings'
+ type: boolean
+
+resources:
+
+ SwiftRingbuilderBase:
+ type: ../../puppet/services/swift-ringbuilder.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for Swift Ringbuilder configuration in containers.
+ value:
+ service_name: {get_attr: [SwiftRingbuilderBase, role_data, service_name]}
+ config_settings: {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [SwiftRingbuilderBase, role_data, step_config]
+ service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: 'swift'
+ puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ kolla_config: {}
+ docker_config: {}
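For reference, the ring parameters above are typically overridden together in an environment file; a hypothetical snippet using the formats given in the parameter descriptions:

    parameter_defaults:
      SwiftReplicas: 3
      SwiftPartPower: 10
      SwiftMinPartHours: 1
      SwiftUseLocalDir: false
      SwiftRawDisks:
        sdb: {}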
diff --git a/docker/services/swift-storage.yaml b/docker/services/swift-storage.yaml
new file mode 100644
index 00000000..301ef69b
--- /dev/null
+++ b/docker/services/swift-storage.yaml
@@ -0,0 +1,334 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Swift Storage services.
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerSwiftProxyImage:
+ description: image
+ default: 'centos-binary-swift-proxy-server:latest'
+ type: string
+ DockerSwiftAccountImage:
+ description: image
+ default: 'centos-binary-swift-account:latest'
+ type: string
+ DockerSwiftContainerImage:
+ description: image
+ default: 'centos-binary-swift-container:latest'
+ type: string
+ DockerSwiftObjectImage:
+ description: image
+ default: 'centos-binary-swift-object:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+
+resources:
+
+ SwiftStorageBase:
+ type: ../../puppet/services/swift-storage.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the swift storage services.
+ value:
+ service_name: {get_attr: [SwiftStorageBase, role_data, service_name]}
+ config_settings: {get_attr: [SwiftStorageBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [SwiftStorageBase, role_data, step_config]
+ service_config_settings: {get_attr: [SwiftStorageBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: swift
+ puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config
+ step_config: *step_config
+ config_image: &swift_proxy_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/swift_account_auditor.json:
+ command: /usr/bin/swift-account-auditor /etc/swift/account-server.conf
+ /var/lib/kolla/config_files/swift_account_reaper.json:
+ command: /usr/bin/swift-account-reaper /etc/swift/account-server.conf
+ /var/lib/kolla/config_files/swift_account_replicator.json:
+ command: /usr/bin/swift-account-replicator /etc/swift/account-server.conf
+ /var/lib/kolla/config_files/swift_account_server.json:
+ command: /usr/bin/swift-account-server /etc/swift/account-server.conf
+ /var/lib/kolla/config_files/swift_container_auditor.json:
+ command: /usr/bin/swift-container-auditor /etc/swift/container-server.conf
+ /var/lib/kolla/config_files/swift_container_replicator.json:
+ command: /usr/bin/swift-container-replicator /etc/swift/container-server.conf
+ /var/lib/kolla/config_files/swift_container_updater.json:
+ command: /usr/bin/swift-container-updater /etc/swift/container-server.conf
+ /var/lib/kolla/config_files/swift_container_server.json:
+ command: /usr/bin/swift-container-server /etc/swift/container-server.conf
+ /var/lib/kolla/config_files/swift_object_auditor.json:
+ command: /usr/bin/swift-object-auditor /etc/swift/object-server.conf
+ /var/lib/kolla/config_files/swift_object_expirer.json:
+ command: /usr/bin/swift-object-expirer /etc/swift/object-expirer.conf
+ /var/lib/kolla/config_files/swift_object_replicator.json:
+ command: /usr/bin/swift-object-replicator /etc/swift/object-server.conf
+ /var/lib/kolla/config_files/swift_object_updater.json:
+ command: /usr/bin/swift-object-updater /etc/swift/object-server.conf
+ /var/lib/kolla/config_files/swift_object_server.json:
+ command: /usr/bin/swift-object-server /etc/swift/object-server.conf
+ docker_config:
+ step_3:
+ # The puppet config sets this up but we don't have a way to mount the named
+ # volume during the configuration stage. We just need to create this
+ # directory and make sure it's owned by swift.
+ swift_setup_srv:
+ image: &swift_account_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ user: root
+ command: ['chown', '-R', 'swift:', '/srv/node']
+ volumes:
+ - /srv/node:/srv/node
+ step_4:
+ swift_account_auditor:
+ image: *swift_account_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: &kolla_env
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ swift_account_reaper:
+ image: *swift_account_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: *kolla_env
+ swift_account_replicator:
+ image: *swift_account_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: *kolla_env
+ swift_account_server:
+ image: *swift_account_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: *kolla_env
+ swift_container_auditor:
+ image: &swift_container_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: *kolla_env
+ swift_container_replicator:
+ image: *swift_container_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: *kolla_env
+ swift_container_updater:
+ image: *swift_container_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: *kolla_env
+ swift_container_server:
+ image: *swift_container_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: *kolla_env
+ swift_object_auditor:
+ image: &swift_object_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: *kolla_env
+ swift_object_expirer:
+ image: *swift_proxy_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: *kolla_env
+ swift_object_replicator:
+ image: *swift_object_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: *kolla_env
+ swift_object_updater:
+ image: *swift_object_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: *kolla_env
+ swift_object_server:
+ image: *swift_object_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ environment: *kolla_env
+ host_prep_tasks:
+ - name: create /srv/node
+ file:
+ path: /srv/node
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable swift storage services
+ tags: step2
+ service: name={{ item }} state=stopped enabled=no
+ with_items:
+ - openstack-swift-account-auditor
+ - openstack-swift-account-reaper
+ - openstack-swift-account-replicator
+ - openstack-swift-account
+ - openstack-swift-container-auditor
+ - openstack-swift-container-replicator
+ - openstack-swift-container-updater
+ - openstack-swift-container
+ - openstack-swift-object-auditor
+ - openstack-swift-object-replicator
+ - openstack-swift-object-updater
+ - openstack-swift-object
diff --git a/docker/services/zaqar.yaml b/docker/services/zaqar.yaml
new file mode 100644
index 00000000..3ec819e0
--- /dev/null
+++ b/docker/services/zaqar.yaml
@@ -0,0 +1,106 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Zaqar services
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerZaqarImage:
+ description: image
+ default: 'centos-binary-zaqar:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ZaqarBase:
+ type: ../../puppet/services/zaqar.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Zaqar API role.
+ value:
+ service_name: {get_attr: [ZaqarBase, role_data, service_name]}
+ config_settings: {get_attr: [ZaqarBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [ZaqarBase, role_data, step_config]
+ service_config_settings: {get_attr: [ZaqarBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: zaqar
+ puppet_tags: zaqar_config
+ step_config: *step_config
+ config_image: &zaqar_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/zaqar.json:
+ command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf
+ config_files:
+ - dest: /etc/zaqar/zaqar.conf
+ owner: zaqar
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf
+ /var/lib/kolla/config_files/zaqar_websocket.json:
+ command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf --config-file /etc/zaqar/1.conf
+ config_files:
+ - dest: /etc/zaqar/zaqar.conf
+ owner: zaqar
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf
+ - dest: /etc/zaqar/1.conf
+ owner: zaqar
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/zaqar/1.conf
+ docker_config:
+ step_4:
+ zaqar:
+ image: *zaqar_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ zaqar_websocket:
+ image: *zaqar_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable zaqar service
+ tags: step2
+ service: name=openstack-zaqar.service state=stopped enabled=no
+
diff --git a/environments/cadf.yaml b/environments/cadf.yaml
new file mode 100644
index 00000000..af5c7fdf
--- /dev/null
+++ b/environments/cadf.yaml
@@ -0,0 +1,2 @@
+parameter_defaults:
+ KeystoneNotificationFormat: cadf
diff --git a/environments/contrail/roles_data_contrail.yaml b/environments/contrail/roles_data_contrail.yaml
index 5f6c4691..d6d6f291 100644
--- a/environments/contrail/roles_data_contrail.yaml
+++ b/environments/contrail/roles_data_contrail.yaml
@@ -29,6 +29,7 @@
CountDefault: 1
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephMds
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
@@ -115,6 +116,7 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
@@ -140,6 +142,7 @@
- name: BlockStorage
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -156,6 +159,7 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
@@ -173,6 +177,7 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -188,6 +193,7 @@
- name: ContrailController
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailConfig
- OS::TripleO::Services::ContrailControl
- OS::TripleO::Services::ContrailDatabase
@@ -203,6 +209,7 @@
- name: ContrailAnalytics
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailAnalytics
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -215,6 +222,7 @@
- name: ContrailAnalyticsDatabase
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailAnalyticsDatabase
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -227,6 +235,7 @@
- name: ContrailTsn
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailTsn
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
diff --git a/environments/deployed-server-pacemaker-environment.yaml b/environments/deployed-server-pacemaker-environment.yaml
new file mode 100644
index 00000000..85fa7d2f
--- /dev/null
+++ b/environments/deployed-server-pacemaker-environment.yaml
@@ -0,0 +1,4 @@
+resource_registry:
+ OS::TripleO::Tasks::ControllerDeployedServerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerDeployedServerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerDeployedServerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
diff --git a/environments/docker.yaml b/environments/docker.yaml
index 4f5b36b4..755e94c2 100644
--- a/environments/docker.yaml
+++ b/environments/docker.yaml
@@ -1,30 +1,59 @@
resource_registry:
- # Docker container with heat agents for containerized compute node.
- OS::TripleO::Compute::NodeUserData: ../docker/firstboot/install_docker_agents.yaml
+ # This can be used when you don't want to run puppet on the host,
+ # e.g. on Atomic, but it has been replaced with OS::TripleO::Services::Docker
+ # OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+ OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
+ # The compute node still needs extra initialization steps
+ OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+
+ # NOTE(dprince): add roles here to be docker-enabled as we support them
OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml
OS::TripleO::Services::ComputeNeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
OS::TripleO::Services::NovaCompute: ../docker/services/nova-compute.yaml
- # NOTE (dprince) here we set new roles to be docker enabled as we add support
- #OS::TripleO::ComputePostDeploySteps: ../docker/post.yaml
- # NOTE (mandre) Defining per role post deploy steps doesn't work yet
- # Set a global PostDeploySteps that works for both containerized and
- # non-containerized roles
+ OS::TripleO::Services::Keystone: ../docker/services/keystone.yaml
+ OS::TripleO::Services::GlanceApi: ../docker/services/glance-api.yaml
+ OS::TripleO::Services::HeatApi: ../docker/services/heat-api.yaml
+ OS::TripleO::Services::HeatApiCfn: ../docker/services/heat-api-cfn.yaml
+ OS::TripleO::Services::HeatEngine: ../docker/services/heat-engine.yaml
+ OS::TripleO::Services::NovaApi: ../docker/services/nova-api.yaml
+ OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml
+ OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml
+ OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml
+ OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
+ OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
+ OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
+ OS::TripleO::Services::NeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
+ OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
+ OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
+ OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
+ OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
+ OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
+ OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml
+ OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml
+ OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml
+ OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml
+ OS::TripleO::Services::GnocchiApi: ../docker/services/gnocchi-api.yaml
+ OS::TripleO::Services::GnocchiMetricd: ../docker/services/gnocchi-metricd.yaml
+ OS::TripleO::Services::GnocchiStatsd: ../docker/services/gnocchi-statsd.yaml
+ OS::TripleO::Services::AodhApi: ../docker/services/aodh-api.yaml
+ OS::TripleO::Services::AodhEvaluator: ../docker/services/aodh-evaluator.yaml
+ OS::TripleO::Services::AodhNotifier: ../docker/services/aodh-notifier.yaml
+ OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
+ OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
+
OS::TripleO::PostDeploySteps: ../docker/post.yaml
+ OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
+
OS::TripleO::Services: ../docker/services/services.yaml
parameter_defaults:
# Defaults to 'tripleoupstream'. Specify a local docker registry
- # Example: 192.0.2.1:8787/tripleoupstream
+ # Example: 192.168.24.1:8787/tripleoupstream
DockerNamespace: tripleoupstream
- # Enable local Docker registry
DockerNamespaceIsRegistry: false
- DockerAgentImage: heat-docker-agents:newton
- # Docker containers
- DockerNovaComputeImage: centos-binary-nova-compute:newton
- DockerLibvirtImage: centos-binary-nova-libvirt:newton
- DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:newton
ComputeServices:
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::ComputeNeutronOvsAgent
+ - OS::TripleO::Services::Docker
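As a sketch of the registry knobs kept above (DockerNamespace / DockerNamespaceIsRegistry), a local-registry override might look like the following; the address is just the example value from the comment and the file name is arbitrary:

# Hypothetical override: pull images from a local registry instead of 'tripleoupstream'.
cat > docker-registry-override.yaml <<'EOF'
parameter_defaults:
  DockerNamespace: 192.168.24.1:8787/tripleoupstream
  DockerNamespaceIsRegistry: true
EOF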
diff --git a/environments/enable-internal-tls.yaml b/environments/enable-internal-tls.yaml
index ff4ecfbe..e245a6af 100644
--- a/environments/enable-internal-tls.yaml
+++ b/environments/enable-internal-tls.yaml
@@ -2,15 +2,20 @@
# TLS for the internal network via certmonger
parameter_defaults:
EnableInternalTLS: true
+ RabbitClientUseSSL: true
# Required for novajoin to enroll the overcloud nodes
ServerMetadata:
ipa_enroll: True
resource_registry:
+ OS::TripleO::Services::CertmongerUser: ../puppet/services/certmonger-user.yaml
+
OS::TripleO::Services::HAProxyInternalTLS: ../puppet/services/haproxy-internal-tls-certmonger.yaml
OS::TripleO::Services::ApacheTLS: ../puppet/services/apache-internal-tls-certmonger.yaml
OS::TripleO::Services::MySQLTLS: ../puppet/services/database/mysql-internal-tls-certmonger.yaml
+ OS::TripleO::Services::RabbitMQTLS: ../puppet/services/rabbitmq-internal-tls-certmonger.yaml
+
# We use apache as a TLS proxy
OS::TripleO::Services::TLSProxyBase: ../puppet/services/apache.yaml
diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml
index 77fa5a49..8f74ec35 100644
--- a/environments/hyperconverged-ceph.yaml
+++ b/environments/hyperconverged-ceph.yaml
@@ -6,11 +6,13 @@ resource_registry:
parameter_defaults:
ComputeServices:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::Kernel
@@ -25,4 +27,8 @@ parameter_defaults:
- OS::TripleO::Services::OpenDaylightOvs
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::AuditD
+ - OS::TripleO::Services::Collectd
- OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::Vpp
+ - OS::TripleO::Services::MySQLClient
diff --git a/environments/low-memory-usage.yaml b/environments/low-memory-usage.yaml
index 47b2003d..3a606336 100644
--- a/environments/low-memory-usage.yaml
+++ b/environments/low-memory-usage.yaml
@@ -11,8 +11,8 @@ parameter_defaults:
SwiftWorkers: 1
GnocchiMetricdWorkers: 1
- ApacheMaxRequestWorkers: 32
- ApacheServerLimit: 32
+ ApacheMaxRequestWorkers: 100
+ ApacheServerLimit: 100
ControllerExtraConfig:
'nova::network::neutron::neutron_url_timeout': '60'
diff --git a/environments/major-upgrade-composable-steps-docker.yaml b/environments/major-upgrade-composable-steps-docker.yaml
new file mode 100644
index 00000000..5fa2f2d8
--- /dev/null
+++ b/environments/major-upgrade-composable-steps-docker.yaml
@@ -0,0 +1,10 @@
+resource_registry:
+ # FIXME(shardy) do we need to break major_upgrade_steps.yaml apart to
+ # enable docker-specific logic, or is just overriding PostUpgradeSteps
+ # enough (as we want to share the ansible task steps etc)?
+ OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
+parameter_defaults:
+ UpgradeLevelNovaCompute: auto
+ UpgradeInitCommonCommand: |
+ #!/bin/bash
+ # Ocata to Pike, put any needed host-level workarounds here
diff --git a/environments/major-upgrade-composable-steps.yaml b/environments/major-upgrade-composable-steps.yaml
index 44580b43..9ecc2251 100644
--- a/environments/major-upgrade-composable-steps.yaml
+++ b/environments/major-upgrade-composable-steps.yaml
@@ -2,3 +2,14 @@ resource_registry:
OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
parameter_defaults:
UpgradeLevelNovaCompute: auto
+ UpgradeInitCommonCommand: |
+ #!/bin/bash
+ # Newton to Ocata, we need to remove old hiera hook data and
+ # install ansible heat agents and ansible-pacemaker
+ set -eu
+ yum install -y openstack-heat-agents
+ yum install -y python-heat-agent-*
+ yum install -y ansible-pacemaker
+ rm -f /usr/libexec/os-apply-config/templates/etc/puppet/hiera.yaml
+ rm -f /usr/libexec/os-refresh-config/configure.d/40-hiera-datafiles
+ rm -f /etc/puppet/hieradata/*.yaml
diff --git a/environments/major-upgrade-converge-docker.yaml b/environments/major-upgrade-converge-docker.yaml
new file mode 100644
index 00000000..463206f1
--- /dev/null
+++ b/environments/major-upgrade-converge-docker.yaml
@@ -0,0 +1,7 @@
+# Use this to reset any mappings that are only used during upgrades,
+# once the update of all nodes is completed
+resource_registry:
+ OS::TripleO::PostDeploySteps: ../docker/post.yaml
+parameter_defaults:
+ UpgradeLevelNovaCompute: ''
+ UpgradeInitCommonCommand: ''
diff --git a/environments/major-upgrade-converge.yaml b/environments/major-upgrade-converge.yaml
index e3c0e531..f09fb20e 100644
--- a/environments/major-upgrade-converge.yaml
+++ b/environments/major-upgrade-converge.yaml
@@ -4,3 +4,4 @@ resource_registry:
OS::TripleO::PostDeploySteps: ../puppet/post.yaml
parameter_defaults:
UpgradeLevelNovaCompute: ''
+ UpgradeInitCommonCommand: ''
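A rough sketch of how these upgrade environments are usually sequenced (the exact workflow is an assumption, not something this change defines): the composable-steps environment drives the upgrade itself, and the converge environment is applied afterwards to clear UpgradeLevelNovaCompute and UpgradeInitCommonCommand:

# Sketch of the two-pass flow; template paths and any extra -e files are assumptions.
THT=/usr/share/openstack-tripleo-heat-templates
openstack overcloud deploy --templates $THT \
  -e $THT/environments/major-upgrade-composable-steps.yaml
openstack overcloud deploy --templates $THT \
  -e $THT/environments/major-upgrade-converge.yaml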
diff --git a/environments/net-bond-with-vlans-no-external.yaml b/environments/net-bond-with-vlans-no-external.yaml
index 75959a0b..cc27d4f0 100644
--- a/environments/net-bond-with-vlans-no-external.yaml
+++ b/environments/net-bond-with-vlans-no-external.yaml
@@ -20,7 +20,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller-no-external.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml
-
-# NOTE: with no external interface we should be able to use the
-# default Neutron l3_agent.ini setting for the external bridge (br-ex)
-# i.e. No need to set: NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-bond-with-vlans-v6.yaml b/environments/net-bond-with-vlans-v6.yaml
index 73dda3d9..dc6fdfe3 100644
--- a/environments/net-bond-with-vlans-v6.yaml
+++ b/environments/net-bond-with-vlans-v6.yaml
@@ -12,9 +12,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller-v6.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml
-
-parameter_defaults:
- # This sets 'external_network_bridge' in l3_agent.ini to an empty string
- # so that external networks act like provider bridge networks (they
- # will plug into br-int instead of br-ex)
- NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-bond-with-vlans.yaml b/environments/net-bond-with-vlans.yaml
index de8f8f74..38c31cac 100644
--- a/environments/net-bond-with-vlans.yaml
+++ b/environments/net-bond-with-vlans.yaml
@@ -11,9 +11,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml
-
-parameter_defaults:
- # This sets 'external_network_bridge' in l3_agent.ini to an empty string
- # so that external networks act like provider bridge networks (they
- # will plug into br-int instead of br-ex)
- NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-single-nic-linux-bridge-with-vlans.yaml b/environments/net-single-nic-linux-bridge-with-vlans.yaml
index fd80bb9b..f34cfb92 100644
--- a/environments/net-single-nic-linux-bridge-with-vlans.yaml
+++ b/environments/net-single-nic-linux-bridge-with-vlans.yaml
@@ -11,9 +11,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/controller.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
-
-parameter_defaults:
- # This sets 'external_network_bridge' in l3_agent.ini to an empty string
- # so that external networks act like provider bridge networks (they
- # will plug into br-int instead of br-ex)
- NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-single-nic-with-vlans-no-external.yaml b/environments/net-single-nic-with-vlans-no-external.yaml
index c7594b32..65d38137 100644
--- a/environments/net-single-nic-with-vlans-no-external.yaml
+++ b/environments/net-single-nic-with-vlans-no-external.yaml
@@ -19,7 +19,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller-no-external.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml
-
-# NOTE: with no external interface we should be able to use the
-# default Neutron l3_agent.ini setting for the external bridge (br-ex)
-# i.e. No need to set: NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-single-nic-with-vlans-v6.yaml b/environments/net-single-nic-with-vlans-v6.yaml
index 8210bad3..966e5fe9 100644
--- a/environments/net-single-nic-with-vlans-v6.yaml
+++ b/environments/net-single-nic-with-vlans-v6.yaml
@@ -11,9 +11,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller-v6.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml
-
-parameter_defaults:
- # This sets 'external_network_bridge' in l3_agent.ini to an empty string
- # so that external networks act like provider bridge networks (they
- # will plug into br-int instead of br-ex)
- NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-single-nic-with-vlans.yaml b/environments/net-single-nic-with-vlans.yaml
index a61bc6e1..b087b3e4 100644
--- a/environments/net-single-nic-with-vlans.yaml
+++ b/environments/net-single-nic-with-vlans.yaml
@@ -11,9 +11,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml
-
-parameter_defaults:
- # This sets 'external_network_bridge' in l3_agent.ini to an empty string
- # so that external networks act like provider bridge networks (they
- # will plug into br-int instead of br-ex)
- NeutronExternalNetworkBridge: "''"
diff --git a/environments/network-environment.yaml b/environments/network-environment.yaml
index 796eb806..210b6b03 100644
--- a/environments/network-environment.yaml
+++ b/environments/network-environment.yaml
@@ -48,8 +48,6 @@ parameter_defaults:
# ManagementInterfaceDefaultRoute: 10.0.1.1
# Define the DNS servers (maximum 2) for the overcloud nodes
DnsServers: ["8.8.8.8","8.8.4.4"]
- # Set to empty string to enable multiple external networks or VLANs
- NeutronExternalNetworkBridge: "''"
# List of Neutron network types for tenant networks (will be used in order)
NeutronNetworkType: 'vxlan,vlan'
# The tunnel type for the tenant network (vxlan or gre). Set to '' to disable tunneling.
diff --git a/environments/neutron-bgpvpn.yaml b/environments/neutron-bgpvpn.yaml
new file mode 100644
index 00000000..dc6c1454
--- /dev/null
+++ b/environments/neutron-bgpvpn.yaml
@@ -0,0 +1,16 @@
+# A Heat environment file that can be used to deploy Neutron BGPVPN service
+#
+# Currently there are four types of service providers for Neutron BGPVPN.
+# The default option is a dummy driver that only enables the API.
+# To enable another backend, replace the value of BgpvpnServiceProvider:
+#
+# - Bagpipe: BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe.BaGPipeBGPVPNDriver:default
+# - OpenContrail: BGPVPN:OpenContrail:networking_bgpvpn.neutron.services.service_drivers.opencontrail.opencontrail.OpenContrailBGPVPNDriver:default
+# - OpenDaylight: BGPVPN:OpenDaylight:networking_bgpvpn.neutron.services.service_drivers.opendaylight.odl.OpenDaylightBgpvpnDriver:default
+# - Nuage: BGPVPN:Nuage:nuage_neutron.bgpvpn.services.service_drivers.driver.NuageBGPVPNDriver:default
+resource_registry:
+ OS::TripleO::Services::NeutronBgpvpnApi: ../puppet/services/neutron-bgpvpn-api.yaml
+
+parameter_defaults:
+ NeutronServicePlugins: 'networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
+ BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
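To select one of the non-default backends listed in the header comment, BgpvpnServiceProvider can be overridden; a sketch using the OpenDaylight entry quoted above (the file name is arbitrary):

# Hypothetical override: switch from the dummy driver to the OpenDaylight BGPVPN driver.
cat > bgpvpn-opendaylight.yaml <<'EOF'
parameter_defaults:
  BgpvpnServiceProvider: 'BGPVPN:OpenDaylight:networking_bgpvpn.neutron.services.service_drivers.opendaylight.odl.OpenDaylightBgpvpnDriver:default'
EOF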
diff --git a/environments/neutron-opendaylight.yaml b/environments/neutron-opendaylight.yaml
index e08b2b27..ed7292b7 100644
--- a/environments/neutron-opendaylight.yaml
+++ b/environments/neutron-opendaylight.yaml
@@ -10,4 +10,4 @@ resource_registry:
parameter_defaults:
NeutronEnableForceMetadata: true
NeutronMechanismDrivers: 'opendaylight_v2'
- NeutronServicePlugins: 'odl-router_v2'
+ NeutronServicePlugins: 'odl-router_v2,trunk'
diff --git a/environments/services-docker/ironic.yaml b/environments/services-docker/ironic.yaml
new file mode 100644
index 00000000..e927ecb3
--- /dev/null
+++ b/environments/services-docker/ironic.yaml
@@ -0,0 +1,5 @@
+resource_registry:
+ OS::TripleO::Services::IronicApi: ../../docker/services/ironic-api.yaml
+ OS::TripleO::Services::IronicConductor: ../../docker/services/ironic-conductor.yaml
+ OS::TripleO::Services::IronicPxe: ../../docker/services/ironic-pxe.yaml
+ OS::TripleO::Services::NovaIronic: ../../docker/services/nova-ironic.yaml
diff --git a/environments/services-docker/mistral.yaml b/environments/services-docker/mistral.yaml
new file mode 100644
index 00000000..a215d2a0
--- /dev/null
+++ b/environments/services-docker/mistral.yaml
@@ -0,0 +1,4 @@
+resource_registry:
+ OS::TripleO::Services::MistralEngine: ../../docker/services/mistral-engine.yaml
+ OS::TripleO::Services::MistralApi: ../../docker/services/mistral-api.yaml
+ OS::TripleO::Services::MistralExecutor: ../../docker/services/mistral-executor.yaml
diff --git a/environments/services-docker/zaqar.yaml b/environments/services-docker/zaqar.yaml
new file mode 100644
index 00000000..ca0b3b15
--- /dev/null
+++ b/environments/services-docker/zaqar.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
diff --git a/environments/services/panko.yaml b/environments/services/panko.yaml
deleted file mode 100644
index 28bf99f6..00000000
--- a/environments/services/panko.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-resource_registry:
- OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml
diff --git a/environments/services/vpp.yaml b/environments/services/vpp.yaml
new file mode 100644
index 00000000..9bad70f8
--- /dev/null
+++ b/environments/services/vpp.yaml
@@ -0,0 +1,9 @@
+resource_registry:
+ OS::TripleO::Services::Vpp: ../../puppet/services/vpp.yaml
+
+#parameter_defaults:
+  # VPP main thread core pinning
+  #VppCpuMainCore: '1'
+
+  # List of cores for VPP worker thread pinning
+  #VppCpuCorelistWorkers: ['3','4']
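Uncommenting the pinning parameters would look roughly like this; the core numbers are just the example values from the comments and would need to match the host's CPU layout:

# Hypothetical VPP core-pinning override, reusing the example values above.
cat > vpp-pinning.yaml <<'EOF'
parameter_defaults:
  VppCpuMainCore: '1'
  VppCpuCorelistWorkers: ['3','4']
EOF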
diff --git a/environments/undercloud.yaml b/environments/undercloud.yaml
index 0fd01920..2540fbe5 100644
--- a/environments/undercloud.yaml
+++ b/environments/undercloud.yaml
@@ -16,3 +16,4 @@ parameter_defaults:
NeutronDhcpAgentsPerNetwork: 2
HeatConvergenceEngine: false
HeatMaxResourcesPerStack: -1
+ HeatMaxJsonBodySize: 2097152
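For reference, the new limit is 2 MiB expressed in bytes:

# 2 * 1024 * 1024 = 2097152, i.e. HeatMaxJsonBodySize allows request bodies up to 2 MiB.
echo $((2 * 1024 * 1024))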
diff --git a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
index c388358a..24557517 100644
--- a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
@@ -21,3 +21,7 @@ parameter_defaults:
rhel_reg_type: ""
rhel_reg_method: ""
rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.1-rpms"
+ rhel_reg_http_proxy_host: ""
+ rhel_reg_http_proxy_port: ""
+ rhel_reg_http_proxy_username: ""
+ rhel_reg_http_proxy_password: ""
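A sketch of what these new defaults might look like once filled in; every value below is a placeholder, not taken from this change:

# Hypothetical proxy settings for RHEL registration (placeholder values only).
cat > rhel-registration-proxy.yaml <<'EOF'
parameter_defaults:
  rhel_reg_http_proxy_host: "proxy.example.com"
  rhel_reg_http_proxy_port: "8080"
  rhel_reg_http_proxy_username: "proxyuser"
  rhel_reg_http_proxy_password: "proxypass"
EOF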
diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
index fdf2e957..e8316c53 100644
--- a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
@@ -45,6 +45,14 @@ parameters:
type: string
rhel_reg_sat_repo:
type: string
+ rhel_reg_http_proxy_host:
+ type: string
+ rhel_reg_http_proxy_port:
+ type: string
+ rhel_reg_http_proxy_username:
+ type: string
+ rhel_reg_http_proxy_password:
+ type: string
resources:
@@ -71,6 +79,10 @@ resources:
- name: REG_TYPE
- name: REG_METHOD
- name: REG_SAT_REPO
+ - name: REG_HTTP_PROXY_HOST
+ - name: REG_HTTP_PROXY_PORT
+ - name: REG_HTTP_PROXY_USERNAME
+ - name: REG_HTTP_PROXY_PASSWORD
config: {get_file: scripts/rhel-registration}
RHELRegistrationDeployment:
@@ -99,6 +111,10 @@ resources:
REG_TYPE: {get_param: rhel_reg_type}
REG_METHOD: {get_param: rhel_reg_method}
REG_SAT_REPO: {get_param: rhel_reg_sat_repo}
+ REG_HTTP_PROXY_HOST: {get_param: rhel_reg_http_proxy_host}
+ REG_HTTP_PROXY_PORT: {get_param: rhel_reg_http_proxy_port}
+ REG_HTTP_PROXY_USERNAME: {get_param: rhel_reg_http_proxy_username}
+ REG_HTTP_PROXY_PASSWORD: {get_param: rhel_reg_http_proxy_password}
RHELUnregistration:
type: OS::Heat::SoftwareConfig
diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
index 2650a967..0d0fa3f1 100644
--- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
+++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
@@ -11,12 +11,20 @@ if [ -e $OK ] ; then
exit 0
fi
-retryCount=0
+retry_max_count=10
opts=
+config_opts=
attach_opts=
sat5_opts=
repos="repos --enable rhel-7-server-rpms"
satellite_repo=${REG_SAT_REPO}
+proxy_host=
+proxy_port=
+proxy_url=
+proxy_username=
+proxy_password=
+
+# Process the REG_* variables
if [ -n "${REG_AUTO_ATTACH:-}" ]; then
opts="$opts --auto-attach"
@@ -97,28 +105,93 @@ if [ -n "${REG_TYPE:-}" ]; then
opts="$opts --type=$REG_TYPE"
fi
-function retry() {
- if [[ $retryCount < 3 ]]; then
- $@
- if ! [[ $? == 0 ]]; then
- retryCount=$(echo $retryCount + 1 | bc)
- echo "WARN: Failed to connect when running '$@', retrying..."
- retry $@
+# Proxy settings (host and port)
+if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then
+ proxy_host="${REG_HTTP_PROXY_HOST}"
+fi
+
+if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then
+ proxy_port="${REG_HTTP_PROXY_PORT}"
+fi
+
+# Proxy settings (user and password)
+if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then
+ proxy_username="${REG_HTTP_PROXY_USERNAME}"
+fi
+
+if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then
+ proxy_password="${REG_HTTP_PROXY_PASSWORD}"
+fi
+
+# Sanity Checks for proxy host/port/user/password
+if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then
+ if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then
+ # Good, both values are non-empty
+ proxy_url="http://${proxy_host}:${proxy_port}"
+ config_opts="--server.proxy_hostname=${proxy_host} --server.proxy_port=${proxy_port}"
+ sat5_opts="${sat5_opts} --proxy_hostname=${proxy_url}"
+ echo "RHSM Proxy set to: ${proxy_url}"
+ if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then
+ if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then
+ config_opts="${config_opts} --server.proxy_user=${proxy_username} --server.proxy_password=${proxy_password}"
+ sat5_opts="${sat5_opts} --proxyUser=${proxy_username} --proxyPassword=${proxy_password}"
+ else
+ echo "Warning: REG_HTTP_PROXY_PASSWORD cannot be null with non-empty REG_HTTP_PROXY_USERNAME! Skipping..."
+ proxy_username= ; proxy_password=
+ fi
+ else
+ if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then
+ echo "Warning: REG_HTTP_PROXY_USERNAME cannot be null with non-empty REG_HTTP_PROXY_PASSWORD! Skipping..."
+ proxy_username= ; proxy_password=
+ fi
+ fi
else
- retryCount=0
+ echo "Warning: REG_HTTP_PROXY_PORT cannot be null with non-empty REG_HTTP_PROXY_HOST! Skipping..."
+ proxy_host= ; proxy_port= ; proxy_url= ; proxy_username= ; proxy_password=
fi
- else
- echo "ERROR: Failed to connect after 3 attempts when running '$@'"
- exit 1
- fi
+else
+ if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then
+ echo "Warning: REG_HTTP_PROXY_HOST cannot be null with non-empty REG_HTTP_PROXY_PORT! Skipping..."
+ proxy_host= ; proxy_port= ; proxy_url= ; proxy_username= ; proxy_password=
+ fi
+fi
+
+function retry() {
+ # Inhibit -e since we want to retry without exiting..
+ set +e
+ # Retry delay (seconds)
+ retry_delay=2.0
+ retry_count=0
+ mycli="$@"
+ while [ $retry_count -lt ${retry_max_count} ]
+ do
+ echo "INFO: Sleeping ${retry_delay} ..."
+ sleep ${retry_delay}
+ echo "INFO: Executing '${mycli}' ..."
+ ${mycli}
+ if [ $? -eq 0 ]; then
+ echo "INFO: Ran '${mycli}' successfully, not retrying..."
+ break
+ else
+ echo "WARN: Failed to connect when running '${mycli}', retrying (attempt #$retry_count )..."
+ retry_count=$(echo $retry_count + 1 | bc)
+ fi
+ done
+
+ if [ $retry_count -ge ${retry_max_count} ]; then
+ echo "ERROR: Failed to connect after ${retry_max_count} attempts when running '${mycli}'"
+ exit 1
+ fi
+ # Re-enable -e when exiting retry()
+ set -e
}
function detect_satellite_version {
ping_api=$REG_SAT_URL/katello/api/ping
- if curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
+ if curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
echo Satellite 6 detected at $REG_SAT_URL
satellite_version=6
- elif curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+ elif curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
echo Satellite 5 detected at $REG_SAT_URL
satellite_version=5
else
@@ -127,20 +200,41 @@ function detect_satellite_version {
fi
}
+if [ "x${proxy_url}" != "x" ];then
+ # Configure subscription-manager for the proxy
+ subscription-manager config ${config_opts}
+
+ # Configure yum for the proxy
+ sed -i -e '/^proxy=/d' /etc/yum.conf
+ echo "proxy=${proxy_url}" >> /etc/yum.conf
+
+ # Handle optional username/password
+ if [ -n "${proxy_username}" ]; then
+ sed -i -e '/^proxy_username=/d' /etc/yum.conf
+ echo "proxy_username=${proxy_username}" >> /etc/yum.conf
+ fi
+
+ if [ -n "${proxy_password}" ]; then
+ sed -i -e '/^proxy_password=/d' /etc/yum.conf
+ echo "proxy_password=${proxy_password}" >> /etc/yum.conf
+ fi
+
+fi
+
case "${REG_METHOD:-}" in
portal)
retry subscription-manager register $opts
if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then
retry subscription-manager attach $attach_opts
fi
- retry subscription-manager repos --disable '*'
+ retry subscription-manager repos --disable='*'
retry subscription-manager $repos
;;
satellite)
detect_satellite_version
if [ "$satellite_version" = "6" ]; then
repos="$repos --enable ${satellite_repo}"
- curl --retry 3 --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+ curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
retry subscription-manager register $opts
retry subscription-manager $repos
@@ -149,7 +243,7 @@ case "${REG_METHOD:-}" in
retry subscription-manager repos --disable ${satellite_repo}
else
pushd /usr/share/rhn/
- curl --retry 3 --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+ curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
popd
retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
fi
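With a proxy host and port set, the proxy block above effectively boils down to something like the following; the host and port are placeholders, while the subscription-manager and yum settings are the ones the script itself applies:

# Illustrative outcome for a hypothetical proxy at proxy.example.com:8080
subscription-manager config --server.proxy_hostname=proxy.example.com --server.proxy_port=8080
echo "proxy=http://proxy.example.com:8080" >> /etc/yum.conf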
diff --git a/extraconfig/tasks/aodh_data_migration.sh b/extraconfig/tasks/aodh_data_migration.sh
deleted file mode 100644
index d4c29673..00000000
--- a/extraconfig/tasks/aodh_data_migration.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# This delivers the aodh data migration script to be invoked as part of the tripleo
-# major upgrade workflow to migrate all the alarm data from mongodb to mysql.
-# This needs to run post controller node upgrades so new aodh mysql db configured and
-# running.
-#
-set -eu
-
-#Get existing mongodb connection
-MONGO_DB_CONNECTION="$(crudini --get /etc/ceilometer/ceilometer.conf database connection)"
-
-# Get the aodh database string from hiera data
-MYSQL_DB_CONNECTION="$(crudini --get /etc/aodh/aodh.conf database connection)"
-
-#Run migration
-/usr/bin/aodh-data-migration --nosql-conn $MONGO_DB_CONNECTION --sql-conn $MYSQL_DB_CONNECTION
-
-
diff --git a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
deleted file mode 100644
index cf5d7a84..00000000
--- a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-heat_template_version: ocata
-
-description: >
- Software-config for ceilometer configuration under httpd during upgrades
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-resources:
- CeilometerWsgiMitakaNewtonPreUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- config:
- get_file: mitaka_to_newton_ceilometer_wsgi_upgrade.pp
-
- CeilometerWsgiMitakaNewtonUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - "#!/bin/bash\n\nset -e\n\n"
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - "disable_standalone_ceilometer_api\n\n"
-
- CeilometerWsgiMitakaNewtonPostUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: |
- #!/bin/bash
- set -e
- /usr/bin/systemctl reload httpd
-
- CeilometerWsgiMitakaNewtonPreUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- name: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonPreUpgradeConfig}
-
- CeilometerWsgiMitakaNewtonUpgradeConfigDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
- properties:
- name: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonUpgradeConfig}
-
- CeilometerWsgiMitakaNewtonPostUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
- properties:
- name: CeilometerWsgiMitakaNewtonPostUpgradeDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonPostUpgradeConfig}
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
deleted file mode 100755
index 8bdff5e7..00000000
--- a/extraconfig/tasks/major_upgrade_check.sh
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-check_cluster()
-{
- if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
- echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
- exit 1
- fi
-}
-
-check_pcsd()
-{
- if pcs status 2>&1 | grep -E 'Offline'; then
- echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
- exit 1
- fi
-}
-
-mysql_need_update()
-{
- # Shall we upgrade mysql data directory during the stack upgrade?
- if [ "$mariadb_do_major_upgrade" = "auto" ]; then
- ret=$(is_mysql_upgrade_needed)
- if [ $ret = "1" ]; then
- DO_MYSQL_UPGRADE=1
- else
- DO_MYSQL_UPGRADE=0
- fi
- echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
- elif [ "$mariadb_do_major_upgrade" = "no" ]; then
- DO_MYSQL_UPGRADE=0
- else
- DO_MYSQL_UPGRADE=1
- fi
-}
-
-check_disk_for_mysql_dump()
-{
- # Where to backup current database if mysql need to be upgraded
- MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
- MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
- # Spare disk ratio for extra safety
- MYSQL_BACKUP_SIZE_RATIO=1.2
-
- mysql_need_update
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-
- if [ -d "$MYSQL_BACKUP_DIR" ]; then
- echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
- exit 1
- fi
- mkdir "$MYSQL_BACKUP_DIR"
- if [ $? -ne 0 ]; then
- echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
- exit 1
- fi
-
- # the /root/.my.cnf is needed because we set the mysql root
- # password from liberty onwards
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- # While not ideal, this step allows us to calculate exactly how much space the dump
- # will need. Our main goal here is avoiding any chance of corruption due to disk space
- # exhaustion
- backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
- database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
- free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
- # we need at least space for a new mysql database + dump of the existing one,
- # times a small factor for additional safety room
- # note: bash doesn't do floating point math or floats in if statements,
- # so use python to apply the ratio and cast it back to integer
- required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
- if [ $required_space -ge $free_space ]; then
- echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
- exit 1
- fi
- fi
- fi
-}
-
-check_python_rpm()
-{
- # If for some reason rpm-python are missing we want to error out early enough
- if ! rpm -q rpm-python &> /dev/null; then
- echo_error "ERROR: upgrade cannot start without rpm-python installed"
- exit 1
- fi
-}
-
-check_clean_cluster()
-{
- if pcs status | grep -q Stopped:; then
- echo_error "ERROR: upgrade cannot start with stopped resources on the cluster. Make sure that all the resources are up and running."
- exit 1
- fi
-}
-
-check_galera_root_password()
-{
- # BZ: 1357112
- if [ ! -e /root/.my.cnf ]; then
- echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
- exit 1
- fi
-}
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
deleted file mode 100755
index 080831ab..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-check_cluster
-check_pcsd
-if [[ -n $(is_bootstrap_node) ]]; then
- check_clean_cluster
-fi
-check_python_rpm
-check_galera_root_password
-check_disk_for_mysql_dump
-
-# We want to disable fencing during the cluster --stop as it might fence
-# nodes where a service fails to stop, which could be fatal during an upgrade
-# procedure. So we remember the stonith state. If it was enabled we reenable it
-# at the end of this script
-if [[ -n $(is_bootstrap_node) ]]; then
- STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
- # We create this empty file if stonith was set to true so we can reenable stonith in step2
- rm -f /var/tmp/stonith-true
- if [ $STONITH_STATE == "true" ]; then
- touch /var/tmp/stonith-true
- fi
- pcs property set stonith-enabled=false
-fi
-
-# Migrate to HA NG and fix up rabbitmq queues
-# We fix up the rabbitmq ha queues after the migration because it will
-# restart the rabbitmq resource. Doing it after the migration means no other
-# services will be restart as there are no other constraints
-if [[ -n $(is_bootstrap_node) ]]; then
- migrate_full_to_ng_ha
- rabbitmq_newton_ocata_upgrade
-fi
-
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
deleted file mode 100755
index 6bfe1239..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-cluster_sync_timeout=1800
-
-# After migrating the cluster to HA-NG the services not under pacemaker's control
-# are still up and running. We need to stop them explicitely otherwise during the yum
-# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
-# is going to take a long time because rabbit is down. By having the service stopped
-# systemctl try-restart is a noop
-
-for service in $(services_to_migrate); do
- manage_systemd_service stop "${service%%-clone}"
- # So the reason for not reusing check_resource_systemd is that
- # I have observed systemctl is-active returning unknown with at least
- # one service that was stopped (See LP 1627254)
- timeout=600
- tstart=$(date +%s)
- tend=$(( $tstart + $timeout ))
- check_interval=3
- while (( $(date +%s) < $tend )); do
- if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
- echo "$service still active, sleeping $check_interval seconds."
- sleep $check_interval
- else
- # we do not care if it is inactive, unknown or failed as long as it is
- # not running
- break
- fi
-
- done
-done
-
-# In case the mysql package is updated, the database on disk must be
-# upgraded as well. This typically needs to happen during major
-# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
-#
-# Because in-place upgrades are not supported across 2+ major versions
-# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
-# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
-#
-# The default is to determine automatically if upgrade is needed based
-# on mysql package versionning, but this can be overriden manually
-# to support specific upgrade scenario
-
-# Calling this function will set the DO_MYSQL_UPGRADE variable which is used
-# later
-mysql_need_update
-
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
- cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
- fi
-
- pcs resource disable redis
- check_resource redis stopped 600
- pcs resource disable rabbitmq
- check_resource rabbitmq stopped 600
- pcs resource disable galera
- check_resource galera stopped 600
- pcs resource disable openstack-cinder-volume
- check_resource openstack-cinder-volume stopped 600
- # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
- pcs resource disable $vip
- check_resource $vip stopped 60
- done
- pcs cluster stop --all
-fi
-
-
-# Swift isn't controlled by pacemaker
-systemctl_swift stop
-
-tstart=$(date +%s)
-while systemctl is-active pacemaker; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_sync_timeout )) ; then
- echo_error "ERROR: cluster shutdown timed out"
- exit 1
- fi
-done
-
-# The reason we do an sql dump *and* we move the old dir out of
-# the way is because it gives us an extra level of safety in case
-# something goes wrong during the upgrade. Once the restore is
-# successful we go ahead and remove it. If the directory exists
-# we bail out as it means the upgrade process had issues in the last
-# run.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
- echo_error "ERROR: mysql backup dir already exist"
- exit 1
- fi
- mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-special_case_ovs_upgrade_if_needed
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y -q update
-
-# We need to ensure at least those two configuration settings, otherwise
-# mariadb 10.1+ won't activate galera replication.
-# wsrep_cluster_address must only be set though, its value does not
-# matter because it's overriden by the galera resource agent.
-cat >> /etc/my.cnf.d/galera.cnf <<EOF
-[mysqld]
-wsrep_on = ON
-wsrep_cluster_address = gcomm://localhost
-EOF
-
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- # Scripts run via heat have no HOME variable set and this confuses
- # mysqladmin
- export HOME=/root
-
- mkdir /var/lib/mysql || /bin/true
- chown mysql:mysql /var/lib/mysql
- chmod 0755 /var/lib/mysql
- restorecon -R /var/lib/mysql/
- mysql_install_db --datadir=/var/lib/mysql --user=mysql
- chown -R mysql:mysql /var/lib/mysql/
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- mysqld_safe --wsrep-new-cluster &
- # We have a populated /root/.my.cnf with root/password here so
- # we need to temporarily rename it because the newly created
- # db is empty and no root password is set
- mv /root/.my.cnf /root/.my.cnf.temporary
- timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
- mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
- mv /root/.my.cnf.temporary /root/.my.cnf
- mysqladmin -u root shutdown
- # The import was successful so we may remove the folder
- rm -r "$MYSQL_BACKUP_DIR"
- fi
-fi
-
-# If we reached here without error we can safely blow away the origin
-# mysql dir from every controller
-
-# TODO: What if the upgrade fails on the bootstrap node, but not on
-# this controller. Data may be lost.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Let's reset the stonith back to true if it was true, before starting the cluster
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ -f /var/tmp/stonith-true ]; then
- pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
- fi
- rm -f /var/tmp/stonith-true
-fi
-
-# Pin messages sent to compute nodes to kilo, these will be upgraded later
-crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
-# LP: 1615035, required only for M/N upgrade.
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
-# LP: 1627450, required only for M/N upgrade
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
-
-crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
-
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
deleted file mode 100755
index a3cbd945..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-cluster_form_timeout=600
-cluster_settle_timeout=1800
-galera_sync_timeout=600
-
-if [[ -n $(is_bootstrap_node) ]]; then
- pcs cluster start --all
-
- tstart=$(date +%s)
- while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_form_timeout )) ; then
- echo_error "ERROR: timed out forming the cluster"
- exit 1
- fi
- done
-
- if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
- echo_error "ERROR: timed out waiting for cluster to finish transition"
- exit 1
- fi
-
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
- pcs resource enable $vip
- check_resource_pacemaker $vip started 60
- done
-fi
-
-start_or_enable_service galera
-check_resource galera started 600
-start_or_enable_service redis
-check_resource redis started 600
-# We need mongod which is now a systemd service up and running before calling
-# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
-# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
-# we should be good.
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm
-systemctl start mongod
-check_resource mongod started 600
-
-if [[ -n $(is_bootstrap_node) ]]; then
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo_error "ERROR galera sync timed out"
- exit 1
- fi
- done
-
- # Run all the db syncs
- # TODO: check if this can be triggered in puppet and removed from here
- ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types
- cinder-manage db sync
- glance-manage db_sync
- heat-manage --config-file /etc/heat/heat.conf db_sync
- keystone-manage db_sync
- neutron-db-manage upgrade heads
- nova-manage db sync
- nova-manage api_db sync
- nova-manage db online_data_migrations
- sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
-fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
deleted file mode 100755
index d2cb9553..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-start_or_enable_service rabbitmq
-check_resource rabbitmq started 600
-start_or_enable_service redis
-check_resource redis started 600
-start_or_enable_service openstack-cinder-volume
-check_resource openstack-cinder-volume started 600
-
-# start httpd so keystone is available for gnocchi
-# upgrade to run.
-systemctl start httpd
-
-# Swift isn't controled by pacemaker
-systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
deleted file mode 100755
index fa95f1f8..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-if [[ -n $(is_bootstrap_node) ]]; then
- # run gnocchi upgrade
- gnocchi-upgrade
-fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
deleted file mode 100755
index d569084d..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-# We need to start the systemd services we explicitely stopped at step _1.sh
-# FIXME: Should we let puppet during the convergence step do the service enabling or
-# should we add it here?
-services=$(services_to_migrate)
-if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
- services=${services%%openstack-sahara*}
-fi
-for service in $services; do
- manage_systemd_service start "${service%%-clone}"
- check_resource_systemd "${service%%-clone}" started 600
-done
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
deleted file mode 100644
index 8c91027d..00000000
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ /dev/null
@@ -1,179 +0,0 @@
-heat_template_version: ocata
-description: 'Upgrade for Pacemaker deployments'
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-
- UpgradeLevelNovaCompute:
- type: string
- description: Nova Compute upgrade level
- default: ''
- MySqlMajorUpgrade:
- type: string
- description: Can be auto,yes,no and influences if the major upgrade should do or detect an automatic mysql upgrade
- constraints:
- - allowed_values: ['auto', 'yes', 'no']
- default: 'auto'
- IgnoreCephUpgradeWarnings:
- type: boolean
- default: false
- description: If enabled, Ceph upgrade will be forced even though cluster or PGs status is not clean
- KeepSaharaServicesOnUpgrade:
- type: boolean
- default: true
- description: Whether to keep Sahara services when upgrading controller nodes from mitaka to newton
-
-
-resources:
- # TODO(jistr): for Mitaka->Newton upgrades and further we can use
- # map_merge with input_values instead of feeding params into scripts
- # via str_replace on bash snippets
-
- ControllerPacemakerUpgradeConfig_Step1:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - str_replace:
- template: |
- #!/bin/bash
- mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
- params:
- MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_check.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_1.sh
-
- ControllerPacemakerUpgradeDeployment_Step1:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step2:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - str_replace:
- template: |
- #!/bin/bash
- mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
- params:
- MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_check.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_2.sh
-
- ControllerPacemakerUpgradeDeployment_Step2:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step1
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step3:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_3.sh
-
- ControllerPacemakerUpgradeDeployment_Step3:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step2
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step4:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_4.sh
-
- ControllerPacemakerUpgradeDeployment_Step4:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step3
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step4}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step5:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_5.sh
-
- ControllerPacemakerUpgradeDeployment_Step5:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step4
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step6:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- keep_sahara_services_on_upgrade='KEEP_SAHARA_SERVICES_ON_UPGRADE'
- params:
- KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_6.sh
-
- ControllerPacemakerUpgradeDeployment_Step6:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step5
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step6}
- input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
deleted file mode 100644
index ae22a1e7..00000000
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/bin/bash
-
-# Special pieces of upgrade migration logic go into this
-# file. E.g. Pacemaker cluster transitions for existing deployments,
-# matching changes to overcloud_controller_pacemaker.pp (Puppet
-# handles deployment, this file handles migrations).
-#
-# This file shouldn't execute any action on its own, all logic should
-# be wrapped into bash functions. Upgrade scripts will source this
-# file and call the functions defined in this file where appropriate.
-#
-# The migration functions should be idempotent. If the migration has
-# been already applied, it should be possible to call the function
-# again without damaging the deployment or failing the upgrade.
-
-# If the major version of mysql is going to change after the major
-# upgrade, the database must be upgraded on disk to avoid failures
-# due to internal incompatibilities between major mysql versions
-# https://bugs.launchpad.net/tripleo/+bug/1587449
-# This function detects whether a database upgrade is required
-# after a mysql package upgrade. It returns 0 when no major upgrade
-# has to take place, 1 otherwise.
-function is_mysql_upgrade_needed {
- # The name of the package which provides mysql might differ
- # after the upgrade. Consider the generic package name, which
- # should capture the major version change (e.g. 5.5 -> 10.1)
- local name="mariadb"
- local output
- local ret
- set +e
- output=$(yum -q check-update $name)
- ret=$?
- set -e
- if [ $ret -ne 100 ]; then
- # no updates so we exit
- echo "0"
- return
- fi
-
- local currentepoch=$(rpm -q --qf "%{epoch}" $name)
- local currentversion=$(rpm -q --qf "%{version}" $name | cut -d. -f-2)
- local currentrelease=$(rpm -q --qf "%{release}" $name)
- local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name)
- local newepoch=$(echo "$newoutput" | awk '{ print $1 }')
- local newversion=$(echo "$newoutput" | awk '{ print $2 }' | cut -d. -f-2)
- local newrelease=$(echo "$newoutput" | awk '{ print $3 }')
-
- # With this we trigger the dump restore/path if we change either epoch or
- # version in the package If only the release tag changes we do not do it
- # FIXME: we could refine this by trying to parse the mariadb version
- # into X.Y.Z and trigger the update only if X and/or Y change.
- output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc")
- if [ "$output" != "-1" ]; then
- echo "0"
- return
- fi
- echo "1"
-}
-
-# This function returns the list of services to be migrated away from pacemaker
-# and to systemd. The reason to have these services in a separate function is because
-# this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2}
-# and in the function to migrate the cluster from full HA to HA NG
-function services_to_migrate {
- # The following PCMK resources the ones the we are going to delete
- PCMK_RESOURCE_TODELETE="
- httpd-clone
- memcached-clone
- mongod-clone
- neutron-dhcp-agent-clone
- neutron-l3-agent-clone
- neutron-metadata-agent-clone
- neutron-netns-cleanup-clone
- neutron-openvswitch-agent-clone
- neutron-ovs-cleanup-clone
- neutron-server-clone
- openstack-aodh-evaluator-clone
- openstack-aodh-listener-clone
- openstack-aodh-notifier-clone
- openstack-ceilometer-central-clone
- openstack-ceilometer-collector-clone
- openstack-ceilometer-notification-clone
- openstack-cinder-api-clone
- openstack-cinder-scheduler-clone
- openstack-glance-api-clone
- openstack-gnocchi-metricd-clone
- openstack-gnocchi-statsd-clone
- openstack-heat-api-cfn-clone
- openstack-heat-api-clone
- openstack-heat-api-cloudwatch-clone
- openstack-heat-engine-clone
- openstack-nova-api-clone
- openstack-nova-conductor-clone
- openstack-nova-consoleauth-clone
- openstack-nova-novncproxy-clone
- openstack-nova-scheduler-clone
- openstack-sahara-api-clone
- openstack-sahara-engine-clone
- "
- echo $PCMK_RESOURCE_TODELETE
-}
-
-# This function will migrate a mitaka system where all the resources are managed
-# via pacemaker to a newton setup where only a few services will be managed by pacemaker
-# On a high-level it will operate as follows:
-# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
-# during the conversion
-# 2. Remove all the colocation constraints and then the ordering constraints, except the
-# ones related to haproxy/VIPs which exist in Newton as well
-# 3. Take the cluster out of maintenance-mode
-# 4. Remove all the resources that won't be managed by pacemaker in newton. The
-# outcome will be
-# that they are stopped and removed from pacemakers control
-# 5. Do a resource cleanup to make sure the cluster is in a clean state
-function migrate_full_to_ng_ha {
- if [[ -n $(pcmk_running) ]]; then
- pcs property set maintenance-mode=true
-
- # First we go through all the colocation constraints (except the ones
- # we want to keep, i.e. the haproxy/ip ones) and we remove those
- COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
- for constraint in $COL_CONSTRAINTS; do
- log_debug "Deleting colocation constraint $constraint from CIB"
- pcs constraint remove "$constraint"
- done
-
- # Now we kill all the ordering constraints (except the haproxy/ip ones)
- ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
- for constraint in $ORD_CONSTRAINTS; do
- log_debug "Deleting ordering constraint $constraint from CIB"
- pcs constraint remove "$constraint"
- done
- # At this stage all the pacemaker resources are removed from the CIB.
- # Once we remove the maintenance-mode those systemd resources will keep
- # on running. They shall be systemd enabled via the puppet converge
- # step later on
- pcs property set maintenance-mode=false
-
- # At this stage there are no constraints whatsoever except the haproxy/ip ones
- # which we want to keep. We now disable and then delete each resource
- # that will move to systemd.
- # We want the systemd resources be stopped before doing "yum update",
- # that way "systemctl try-restart <service>" is no-op because the
- # service was down already
- PCS_STATUS_OUTPUT="$(pcs status)"
- for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
- if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
- log_debug "Deleting $resource from the CIB"
- if ! pcs resource disable "$resource" --wait=600; then
- echo_error "ERROR: resource $resource failed to be disabled"
- exit 1
- fi
- pcs resource delete --force "$resource"
- else
- log_debug "Service $resource not found as a pacemaker resource, not trying to delete."
- fi
- done
-
- # We need to do a pcs resource cleanup here + crm_resource --wait to
- # make sure the cluster is in a clean state before we stop everything,
- # upgrade and restart everything
- pcs resource cleanup
- # We are making sure here that the cluster is stable before proceeding
- if ! timeout -k 10 600 crm_resource --wait; then
- echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
- exit 1
- fi
- fi
-}
-
-function disable_standalone_ceilometer_api {
- if [[ -n $(is_bootstrap_node) ]]; then
- if [[ -n $(is_pacemaker_managed openstack-ceilometer-api) ]]; then
- # Disable pacemaker resources for ceilometer-api
- manage_pacemaker_service disable openstack-ceilometer-api
- check_resource_pacemaker openstack-ceilometer-api stopped 600
- pcs resource delete openstack-ceilometer-api --wait=600
- fi
- fi
-}
-
-
-# This function will make sure that the rabbitmq ha policies are converted from mitaka to newton
-# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}"
-# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}"
-# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances)
-# Note that changing an attribute like this makes the rabbitmq resource restart
-function rabbitmq_newton_ocata_upgrade {
- if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then
- # Number of controller is obtained by counting how many hostnames we
- # have in controller_node_names hiera key
- nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
- nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
- if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
- echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues"
- exit 1
- fi
- pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
- fi
-}
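The removed migration helpers boil down to a few decisions worth keeping in mind when reading the new per-role upgrade flow: `yum -q check-update` exits 100 when an update is pending, the mysql dump/restore path only fires when the epoch or the X.Y part of the mariadb version changes (the release tag is ignored), and the Ocata rabbitmq policy uses CEIL(N/2) HA queues. A minimal Python sketch of those two calculations (function names are illustrative; rpm.labelCompare is the same binding the deleted script invoked inline):

    import rpm  # the same python rpm bindings the deleted script called inline

    def mysql_major_upgrade_needed(cur_epoch, cur_version, new_epoch, new_version):
        # Compare only the epoch and the X.Y part of the version; the release
        # tag is deliberately ignored, matching the removed bash logic.
        def major_minor(version):
            return ".".join(version.split(".")[:2])
        current = (cur_epoch, major_minor(cur_version), None)
        candidate = (new_epoch, major_minor(new_version), None)
        return rpm.labelCompare(current, candidate) == -1  # current < candidate

    def ha_queue_count(n_controllers):
        # rabbitmq_newton_ocata_upgrade sets "ha-mode":"exactly" with CEIL(N/2) queues.
        return (n_controllers + 1) // 2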
diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
deleted file mode 100644
index 45933fb7..00000000
--- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-heat_template_version: ocata
-
-description: >
- Software-config for performing aodh data migration
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-resources:
-
- AodhMysqlMigrationScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: aodh_data_migration.sh}
-
- AodhMysqlMigrationScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: AodhMysqlMigrationScriptConfig}
- input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
deleted file mode 100644
index a8d43663..00000000
--- a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This puppet manifest is to be used only during a Mitaka->Newton upgrade
-# It configures ceilometer to be run under httpd but it makes sure to not
-# restart any services. This snippet needs to be called before init as a
-# pre upgrade migration.
-
-Service <|
- tag == 'ceilometer-service'
-|> {
- hasrestart => true,
- restart => '/bin/true',
- start => '/bin/true',
- stop => '/bin/true',
-}
-
-if $::hostname == downcase(hiera('bootstrap_nodeid')) {
- $pacemaker_master = true
- $sync_db = true
-} else {
- $pacemaker_master = false
- $sync_db = false
-}
-
-include ::tripleo::packages
-
-
-if str2bool(hiera('mongodb::server::ipv6', false)) {
- $mongo_node_ips_with_port_prefixed = prefix(hiera('mongodb_node_ips'), '[')
- $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
-} else {
- $mongo_node_ips_with_port = suffix(hiera('mongodb_node_ips'), ':27017')
-}
-$mongodb_replset = hiera('mongodb::server::replset')
-$mongo_node_string = join($mongo_node_ips_with_port, ',')
-$database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
-
-$rabbit_hosts = hiera('rabbitmq_node_ips', undef)
-$rabbit_port = hiera('ceilometer::rabbit_port', 5672)
-$rabbit_endpoints = suffix(any2array(normalize_ip_for_uri($rabbit_hosts)), ":${rabbit_port}")
-
-class { '::ceilometer' :
- rabbit_hosts => $rabbit_endpoints,
-}
-
-class {'::ceilometer::db':
- database_connection => $database_connection,
-}
-
-if $sync_db {
- include ::ceilometer::db::sync
-}
-
-include ::ceilometer::config
-
-class { '::ceilometer::api':
- enabled => true,
- service_name => 'httpd',
- keystone_password => hiera('ceilometer::keystone::auth::password'),
- identity_uri => hiera('ceilometer::keystone::authtoken::auth_url'),
- auth_uri => hiera('ceilometer::keystone::authtoken::auth_uri'),
- keystone_tenant => hiera('ceilometer::keystone::authtoken::project_name'),
-}
-
-class { '::apache' :
- service_enable => false,
- service_manage => true,
- service_restart => '/bin/true',
- purge_configs => false,
- purge_vhost_dir => false,
-}
-
-# To ensure existing ports are not overridden
-class { '::aodh::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-class { '::gnocchi::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-
-class { '::keystone::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-class { '::ceilometer::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
diff --git a/extraconfig/tasks/run_puppet.sh b/extraconfig/tasks/run_puppet.sh
new file mode 100755
index 00000000..b7771e33
--- /dev/null
+++ b/extraconfig/tasks/run_puppet.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+function run_puppet {
+ set -eux
+ local manifest="$1"
+ local role="$2"
+ local step="$3"
+ local rc=0
+
+ export FACTER_deploy_config_name="${role}Deployment_Step${step}"
+ if [ -e "/etc/puppet/hieradata/heat_config_${FACTER_deploy_config_name}.json" ]; then
+ set +e
+ puppet apply --detailed-exitcodes "${manifest}"
+ rc=$?
+ echo "puppet apply exited with exit code $rc"
+ else
+ echo "Step${step} doesn't exist for ${role}"
+ fi
+ set -e
+
+ if [ $rc -eq 2 -o $rc -eq 0 ]; then
+ set +xu
+ return 0
+ fi
+ set +xu
+ return $rc
+}
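puppet apply --detailed-exitcodes returns 0 when nothing changed and 2 when changes were applied successfully, so the helper treats both as success and propagates anything else (4 and 6 indicate failures). A rough Python equivalent of the same logic, as a sketch only; the shell helper above is what the upgrade script actually sources:

    import os
    import subprocess

    def run_puppet(manifest, role, step):
        """Apply one converge step; 0 and 2 both count as success with --detailed-exitcodes."""
        config_name = "%sDeployment_Step%s" % (role, step)
        hieradata = "/etc/puppet/hieradata/heat_config_%s.json" % config_name
        if not os.path.exists(hieradata):
            return 0  # no hieradata rendered for this role/step, treat as a no-op
        env = dict(os.environ, FACTER_deploy_config_name=config_name)
        rc = subprocess.call(["puppet", "apply", "--detailed-exitcodes", manifest], env=env)
        return 0 if rc in (0, 2) else rc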
diff --git a/extraconfig/tasks/swift-ring-deploy.yaml b/extraconfig/tasks/swift-ring-deploy.yaml
new file mode 100644
index 00000000..d17f78ae
--- /dev/null
+++ b/extraconfig/tasks/swift-ring-deploy.yaml
@@ -0,0 +1,31 @@
+heat_template_version: ocata
+
+parameters:
+ servers:
+ type: json
+ SwiftRingGetTempurl:
+ default: ''
+ description: A temporary Swift URL to download rings from.
+ type: string
+
+resources:
+ SwiftRingDeployConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: swift_ring_get_tempurl
+ config: |
+ #!/bin/sh
+ pushd /
+ curl --insecure --silent "${swift_ring_get_tempurl}" | tar xz || true
+ popd
+
+ SwiftRingDeploy:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ name: SwiftRingDeploy
+ config: {get_resource: SwiftRingDeployConfig}
+ servers: {get_param: servers}
+ input_values:
+ swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl}
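The deploy config simply pulls a tarball of ring files from a Swift tempurl and unpacks it from /, ignoring failures so that a fresh deployment with no uploaded rings still succeeds. An illustrative Python equivalent (assumes the requests library; the template itself ships only the shell snippet above):

    import io
    import tarfile

    import requests

    def fetch_rings(get_tempurl):
        # Equivalent of: curl --insecure --silent "$url" | tar xz  (run from /).
        # Failures are tolerated, mirroring the "|| true" for fresh deployments
        # where no rings have been uploaded yet.
        try:
            resp = requests.get(get_tempurl, verify=False, timeout=60)
            with tarfile.open(fileobj=io.BytesIO(resp.content), mode="r:gz") as tar:
                tar.extractall("/")  # ring files land back under /etc/swift
        except (requests.RequestException, tarfile.TarError):
            pass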
diff --git a/extraconfig/tasks/swift-ring-update.yaml b/extraconfig/tasks/swift-ring-update.yaml
new file mode 100644
index 00000000..440c6883
--- /dev/null
+++ b/extraconfig/tasks/swift-ring-update.yaml
@@ -0,0 +1,42 @@
+heat_template_version: ocata
+
+parameters:
+ servers:
+ type: json
+ SwiftRingPutTempurl:
+ default: ''
+ description: A temporary Swift URL to upload rings to.
+ type: string
+
+resources:
+ SwiftRingUpdateConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: swift_ring_put_tempurl
+ config: |
+ #!/bin/sh
+ TMP_DATA=$(mktemp -d)
+ function cleanup {
+ rm -Rf "$TMP_DATA"
+ }
+ trap cleanup EXIT
+ # sanity check in case rings are not consistent within cluster
+ swift-recon --md5 | grep -q "doesn't match" && exit 1
+ pushd ${TMP_DATA}
+ tar -cvzf swift-rings.tar.gz /etc/swift/*.builder /etc/swift/*.ring.gz /etc/swift/backups/*
+ resp=`curl --insecure --silent -X PUT "${swift_ring_put_tempurl}" --write-out "%{http_code}" --data-binary @swift-rings.tar.gz`
+ popd
+ if [ "$resp" != "201" ]; then
+ exit 1
+ fi
+
+ SwiftRingUpdate:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ name: SwiftRingUpdate
+ config: {get_resource: SwiftRingUpdateConfig}
+ servers: {get_param: servers}
+ input_values:
+ swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl}
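The update config is the other half of the round-trip: it refuses to publish rings whose md5s differ across the cluster, tars up the builders, ring files and backups, and PUTs the archive to the tempurl, expecting HTTP 201. A Python sketch of the same flow (again assuming requests; error handling is deliberately simplified):

    import glob
    import subprocess
    import tarfile
    import tempfile

    import requests

    def upload_rings(put_tempurl):
        # Mirror the swift-recon sanity check before publishing anything.
        recon = subprocess.check_output(["swift-recon", "--md5"]).decode()
        if "doesn't match" in recon:
            raise RuntimeError("swift ring md5s differ across the cluster")
        paths = (glob.glob("/etc/swift/*.builder")
                 + glob.glob("/etc/swift/*.ring.gz")
                 + glob.glob("/etc/swift/backups/*"))
        with tempfile.NamedTemporaryFile(suffix=".tar.gz") as tmp:
            with tarfile.open(tmp.name, "w:gz") as tar:
                for path in paths:
                    tar.add(path)
            with open(tmp.name, "rb") as payload:
                resp = requests.put(put_tempurl, data=payload, verify=False)
        if resp.status_code != 201:
            raise RuntimeError("ring upload failed with HTTP %s" % resp.status_code)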
diff --git a/extraconfig/tasks/tripleo_upgrade_node.sh b/extraconfig/tasks/tripleo_upgrade_node.sh
index 27ba33a8..24211ab0 100644
--- a/extraconfig/tasks/tripleo_upgrade_node.sh
+++ b/extraconfig/tasks/tripleo_upgrade_node.sh
@@ -15,9 +15,13 @@ cat > $UPGRADE_SCRIPT << ENDOFCAT
set -eu
NOVA_COMPUTE=""
-if systemctl show 'openstack-nova-compute' --property ActiveState | grep '\bactive\b'; then
+if hiera -c /etc/puppet/hiera.yaml service_names | grep nova_compute ; then
NOVA_COMPUTE="true"
fi
+SWIFT_STORAGE=""
+if hiera -c /etc/puppet/hiera.yaml service_names | grep swift_storage ; then
+ SWIFT_STORAGE="true"
+fi
DEBUG="true"
SCRIPT_NAME=$(basename $0)
@@ -30,22 +34,33 @@ if [[ -n \$NOVA_COMPUTE ]]; then
crudini --set /etc/nova/nova.conf upgrade_levels compute auto
fi
-$(declare -f special_case_ovs_upgrade_if_needed)
-special_case_ovs_upgrade_if_needed
+if [[ -n \$SWIFT_STORAGE ]]; then
+ systemctl_swift stop
+fi
-yum -y install python-zaqarclient # needed for os-collect-config
-systemctl_swift stop
yum -y update
-systemctl_swift start
+if [[ -n \$SWIFT_STORAGE ]]; then
+ systemctl_swift start
+fi
# Due to bug#1640177 we need to restart compute agent
if [[ -n \$NOVA_COMPUTE ]]; then
- echo "Restarting openstack ceilometer agent compute"
+ log_debug "Restarting openstack ceilometer agent compute"
systemctl restart openstack-ceilometer-compute
fi
-# Apply puppet manifest to converge just right after the \$ROLE upgrade
-puppet apply /root/${ROLE}_puppet_config.pp
+# Apply puppet manifest to converge just right after the ${ROLE} upgrade
+$(declare -f run_puppet)
+for step in 1 2 3 4 5 6; do
+ log_debug "Running puppet step \$step for ${ROLE}"
+ if ! run_puppet /root/${ROLE}_puppet_config.pp ${ROLE} \${step}; then
+ log_debug "Puppet failure at step \${step}"
+ exit 1
+ fi
+ log_debug "Completed puppet step \$step"
+done
+
+log_debug "TripleO upgrade run completed."
ENDOFCAT
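The per-node upgrade script now decides what to do from the composable service list in hiera rather than from systemd unit state: swift services are stopped only on nodes that actually carry swift_storage, and after the package update the six puppet converge steps run in order through run_puppet, aborting on the first failure. A simplified sketch of the service gate and the stop/update/start ordering (unit and role names are illustrative; systemctl_swift actually wraps several swift units):

    import subprocess

    def role_has_service(name):
        # Key off the composable service list in hiera instead of probing
        # systemd unit state, as the generated script now does.
        out = subprocess.check_output(
            ["hiera", "-c", "/etc/puppet/hiera.yaml", "service_names"])
        return name in out.decode()

    if role_has_service("swift_storage"):
        subprocess.call(["systemctl", "stop", "openstack-swift-object"])
    subprocess.check_call(["yum", "-y", "update"])
    if role_has_service("swift_storage"):
        subprocess.call(["systemctl", "start", "openstack-swift-object"])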
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index c66dd01f..3bf72f14 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -47,7 +47,10 @@ if [[ "$list_updates" == "" ]]; then
exit 0
fi
-pacemaker_status=$(systemctl is-active pacemaker || :)
+pacemaker_status=""
+if hiera -c /etc/puppet/hiera.yaml service_names | grep -q pacemaker; then
+ pacemaker_status=$(systemctl is-active pacemaker)
+fi
# Fix the redis/rabbit resource start/stop timeouts. See https://bugs.launchpad.net/tripleo/+bug/1633455
# and https://bugs.launchpad.net/tripleo/+bug/1634851
@@ -67,9 +70,6 @@ if [[ "$pacemaker_status" == "active" && \
fi
fi
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-special_case_ovs_upgrade_if_needed
-
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Pacemaker running, stopping cluster node and doing full package update"
node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
@@ -97,17 +97,6 @@ return_code=$?
echo "$result"
echo "yum return code: $return_code"
-# Writes any changes caused by alterations to os-net-config and bounces the
-# interfaces *before* restarting the cluster.
-os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
-RETVAL=$?
-if [[ $RETVAL == 2 ]]; then
- echo "os-net-config: interface configuration files updated successfully"
-elif [[ $RETVAL != 0 ]]; then
- echo "ERROR: os-net-config configuration failed"
- exit $RETVAL
-fi
-
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Starting cluster node"
pcs cluster start
diff --git a/firstboot/os-net-config-mappings.yaml b/firstboot/os-net-config-mappings.yaml
index d7e0c524..f82bc19f 100644
--- a/firstboot/os-net-config-mappings.yaml
+++ b/firstboot/os-net-config-mappings.yaml
@@ -9,8 +9,28 @@ description: >
nic1: "00:c8:7c:e6:f0:2e"
node2:
nic1: "00:18:7d:99:0c:b6"
- This will result in the first nodeN entry where a mac matches a
- local device being written as a mapping file for os-net-config in
+ node3:
+ dmiString: 'system-uuid'
+ id: 'A8C85861-1B16-4803-8689-AFC62984F8F6'
+ nic1: em3
+ # Dell PowerEdge
+ nodegroup1:
+ dmiString: "system-product-name"
+ id: "PowerEdge R630"
+ nic1: em3
+ nic2: em1
+ nic3: em2
+ # Cisco UCS B200-M4"
+ nodegroup2:
+ dmiString: "system-product-name"
+ id: "UCSB-B200-M4"
+ nic1: enp7s0
+ nic2: enp6s0
+
+ This will result in the first node* entry where either:
+ a) a mac matches a local device
+ or b) a DMI String matches the specified id
+ being written as a mapping file for os-net-config in
/etc/os-net-config/mapping.yaml
parameters:
@@ -47,15 +67,36 @@ resources:
echo '$node_lookup' | python -c "
import json
import sys
+ import copy
+ from subprocess import PIPE, Popen
import yaml
+
+ def write_mapping_file(interface_mapping):
+ with open('/etc/os-net-config/mapping.yaml', 'w') as f:
+ yaml.safe_dump(interface_mapping, f, default_flow_style=False)
+
input = sys.stdin.readline() or '{}'
data = json.loads(input)
for node in data:
+ interface_mapping = {'interface_mapping':
+ copy.deepcopy(data[node])}
+ if 'dmiString' in interface_mapping['interface_mapping']:
+ del interface_mapping['interface_mapping']['dmiString']
+ if 'id' in interface_mapping['interface_mapping']:
+ del interface_mapping['interface_mapping']['id']
+ # Match on mac addresses first
if any(x in '$eth_addr'.split(',') for x in data[node].values()):
- interface_mapping = {'interface_mapping': data[node]}
- with open('/etc/os-net-config/mapping.yaml', 'w') as f:
- yaml.safe_dump(interface_mapping, f, default_flow_style=False)
+ write_mapping_file(interface_mapping)
break
+ # If data contain dmiString and id keys, try to match node(group)
+ if 'dmiString' in data[node] and 'id' in data[node]:
+ ps = Popen([ 'dmidecode',
+ '--string', data[node].get('dmiString') ],
+ stdout=PIPE)
+ out, err = ps.communicate()
+ if data[node].get('id') == out.rstrip():
+ write_mapping_file(interface_mapping)
+ break
"
params:
$node_lookup: {get_param: NetConfigDataLookup}
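NetConfigDataLookup entries can now match either by a local MAC address (tried first) or by a DMI string, the first matching node or nodegroup entry wins, and the dmiString/id keys are stripped before the interface mapping is written out. The DMI comparison is essentially the following (a sketch; the entry shown is the PowerEdge example from the description above):

    from subprocess import PIPE, Popen

    def dmi_matches(entry):
        # A node(group) entry matches when `dmidecode --string <dmiString>`
        # returns exactly the configured id.
        ps = Popen(["dmidecode", "--string", entry["dmiString"]], stdout=PIPE)
        out, _ = ps.communicate()
        return entry["id"] == out.decode().rstrip()

    dmi_matches({"dmiString": "system-product-name", "id": "PowerEdge R630"})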
diff --git a/network/ports/net_ip_list_map.yaml b/network/ports/net_ip_list_map.yaml
index 5782bbe9..83d875e8 100644
--- a/network/ports/net_ip_list_map.yaml
+++ b/network/ports/net_ip_list_map.yaml
@@ -35,6 +35,32 @@ parameters:
default: []
type: json
+ InternalApiNetName:
+ default: internal_api
+ description: The name of the internal API network.
+ type: string
+ ExternalNetName:
+ default: external
+ description: The name of the external network.
+ type: string
+ ManagementNetName:
+ default: management
+ description: The name of the management network.
+ type: string
+ StorageNetName:
+ default: storage
+ description: The name of the storage network.
+ type: string
+ StorageMgmtNetName:
+ default: storage_mgmt
+ description: The name of the Storage management network.
+ type: string
+ TenantNetName:
+ default: tenant
+ description: The name of the tenant network.
+ type: string
+
+
resources:
# This adds the extra "services" on for keystone
# so that keystone_admin_api_network and
@@ -58,19 +84,33 @@ resources:
- keystone_admin_api
- keystone_public_api
+ NetIpMapValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_replace:
+ - ctlplane: {get_param: ControlPlaneIpList}
+ external: {get_param: ExternalIpList}
+ internal_api: {get_param: InternalApiIpList}
+ storage: {get_param: StorageIpList}
+ storage_mgmt: {get_param: StorageMgmtIpList}
+ tenant: {get_param: TenantIpList}
+ management: {get_param: ManagementIpList}
+ - keys:
+ external: {get_param: ExternalNetName}
+ internal_api: {get_param: InternalApiNetName}
+ storage: {get_param: StorageNetName}
+ storage_mgmt: {get_param: StorageMgmtNetName}
+ tenant: {get_param: TenantNetName}
+ management: {get_param: ManagementNetName}
+
outputs:
net_ip_map:
description: >
A Hash containing a mapping of network names to assigned lists
of IP addresses.
- value:
- ctlplane: {get_param: ControlPlaneIpList}
- external: {get_param: ExternalIpList}
- internal_api: {get_param: InternalApiIpList}
- storage: {get_param: StorageIpList}
- storage_mgmt: {get_param: StorageMgmtIpList}
- tenant: {get_param: TenantIpList}
- management: {get_param: ManagementIpList}
+ value: {get_attr: [NetIpMapValue, value]}
service_ips:
description: >
Map of enabled services to a list of their IP addresses
@@ -92,14 +132,7 @@ outputs:
for_each:
SERVICE: {get_attr: [EnabledServicesValue, value]}
- values: {get_param: ServiceNetMap}
- - values:
- ctlplane: {get_param: ControlPlaneIpList}
- external: {get_param: ExternalIpList}
- internal_api: {get_param: InternalApiIpList}
- storage: {get_param: StorageIpList}
- storage_mgmt: {get_param: StorageMgmtIpList}
- tenant: {get_param: TenantIpList}
- management: {get_param: ManagementIpList}
+ - values: {get_attr: [NetIpMapValue, value]}
service_hostnames:
description: >
Map of enabled services to a list of hostnames where they're running
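The NetIpMapValue resource uses Heat's map_replace with a "keys" argument, which renames keys of the first map according to the second and leaves everything else (notably ctlplane) untouched, so operators can override the *NetName parameters without breaking the hiera data derived from them. In plain Python terms (the custom network name and addresses are illustrative values):

    def map_replace_keys(values, key_map):
        # map_replace with "keys": rename matching keys, keep the rest as-is.
        return {key_map.get(k, k): v for k, v in values.items()}

    map_replace_keys(
        {"ctlplane": ["192.0.2.10"], "internal_api": ["172.16.2.10"]},
        {"internal_api": "internal_api_cloud_0"})  # e.g. InternalApiNetName override
    # -> {"ctlplane": ["192.0.2.10"], "internal_api_cloud_0": ["172.16.2.10"]}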
diff --git a/network/ports/net_ip_map.yaml b/network/ports/net_ip_map.yaml
index c8cf733f..c974d72e 100644
--- a/network/ports/net_ip_map.yaml
+++ b/network/ports/net_ip_map.yaml
@@ -69,35 +69,136 @@ parameters:
type: string
description: IP address with brackets in case of IPv6
+ InternalApiNetName:
+ default: internal_api
+ description: The name of the internal API network.
+ type: string
+ ExternalNetName:
+ default: external
+ description: The name of the external network.
+ type: string
+ ManagementNetName:
+ default: management
+ description: The name of the management network.
+ type: string
+ StorageNetName:
+ default: storage
+ description: The name of the storage network.
+ type: string
+ StorageMgmtNetName:
+ default: storage_mgmt
+ description: The name of the Storage management network.
+ type: string
+ TenantNetName:
+ default: tenant
+ description: The name of the tenant network.
+ type: string
+
+resources:
+
+ NetIpMapValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_replace:
+ - ctlplane: {get_param: ControlPlaneIp}
+ external: {get_param: ExternalIp}
+ internal_api: {get_param: InternalApiIp}
+ storage: {get_param: StorageIp}
+ storage_mgmt: {get_param: StorageMgmtIp}
+ tenant: {get_param: TenantIp}
+ management: {get_param: ManagementIp}
+ ctlplane_subnet:
+ list_join:
+ - ''
+ - - {get_param: ControlPlaneIp}
+ - '/'
+ - {get_param: ControlPlaneSubnetCidr}
+ external_subnet: {get_param: ExternalIpSubnet}
+ internal_api_subnet: {get_param: InternalApiIpSubnet}
+ storage_subnet: {get_param: StorageIpSubnet}
+ storage_mgmt_subnet: {get_param: StorageMgmtIpSubnet}
+ tenant_subnet: {get_param: TenantIpSubnet}
+ management_subnet: {get_param: ManagementIpSubnet}
+ ctlplane_uri: {get_param: ControlPlaneIp}
+ external_uri: {get_param: ExternalIpUri}
+ internal_api_uri: {get_param: InternalApiIpUri}
+ storage_uri: {get_param: StorageIpUri}
+ storage_mgmt_uri: {get_param: StorageMgmtIpUri}
+ tenant_uri: {get_param: TenantIpUri}
+ management_uri: {get_param: ManagementIpUri}
+ - keys:
+ external: {get_param: ExternalNetName}
+ internal_api: {get_param: InternalApiNetName}
+ storage: {get_param: StorageNetName}
+ storage_mgmt: {get_param: StorageMgmtNetName}
+ tenant: {get_param: TenantNetName}
+ management: {get_param: ManagementNetName}
+ external_subnet:
+ str_replace:
+ template: NAME_subnet
+ params:
+ NAME: {get_param: ExternalNetName}
+ internal_api_subnet:
+ str_replace:
+ template: NAME_subnet
+ params:
+ NAME: {get_param: InternalApiNetName}
+ storage_subnet:
+ str_replace:
+ template: NAME_subnet
+ params:
+ NAME: {get_param: StorageNetName}
+ storage_mgmt_subnet:
+ str_replace:
+ template: NAME_subnet
+ params:
+ NAME: {get_param: StorageMgmtNetName}
+ tenant_subnet:
+ str_replace:
+ template: NAME_subnet
+ params:
+ NAME: {get_param: TenantNetName}
+ management_subnet:
+ str_replace:
+ template: NAME_subnet
+ params:
+ NAME: {get_param: ManagementNetName}
+ external_uri:
+ str_replace:
+ template: NAME_uri
+ params:
+ NAME: {get_param: ExternalNetName}
+ internal_api_uri:
+ str_replace:
+ template: NAME_uri
+ params:
+ NAME: {get_param: InternalApiNetName}
+ storage_uri:
+ str_replace:
+ template: NAME_uri
+ params:
+ NAME: {get_param: StorageNetName}
+ storage_mgmt_uri:
+ str_replace:
+ template: NAME_uri
+ params:
+ NAME: {get_param: StorageMgmtNetName}
+ tenant_uri:
+ str_replace:
+ template: NAME_uri
+ params:
+ NAME: {get_param: TenantNetName}
+ management_uri:
+ str_replace:
+ template: NAME_uri
+ params:
+ NAME: {get_param: ManagementNetName}
+
outputs:
net_ip_map:
description: >
A Hash containing a mapping of network names to assigned IPs
for a specific machine.
- value:
- ctlplane: {get_param: ControlPlaneIp}
- external: {get_param: ExternalIp}
- internal_api: {get_param: InternalApiIp}
- storage: {get_param: StorageIp}
- storage_mgmt: {get_param: StorageMgmtIp}
- tenant: {get_param: TenantIp}
- management: {get_param: ManagementIp}
- ctlplane_subnet:
- list_join:
- - ''
- - - {get_param: ControlPlaneIp}
- - '/'
- - {get_param: ControlPlaneSubnetCidr}
- external_subnet: {get_param: ExternalIpSubnet}
- internal_api_subnet: {get_param: InternalApiIpSubnet}
- storage_subnet: {get_param: StorageIpSubnet}
- storage_mgmt_subnet: {get_param: StorageMgmtIpSubnet}
- tenant_subnet: {get_param: TenantIpSubnet}
- management_subnet: {get_param: ManagementIpSubnet}
- ctlplane_uri: {get_param: ControlPlaneIp}
- external_uri: {get_param: ExternalIpUri}
- internal_api_uri: {get_param: InternalApiIpUri}
- storage_uri: {get_param: StorageIpUri}
- storage_mgmt_uri: {get_param: StorageMgmtIpUri}
- tenant_uri: {get_param: TenantIpUri}
- management_uri: {get_param: ManagementIpUri}
+ value: {get_attr: [NetIpMapValue, value]}
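net_ip_map.yaml applies the same key renaming, but here the replacement map also has to cover the derived _subnet and _uri entries, which is what the str_replace templates build. Conceptually (a sketch; the override value is illustrative):

    def derived_key_map(net_name_overrides):
        # Besides the plain network key, the _subnet and _uri entries must
        # follow the override, which is what the str_replace templates express.
        keys = {}
        for default, actual in net_name_overrides.items():
            keys[default] = actual
            keys[default + "_subnet"] = actual + "_subnet"
            keys[default + "_uri"] = actual + "_uri"
        return keys

    derived_key_map({"internal_api": "internal_api_cloud_0"})
    # {'internal_api': 'internal_api_cloud_0',
    #  'internal_api_subnet': 'internal_api_cloud_0_subnet',
    #  'internal_api_uri': 'internal_api_cloud_0_uri'}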
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index a6b32ddb..d9eaf8df 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -2,7 +2,7 @@ resource_registry:
OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment
OS::TripleO::PostDeploySteps: puppet/post.yaml
- OS::TripleO::PostUpgradeSteps: puppet/post.yaml
+ OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
OS::TripleO::DefaultPasswords: default_passwords.yaml
@@ -11,6 +11,9 @@ resource_registry:
OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
+ OS::TripleO::Tasks::SwiftRingDeploy: extraconfig/tasks/swift-ring-deploy.yaml
+ OS::TripleO::Tasks::SwiftRingUpdate: extraconfig/tasks/swift-ring-update.yaml
+
{% for role in roles %}
OS::TripleO::{{role.name}}::PreNetworkConfig: OS::Heat::None
OS::TripleO::{{role.name}}PostDeploySteps: puppet/post.yaml
@@ -66,8 +69,10 @@ resource_registry:
OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml
OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
- OS::TripleO::Tasks::ControllerPrePuppet: OS::Heat::None
- OS::TripleO::Tasks::ControllerPostPuppet: OS::Heat::None
+{% for role in roles %}
+ OS::TripleO::Tasks::{{role.name}}PrePuppet: OS::Heat::None
+ OS::TripleO::Tasks::{{role.name}}PostPuppet: OS::Heat::None
+{% endfor %}
# "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
# phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
@@ -140,6 +145,7 @@ resource_registry:
OS::TripleO::Services::Kernel: puppet/services/kernel.yaml
OS::TripleO::Services::MySQL: puppet/services/database/mysql.yaml
OS::TripleO::Services::MySQLTLS: OS::Heat::None
+ OS::TripleO::Services::NeutronBgpvpnApi: OS::Heat::None
OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml
OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml
@@ -164,6 +170,7 @@ resource_registry:
OS::TripleO::Services::PacemakerRemote: OS::Heat::None
OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None
OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml
+ OS::TripleO::Services::RabbitMQTLS: OS::Heat::None
OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml
OS::TripleO::Services::HAProxyPublicTLS: OS::Heat::None
OS::TripleO::Services::HAProxyInternalTLS: OS::Heat::None
@@ -218,7 +225,7 @@ resource_registry:
OS::TripleO::Services::AodhEvaluator: puppet/services/aodh-evaluator.yaml
OS::TripleO::Services::AodhNotifier: puppet/services/aodh-notifier.yaml
OS::TripleO::Services::AodhListener: puppet/services/aodh-listener.yaml
- OS::TripleO::Services::PankoApi: OS::Heat::None
+ OS::TripleO::Services::PankoApi: puppet/services/panko-api.yaml
OS::TripleO::Services::MistralEngine: OS::Heat::None
OS::TripleO::Services::MistralApi: OS::Heat::None
OS::TripleO::Services::MistralExecutor: OS::Heat::None
@@ -242,6 +249,10 @@ resource_registry:
OS::TripleO::Services::OctaviaHealthManager: OS::Heat::None
OS::TripleO::Services::OctaviaHousekeeping: OS::Heat::None
OS::TripleO::Services::OctaviaWorker: OS::Heat::None
+ OS::TripleO::Services::MySQLClient: puppet/services/database/mysql-client.yaml
+ OS::TripleO::Services::Vpp: OS::Heat::None
+ OS::TripleO::Services::Docker: OS::Heat::None
+ OS::TripleO::Services::CertmongerUser: OS::Heat::None
parameter_defaults:
EnablePackageInstall: false
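The Controller-only Pre/PostPuppet task hooks are replaced by a jinja loop that emits a pair of no-op mappings for every role in roles_data.yaml, so custom roles get the same hook points. Rendering that fragment for two roles would produce something like the following (a sketch using the jinja2 library directly; the real rendering happens in the TripleO template processing):

    import jinja2

    fragment = jinja2.Template(
        "{% for role in roles %}"
        "  OS::TripleO::Tasks::{{role.name}}PrePuppet: OS::Heat::None\n"
        "  OS::TripleO::Tasks::{{role.name}}PostPuppet: OS::Heat::None\n"
        "{% endfor %}")
    print(fragment.render(roles=[{"name": "Controller"}, {"name": "Compute"}]))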
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index 5b2ca4a2..7b780112 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -243,6 +243,12 @@ resources:
NetIpMap: {get_attr: [VipMap, net_ip_map]}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
+ EndpointMapData:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value: {get_attr: [EndpointMap, endpoint_map]}
+
# Jinja loop for Role in roles_data.yaml
{% for role in roles %}
# Resources generated for {{role.name}} Role
@@ -255,6 +261,18 @@ resources:
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
+ # Filter any null/None service_names which may be present due to mapping
+ # of services to OS::Heat::None
+ {{role.name}}ServiceNames:
+ type: OS::Heat::Value
+ depends_on: {{role.name}}ServiceChain
+ properties:
+ type: comma_delimited_list
+ value:
+ yaql:
+ expression: coalesce($.data, []).where($ != null)
+ data: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+
{{role.name}}HostsDeployment:
type: OS::Heat::StructuredDeployments
properties:
@@ -305,7 +323,7 @@ resources:
StorageMgmtIpList: {get_attr: [{{role.name}}, storage_mgmt_ip_address]}
TenantIpList: {get_attr: [{{role.name}}, tenant_ip_address]}
ManagementIpList: {get_attr: [{{role.name}}, management_ip_address]}
- EnabledServices: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ EnabledServices: {get_attr: [{{role.name}}ServiceNames, value]}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
ServiceHostnameList: {get_attr: [{{role.name}}, hostname]}
NetworkHostnameMap:
@@ -361,8 +379,8 @@ resources:
{% for r in roles %}
- get_attr: [{{r.name}}ServiceChain, role_data, service_config_settings]
{% endfor %}
- services: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
- ServiceNames: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ services: {get_attr: [{{role.name}}ServiceNames, value]}
+ ServiceNames: {get_attr: [{{role.name}}ServiceNames, value]}
MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChain, role_data, monitoring_subscriptions]}
ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChain, role_data, service_metadata_settings]}
{% endfor %}
@@ -396,7 +414,7 @@ resources:
list_join:
- ','
{% for role in roles %}
- - {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ - {get_attr: [{{role.name}}ServiceNames, value]}
{% endfor %}
logging_groups:
yaql:
@@ -561,12 +579,24 @@ resources:
PingTestIps:
list_join:
- ' '
- - - {get_attr: [{{primary_role_name}}, resource.0.external_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.internal_api_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.storage_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.storage_mgmt_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.tenant_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.management_ip_address]}
+ - - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [Controller, external_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [Controller, internal_api_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [Controller, storage_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [Controller, storage_mgmt_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [Controller, tenant_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [Controller, management_ip_address]}
UpdateWorkflow:
type: OS::TripleO::Tasks::UpdateWorkflow
@@ -622,7 +652,7 @@ outputs:
value: true
KeystoneURL:
description: URL for the Overcloud Keystone service
- value: {get_attr: [EndpointMap, endpoint_map, KeystonePublic, uri]}
+ value: {get_attr: [EndpointMapData, value, KeystonePublic, uri]}
KeystoneAdminVip:
description: Keystone Admin VIP endpoint
value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
@@ -631,7 +661,7 @@ outputs:
Mapping of the resources with the needed info for their endpoints.
This includes the protocol used, the IP, port and also a full
representation of the URI.
- value: {get_attr: [EndpointMap, endpoint_map]}
+ value: {get_attr: [EndpointMapData, value]}
HostsEntry:
description: |
The content that should be appended to your /etc/hosts if you want to get
@@ -646,7 +676,7 @@ outputs:
description: The services enabled on each role
value:
{% for role in roles %}
- {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ {{role.name}}: {get_attr: [{{role.name}}ServiceNames, value]}
{% endfor %}
RoleData:
description: The configuration data associated with each role
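Filtering the per-role service_names through coalesce($.data, []).where($ != null) drops the null entries produced by services mapped to OS::Heat::None, and coalesce($.data, []).first(null) lets the ping test tolerate a Controller role scaled to zero nodes. The same semantics expressed in Python (sketch only):

    def enabled_services(service_names):
        # yaql: coalesce($.data, []).where($ != null)
        return [s for s in (service_names or []) if s is not None]

    def first_or_none(addresses):
        # yaql: coalesce($.data, []).first(null)
        return (addresses or [None])[0]

    enabled_services(["keystone", None, "nova_api"])  # ['keystone', 'nova_api']
    first_or_none([])                                 # None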
diff --git a/plan-environment.yaml b/plan-environment.yaml
new file mode 100644
index 00000000..f629eff3
--- /dev/null
+++ b/plan-environment.yaml
@@ -0,0 +1,5 @@
+version: 1.0
+
+template: overcloud.yaml
+environments:
+- path: overcloud-resource-registry-puppet.yaml
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index ee43c3a5..7edf17af 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -68,6 +68,32 @@ parameters:
type: boolean
default: false
+ InternalApiNetName:
+ default: internal_api
+ description: The name of the internal API network.
+ type: string
+ ExternalNetName:
+ default: external
+ description: The name of the external network.
+ type: string
+ ManagementNetName:
+ default: management
+ description: The name of the management network.
+ type: string
+ StorageNetName:
+ default: storage
+ description: The name of the storage network.
+ type: string
+ StorageMgmtNetName:
+ default: storage_mgmt
+ description: The name of the Storage management network.
+ type: string
+ TenantNetName:
+ default: tenant
+ description: The name of the tenant network.
+ type: string
+
+
resources:
allNodesConfigImpl:
@@ -175,21 +201,21 @@ resources:
get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_admin_api_network]}]
keystone_public_api_vip:
get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_public_api_network]}]
- public_virtual_ip: {get_param: [NetVipMap, external]}
+ public_virtual_ip: {get_param: [NetVipMap, {get_param: ExternalNetName}]}
controller_virtual_ip: {get_param: [NetVipMap, ctlplane]}
- internal_api_virtual_ip: {get_param: [NetVipMap, internal_api]}
- storage_virtual_ip: {get_param: [NetVipMap, storage]}
- storage_mgmt_virtual_ip: {get_param: [NetVipMap, storage_mgmt]}
+ internal_api_virtual_ip: {get_param: [NetVipMap, {get_param: InternalApiNetName}]}
+ storage_virtual_ip: {get_param: [NetVipMap, {get_param: StorageNetName}]}
+ storage_mgmt_virtual_ip: {get_param: [NetVipMap, {get_param: StorageMgmtNetName}]}
redis_vip: {get_param: RedisVirtualIP}
# public_virtual_ip and controller_virtual_ip are needed in
# both HAproxy & keepalived.
- tripleo::haproxy::public_virtual_ip: {get_param: [NetVipMap, external]}
+ tripleo::haproxy::public_virtual_ip: {get_param: [NetVipMap, {get_param: ExternalNetName}]}
tripleo::haproxy::controller_virtual_ip: {get_param: [NetVipMap, ctlplane]}
- tripleo::keepalived::public_virtual_ip: {get_param: [NetVipMap, external]}
+ tripleo::keepalived::public_virtual_ip: {get_param: [NetVipMap, {get_param: ExternalNetName}]}
tripleo::keepalived::controller_virtual_ip: {get_param: [NetVipMap, ctlplane]}
- tripleo::keepalived::internal_api_virtual_ip: {get_param: [NetVipMap, internal_api]}
- tripleo::keepalived::storage_virtual_ip: {get_param: [NetVipMap, storage]}
- tripleo::keepalived::storage_mgmt_virtual_ip: {get_param: [NetVipMap, storage_mgmt]}
+ tripleo::keepalived::internal_api_virtual_ip: {get_param: [NetVipMap, {get_param: InternalApiNetName}]}
+ tripleo::keepalived::storage_virtual_ip: {get_param: [NetVipMap, {get_param: StorageNetName}]}
+ tripleo::keepalived::storage_mgmt_virtual_ip: {get_param: [NetVipMap, {get_param: StorageMgmtNetName}]}
tripleo::keepalived::redis_virtual_ip: {get_param: RedisVirtualIP}
tripleo::redis_notification::haproxy_monitor_ip: {get_param: [NetVipMap, ctlplane]}
cloud_name_external: {get_param: cloud_name_external}
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index e92de45f..51f9abac 100644
--- a/puppet/blockstorage-role.yaml
+++ b/puppet/blockstorage-role.yaml
@@ -115,6 +115,14 @@ parameters:
Command or script snippet to run on all overcloud nodes to
initialize the upgrade process. E.g. a repository switch.
default: ''
+ UpgradeInitCommonCommand:
+ type: string
+ description: |
+ Common commands required by the upgrades process. This should not
+ normally be modified by the operator and is set and unset in the
+ major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
+ environment files.
+ default: ''
resources:
BlockStorage:
@@ -360,6 +368,7 @@ resources:
- - "#!/bin/bash\n\n"
- "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- get_param: UpgradeInitCommand
+ - get_param: UpgradeInitCommonCommand
# Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
# but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
@@ -439,6 +448,7 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
properties:
name: UpdateDeployment
config: {get_resource: UpdateConfig}
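UpgradeInitCommonCommand is concatenated after the operator-supplied UpgradeInitCommand in the list_join that builds the upgrade-init script, so the major-upgrade environment files can inject common setup without the operator editing their own snippet. Roughly (a sketch of the concatenation only):

    def upgrade_init_script(upgrade_init_command, upgrade_init_common_command):
        # Mirrors the list_join above: operator snippet first, then the common
        # commands set by the major-upgrade environment files.
        return "".join([
            "#!/bin/bash\n\n",
            "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n",
            upgrade_init_command,
            upgrade_init_common_command,
        ])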
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index 892f91ef..d7d7f478 100644
--- a/puppet/cephstorage-role.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -121,6 +121,14 @@ parameters:
Command or script snippet to run on all overcloud nodes to
initialize the upgrade process. E.g. a repository switch.
default: ''
+ UpgradeInitCommonCommand:
+ type: string
+ description: |
+ Common commands required by the upgrades process. This should not
+ normally be modified by the operator and is set and unset in the
+ major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
+ environment files.
+ default: ''
resources:
CephStorage:
@@ -366,6 +374,7 @@ resources:
- - "#!/bin/bash\n\n"
- "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- get_param: UpgradeInitCommand
+ - get_param: UpgradeInitCommonCommand
# Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
# but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
@@ -451,6 +460,7 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
properties:
config: {get_resource: UpdateConfig}
server: {get_resource: CephStorage}
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index 62adcd33..ebdd762d 100644
--- a/puppet/compute-role.yaml
+++ b/puppet/compute-role.yaml
@@ -133,6 +133,14 @@ parameters:
Command or script snippet to run on all overcloud nodes to
initialize the upgrade process. E.g. a repository switch.
default: ''
+ UpgradeInitCommonCommand:
+ type: string
+ description: |
+ Common commands required by the upgrades process. This should not
+ normally be modified by the operator and is set and unset in the
+ major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
+ environment files.
+ default: ''
resources:
@@ -383,6 +391,7 @@ resources:
- - "#!/bin/bash\n\n"
- "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- get_param: UpgradeInitCommand
+ - get_param: UpgradeInitCommonCommand
# Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
# but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
@@ -474,6 +483,7 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
properties:
name: UpdateDeployment
config: {get_resource: UpdateConfig}
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index d3268ee2..2f4f583c 100644
--- a/puppet/controller-role.yaml
+++ b/puppet/controller-role.yaml
@@ -147,6 +147,14 @@ parameters:
Command or script snippet to run on all overcloud nodes to
initialize the upgrade process. E.g. a repository switch.
default: ''
+ UpgradeInitCommonCommand:
+ type: string
+ description: |
+ Common commands required by the upgrades process. This should not
+ normally be modified by the operator and is set and unset in the
+ major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
+ environment files.
+ default: ''
parameter_groups:
- label: deprecated
@@ -417,6 +425,7 @@ resources:
- - "#!/bin/bash\n\n"
- "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- get_param: UpgradeInitCommand
+ - get_param: UpgradeInitCommonCommand
# Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
# but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
@@ -514,6 +523,7 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
properties:
name: UpdateDeployment
config: {get_resource: UpdateConfig}
diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
index 3daf3fd3..b6d1239a 100644
--- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
@@ -53,41 +53,40 @@ resources:
NetworkMidoNetConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- midonet_data:
- mapped_data:
- enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController}
- enable_cassandra_on_controller: {get_param: EnableCassandraOnController}
- midonet_tunnelzone_name: {get_param: TunnelZoneName}
- midonet_tunnelzone_type: {get_param: TunnelZoneType}
- midonet_libvirt_qemu_data: |
- user = "root"
- group = "root"
- cgroup_device_acl = [
- "/dev/null", "/dev/full", "/dev/zero",
- "/dev/random", "/dev/urandom",
- "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
- "/dev/rtc","/dev/hpet", "/dev/vfio/vfio",
- "/dev/net/tun"
- ]
- tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort}
- tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort}
- tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort}
- tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift}
- tripleo::haproxy::midonet_api: true
- # Missed Neutron Puppet data
- neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver'
- neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver'
- neutron::plugins::midonet::midonet_api_port: 8081
- neutron::params::midonet_server_package: 'python-networking-midonet'
+ datafiles:
+ midonet_data:
+ mapped_data:
+ enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController}
+ enable_cassandra_on_controller: {get_param: EnableCassandraOnController}
+ midonet_tunnelzone_name: {get_param: TunnelZoneName}
+ midonet_tunnelzone_type: {get_param: TunnelZoneType}
+ midonet_libvirt_qemu_data: |
+ user = "root"
+ group = "root"
+ cgroup_device_acl = [
+ "/dev/null", "/dev/full", "/dev/zero",
+ "/dev/random", "/dev/urandom",
+ "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+ "/dev/rtc","/dev/hpet", "/dev/vfio/vfio",
+ "/dev/net/tun"
+ ]
+ tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort}
+ tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort}
+ tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort}
+ tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift}
+ tripleo::haproxy::midonet_api: true
+ # Missed Neutron Puppet data
+ neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver'
+ neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver'
+ neutron::plugins::midonet::midonet_api_port: 8081
+ neutron::params::midonet_server_package: 'python-networking-midonet'
- # Make sure the l3 agent does not run
- l3_agent_service: false
- neutron::agents::l3::manage_service: false
- neutron::agents::l3::enabled: false
+ # Make sure the l3 agent does not run
+ l3_agent_service: false
+ neutron::agents::l3::manage_service: false
+ neutron::agents::l3::enabled: false
NetworkMidonetDeploymentControllers:
diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
index 9b900bc4..b05fa636 100644
--- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
@@ -101,31 +101,30 @@ resources:
NetworkCiscoConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- neutron_cisco_data:
- mapped_data:
- neutron::plugins::ml2::cisco::ucsm::ucsm_ip: {get_input: UCSM_ip}
- neutron::plugins::ml2::cisco::ucsm::ucsm_username: {get_input: UCSM_username}
- neutron::plugins::ml2::cisco::ucsm::ucsm_password: {get_input: UCSM_password}
- neutron::plugins::ml2::cisco::ucsm::ucsm_host_list: {get_input: UCSM_host_list}
- neutron::plugins::ml2::cisco::ucsm::supported_pci_devs: {get_input: UCSMSupportedPciDevs}
- neutron::plugins::ml2::cisco::nexus::nexus_config: {get_input: NexusConfig}
- neutron::plugins::ml2::cisco::nexus::managed_physical_network: {get_input: NexusManagedPhysicalNetwork}
- neutron::plugins::ml2::cisco::nexus::vlan_name_prefix: {get_input: NexusVlanNamePrefix}
- neutron::plugins::ml2::cisco::nexus::svi_round_robin: {get_input: NexusSviRoundRobin}
- neutron::plugins::ml2::cisco::nexus::provider_vlan_name_prefix: {get_input: NexusProviderVlanNamePrefix}
- neutron::plugins::ml2::cisco::nexus::persistent_switch_config: {get_input: NexusPersistentSwitchConfig}
- neutron::plugins::ml2::cisco::nexus::switch_heartbeat_time: {get_input: NexusSwitchHeartbeatTime}
- neutron::plugins::ml2::cisco::nexus::switch_replay_count: {get_input: NexusSwitchReplayCount}
- neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_create: {get_input: NexusProviderVlanAutoCreate}
- neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_trunk: {get_input: NexusProviderVlanAutoTrunk}
- neutron::plugins::ml2::cisco::nexus::vxlan_global_config: {get_input: NexusVxlanGlobalConfig}
- neutron::plugins::ml2::cisco::nexus::host_key_checks: {get_input: NexusHostKeyChecks}
- neutron::plugins::ml2::cisco::type_nexus_vxlan::vni_ranges: {get_input: NexusVxlanVniRanges}
- neutron::plugins::ml2::cisco::type_nexus_vxlan::mcast_ranges: {get_input: NexusVxlanMcastRanges}
+ datafiles:
+ neutron_cisco_data:
+ mapped_data:
+ neutron::plugins::ml2::cisco::ucsm::ucsm_ip: {get_input: UCSM_ip}
+ neutron::plugins::ml2::cisco::ucsm::ucsm_username: {get_input: UCSM_username}
+ neutron::plugins::ml2::cisco::ucsm::ucsm_password: {get_input: UCSM_password}
+ neutron::plugins::ml2::cisco::ucsm::ucsm_host_list: {get_input: UCSM_host_list}
+ neutron::plugins::ml2::cisco::ucsm::supported_pci_devs: {get_input: UCSMSupportedPciDevs}
+ neutron::plugins::ml2::cisco::nexus::nexus_config: {get_input: NexusConfig}
+ neutron::plugins::ml2::cisco::nexus::managed_physical_network: {get_input: NexusManagedPhysicalNetwork}
+ neutron::plugins::ml2::cisco::nexus::vlan_name_prefix: {get_input: NexusVlanNamePrefix}
+ neutron::plugins::ml2::cisco::nexus::svi_round_robin: {get_input: NexusSviRoundRobin}
+ neutron::plugins::ml2::cisco::nexus::provider_vlan_name_prefix: {get_input: NexusProviderVlanNamePrefix}
+ neutron::plugins::ml2::cisco::nexus::persistent_switch_config: {get_input: NexusPersistentSwitchConfig}
+ neutron::plugins::ml2::cisco::nexus::switch_heartbeat_time: {get_input: NexusSwitchHeartbeatTime}
+ neutron::plugins::ml2::cisco::nexus::switch_replay_count: {get_input: NexusSwitchReplayCount}
+ neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_create: {get_input: NexusProviderVlanAutoCreate}
+ neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_trunk: {get_input: NexusProviderVlanAutoTrunk}
+ neutron::plugins::ml2::cisco::nexus::vxlan_global_config: {get_input: NexusVxlanGlobalConfig}
+ neutron::plugins::ml2::cisco::nexus::host_key_checks: {get_input: NexusHostKeyChecks}
+ neutron::plugins::ml2::cisco::type_nexus_vxlan::vni_ranges: {get_input: NexusVxlanVniRanges}
+ neutron::plugins::ml2::cisco::type_nexus_vxlan::mcast_ranges: {get_input: NexusVxlanMcastRanges}
NetworkCiscoDeployment:
type: OS::Heat::StructuredDeployments
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
index 7fe2a842..533c0ee9 100644
--- a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
@@ -20,14 +20,13 @@ resources:
NeutronBigswitchConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- neutron_bigswitch_data:
- mapped_data:
- neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
- neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+ datafiles:
+ neutron_bigswitch_data:
+ mapped_data:
+ neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+ neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
NeutronBigswitchDeployment:
type: OS::Heat::StructuredDeployment
diff --git a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
index 47c782c7..1d16e909 100644
--- a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
@@ -50,22 +50,21 @@ resources:
NovaNuageConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- nova_nuage_data:
- mapped_data:
- nuage::vrs::active_controller: {get_input: ActiveController}
- nuage::vrs::standby_controller: {get_input: StandbyController}
- nuage::metadataagent::metadata_port: {get_input: MetadataPort}
- nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort}
- nuage::metadataagent::metadata_secret: {get_input: SharedSecret}
- nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion}
- nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername}
- nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs}
- nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType}
- nuage::metadataagent::nova_region_name: {get_input: NovaRegionName}
+ datafiles:
+ nova_nuage_data:
+ mapped_data:
+ nuage::vrs::active_controller: {get_input: ActiveController}
+ nuage::vrs::standby_controller: {get_input: StandbyController}
+ nuage::metadataagent::metadata_port: {get_input: MetadataPort}
+ nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort}
+ nuage::metadataagent::metadata_secret: {get_input: SharedSecret}
+ nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion}
+ nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername}
+ nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs}
+ nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType}
+ nuage::metadataagent::nova_region_name: {get_input: NovaRegionName}
NovaNuageDeployment:
type: OS::Heat::StructuredDeployment
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
index 763ae39a..378f7f98 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
@@ -91,35 +91,34 @@ resources:
CinderNetappConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- cinder_netapp_data:
- mapped_data:
- tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend}
- cinder::backend::netapp::title: {get_input: NetappBackendName}
- cinder::backend::netapp::netapp_login: {get_input: NetappLogin}
- cinder::backend::netapp::netapp_password: {get_input: NetappPassword}
- cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname}
- cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort}
- cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier}
- cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily}
- cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol}
- cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType}
- cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler}
- cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList}
- cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver}
- cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName}
- cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares}
- cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig}
- cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions}
- cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath}
- cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps}
- cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword}
- cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools}
- cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType}
- cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath}
+ datafiles:
+ cinder_netapp_data:
+ mapped_data:
+ tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend}
+ cinder::backend::netapp::title: {get_input: NetappBackendName}
+ cinder::backend::netapp::netapp_login: {get_input: NetappLogin}
+ cinder::backend::netapp::netapp_password: {get_input: NetappPassword}
+ cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname}
+ cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort}
+ cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier}
+ cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily}
+ cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol}
+ cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType}
+ cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler}
+ cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList}
+ cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver}
+ cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName}
+ cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares}
+ cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig}
+ cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions}
+ cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath}
+ cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps}
+ cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword}
+ cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools}
+ cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType}
+ cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath}
CinderNetappDeployment:
type: OS::Heat::StructuredDeployment
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
index 0f4806db..1456337f 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
@@ -38,19 +38,18 @@ resources:
NeutronBigswitchConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- neutron_bigswitch_data:
- mapped_data:
- neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
- neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
- neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
- neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval}
- neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id}
- neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl}
- neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory}
+ datafiles:
+ neutron_bigswitch_data:
+ mapped_data:
+ neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
+ neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
+ neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
+ neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval}
+ neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id}
+ neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl}
+ neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory}
NeutronBigswitchDeployment:
type: OS::Heat::StructuredDeployment
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
index 6eae812f..bca6010a 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
@@ -96,48 +96,47 @@ resources:
CiscoN1kvConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- cisco_n1kv_data:
- mapped_data:
- #enable_cisco_n1kv: {get_input: EnableCiscoN1kv}
- # VEM Parameters
- n1kv_vem_source: {get_input: n1kv_vem_source}
- n1kv_vem_version: {get_input: n1kv_vem_version}
- neutron::agents::n1kv_vem::n1kv_vsm_ip: {get_input: n1kv_vsm_ip}
- neutron::agents::n1kv_vem::n1kv_vsm_domain_id: {get_input: n1kv_vsm_domain_id}
- neutron::agents::n1kv_vem::n1kv_vsm_ip_v6: {get_input: n1kv_vsm_ip_v6}
- neutron::agents::n1kv_vem::host_mgmt_intf: {get_input: n1kv_vem_host_mgmt_intf}
- neutron::agents::n1kv_vem::uplink_profile: {get_input: n1kv_vem_uplink_profile}
- neutron::agents::n1kv_vem::vtep_config: {get_input: n1kv_vem_vtep_config}
- neutron::agents::n1kv_vem::portdb: {get_input: n1kv_vem_portdb}
- neutron::agents::n1kv_vem::vteps_in_same_subnet: {get_input: n1kv_vem_vteps_in_same_subnet}
- neutron::agents::n1kv_vem::fastpath_flood: {get_input: n1kv_vem_fastpath_flood}
- #VSM Parameter
- n1kv_vsm_source: {get_input: n1kv_vsm_source}
- n1kv_vsm_version: {get_input: n1kv_vsm_version}
- n1k_vsm::phy_if_bridge: {get_input: n1kv_vsm_host_mgmt_intf}
- n1k_vsm::vsm_role: {get_input: n1kv_vsm_role}
- n1k_vsm::pacemaker_control: {get_input: n1kv_vsm_pacemaker_ctrl}
- n1k_vsm::existing_bridge: {get_input: n1kv_vsm_existing_br}
- n1k_vsm::vsm_admin_passwd: {get_input: n1kv_vsm_password}
- n1k_vsm::vsm_domain_id: {get_input: n1kv_vsm_domain_id}
- n1k_vsm::vsm_mgmt_ip: {get_input: n1kv_vsm_ip}
- n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask}
- n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip}
- n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip}
- n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan}
- # Cisco N1KV driver Parameters
- neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip}
- neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username}
- neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_password: {get_input: n1kv_vsm_password}
- neutron::plugins::ml2::cisco::nexus1000v::poll_duration: {get_input: n1kv_vsm_poll_duration}
- neutron::plugins::ml2::cisco::nexus1000v::http_pool_size: {get_input: n1kv_vsm_http_pool_size}
- neutron::plugins::ml2::cisco::nexus1000v::http_timeout: {get_input: n1kv_vsm_http_timeout}
- neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_sync_interval: {get_input: n1kv_vsm_sync_interval}
- neutron::plugins::ml2::cisco::nexus1000v::max_vsm_retries: {get_input: n1kv_max_vsm_retries}
+ datafiles:
+ cisco_n1kv_data:
+ mapped_data:
+ #enable_cisco_n1kv: {get_input: EnableCiscoN1kv}
+ # VEM Parameters
+ n1kv_vem_source: {get_input: n1kv_vem_source}
+ n1kv_vem_version: {get_input: n1kv_vem_version}
+ neutron::agents::n1kv_vem::n1kv_vsm_ip: {get_input: n1kv_vsm_ip}
+ neutron::agents::n1kv_vem::n1kv_vsm_domain_id: {get_input: n1kv_vsm_domain_id}
+ neutron::agents::n1kv_vem::n1kv_vsm_ip_v6: {get_input: n1kv_vsm_ip_v6}
+ neutron::agents::n1kv_vem::host_mgmt_intf: {get_input: n1kv_vem_host_mgmt_intf}
+ neutron::agents::n1kv_vem::uplink_profile: {get_input: n1kv_vem_uplink_profile}
+ neutron::agents::n1kv_vem::vtep_config: {get_input: n1kv_vem_vtep_config}
+ neutron::agents::n1kv_vem::portdb: {get_input: n1kv_vem_portdb}
+ neutron::agents::n1kv_vem::vteps_in_same_subnet: {get_input: n1kv_vem_vteps_in_same_subnet}
+ neutron::agents::n1kv_vem::fastpath_flood: {get_input: n1kv_vem_fastpath_flood}
+ #VSM Parameter
+ n1kv_vsm_source: {get_input: n1kv_vsm_source}
+ n1kv_vsm_version: {get_input: n1kv_vsm_version}
+ n1k_vsm::phy_if_bridge: {get_input: n1kv_vsm_host_mgmt_intf}
+ n1k_vsm::vsm_role: {get_input: n1kv_vsm_role}
+ n1k_vsm::pacemaker_control: {get_input: n1kv_vsm_pacemaker_ctrl}
+ n1k_vsm::existing_bridge: {get_input: n1kv_vsm_existing_br}
+ n1k_vsm::vsm_admin_passwd: {get_input: n1kv_vsm_password}
+ n1k_vsm::vsm_domain_id: {get_input: n1kv_vsm_domain_id}
+ n1k_vsm::vsm_mgmt_ip: {get_input: n1kv_vsm_ip}
+ n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask}
+ n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip}
+ n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip}
+ n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan}
+ # Cisco N1KV driver Parameters
+ neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip}
+ neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username}
+ neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_password: {get_input: n1kv_vsm_password}
+ neutron::plugins::ml2::cisco::nexus1000v::poll_duration: {get_input: n1kv_vsm_poll_duration}
+ neutron::plugins::ml2::cisco::nexus1000v::http_pool_size: {get_input: n1kv_vsm_http_pool_size}
+ neutron::plugins::ml2::cisco::nexus1000v::http_timeout: {get_input: n1kv_vsm_http_timeout}
+ neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_sync_interval: {get_input: n1kv_vsm_sync_interval}
+ neutron::plugins::ml2::cisco::nexus1000v::max_vsm_retries: {get_input: n1kv_max_vsm_retries}
CiscoN1kvDeployment:
type: OS::Heat::StructuredDeployment
diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml
index 3362a01f..6f2dd684 100644
--- a/puppet/major_upgrade_steps.j2.yaml
+++ b/puppet/major_upgrade_steps.j2.yaml
@@ -79,6 +79,7 @@ resources:
AUTH_URL: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
ROLE_NAME: {{role.name}}
- get_file: ../extraconfig/tasks/pacemaker_common_functions.sh
+ - get_file: ../extraconfig/tasks/run_puppet.sh
- get_file: ../extraconfig/tasks/tripleo_upgrade_node.sh
{{role.name}}DeliverUpgradeScriptDeployment:
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 1633134d..6ee06d78 100644
--- a/puppet/objectstorage-role.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -115,6 +115,14 @@ parameters:
Command or script snippet to run on all overcloud nodes to
initialize the upgrade process. E.g. a repository switch.
default: ''
+ UpgradeInitCommonCommand:
+ type: string
+ description: |
+ Common commands required by the upgrades process. This should not
+ normally be modified by the operator and is set and unset in the
+ major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
+ environment files.
+ default: ''
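   (Editorial illustration, not part of this change: the snippet below sketches how an upgrade
   environment file could feed this parameter. The actual contents of
   major-upgrade-composable-steps.yaml are not shown here and the commands are invented; the
   parameter is concatenated into each node's upgrade script, so it carries plain shell
   commands rather than a full script.)

       parameter_defaults:
         # hypothetical commands run on every node before its upgrade script
         UpgradeInitCommonCommand: |
           set -eu
           yum clean all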
resources:
@@ -360,6 +368,7 @@ resources:
- - "#!/bin/bash\n\n"
- "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- get_param: UpgradeInitCommand
+ - get_param: UpgradeInitCommonCommand
# Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
# but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
@@ -438,6 +447,7 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
properties:
config: {get_resource: UpdateConfig}
server: {get_resource: SwiftStorage}
diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2
index c3b54ccd..86af6114 100644
--- a/puppet/puppet-steps.j2
+++ b/puppet/puppet-steps.j2
@@ -21,15 +21,20 @@
{{role.name}}Config:
type: OS::TripleO::{{role.name}}Config
properties:
- StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
+ StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
- {% if role.name == 'Controller' %}
- ControllerPrePuppet:
- type: OS::TripleO::Tasks::ControllerPrePuppet
+ {{role.name}}PrePuppet:
+ type: OS::TripleO::Tasks::{{role.name}}PrePuppet
properties:
- servers: {get_param: [servers, Controller]}
+ servers: {get_param: [servers, {{role.name}}]}
input_values:
update_identifier: {get_param: DeployIdentifier}
+
+ {% if role.name in ['Controller', 'ObjectStorage'] %}
+ {{role.name}}SwiftRingDeploy:
+ type: OS::TripleO::Tasks::SwiftRingDeploy
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
{% endif %}
# Step through a series of configuration steps
@@ -37,7 +42,7 @@
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
{% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ depends_on: [{{role.name}}PrePuppet, {{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
{% else %}
depends_on:
{% for dep in roles %}
@@ -75,14 +80,23 @@
properties:
servers: {get_param: [servers, {{role.name}}]}
- {% if role.name == 'Controller' %}
- ControllerPostPuppet:
+ {{role.name}}PostPuppet:
depends_on:
- - ControllerExtraConfigPost
- type: OS::TripleO::Tasks::ControllerPostPuppet
+ - {{role.name}}ExtraConfigPost
+ type: OS::TripleO::Tasks::{{role.name}}PostPuppet
properties:
- servers: {get_param: [servers, Controller]}
+ servers: {get_param: [servers, {{role.name}}]}
input_values:
update_identifier: {get_param: DeployIdentifier}
+
+ {% if role.name in ['Controller', 'ObjectStorage'] %}
+ {{role.name}}SwiftRingUpdate:
+ type: OS::TripleO::Tasks::SwiftRingUpdate
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ {% endfor %}
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
{% endif %}
{% endfor %}
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 2f070da2..1f68f41f 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -137,7 +137,14 @@ parameters:
Command or script snippet to run on all overcloud nodes to
initialize the upgrade process. E.g. a repository switch.
default: ''
-
+ UpgradeInitCommonCommand:
+ type: string
+ description: |
+ Common commands required by the upgrades process. This should not
+ normally be modified by the operator and is set and unset in the
+ major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
+ environment files.
+ default: ''
resources:
{{role}}:
@@ -386,6 +393,7 @@ resources:
- - "#!/bin/bash\n\n"
- "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- get_param: UpgradeInitCommand
+ - get_param: UpgradeInitCommonCommand
# Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
# but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
@@ -473,6 +481,7 @@ resources:
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
properties:
config: {get_resource: UpdateConfig}
server: {get_resource: {{role}}}
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index 9c2d8c5c..f19b6cca 100644
--- a/puppet/services/README.rst
+++ b/puppet/services/README.rst
@@ -19,8 +19,21 @@ environment to set per service parameters.
Config Settings
---------------
-Each service may define a config_settings output variable which returns
-Hiera settings to be configured.
+Each service may output variables that configure Hiera settings on the
+nodes in three ways.
+
+ * config_settings: the hiera keys will be applied to all roles that
+   include the service.
+
+ * global_config_settings: the hiera keys will be distributed to all roles.
+
+ * service_config_settings: takes an extra key to wire in values that are
+   defined by one service but need to be consumed by some other service.
+ For example:
+ service_config_settings:
+ haproxy:
+ foo: bar
+ This will set the hiera key 'foo' on all roles where haproxy is included.
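   (Editorial illustration, not part of this change: a service template's role_data output
   could combine the three kinds of settings roughly as sketched below. The service name,
   hiera keys and network name are invented; only the structure mirrors the templates in
   this tree.)

       outputs:
         role_data:
           value:
             service_name: example_api
             config_settings:
               # applied only on roles that include example_api
               example::api::bind_host: {get_param: [ServiceNetMap, ExampleApiNetwork]}
             global_config_settings:
               # distributed to every role
               example_api_enabled: true
             service_config_settings:
               haproxy:
                 # consumed by the haproxy service on whichever role runs it
                 tripleo::haproxy::example_api: true
             step_config: |
               include ::tripleo::profile::base::example::api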
Deployment Steps
----------------
@@ -87,11 +100,26 @@ step, "step2" for the second, etc.
Steps/stages correlate to the following:
- 1) Quiesce the control-plane, e.g disable LoadBalancer, stop pacemaker cluster
-
- 2) Stop all control-plane services, ready for upgrade
-
- 3) Perform a package update, (either specific packages or the whole system)
+ 1) Stop all control-plane services.
+
+ 2) Quiesce the control-plane, e.g. disable LoadBalancer, stop the
+ pacemaker cluster: this will stop the following resources:
+ - ocata:
+ - galera
+ - rabbit
+ - redis
+ - haproxy
+ - vips
+ - cinder-volumes
+ - cinder-backup
+ - manila-share
+ - rbd-mirror
+
+ The exact order is controlled by the cluster constraints.
+
+ 3) Perform a package update and install new packages: a general
+ system upgrade is done here, so only newly required packages should be
+ installed via the service ansible tasks.
4) Start services needed for migration tasks (e.g DB)
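   (Editorial illustration, not part of this change: the list below shows the upgrade_tasks
   tagging pattern this patch applies to aodh, ceilometer, cinder, congress and ec2-api, for
   a made-up openstack-example-api service: detect the unit with a common-tagged task,
   validate at step0, stop at step1, reinstall at step3.)

       upgrade_tasks:
         - name: Check if example_api is deployed
           command: systemctl is-enabled openstack-example-api
           tags: common
           ignore_errors: True
           register: example_api_enabled
         - name: "PreUpgrade step0,validation: Check service openstack-example-api is running"
           shell: /usr/bin/systemctl show 'openstack-example-api' --property ActiveState | grep '\bactive\b'
           when: example_api_enabled.rc == 0
           tags: step0,validation
         - name: Stop example_api service
           tags: step1
           when: example_api_enabled.rc == 0
           service: name=openstack-example-api state=stopped
         - name: Install openstack-example-api package if it was disabled
           tags: step3
           yum: name=openstack-example-api state=latest
           when: example_api_enabled.rc != 0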
diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml
index 4bd9fc47..d7c87b61 100644
--- a/puppet/services/aodh-api.yaml
+++ b/puppet/services/aodh-api.yaml
@@ -87,5 +87,5 @@ outputs:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- name: Stop aodh_api service (running under httpd)
- tags: step2
+ tags: step1
service: name=httpd state=stopped
diff --git a/puppet/services/aodh-base.yaml b/puppet/services/aodh-base.yaml
index f5ca329e..48a2aecd 100644
--- a/puppet/services/aodh-base.yaml
+++ b/puppet/services/aodh-base.yaml
@@ -69,8 +69,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/aodh'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
aodh::debug: {get_param: Debug}
aodh::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::rabbit_userid: {get_param: RabbitUserName}
@@ -78,8 +77,10 @@ outputs:
aodh::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
aodh::rabbit_port: {get_param: RabbitClientPort}
aodh::keystone::authtoken::project_name: 'service'
+ aodh::keystone::authtoken::user_domain_name: 'Default'
+ aodh::keystone::authtoken::project_domain_name: 'Default'
aodh::keystone::authtoken::password: {get_param: AodhPassword}
- aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::auth::auth_password: {get_param: AodhPassword}
aodh::auth::auth_region: 'regionOne'
diff --git a/puppet/services/aodh-evaluator.yaml b/puppet/services/aodh-evaluator.yaml
index 56dbb558..b8be4a91 100644
--- a/puppet/services/aodh-evaluator.yaml
+++ b/puppet/services/aodh-evaluator.yaml
@@ -41,9 +41,16 @@ outputs:
step_config: |
include tripleo::profile::base::aodh::evaluator
upgrade_tasks:
+ - name: Check if aodh_evaluator is deployed
+ command: systemctl is-enabled openstack-aodh-evaluator
+ tags: common
+ ignore_errors: True
+ register: aodh_evaluator_enabled
- name: "PreUpgrade step0,validation: Check service openstack-aodh-evaluator is running"
shell: /usr/bin/systemctl show 'openstack-aodh-evaluator' --property ActiveState | grep '\bactive\b'
+ when: aodh_evaluator_enabled.rc == 0
tags: step0,validation
- name: Stop aodh_evaluator service
- tags: step2
+ tags: step1
+ when: aodh_evaluator_enabled.rc == 0
service: name=openstack-aodh-evaluator state=stopped
diff --git a/puppet/services/aodh-listener.yaml b/puppet/services/aodh-listener.yaml
index 76db0ca8..f5c9330d 100644
--- a/puppet/services/aodh-listener.yaml
+++ b/puppet/services/aodh-listener.yaml
@@ -41,9 +41,16 @@ outputs:
step_config: |
include tripleo::profile::base::aodh::listener
upgrade_tasks:
+ - name: Check if aodh_listener is deployed
+ command: systemctl is-enabled openstack-aodh-listener
+ tags: common
+ ignore_errors: True
+ register: aodh_listener_enabled
- name: "PreUpgrade step0,validation: Check service openstack-aodh-listener is running"
shell: /usr/bin/systemctl show 'openstack-aodh-listener' --property ActiveState | grep '\bactive\b'
+ when: aodh_listener_enabled.rc == 0
tags: step0,validation
- name: Stop aodh_listener service
- tags: step2
+ tags: step1
+ when: aodh_listener_enabled.rc == 0
service: name=openstack-aodh-listener state=stopped
diff --git a/puppet/services/aodh-notifier.yaml b/puppet/services/aodh-notifier.yaml
index 30c67635..84c50dd6 100644
--- a/puppet/services/aodh-notifier.yaml
+++ b/puppet/services/aodh-notifier.yaml
@@ -41,9 +41,16 @@ outputs:
step_config: |
include tripleo::profile::base::aodh::notifier
upgrade_tasks:
+ - name: Check if aodh_notifier is deployed
+ command: systemctl is-enabled openstack-aodh-notifier
+ tags: common
+ ignore_errors: True
+ register: aodh_notifier_enabled
- name: "PreUpgrade step0,validation: Check service openstack-aodh-notifier is running"
shell: /usr/bin/systemctl show 'openstack-aodh-notifier' --property ActiveState | grep '\bactive\b'
+ when: aodh_notifier_enabled.rc == 0
tags: step0,validation
- name: Stop aodh_notifier service
- tags: step2
+ tags: step1
+ when: aodh_notifier_enabled.rc == 0
service: name=openstack-aodh-notifier state=stopped
diff --git a/puppet/services/apache-internal-tls-certmonger.yaml b/puppet/services/apache-internal-tls-certmonger.yaml
index 4c21e02a..4c94f440 100644
--- a/puppet/services/apache-internal-tls-certmonger.yaml
+++ b/puppet/services/apache-internal-tls-certmonger.yaml
@@ -64,6 +64,12 @@ outputs:
for_each:
$NETWORK: {get_attr: [ApacheNetworks, value]}
upgrade_tasks:
+ - name: Check if httpd is deployed
+ command: systemctl is-enabled httpd
+ tags: common
+ ignore_errors: True
+ register: httpd_enabled
- name: "PreUpgrade step0,validation: Check service httpd is running"
shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b'
+ when: httpd_enabled.rc == 0
tags: step0,validation
diff --git a/puppet/services/apache.yaml b/puppet/services/apache.yaml
index 74ddbde8..2d950151 100644
--- a/puppet/services/apache.yaml
+++ b/puppet/services/apache.yaml
@@ -67,6 +67,12 @@ outputs:
metadata_settings:
get_attr: [ApacheTLS, role_data, metadata_settings]
upgrade_tasks:
+ - name: Check if httpd is deployed
+ command: systemctl is-enabled httpd
+ tags: common
+ ignore_errors: True
+ register: httpd_enabled
- name: "PreUpgrade step0,validation: Check service httpd is running"
shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b'
+ when: httpd_enabled.rc == 0
tags: step0,validation
diff --git a/puppet/services/auditd.yaml b/puppet/services/auditd.yaml
index 639631e1..8085ac8b 100644
--- a/puppet/services/auditd.yaml
+++ b/puppet/services/auditd.yaml
@@ -32,3 +32,19 @@ outputs:
auditd::rules: {get_param: AuditdRules}
step_config: |
include ::tripleo::profile::base::auditd
+ upgrade_tasks:
+ - name: Check if auditd is deployed
+ command: systemctl is-enabled auditd
+ tags: common
+ ignore_errors: True
+ register: auditd_enabled
+ - name: "PreUpgrade step0,validation: Check if auditd is running"
+ shell: >
+ /usr/bin/systemctl show 'auditd' --property ActiveState |
+ grep '\bactive\b'
+ when: auditd_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop auditd service
+ tags: step2
+ when: auditd_enabled.rc == 0
+ service: name=auditd state=stopped
diff --git a/puppet/services/barbican-api.yaml b/puppet/services/barbican-api.yaml
index 239b6ca9..d8787c87 100644
--- a/puppet/services/barbican-api.yaml
+++ b/puppet/services/barbican-api.yaml
@@ -74,7 +74,7 @@ outputs:
map_merge:
- get_attr: [ApacheServiceBase, role_data, config_settings]
- barbican::keystone::authtoken::password: {get_param: BarbicanPassword}
- barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
barbican::keystone::authtoken::project_name: 'service'
barbican::api::host_href: {get_param: [EndpointMap, BarbicanPublic, uri]}
@@ -105,8 +105,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/barbican'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
tripleo.barbican_api.firewall_rules:
'117 barbican':
dport:
@@ -136,17 +135,27 @@ outputs:
nova::compute::barbican_endpoint:
get_param: [EndpointMap, BarbicanInternal, uri]
nova::compute::barbican_auth_endpoint:
- get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix]
+ get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]
cinder_api:
cinder::api::keymgr_api_class: >
castellan.key_manager.barbican_key_manager.BarbicanKeyManager
cinder::api::keymgr_encryption_api_url:
get_param: [EndpointMap, BarbicanInternal, uri]
cinder::api::keymgr_encryption_auth_url:
- get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix]
+ get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
+ - name: Check if barbican_api is deployed
+ command: systemctl is-enabled openstack-barbican-api
+ tags: common
+ ignore_errors: True
+ register: barbican_api_enabled
- name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running"
shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b'
+ when: barbican_api_enabled.rc == 0
tags: step0,validation
+ - name: Install openstack-barbican-api package if it was disabled
+ tags: step3
+ yum: name=openstack-barbican-api state=latest
+ when: barbican_api_enabled.rc != 0
diff --git a/puppet/services/ceilometer-agent-central.yaml b/puppet/services/ceilometer-agent-central.yaml
index cf8a8a8e..80823526 100644
--- a/puppet/services/ceilometer-agent-central.yaml
+++ b/puppet/services/ceilometer-agent-central.yaml
@@ -52,12 +52,20 @@ outputs:
map_merge:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
- ceilometer_redis_password: {get_param: RedisPassword}
+ central_namespace: true
step_config: |
- include ::tripleo::profile::base::ceilometer::agent::central
+ include ::tripleo::profile::base::ceilometer::agent::polling
upgrade_tasks:
+ - name: Check if ceilometer_agent_central is deployed
+ command: systemctl is-enabled openstack-ceilometer-central
+ tags: common
+ ignore_errors: True
+ register: ceilometer_agent_central_enabled
- name: "PreUpgrade step0,validation: Check service openstack-ceilometer-central is running"
shell: /usr/bin/systemctl show 'openstack-ceilometer-central' --property ActiveState | grep '\bactive\b'
+ when: ceilometer_agent_central_enabled.rc == 0
tags: step0,validation
- name: Stop ceilometer_agent_central service
- tags: step2
+ tags: step1
+ when: ceilometer_agent_central_enabled.rc == 0
service: name=openstack-ceilometer-central state=stopped
diff --git a/puppet/services/ceilometer-agent-compute.yaml b/puppet/services/ceilometer-agent-compute.yaml
index 00042914..546bcd98 100644
--- a/puppet/services/ceilometer-agent-compute.yaml
+++ b/puppet/services/ceilometer-agent-compute.yaml
@@ -46,12 +46,20 @@ outputs:
map_merge:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
- ceilometer::agent::compute::instance_discovery_method: {get_param: InstanceDiscoveryMethod}
+ compute_namespace: true
step_config: |
- include ::tripleo::profile::base::ceilometer::agent::compute
+ include ::tripleo::profile::base::ceilometer::agent::polling
upgrade_tasks:
+ - name: Check if ceilometer_agent_compute is deployed
+ command: systemctl is-enabled openstack-ceilometer-compute
+ tags: common
+ ignore_errors: True
+ register: ceilometer_agent_compute_enabled
- name: "PreUpgrade step0,validation: Check service openstack-ceilometer-compute is running"
shell: /usr/bin/systemctl show 'openstack-ceilometer-compute' --property ActiveState | grep '\bactive\b'
+ when: ceilometer_agent_compute_enabled.rc == 0
tags: step0,validation
- name: Stop ceilometer_agent_compute service
- tags: step2
+ tags: step1
+ when: ceilometer_agent_compute_enabled.rc == 0
service: name=openstack-ceilometer-compute state=stopped
diff --git a/puppet/services/ceilometer-agent-notification.yaml b/puppet/services/ceilometer-agent-notification.yaml
index 760acd65..4ee43f49 100644
--- a/puppet/services/ceilometer-agent-notification.yaml
+++ b/puppet/services/ceilometer-agent-notification.yaml
@@ -50,9 +50,16 @@ outputs:
step_config: |
include ::tripleo::profile::base::ceilometer::agent::notification
upgrade_tasks:
+ - name: Check if ceilometer_agent_notification is deployed
+ command: systemctl is-enabled openstack-ceilometer-notification
+ tags: common
+ ignore_errors: True
+ register: ceilometer_agent_notification_enabled
- name: "PreUpgrade step0,validation: Check service openstack-ceilometer-notification is running"
shell: /usr/bin/systemctl show 'openstack-ceilometer-notification' --property ActiveState | grep '\bactive\b'
+ when: ceilometer_agent_notification_enabled.rc == 0
tags: step0,validation
- name: Stop ceilometer_agent_notification service
- tags: step2
+ tags: step1
+ when: ceilometer_agent_notification_enabled.rc == 0
service: name=openstack-ceilometer-notification state=stopped
diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml
index 741f8da1..f5ee9d40 100644
--- a/puppet/services/ceilometer-api.yaml
+++ b/puppet/services/ceilometer-api.yaml
@@ -94,5 +94,5 @@ outputs:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- name: Stop ceilometer_api service (running under httpd)
- tags: step2
+ tags: step1
service: name=httpd state=stopped
diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml
index 5658e416..a9c84289 100644
--- a/puppet/services/ceilometer-base.yaml
+++ b/puppet/services/ceilometer-base.yaml
@@ -93,20 +93,23 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ceilometer'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
ceilometer_backend: {get_param: CeilometerBackend}
# we include db_sync class in puppet-tripleo
ceilometer::db::sync_db: false
ceilometer::keystone::authtoken::project_name: 'service'
+ ceilometer::keystone::authtoken::user_domain_name: 'Default'
+ ceilometer::keystone::authtoken::project_domain_name: 'Default'
ceilometer::keystone::authtoken::password: {get_param: CeilometerPassword}
- ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers}
ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion}
ceilometer::agent::auth::auth_tenant_name: 'service'
+ ceilometer::agent::auth::auth_user_domain_name: 'Default'
+ ceilometer::agent::auth::auth_project_domain_name: 'Default'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
ceilometer::collector::event_dispatcher: {get_param: CeilometerEventDispatcher}
diff --git a/puppet/services/ceilometer-collector.yaml b/puppet/services/ceilometer-collector.yaml
index a219f9eb..b0ec971f 100644
--- a/puppet/services/ceilometer-collector.yaml
+++ b/puppet/services/ceilometer-collector.yaml
@@ -60,9 +60,16 @@ outputs:
step_config: |
include ::tripleo::profile::base::ceilometer::collector
upgrade_tasks:
+ - name: Check if ceilometer_collector is deployed
+ command: systemctl is-enabled openstack-ceilometer-collector
+ tags: common
+ ignore_errors: True
+ register: ceilometer_collector_enabled
- name: "PreUpgrade step0,validation: Check service openstack-ceilometer-collector is running"
shell: /usr/bin/systemctl show 'openstack-ceilometer-collector' --property ActiveState | grep '\bactive\b'
+ when: ceilometer_collector_enabled.rc == 0
tags: step0,validation
- name: Stop ceilometer_collector service
- tags: step2
+ tags: step1
+ when: ceilometer_collector_enabled.rc == 0
service: name=openstack-ceilometer-collector state=stopped
diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml
index 1ce58335..d589ef89 100644
--- a/puppet/services/ceph-mon.yaml
+++ b/puppet/services/ceph-mon.yaml
@@ -59,6 +59,14 @@ parameters:
}
default: {}
type: json
+ CephValidationRetries:
+ type: number
+ default: 5
+ description: Number of retry attempts for Ceph validation
+ CephValidationDelay:
+ type: number
+ default: 10
+ description: Interval (in seconds) in between validation checks
MonitoringSubscriptionCephMon:
default: 'overcloud-ceph-mon'
type: string
@@ -119,21 +127,32 @@ outputs:
# rolling upgrade of all osd nodes in step1
- name: Check status
tags: step0,validation
- shell: ceph health | grep -qv HEALTH_ERR
- # FIXME(shardy) I suspect we can use heat or ansible facts here instead?
- - name: Get hostname
+ shell: ceph health | egrep -sq "HEALTH_OK|HEALTH_WARN"
+ - name: Stop CephMon
tags: step0
- shell: hostname -s
- register: mon_id
- - name: Stop Ceph Mon
+ service:
+ name: ceph-mon@{{ ansible_hostname }}
+ state: stopped
+ - name: Update Ceph packages
tags: step0
- service: name=ceph-mon@{{mon_id.stdout}} pattern=ceph-mon state=stopped
- - name: Update ceph packages
+ yum:
+ name: ceph-mon
+ state: latest
+ - name: Start CephMon
tags: step0
- yum: name=ceph-mon state=latest
- - name: Start ceph-mon service
- tags: step0
- service: name=ceph-mon@{{mon_id.stdout}} state=started
+ service:
+ name: ceph-mon@{{ ansible_hostname }}
+ state: started
+ # ceph-ansible
+ # https://github.com/ceph/ceph-ansible/blob/master/infrastructure-playbooks/rolling_update.yml#L149-L157
+ - name: Wait for the monitor to join the quorum...
+ tags: step0,ceph_quorum_validation
+ shell: |
+ ceph -s | grep monmap | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }}
+ register: ceph_quorum_nodecheck
+ until: ceph_quorum_nodecheck.rc == 0
+ retries: {get_param: CephValidationRetries}
+ delay: {get_param: CephValidationDelay}
- name: ceph osd crush tunables default
tags: step0
shell: ceph osd crush tunables default
diff --git a/puppet/services/ceph-osd.yaml b/puppet/services/ceph-osd.yaml
index 9bd83aab..a97fa116 100644
--- a/puppet/services/ceph-osd.yaml
+++ b/puppet/services/ceph-osd.yaml
@@ -21,6 +21,24 @@ parameters:
MonitoringSubscriptionCephOsd:
default: 'overcloud-ceph-osd'
type: string
+ CephValidationRetries:
+ type: number
+ default: 40
+ description: Number of retry attempts for Ceph validation
+ CephValidationDelay:
+ type: number
+ default: 30
+ description: Interval (in seconds) in between validation checks
+ IgnoreCephUpgradeWarnings:
+ type: boolean
+ default: false
+ description: If enabled, Ceph upgrade will be forced even though cluster or PGs status is not clean
+
+parameter_groups:
+- label: deprecated
+ description: Do not use deprecated params; they will be removed.
+ parameters:
+ - IgnoreCephUpgradeWarnings
resources:
CephBase:
@@ -66,17 +84,37 @@ outputs:
- name: ceph osd set noscrub
tags: step1
command: ceph osd set noscrub
- - name: Stop Ceph OSD
+ - name: Stop CephOSD
tags: step1
- service: name=ceph-osd@{{ item }} state=stopped
+ service:
+ name: ceph-osd@{{ item }}
+ state: stopped
with_items: "{{osd_ids.stdout.strip().split()}}"
- - name: Update ceph OSD packages
+ - name: Update Ceph packages
tags: step1
- yum: name=ceph-osd state=latest
- - name: Start ceph-osd service
+ yum:
+ name: ceph-osd
+ state: latest
+ - name: Start CephOSD
tags: step1
- service: name=ceph-osd@{{ item }} state=started
+ service:
+ name: ceph-osd@{{ item }}
+ state: started
with_items: "{{osd_ids.stdout.strip().split()}}"
+ # with awk we want to check that $2 and $4 are *the same*, but exit($2==$4)
+ # would return 1 when they match, so the comparison is inverted to produce a useful exit code
+ - name: Wait for clean pgs...
+ tags: step1,ceph_pgs_clean_validation
+ vars:
+ ignore_warnings: {get_param: IgnoreCephUpgradeWarnings}
+ shell: |
+ ceph pg stat | awk '{exit($2!=$4)}' && ceph health | egrep -sq "HEALTH_OK|HEALTH_WARN"
+ register: ceph_pgs_healthcheck
+ until: ceph_pgs_healthcheck.rc == 0
+ retries: {get_param: CephValidationRetries}
+ delay: {get_param: CephValidationDelay}
+ when:
+ - not ignore_warnings
- name: ceph osd unset noout
tags: step1
command: ceph osd unset noout
diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml
index d7014e54..01531971 100644
--- a/puppet/services/ceph-rgw.yaml
+++ b/puppet/services/ceph-rgw.yaml
@@ -79,12 +79,21 @@ outputs:
ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
upgrade_tasks:
- name: Gather RGW instance ID
- tags: step0
+ tags: common
shell: hiera -c /etc/puppet/hiera.yaml ceph::profile::params::rgw_name radosgw.gateway
register: rgw_id
+ - name: Check if ceph_rgw is deployed
+ command: systemctl is-enabled ceph-radosgw@{{rgw_id.stdout}}
+ tags: common
+ ignore_errors: True
+ register: ceph_rgw_enabled
- name: Check status
shell: /usr/bin/systemctl show ceph-radosgw@{{rgw_id.stdout}} --property ActiveState | grep '\bactive\b'
+ when: ceph_rgw_enabled.rc == 0
tags: step0,validation
- name: Stop RGW instance
tags: step1
- service: name=ceph-radosgw@{{rgw_id.stdout}} state=stopped
+ when: ceph_rgw_enabled.rc == 0
+ service:
+ name: ceph-radosgw@{{rgw_id.stdout}}
+ state: stopped
diff --git a/puppet/services/certmonger-user.yaml b/puppet/services/certmonger-user.yaml
new file mode 100644
index 00000000..af9802b0
--- /dev/null
+++ b/puppet/services/certmonger-user.yaml
@@ -0,0 +1,28 @@
+heat_template_version: ocata
+
+description: >
+ Requests certificates using certmonger through Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the certmonger-user service
+ value:
+ service_name: certmonger_user
+ step_config: |
+ include ::tripleo::profile::base::certmonger_user
diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml
index 8c5a07ac..958b0e7d 100644
--- a/puppet/services/cinder-api.yaml
+++ b/puppet/services/cinder-api.yaml
@@ -80,13 +80,16 @@ outputs:
map_merge:
- get_attr: [CinderBase, role_data, config_settings]
- get_attr: [ApacheServiceBase, role_data, config_settings]
- - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
cinder::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
cinder::keystone::authtoken::password: {get_param: CinderPassword}
cinder::keystone::authtoken::project_name: 'service'
+ cinder::keystone::authtoken::user_domain_name: 'Default'
+ cinder::keystone::authtoken::project_domain_name: 'Default'
cinder::api::enable_proxy_headers_parsing: true
- cinder::api::nova_catalog_info: 'compute:Compute Service:internalURL'
+ cinder::api::nova_catalog_info: 'compute:nova:internalURL'
+ cinder::api::nova_catalog_admin_info: 'compute:nova:adminURL'
# TODO(emilien) move it to puppet-cinder
cinder::config:
DEFAULT/swift_catalog_info:
@@ -149,18 +152,25 @@ outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
+ - name: Check if cinder_api is deployed
+ command: systemctl is-enabled openstack-cinder-api
+ tags: common
+ ignore_errors: True
+ register: cinder_api_enabled
- name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running"
shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b'
+ when: cinder_api_enabled.rc == 0
tags: step0,validation
- name: check for cinder running under apache (post upgrade)
- tags: step2
- shell: "apachectl -t -D DUMP_VHOSTS | grep -q cinder"
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q cinder"
register: cinder_apache
ignore_errors: true
- name: Stop cinder_api service (running under httpd)
- tags: step2
+ tags: step1
service: name=httpd state=stopped
- when: "cinder_apache.rc == 0"
+ when: cinder_apache.rc == 0
- name: Stop and disable cinder_api service (pre-upgrade not under httpd)
- tags: step2
+ tags: step1
+ when: cinder_api_enabled.rc == 0
service: name=openstack-cinder-api state=stopped enabled=no
diff --git a/puppet/services/cinder-backend-scaleio.yaml b/puppet/services/cinder-backend-scaleio.yaml
index eb709cd5..c4e4aa3d 100644
--- a/puppet/services/cinder-backend-scaleio.yaml
+++ b/puppet/services/cinder-backend-scaleio.yaml
@@ -106,6 +106,6 @@ outputs:
cinder::backend::scaleio::sio_round_volume_capacity: {get_param: CinderScaleIORoundVolumeCapacity}
cinder::backend::scaleio::sio_unmap_volume_before_deletion: {get_param: CinderScaleIOUnmapVolumeBeforeDeletion}
cinder::backend::scaleio::sio_max_over_subscription_ratio: {get_param: CinderScaleIOMaxOverSubscriptionRatio}
- cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOThinProvision}
+ cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOSanThinProvision}
step_config: |
include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/cinder-base.yaml b/puppet/services/cinder-base.yaml
index a5d7fcf1..88e7edb7 100644
--- a/puppet/services/cinder-base.yaml
+++ b/puppet/services/cinder-base.yaml
@@ -100,8 +100,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/cinder'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
cinder::debug: {get_param: Debug}
cinder::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
cinder::rabbit_userid: {get_param: RabbitUserName}
diff --git a/puppet/services/cinder-scheduler.yaml b/puppet/services/cinder-scheduler.yaml
index f102810e..f8361f6f 100644
--- a/puppet/services/cinder-scheduler.yaml
+++ b/puppet/services/cinder-scheduler.yaml
@@ -52,9 +52,16 @@ outputs:
step_config: |
include ::tripleo::profile::base::cinder::scheduler
upgrade_tasks:
+ - name: Check if cinder_scheduler is deployed
+ command: systemctl is-enabled openstack-cinder-scheduler
+ tags: common
+ ignore_errors: True
+ register: cinder_scheduler_enabled
- name: "PreUpgrade step0,validation: Check service openstack-cinder-scheduler is running"
shell: /usr/bin/systemctl show 'openstack-cinder-scheduler' --property ActiveState | grep '\bactive\b'
+ when: cinder_scheduler_enabled.rc == 0
tags: step0,validation
- name: Stop cinder_scheduler service
- tags: step2
+ tags: step1
+ when: cinder_scheduler_enabled.rc == 0
service: name=openstack-cinder-scheduler state=stopped
diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml
index 3a06afb8..26f1a96f 100644
--- a/puppet/services/cinder-volume.yaml
+++ b/puppet/services/cinder-volume.yaml
@@ -94,11 +94,7 @@ outputs:
tripleo::profile::base::cinder::volume::cinder_enable_nfs_backend: {get_param: CinderEnableNfsBackend}
tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
- tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers:
- str_replace:
- template: SERVERS
- params:
- SERVERS: {get_param: CinderNfsServers}
+ tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: {get_param: CinderNfsServers}
tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
@@ -116,9 +112,16 @@ outputs:
step_config: |
include ::tripleo::profile::base::cinder::volume
upgrade_tasks:
+ - name: Check if cinder_volume is deployed
+ command: systemctl is-enabled openstack-cinder-volume
+ tags: common
+ ignore_errors: True
+ register: cinder_volume_enabled
- name: "PreUpgrade step0,validation: Check service openstack-cinder-volume is running"
shell: /usr/bin/systemctl show 'openstack-cinder-volume' --property ActiveState | grep '\bactive\b'
+ when: cinder_volume_enabled.rc == 0
tags: step0,validation
- name: Stop cinder_volume service
- tags: step2
+ tags: step1
+ when: cinder_volume_enabled.rc == 0
service: name=openstack-cinder-volume state=stopped
diff --git a/puppet/services/congress.yaml b/puppet/services/congress.yaml
index 1b82f55c..20f64162 100644
--- a/puppet/services/congress.yaml
+++ b/puppet/services/congress.yaml
@@ -64,11 +64,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/congress'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
- congress::keystone::auth::tenant: 'service'
- congress::keystone::auth::password: {get_param: CongressPassword}
- congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
congress::debug: {get_param: Debug}
congress::rpc_backend: rabbit
congress::rabbit_userid: {get_param: RabbitUserName}
@@ -77,6 +73,12 @@ outputs:
congress::rabbit_port: {get_param: RabbitClientPort}
congress::server::bind_host: {get_param: [ServiceNetMap, CongressApiNetwork]}
+ congress::keystone::authtoken::project_name: 'service'
+ congress::keystone::authtoken::user_domain_name: 'Default'
+ congress::keystone::authtoken::project_domain_name: 'Default'
+ congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+
congress::db::mysql::password: {get_param: CongressPassword}
congress::db::mysql::user: congress
congress::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
@@ -85,6 +87,33 @@ outputs:
- '%'
- {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ service_config_settings:
+ keystone:
+ congress::keystone::auth::tenant: 'service'
+ congress::keystone::auth::region: {get_param: KeystoneRegion}
+ congress::keystone::auth::password: {get_param: CongressPassword}
+ congress::keystone::auth::public_url: {get_param: [EndpointMap, CongressPublic, uri]}
+ congress::keystone::auth::internal_url: {get_param: [EndpointMap, CongressInternal, uri]}
+ congress::keystone::auth::admin_url: {get_param: [EndpointMap, CongressAdmin, uri]}
step_config: |
include ::tripleo::profile::base::congress
+
+ upgrade_tasks:
+ - name: Check if congress is deployed
+ command: systemctl is-enabled openstack-congress-server
+ tags: common
+ ignore_errors: True
+ register: congress_enabled
+ - name: "PreUpgrade step0,validation: Check service openstack-congress-server is running"
+ shell: /usr/bin/systemctl show 'openstack-congress-server' --property ActiveState | grep '\bactive\b'
+ when: congress_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop congress service
+ tags: step1
+ when: congress_enabled.rc == 0
+ service: name=openstack-congress-server state=stopped
+ - name: Install openstack-congress package if it was disabled
+ tags: step3
+ yum: name=openstack-congress state=latest
+ when: congress_enabled.rc != 0
diff --git a/puppet/services/database/mysql-client.yaml b/puppet/services/database/mysql-client.yaml
new file mode 100644
index 00000000..78456e28
--- /dev/null
+++ b/puppet/services/database/mysql-client.yaml
@@ -0,0 +1,34 @@
+heat_template_version: ocata
+
+description: >
+ Mysql client settings
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+outputs:
+ role_data:
+ description: Role for setting mysql client parameters
+ value:
+ service_name: mysql_client
+ config_settings:
+ tripleo::profile::base::database::mysql::client::mysql_client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]}
+ tripleo::profile::base::database::mysql::client::enable_ssl: {get_param: EnableInternalTLS}
+ step_config: |
+ include ::tripleo::profile::base::database::mysql::client
diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml
index 808f1353..7078b60f 100644
--- a/puppet/services/database/mysql.yaml
+++ b/puppet/services/database/mysql.yaml
@@ -23,6 +23,10 @@ parameters:
description: Configures MySQL max_connections config setting
type: number
default: 4096
+ MysqlIncreaseFileLimit:
+ description: Flag to increase MySQL open-files-limit to 16384
+ type: boolean
+ default: true
MysqlRootPassword:
type: string
hidden: true
@@ -96,6 +100,8 @@ outputs:
$NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
tripleo::profile::base::database::mysql::client_bind_address:
{get_param: [ServiceNetMap, MysqlNetwork]}
+ tripleo::profile::base::database::mysql::generate_dropin_file_limit:
+ {get_param: MysqlIncreaseFileLimit}
step_config: |
include ::tripleo::profile::base::database::mysql
metadata_settings:
diff --git a/puppet/services/database/redis-base.yaml b/puppet/services/database/redis-base.yaml
index 2b7dd430..af89ffb1 100644
--- a/puppet/services/database/redis-base.yaml
+++ b/puppet/services/database/redis-base.yaml
@@ -42,3 +42,4 @@ outputs:
redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
+ redis::sentinel::sentinel_bind: {get_param: [ServiceNetMap, RedisNetwork]}
diff --git a/puppet/services/disabled/glance-registry.yaml b/puppet/services/disabled/glance-registry.yaml
index 4d22bddc..7bf4a1fd 100644
--- a/puppet/services/disabled/glance-registry.yaml
+++ b/puppet/services/disabled/glance-registry.yaml
@@ -26,5 +26,5 @@ outputs:
service_name: glance_registry
upgrade_tasks:
- name: Stop and disable glance_registry service on upgrade
- tags: step2
+ tags: step1
service: name=openstack-glance-registry state=stopped enabled=no
diff --git a/puppet/services/docker.yaml b/puppet/services/docker.yaml
new file mode 100644
index 00000000..e7da2383
--- /dev/null
+++ b/puppet/services/docker.yaml
@@ -0,0 +1,43 @@
+heat_template_version: ocata
+
+description: >
+ Configures docker on the host
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: tripleoupstream
+ type: string
+ DockerNamespaceIsRegistry:
+ type: boolean
+ default: false
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the docker service
+ value:
+ service_name: docker
+ config_settings:
+ tripleo::profile::base::docker::docker_namespace: {get_param: DockerNamespace}
+ tripleo::profile::base::docker::insecure_registry: {get_param: DockerNamespaceIsRegistry}
+ step_config: |
+ include ::tripleo::profile::base::docker
+ upgrade_tasks:
+ - name: Install docker packages on upgrade if missing
+ tags: step3
+ yum: name=docker state=latest
+
diff --git a/puppet/services/ec2-api.yaml b/puppet/services/ec2-api.yaml
index 002342b6..10f6d311 100644
--- a/puppet/services/ec2-api.yaml
+++ b/puppet/services/ec2-api.yaml
@@ -72,13 +72,13 @@ outputs:
ec2api::api::ec2api_listen:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, Ec2ApiNetwork]}
ec2api::metadata::metadata_listen:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, Ec2ApiMetadataNetwork]}
ec2api::db::database_connection:
@@ -90,8 +90,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ec2_api'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
-
if:
- nova_workers_zero
@@ -116,3 +115,24 @@ outputs:
ec2api::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ upgrade_tasks:
+ - name: Check if ec2-api is deployed
+ command: systemctl is-enabled openstack-ec2-api
+ tags: common
+ ignore_errors: True
+ register: ec2_api_enabled
+ - name: "PreUpgrade step0,validation: Check if openstack-ec2-api is running"
+ shell: >
+ /usr/bin/systemctl show 'openstack-ec2-api' --property ActiveState |
+ grep '\bactive\b'
+ when: ec2_api_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop openstack-ec2-api service
+ tags: step1
+ when: ec2_api_enabled.rc == 0
+ service: name=openstack-ec2-api state=stopped
+ - name: Install openstack-ec2-api package if it was disabled
+ tags: step3
+ yum: name=openstack-ec2-api state=latest
+ when: ec2_api_enabled.rc != 0
+
diff --git a/puppet/services/etcd.yaml b/puppet/services/etcd.yaml
index f96fa723..5db8bec0 100644
--- a/puppet/services/etcd.yaml
+++ b/puppet/services/etcd.yaml
@@ -19,9 +19,9 @@ parameters:
via parameter_defaults in the resource registry.
type: json
EtcdInitialClusterToken:
- default: 'etcd-tripleo'
description: Initial cluster token for the etcd cluster during bootstrap.
type: string
+ hidden: true
MonitoringSubscriptionEtcd:
default: 'overcloud-etcd'
type: string
@@ -36,7 +36,7 @@ outputs:
etcd::etcd_name:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
@@ -56,3 +56,18 @@ outputs:
- 2380
step_config: |
include ::tripleo::profile::base::etcd
+ upgrade_tasks:
+ - name: Check if etcd is deployed
+ command: systemctl is-enabled etcd
+ tags: step0,validation
+ ignore_errors: True
+ register: etcd_enabled
+ - name: "PreUpgrade step0,validation: Check if etcd is running"
+ shell: >
+ /usr/bin/systemctl show 'etcd' --property ActiveState |
+ grep '\bactive\b'
+ when: etcd_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop etcd service
+ tags: step2
+ service: name=etcd state=stopped
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index c4f97d54..b06f9993 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -48,6 +48,68 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ CephClientUserName:
+ default: openstack
+ type: string
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ GlanceNotifierStrategy:
+ description: Strategy to use for Glance notification queue
+ type: string
+ default: noop
+ GlanceLogFile:
+ description: The filepath of the file to use for logging messages from Glance.
+ type: string
+ default: ''
+ GlanceBackend:
+ default: swift
+ description: The short name of the Glance backend to use. Should be one
+ of swift, rbd, or file
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd']
+ GlanceNfsEnabled:
+ default: false
+ description: >
+ When using GlanceBackend 'file', mount NFS share for image storage.
+ type: boolean
+ GlanceNfsShare:
+ default: ''
+ description: >
+ NFS share to mount for image storage (when GlanceNfsEnabled is true)
+ type: string
+ GlanceNfsOptions:
+ default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
+ description: >
+ NFS mount options for image storage (when GlanceNfsEnabled is true)
+ type: string
+ GlanceRbdPoolName:
+ default: images
+ type: string
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
conditions:
use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
@@ -62,13 +124,6 @@ resources:
EndpointMap: {get_param: EndpointMap}
EnableInternalTLS: {get_param: EnableInternalTLS}
- GlanceBase:
- type: ./glance-base.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
outputs:
role_data:
description: Role data for the Glance API role.
@@ -80,7 +135,6 @@ outputs:
- glance
config_settings:
map_merge:
- - get_attr: [GlanceBase, role_data, config_settings]
- get_attr: [TLSProxyBase, role_data, config_settings]
- glance::api::database_connection:
list_join:
@@ -91,8 +145,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/glance'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
glance::api::bind_port: {get_param: [EndpointMap, GlanceInternal, port]}
glance::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
glance::api::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
@@ -133,17 +186,62 @@ outputs:
- use_tls_proxy
- 'localhost'
- {get_param: [ServiceNetMap, GlanceApiNetwork]}
+ glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
+ glance_log_file: {get_param: GlanceLogFile}
+ glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ glance::backend::swift::swift_store_user: service:glance
+ glance::backend::swift::swift_store_key: {get_param: GlancePassword}
+ glance::backend::swift::swift_store_create_container_on_put: true
+ glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+ glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
+ glance_backend: {get_param: GlanceBackend}
+ glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
+ glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
+ glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
+ glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ glance::notify::rabbitmq::notification_driver: messagingv2
+ tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
+ tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
+ tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
+ service_config_settings:
+ keystone:
+ glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
+ glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
+ glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
+ glance::keystone::auth::password: {get_param: GlancePassword }
+ glance::keystone::auth::region: {get_param: KeystoneRegion}
+ glance::keystone::auth::tenant: 'service'
+ mysql:
+ glance::db::mysql::password: {get_param: GlancePassword}
+ glance::db::mysql::user: glance
+ glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ glance::db::mysql::dbname: glance
+ glance::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
step_config: |
include ::tripleo::profile::base::glance::api
- service_config_settings:
- get_attr: [GlanceBase, role_data, service_config_settings]
upgrade_tasks:
+ - name: Check if glance_api is deployed
+ command: systemctl is-enabled openstack-glance-api
+ tags: common
+ ignore_errors: True
+ register: glance_api_enabled
+      # TODO: Remove all glance-registry bits in Pike.
+ - name: Check if glance_registry is deployed
+ command: systemctl is-enabled openstack-glance-registry
+ tags: common
+ ignore_errors: True
+ register: glance_registry_enabled
- name: "PreUpgrade step0,validation: Check service openstack-glance-api is running"
shell: /usr/bin/systemctl show 'openstack-glance-api' --property ActiveState | grep '\bactive\b'
tags: step0,validation
+ when: glance_api_enabled.rc == 0
- name: Stop glance_api service
- tags: step2
+ tags: step1
+ when: glance_api_enabled.rc == 0
service: name=openstack-glance-api state=stopped
- name: Stop and disable glance registry (removed for Ocata)
- tags: step2
+ tags: step1
+ when: glance_registry_enabled.rc == 0
service: name=openstack-glance-registry state=stopped enabled=no
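The database_connection change above (repeated for most services in this
series) stops interpolating the per-node client bind address into the URL via
hiera and instead points the MySQL client library at an option file. Roughly,
the rendered hiera value ends up looking like the sketch below; host and
password are placeholders, and it is assumed that /etc/my.cnf.d/tripleo.cnf is
written out elsewhere with a [tripleo] group carrying per-node client settings
such as bind-address:

    glance::api::database_connection: >-
      mysql+pymysql://glance:<password>@<mysql-vip>/glance?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo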
diff --git a/puppet/services/glance-base.yaml b/puppet/services/glance-base.yaml
deleted file mode 100644
index f5548982..00000000
--- a/puppet/services/glance-base.yaml
+++ /dev/null
@@ -1,126 +0,0 @@
-heat_template_version: ocata
-
-description: >
- OpenStack Glance Common settings with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- CephClientUserName:
- default: openstack
- type: string
- Debug:
- default: ''
- description: Set to True to enable debugging on all services.
- type: string
- GlanceNotifierStrategy:
- description: Strategy to use for Glance notification queue
- type: string
- default: noop
- GlanceLogFile:
- description: The filepath of the file to use for logging messages from Glance.
- type: string
- default: ''
- GlancePassword:
- description: The password for the glance service and db account, used by the glance services.
- type: string
- hidden: true
- GlanceBackend:
- default: swift
- description: The short name of the Glance backend to use. Should be one
- of swift, rbd, or file
- type: string
- constraints:
- - allowed_values: ['swift', 'file', 'rbd']
- GlanceNfsEnabled:
- default: false
- description: >
- When using GlanceBackend 'file', mount NFS share for image storage.
- type: boolean
- GlanceNfsShare:
- default: ''
- description: >
- NFS share to mount for image storage (when GlanceNfsEnabled is true)
- type: string
- GlanceNfsOptions:
- default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
- description: >
- NFS mount options for image storage (when GlanceNfsEnabled is true)
- type: string
- GlanceRbdPoolName:
- default: images
- type: string
- RabbitPassword:
- description: The password for RabbitMQ
- type: string
- hidden: true
- RabbitUserName:
- default: guest
- description: The username for RabbitMQ
- type: string
- RabbitClientPort:
- default: 5672
- description: Set rabbit subscriber port, change this if using SSL
- type: number
- RabbitClientUseSSL:
- default: false
- description: >
- Rabbit client subscriber parameter to specify
- an SSL connection to the RabbitMQ host.
- type: string
- KeystoneRegion:
- type: string
- default: 'regionOne'
- description: Keystone region for endpoint
-
-outputs:
- role_data:
- description: Role data for the Glance common role.
- value:
- service_name: glance_base
- config_settings:
- glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
- glance_log_file: {get_param: GlanceLogFile}
- glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
- glance::backend::swift::swift_store_user: service:glance
- glance::backend::swift::swift_store_key: {get_param: GlancePassword}
- glance::backend::swift::swift_store_create_container_on_put: true
- glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
- glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
- glance_backend: {get_param: GlanceBackend}
- glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
- glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
- glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
- glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- glance::notify::rabbitmq::notification_driver: messagingv2
- tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
- tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
- tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
- service_config_settings:
- keystone:
- glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
- glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
- glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
- glance::keystone::auth::password: {get_param: GlancePassword }
- glance::keystone::auth::region: {get_param: KeystoneRegion}
- glance::keystone::auth::tenant: 'service'
- mysql:
- glance::db::mysql::password: {get_param: GlancePassword}
- glance::db::mysql::user: glance
- glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- glance::db::mysql::dbname: glance
- glance::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml
index 22c0967e..f4629917 100644
--- a/puppet/services/gnocchi-api.yaml
+++ b/puppet/services/gnocchi-api.yaml
@@ -83,10 +83,12 @@ outputs:
gnocchi::api::enabled: true
gnocchi::api::enable_proxy_headers_parsing: true
gnocchi::api::service_name: 'httpd'
- gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword}
gnocchi::keystone::authtoken::project_name: 'service'
+ gnocchi::keystone::authtoken::user_domain_name: 'Default'
+ gnocchi::keystone::authtoken::project_domain_name: 'Default'
gnocchi::wsgi::apache::ssl: {get_param: EnableInternalTLS}
gnocchi::wsgi::apache::servername:
str_replace:
@@ -103,10 +105,6 @@ outputs:
# internal_api_subnet - > IP/CIDR
gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
gnocchi::wsgi::apache::wsgi_process_display_name: 'gnocchi_wsgi'
-
- gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
- gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri]}
step_config: |
include ::tripleo::profile::base::gnocchi::api
service_config_settings:
@@ -129,5 +127,5 @@ outputs:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- name: Stop gnocchi_api service (running under httpd)
- tags: step2
+ tags: step1
service: name=httpd state=stopped
diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml
index 8fddae4b..b45c084a 100644
--- a/puppet/services/gnocchi-base.yaml
+++ b/puppet/services/gnocchi-base.yaml
@@ -32,10 +32,6 @@ parameters:
CephClientUserName:
default: openstack
type: string
- KeystoneRegion:
- type: string
- default: 'regionOne'
- description: Keystone region for endpoint
RedisPassword:
description: The password for the redis service account.
type: string
@@ -67,12 +63,12 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/gnocchi'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
gnocchi::db::sync::extra_opts: '--skip-storage'
gnocchi::storage::swift::swift_user: 'service:gnocchi'
- gnocchi::storage::swift::swift_auth_version: 2
+ gnocchi::storage::swift::swift_auth_version: 3
gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword}
+ gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName}
gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName}
gnocchi::storage::ceph::ceph_keyring:
diff --git a/puppet/services/gnocchi-metricd.yaml b/puppet/services/gnocchi-metricd.yaml
index 1337b0cb..9d76c2e7 100644
--- a/puppet/services/gnocchi-metricd.yaml
+++ b/puppet/services/gnocchi-metricd.yaml
@@ -47,9 +47,16 @@ outputs:
step_config: |
include ::tripleo::profile::base::gnocchi::metricd
upgrade_tasks:
+ - name: Check if gnocchi_metricd is deployed
+ command: systemctl is-enabled openstack-gnocchi-metricd
+ tags: common
+ ignore_errors: True
+ register: gnocchi_metricd_enabled
- name: "PreUpgrade step0,validation: Check service openstack-gnocchi-metricd is running"
shell: /usr/bin/systemctl show 'openstack-gnocchi-metricd' --property ActiveState | grep '\bactive\b'
+ when: gnocchi_metricd_enabled.rc == 0
tags: step0,validation
- name: Stop gnocchi_metricd service
- tags: step2
+ tags: step1
+ when: gnocchi_metricd_enabled.rc == 0
service: name=openstack-gnocchi-metricd state=stopped
diff --git a/puppet/services/gnocchi-statsd.yaml b/puppet/services/gnocchi-statsd.yaml
index 41222a79..bb8d3bce 100644
--- a/puppet/services/gnocchi-statsd.yaml
+++ b/puppet/services/gnocchi-statsd.yaml
@@ -46,9 +46,16 @@ outputs:
step_config: |
include ::tripleo::profile::base::gnocchi::statsd
upgrade_tasks:
+ - name: Check if gnocchi_statsd is deployed
+ command: systemctl is-enabled openstack-gnocchi-statsd
+ tags: common
+ ignore_errors: True
+ register: gnocchi_statsd_enabled
- name: "PreUpgrade step0,validation: Check service openstack-gnocchi-statsd is running"
shell: /usr/bin/systemctl show 'openstack-gnocchi-statsd' --property ActiveState | grep '\bactive\b'
+ when: gnocchi_statsd_enabled.rc == 0
tags: step0,validation
- name: Stop gnocchi_statsd service
- tags: step2
+ tags: step1
+ when: gnocchi_statsd_enabled.rc == 0
service: name=openstack-gnocchi-statsd state=stopped
diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml
index 358698dd..bd5b9ef6 100644
--- a/puppet/services/haproxy.yaml
+++ b/puppet/services/haproxy.yaml
@@ -78,14 +78,22 @@ outputs:
step_config: |
include ::tripleo::profile::base::haproxy
upgrade_tasks:
+ - name: Check if haproxy is deployed
+ command: systemctl is-enabled haproxy
+ tags: common
+ ignore_errors: True
+ register: haproxy_enabled
- name: "PreUpgrade step0,validation: Check service haproxy is running"
shell: /usr/bin/systemctl show 'haproxy' --property ActiveState | grep '\bactive\b'
+ when: haproxy_enabled.rc == 0
tags: step0,validation
- name: Stop haproxy service
- tags: step1
+ tags: step2
+ when: haproxy_enabled.rc == 0
service: name=haproxy state=stopped
- name: Start haproxy service
tags: step4 # Needed at step 4 for mysql
+ when: haproxy_enabled.rc == 0
service: name=haproxy state=started
metadata_settings:
yaql:
diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml
index 7bd2fcf1..c4d44853 100644
--- a/puppet/services/heat-api-cfn.yaml
+++ b/puppet/services/heat-api-cfn.yaml
@@ -38,8 +38,23 @@ parameters:
default:
tag: openstack.heat.api.cfn
path: /var/log/heat/heat-api-cfn.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]}
resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
HeatBase:
type: ./heat-base.yaml
properties:
@@ -59,19 +74,32 @@ outputs:
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- - heat::api_cfn::workers: {get_param: HeatWorkers}
- tripleo.heat_api_cfn.firewall_rules:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.heat_api_cfn.firewall_rules:
'125 heat_cfn':
dport:
- 8000
- 13800
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
+ heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+ heat::wsgi::apache_api_cfn::ssl: {get_param: EnableInternalTLS}
+ heat::api_cfn::service_name: 'httpd'
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+ heat::wsgi::apache_api_cfn::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+ -
+ if:
+ - heat_workers_zero
+ - {}
+ - heat::wsgi::apache_api_cfn::workers: {get_param: HeatWorkers}
step_config: |
include ::tripleo::profile::base::heat::api_cfn
service_config_settings:
@@ -85,9 +113,25 @@ outputs:
heat::keystone::auth_cfn::password: {get_param: HeatPassword}
heat::keystone::auth_cfn::region: {get_param: KeystoneRegion}
upgrade_tasks:
+ - name: Check if heat_api_cfn is deployed
+ command: systemctl is-enabled openstack-heat-api-cfn
+ tags: common
+ ignore_errors: True
+ register: heat_api_cfn_enabled
- name: "PreUpgrade step0,validation: Check service openstack-heat-api-cfn is running"
- shell: echo o/ #TODO uncomment when /#/c/423302/ : /usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\bactive\b'
+ shell: /usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\bactive\b'
+ when: heat_api_cfn_enabled.rc == 0
tags: step0,validation
- - name: Stop heat_api_cfn service
- tags: step2
- service: name=openstack-heat-api-cfn state=stopped
+ - name: check for heat_api_cfn running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_cfn_wsgi"
+ register: heat_api_cfn_apache
+ ignore_errors: true
+ - name: Stop heat_api_cfn service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: heat_api_cfn_apache.rc == 0
+ - name: Stop and disable heat_api_cfn service (pre-upgrade not under httpd)
+ tags: step1
+        when: heat_api_cfn_enabled.rc == 0
+ service: name=openstack-heat-api-cfn state=stopped enabled=no
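The heat_workers_zero condition used above deserves a small illustration: when
HeatWorkers is left at 0, the "if" intrinsic contributes an empty map to the
map_merge, so no worker count is forced and the puppet module's own default
applies; any non-zero value produces the hiera key. A minimal Heat fragment
showing the shape of the construct (names taken from this file):

    conditions:
      heat_workers_zero: {equals: [{get_param: HeatWorkers}, 0]}

    # ...later, under role_data -> config_settings:
    map_merge:
      - heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
      - if:
          - heat_workers_zero
          - {}                    # merge nothing, keep the puppet default
          - heat::wsgi::apache_api_cfn::workers: {get_param: HeatWorkers}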
diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml
index 0954ad19..7f8fa1fe 100644
--- a/puppet/services/heat-api-cloudwatch.yaml
+++ b/puppet/services/heat-api-cloudwatch.yaml
@@ -30,8 +30,23 @@ parameters:
default:
tag: openstack.heat.api.cloudwatch
path: /var/log/heat/heat-api-cloudwatch.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]}
resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
HeatBase:
type: ./heat-base.yaml
properties:
@@ -51,25 +66,56 @@ outputs:
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- - heat::api_cloudwatch::workers: {get_param: HeatWorkers}
- tripleo.heat_api_cloudwatch.firewall_rules:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.heat_api_cloudwatch.firewall_rules:
'125 heat_cloudwatch':
dport:
- 8003
- 13003
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
+ heat::api_cloudwatch::bind_host:
+ get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]
+ heat::wsgi::apache_api_cloudwatch::ssl: {get_param: EnableInternalTLS}
+ heat::api_cloudwatch::service_name: 'httpd'
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- heat::api_cloudwatch::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api_cloudwatch::bind_host:
+ get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]
+ heat::wsgi::apache_api_cloudwatch::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]}
+ -
+ if:
+ - heat_workers_zero
+ - {}
+ - heat::wsgi::apache_api_cloudwatch::workers: {get_param: HeatWorkers}
step_config: |
include ::tripleo::profile::base::heat::api_cloudwatch
upgrade_tasks:
+ - name: Check if heat_api_cloudwatch is deployed
+ command: systemctl is-enabled openstack-heat-api-cloudwatch
+ tags: common
+ ignore_errors: True
+ register: heat_api_cloudwatch_enabled
- name: "PreUpgrade step0,validation: Check service openstack-heat-api-cloudwatch is running"
- shell: echo o/ #TODO uncomment when /#/c/423302/ : /usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\bactive\b'
+ shell: /usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\bactive\b'
+ when: heat_api_cloudwatch_enabled.rc == 0
tags: step0,validation
- - name: Stop heat_api_cloudwatch service
- tags: step2
- service: name=openstack-heat-api-cloudwatch state=stopped
+ - name: check for heat_api_cloudwatch running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_cloudwatch_wsgi"
+ register: heat_api_cloudwatch_apache
+ ignore_errors: true
+ - name: Stop heat_api_cloudwatch service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: heat_api_cloudwatch_apache.rc == 0
+ - name: Stop and disable heat_api_cloudwatch service (pre-upgrade not under httpd)
+ tags: step1
+ when: heat_api_cloudwatch_enabled.rc == 0
+ service: name=openstack-heat-api-cloudwatch state=stopped enabled=no
diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml
index ae656b1e..e21369e8 100644
--- a/puppet/services/heat-api.yaml
+++ b/puppet/services/heat-api.yaml
@@ -38,8 +38,23 @@ parameters:
default:
tag: openstack.heat.api
path: /var/log/heat/heat-api.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]}
resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
HeatBase:
type: ./heat-base.yaml
properties:
@@ -59,19 +74,32 @@ outputs:
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- - heat::api::workers: {get_param: HeatWorkers}
- tripleo.heat_api.firewall_rules:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.heat_api.firewall_rules:
'125 heat_api':
dport:
- 8004
- 13004
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
+ heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
+ heat::api::service_name: 'httpd'
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ -
+ if:
+ - heat_workers_zero
+ - {}
+ - heat::wsgi::apache_api::workers: {get_param: HeatWorkers}
step_config: |
include ::tripleo::profile::base::heat::api
service_config_settings:
@@ -85,9 +113,25 @@ outputs:
heat::keystone::auth::password: {get_param: HeatPassword}
heat::keystone::auth::region: {get_param: KeystoneRegion}
upgrade_tasks:
+      - name: Check if heat_api is deployed
+ command: systemctl is-enabled openstack-heat-api
+ tags: common
+ ignore_errors: True
+ register: heat_api_enabled
- name: "PreUpgrade step0,validation: Check service openstack-heat-api is running"
- shell: echo o/ #TODO uncomment when /#/c/423302/ : /usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\bactive\b'
+ shell: /usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\bactive\b'
+ when: heat_api_enabled.rc == 0
tags: step0,validation
- - name: Stop heat_api service
- tags: step2
- service: name=openstack-heat-api state=stopped
+ - name: check for heat_api running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_wsgi"
+ register: heat_api_apache
+ ignore_errors: true
+ - name: Stop heat_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: heat_api_apache.rc == 0
+ - name: Stop and disable heat_api service (pre-upgrade not under httpd)
+ tags: step1
+ when: heat_api_enabled.rc == 0
+ service: name=openstack-heat-api state=stopped enabled=no
diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml
index b4d314f4..6ada9c25 100644
--- a/puppet/services/heat-base.yaml
+++ b/puppet/services/heat-base.yaml
@@ -99,6 +99,10 @@ parameters:
description: >
Cron to purge db entries marked as deleted and older than $age - Log destination
default: '/dev/null'
+ HeatMaxJsonBodySize:
+ default: 1048576
+ description: Maximum raw byte size of the Heat API JSON request body.
+ type: number
outputs:
role_data:
@@ -121,7 +125,9 @@ outputs:
value: 'role:admin'
heat::rabbit_heartbeat_timeout_threshold: 60
heat::keystone::authtoken::project_name: 'service'
- heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ heat::keystone::authtoken::user_domain_name: 'Default'
+ heat::keystone::authtoken::project_domain_name: 'Default'
+ heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
heat::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
heat::keystone::authtoken::password: {get_param: HeatPassword}
heat::keystone::domain::domain_name: 'heat_stack'
@@ -142,6 +148,7 @@ outputs:
heat::cron::purge_deleted::age: {get_param: HeatCronPurgeDeletedAge}
heat::cron::purge_deleted::age_type: {get_param: HeatCronPurgeDeletedAgeType}
heat::cron::purge_deleted::destination: {get_param: HeatCronPurgeDeletedDestination}
+ heat::max_json_body_size: {get_param: HeatMaxJsonBodySize}
service_config_settings:
keystone:
tripleo::profile::base::keystone::heat_admin_domain: 'heat_stack'
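The new HeatMaxJsonBodySize parameter maps one-to-one onto
heat::max_json_body_size. As a usage sketch, an environment file raising the
limit for deployments that push unusually large templates or environments
through the heat API (the value here is only an example):

    parameter_defaults:
      HeatMaxJsonBodySize: 4194304   # 4 MiB instead of the 1 MiB default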
diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml
index e85b7537..a166f3a7 100644
--- a/puppet/services/heat-engine.yaml
+++ b/puppet/services/heat-engine.yaml
@@ -111,8 +111,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/heat'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
heat::engine::auth_encryption_key:
@@ -138,9 +137,16 @@ outputs:
# This is needed because the keystone profile handles creating the domain
tripleo::profile::base::keystone::heat_admin_password: {get_param: HeatStackDomainAdminPassword}
upgrade_tasks:
+ - name: Check if heat_engine is deployed
+ command: systemctl is-enabled openstack-heat-engine
+ tags: common
+ ignore_errors: True
+ register: heat_engine_enabled
- name: "PreUpgrade step0,validation: Check service openstack-heat-engine is running"
- shell: echo o/ #TODO uncomment when /#/c/423302/ : /usr/bin/systemctl show 'openstack-heat-engine' --property ActiveState | grep '\bactive\b'
+ shell: /usr/bin/systemctl show 'openstack-heat-engine' --property ActiveState | grep '\bactive\b'
+ when: heat_engine_enabled.rc == 0
tags: step0,validation
- name: Stop heat_engine service
- tags: step2
+ tags: step1
+ when: heat_engine_enabled.rc == 0
service: name=openstack-heat-engine state=stopped
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index 2111021b..7ae518b5 100644
--- a/puppet/services/horizon.yaml
+++ b/puppet/services/horizon.yaml
@@ -78,7 +78,7 @@ outputs:
access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
options: ['FollowSymLinks','MultiViews']
horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]}
- horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
horizon::password_validator: {get_param: [HorizonPasswordValidator]}
horizon::password_validator_help: {get_param: [HorizonPasswordValidatorHelp]}
horizon::secret_key:
@@ -96,3 +96,20 @@ outputs:
- horizon::django_debug: {get_param: Debug}
step_config: |
include ::tripleo::profile::base::horizon
+ # Ansible tasks to handle upgrade
+ upgrade_tasks:
+ - name: Check if httpd is deployed
+ command: systemctl is-enabled httpd
+ tags: common
+ ignore_errors: True
+ register: httpd_enabled
+ - name: "PreUpgrade step0,validation: Check if httpd is running"
+ shell: >
+ /usr/bin/systemctl show 'httpd' --property ActiveState |
+ grep '\bactive\b'
+ when: httpd_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop Horizon (under httpd)
+ tags: step1
+ when: httpd_enabled.rc == 0
+ service: name=httpd state=stopped
diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml
index a84df538..e24d0de6 100644
--- a/puppet/services/ironic-api.yaml
+++ b/puppet/services/ironic-api.yaml
@@ -49,8 +49,10 @@ outputs:
- get_attr: [IronicBase, role_data, config_settings]
- ironic::api::authtoken::password: {get_param: IronicPassword}
ironic::api::authtoken::project_name: 'service'
+ ironic::api::authtoken::user_domain_name: 'Default'
+ ironic::api::authtoken::project_domain_name: 'Default'
ironic::api::authtoken::username: 'ironic'
- ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ironic::api::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
@@ -88,5 +90,5 @@ outputs:
- "%{hiera('mysql_bind_host')}"
upgrade_tasks:
- name: Stop ironic_api service
- tags: step2
+ tags: step1
service: name=openstack-ironic-api state=stopped
diff --git a/puppet/services/ironic-base.yaml b/puppet/services/ironic-base.yaml
index ad7ef6ea..d186b047 100644
--- a/puppet/services/ironic-base.yaml
+++ b/puppet/services/ironic-base.yaml
@@ -60,8 +60,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ironic'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
ironic::debug: {get_param: Debug}
ironic::rabbit_userid: {get_param: RabbitUserName}
ironic::rabbit_password: {get_param: RabbitPassword}
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index 739db13c..56e1a90b 100644
--- a/puppet/services/ironic-conductor.yaml
+++ b/puppet/services/ironic-conductor.yaml
@@ -44,6 +44,10 @@ parameters:
default: 8088
description: Port to use for serving images when iPXE is used.
type: string
+ IronicPassword:
+ description: The password for the Ironic service and db account, used by the Ironic services
+ type: string
+ hidden: true
MonitoringSubscriptionIronicConductor:
default: 'overcloud-ironic-conductor'
type: string
@@ -65,9 +69,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [IronicBase, role_data, config_settings]
- # FIXME: I have no idea why neutron_url is in "api" manifest
- - ironic::api::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
- ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
+ - ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase}
ironic::conductor::cleaning_network: {get_param: IronicCleaningNetwork}
ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers}
@@ -104,10 +106,43 @@ outputs:
# the VIP, but rather a real IP of the host.
ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
-
+ # Credentials to access other services
+ ironic::glance::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::glance::username: 'ironic'
+ ironic::glance::password: {get_param: IronicPassword}
+ ironic::glance::project_name: 'service'
+ ironic::glance::user_domain_name: 'Default'
+ ironic::glance::project_domain_name: 'Default'
+ ironic::neutron::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::neutron::username: 'ironic'
+ ironic::neutron::password: {get_param: IronicPassword}
+ ironic::neutron::project_name: 'service'
+ ironic::neutron::user_domain_name: 'Default'
+ ironic::neutron::project_domain_name: 'Default'
+ ironic::service_catalog::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::service_catalog::username: 'ironic'
+ ironic::service_catalog::password: {get_param: IronicPassword}
+ ironic::service_catalog::project_name: 'service'
+ ironic::service_catalog::user_domain_name: 'Default'
+ ironic::service_catalog::project_domain_name: 'Default'
+ ironic::swift::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::swift::username: 'ironic'
+ ironic::swift::password: {get_param: IronicPassword}
+ ironic::swift::project_name: 'service'
+ ironic::swift::user_domain_name: 'Default'
+ ironic::swift::project_domain_name: 'Default'
+ # ironic-inspector support is not implemented, but let's configure
+ # the credentials for consistency.
+ ironic::drivers::inspector::enabled: false
+ ironic::drivers::inspector::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::drivers::inspector::username: 'ironic'
+ ironic::drivers::inspector::password: {get_param: IronicPassword}
+ ironic::drivers::inspector::project_name: 'service'
+ ironic::drivers::inspector::user_domain_name: 'Default'
+ ironic::drivers::inspector::project_domain_name: 'Default'
step_config: |
include ::tripleo::profile::base::ironic::conductor
upgrade_tasks:
- name: Stop ironic_conductor service
- tags: step2
+ tags: step1
service: name=openstack-ironic-conductor state=stopped
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
index 29157959..fec455d1 100644
--- a/puppet/services/kernel.yaml
+++ b/puppet/services/kernel.yaml
@@ -31,6 +31,7 @@ outputs:
config_settings:
kernel_modules:
nf_conntrack: {}
+ ip_conntrack_proto_sctp: {}
sysctl_settings:
net.ipv4.tcp_keepalive_intvl:
value: 1
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index b2374ec4..17616867 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -35,7 +35,7 @@ parameters:
KeystoneTokenProvider:
description: The keystone token format
type: string
- default: 'uuid'
+ default: 'fernet'
constraints:
- allowed_values: ['uuid', 'fernet']
ServiceNetMap:
@@ -193,8 +193,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/keystone'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
keystone::admin_token: {get_param: AdminToken}
keystone::admin_password: {get_param: AdminPassword}
keystone::roles::admin::password: {get_param: AdminPassword}
@@ -227,12 +226,13 @@ outputs:
keystone::endpoint::internal_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
keystone::endpoint::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
keystone::endpoint::region: {get_param: KeystoneRegion}
+ keystone::endpoint::version: ''
keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
keystone::rabbit_heartbeat_timeout_threshold: 60
keystone::cron::token_flush::maxdelay: 3600
keystone::roles::admin::service_tenant: 'service'
keystone::roles::admin::admin_tenant: 'admin'
- keystone::cron::token_flush::destination: '/dev/null'
+ keystone::cron::token_flush::destination: '/var/log/keystone/keystone-tokenflush.log'
keystone::config::keystone_config:
ec2/driver:
value: 'keystone.contrib.ec2.backends.sql.Ec2'
@@ -308,7 +308,7 @@ outputs:
# Ansible tasks to handle upgrade
upgrade_tasks:
- name: Stop keystone service (running under httpd)
- tags: step2
+ tags: step1
service: name=httpd state=stopped
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
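Note that the KeystoneTokenProvider default flips from 'uuid' to 'fernet'
here; 'uuid' stays in allowed_values, so deployments that depend on the old
behaviour can pin it explicitly. A minimal sketch of such an override:

    parameter_defaults:
      # opt back into the previous token format; 'fernet' is now the default
      KeystoneTokenProvider: 'uuid'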
diff --git a/puppet/services/logging/fluentd-client.yaml b/puppet/services/logging/fluentd-client.yaml
index 94c63d33..57595b82 100644
--- a/puppet/services/logging/fluentd-client.yaml
+++ b/puppet/services/logging/fluentd-client.yaml
@@ -63,11 +63,22 @@ outputs:
step_config: |
include ::tripleo::profile::base::logging::fluentd
upgrade_tasks:
+ - name: Check if fluentd_client is deployed
+ command: systemctl is-enabled fluentd
+ tags: common
+ ignore_errors: True
+ register: fluentd_client_enabled
- name: Check status of fluentd service
shell: >
/usr/bin/systemctl show fluentd --property ActiveState |
grep '\bactive\b'
+ when: fluentd_client_enabled.rc == 0
tags: step0,validation
- name: Stop fluentd service
- tags: step2
+ tags: step1
+ when: fluentd_client_enabled.rc == 0
service: name=fluentd state=stopped
+ - name: Install fluentd package if it was disabled
+ tags: step3
+ yum: name=fluentd state=latest
+ when: fluentd_client_enabled.rc != 0
diff --git a/puppet/services/manila-api.yaml b/puppet/services/manila-api.yaml
index 7b78c82e..4061ca28 100644
--- a/puppet/services/manila-api.yaml
+++ b/puppet/services/manila-api.yaml
@@ -48,9 +48,11 @@ outputs:
map_merge:
- get_attr: [ManilaBase, role_data, config_settings]
- manila::keystone::authtoken::password: {get_param: ManilaPassword}
- manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ manila::keystone::authtoken::auth_uri: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
manila::keystone::authtoken::project_name: 'service'
+ manila::keystone::authtoken::user_domain_name: 'Default'
+ manila::keystone::authtoken::project_domain_name: 'Default'
tripleo.manila_api.firewall_rules:
'150 manila':
dport:
diff --git a/puppet/services/manila-base.yaml b/puppet/services/manila-base.yaml
index 2a9745a2..c183bc08 100644
--- a/puppet/services/manila-base.yaml
+++ b/puppet/services/manila-base.yaml
@@ -67,8 +67,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/manila'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
service_config_settings:
mysql:
manila::db::mysql::password: {get_param: ManilaPassword}
diff --git a/puppet/services/metrics/collectd.yaml b/puppet/services/metrics/collectd.yaml
index a3e3b842..49b2d4c2 100644
--- a/puppet/services/metrics/collectd.yaml
+++ b/puppet/services/metrics/collectd.yaml
@@ -110,11 +110,22 @@ outputs:
step_config: |
include ::tripleo::profile::base::metrics::collectd
upgrade_tasks:
+ - name: Check if collectd is deployed
+ command: systemctl is-enabled collectd
+ tags: common
+ ignore_errors: True
+ register: collectd_enabled
- name: Check status of collectd service
shell: >
/usr/bin/systemctl show collectd --property ActiveState |
grep '\bactive\b'
+ when: collectd_enabled.rc == 0
tags: step0,validation
- name: Stop collectd service
- tags: step2
+ tags: step1
+ when: collectd_enabled.rc == 0
service: name=collectd state=stopped
+ - name: Install collectd package if it was disabled
+ tags: step3
+ yum: name=collectd state=latest
+ when: collectd_enabled.rc != 0
diff --git a/puppet/services/mistral-api.yaml b/puppet/services/mistral-api.yaml
index daa1dc7c..1c7d6bd3 100644
--- a/puppet/services/mistral-api.yaml
+++ b/puppet/services/mistral-api.yaml
@@ -50,3 +50,22 @@ outputs:
get_attr: [MistralBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::mistral::api
+ upgrade_tasks:
+ - name: Check if mistral api is deployed
+ command: systemctl is-enabled openstack-mistral-api
+ tags: common
+ ignore_errors: True
+ register: mistral_api_enabled
+ - name: "PreUpgrade step0,validation: Check if openstack-mistral-api is running"
+ shell: >
+ /usr/bin/systemctl show 'openstack-mistral-api' --property ActiveState |
+ grep '\bactive\b'
+ when: mistral_api_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop mistral_api service
+ tags: step1
+ service: name=openstack-mistral-api state=stopped
+ - name: Install openstack-mistral-api package if it was disabled
+ tags: step3
+ yum: name=openstack-mistral-api state=latest
+ when: mistral_api_enabled.rc != 0
diff --git a/puppet/services/mistral-base.yaml b/puppet/services/mistral-base.yaml
index 4d020498..e1030346 100644
--- a/puppet/services/mistral-base.yaml
+++ b/puppet/services/mistral-base.yaml
@@ -65,8 +65,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/mistral'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
mistral::rabbit_userid: {get_param: RabbitUserName}
mistral::rabbit_password: {get_param: RabbitPassword}
mistral::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
diff --git a/puppet/services/mistral-engine.yaml b/puppet/services/mistral-engine.yaml
index 4a92b863..03a2a55c 100644
--- a/puppet/services/mistral-engine.yaml
+++ b/puppet/services/mistral-engine.yaml
@@ -36,3 +36,22 @@ outputs:
get_attr: [MistralBase, role_data, config_settings]
step_config: |
include ::tripleo::profile::base::mistral::engine
+ upgrade_tasks:
+ - name: Check if mistral engine is deployed
+ command: systemctl is-enabled openstack-mistral-engine
+ tags: common
+ ignore_errors: True
+ register: mistral_engine_enabled
+ - name: "PreUpgrade step0,validation: Check if openstack-mistral-engine is running"
+ shell: >
+ /usr/bin/systemctl show 'openstack-mistral-engine' --property ActiveState |
+ grep '\bactive\b'
+ when: mistral_engine_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop mistral_engine service
+ tags: step1
+ service: name=openstack-mistral-engine state=stopped
+ - name: Install openstack-mistral-engine package if it was disabled
+ tags: step3
+ yum: name=openstack-mistral-engine state=latest
+ when: mistral_engine_enabled.rc != 0
diff --git a/puppet/services/mistral-executor.yaml b/puppet/services/mistral-executor.yaml
index 6e273b92..0f6adb07 100644
--- a/puppet/services/mistral-executor.yaml
+++ b/puppet/services/mistral-executor.yaml
@@ -36,3 +36,22 @@ outputs:
get_attr: [MistralBase, role_data, config_settings]
step_config: |
include ::tripleo::profile::base::mistral::executor
+ upgrade_tasks:
+ - name: Check if mistral executor is deployed
+ command: systemctl is-enabled openstack-mistral-executor
+ tags: common
+ ignore_errors: True
+ register: mistral_executor_enabled
+ - name: "PreUpgrade step0,validation: Check if openstack-mistral-executor is running"
+ shell: >
+ /usr/bin/systemctl show 'openstack-mistral-executor' --property ActiveState |
+ grep '\bactive\b'
+ when: mistral_executor_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop mistral_executor service
+ tags: step1
+ service: name=openstack-mistral-executor state=stopped
+ - name: Install openstack-mistral-executor package if it was disabled
+ tags: step3
+ yum: name=openstack-mistral-executor state=latest
+ when: mistral_executor_enabled.rc != 0
diff --git a/puppet/services/monitoring/sensu-base.yaml b/puppet/services/monitoring/sensu-base.yaml
index a8303a59..2fa1569c 100644
--- a/puppet/services/monitoring/sensu-base.yaml
+++ b/puppet/services/monitoring/sensu-base.yaml
@@ -29,7 +29,18 @@ parameters:
default: false
description: >
RabbitMQ client subscriber parameter to specify an SSL connection
- to the RabbitMQ host.
+      to the RabbitMQ host. Setting MonitoringRabbitUseSSL to true without
+      specifying a private key or cert chain enables SSL transport,
+      but not cert auth.
+ type: string
+ MonitoringRabbitSSLPrivateKey:
+ default: ''
+ description: Private key to be used by Sensu to connect to RabbitMQ host.
+ type: string
+ MonitoringRabbitSSLCertChain:
+ default: ''
+ description: >
+ Private SSL cert chain to be used by Sensu to connect to RabbitMQ host.
type: string
MonitoringRabbitPassword:
description: The RabbitMQ password used for monitoring purposes.
@@ -71,6 +82,8 @@ outputs:
sensu::rabbitmq_password: {get_param: MonitoringRabbitPassword}
sensu::rabbitmq_port: {get_param: MonitoringRabbitPort}
sensu::rabbitmq_ssl: {get_param: MonitoringRabbitUseSSL}
+ sensu::rabbitmq_ssl_private_key: {get_param: MonitoringRabbitSSLPrivateKey}
+ sensu::rabbitmq_ssl_cert_chain: {get_param: MonitoringRabbitSSLCertChain}
sensu::rabbitmq_user: {get_param: MonitoringRabbitUserName}
sensu::rabbitmq_vhost: {get_param: MonitoringRabbitVhost}
sensu::redact: {get_param: SensuRedactVariables}
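The two new parameters let Sensu present a client key and cert chain to
RabbitMQ; leaving them empty while enabling MonitoringRabbitUseSSL gives
encrypted transport without certificate authentication. An illustrative
environment file (values are placeholders, supply PEM material as your Sensu
setup expects):

    parameter_defaults:
      MonitoringRabbitUseSSL: true
      # leave the next two empty for SSL transport without client-cert auth
      MonitoringRabbitSSLPrivateKey: ''
      MonitoringRabbitSSLCertChain: ''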
diff --git a/puppet/services/monitoring/sensu-client.yaml b/puppet/services/monitoring/sensu-client.yaml
index d74a68a2..aba2b1ed 100644
--- a/puppet/services/monitoring/sensu-client.yaml
+++ b/puppet/services/monitoring/sensu-client.yaml
@@ -63,11 +63,22 @@ outputs:
step_config: |
include ::tripleo::profile::base::monitoring::sensu
upgrade_tasks:
+ - name: Check if sensu_client is deployed
+ command: systemctl is-enabled sensu-client
+ tags: common
+ ignore_errors: True
+ register: sensu_client_enabled
- name: Check status of sensu-client service
shell: >
/usr/bin/systemctl show sensu-client --property ActiveState |
grep '\bactive\b'
+ when: sensu_client_enabled.rc == 0
tags: step0,validation
- name: Stop sensu-client service
- tags: step2
+ tags: step1
+ when: sensu_client_enabled.rc == 0
service: name=sensu-client state=stopped
+ - name: Install sensu package if it was disabled
+ tags: step3
+ yum: name=sensu state=latest
+        when: sensu_client_enabled.rc != 0
diff --git a/puppet/services/neutron-api.yaml b/puppet/services/neutron-api.yaml
index b3a07fb0..7a24ffdd 100644
--- a/puppet/services/neutron-api.yaml
+++ b/puppet/services/neutron-api.yaml
@@ -57,6 +57,9 @@ parameters:
default:
tag: openstack.neutron.api
path: /var/log/neutron/server.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
# DEPRECATED: the following options are deprecated and are currently maintained
# for backwards compatibility. They will be removed in the Ocata cycle.
@@ -71,10 +74,6 @@ parameters:
removed in Ocata. Future releases will enable L3 HA by default if it is
appropriate for the deployment type. Alternate mechanisms will be
available to override.
- EnableInternalTLS:
- type: boolean
- default: false
-
parameter_groups:
- label: deprecated
description: |
@@ -127,20 +126,21 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ovs_neutron'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
- neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
neutron::server::api_workers: {get_param: NeutronWorkers}
neutron::server::rpc_workers: {get_param: NeutronWorkers}
neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
neutron::server::enable_proxy_headers_parsing: true
neutron::keystone::authtoken::password: {get_param: NeutronPassword}
- neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] }
+ neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneInternal, uri_no_suffix ] }
neutron::server::notifications::tenant_name: 'service'
neutron::server::notifications::project_name: 'service'
neutron::server::notifications::password: {get_param: NovaPassword}
neutron::keystone::authtoken::project_name: 'service'
+ neutron::keystone::authtoken::user_domain_name: 'Default'
+ neutron::keystone::authtoken::project_domain_name: 'Default'
neutron::server::sync_db: true
tripleo.neutron_api.firewall_rules:
'114 neutron api':
@@ -190,9 +190,18 @@ outputs:
- '%'
- "%{hiera('mysql_bind_host')}"
upgrade_tasks:
+ - name: Check if neutron_server is deployed
+ command: systemctl is-enabled neutron-server
+ tags: common
+ ignore_errors: True
+ register: neutron_server_enabled
- name: "PreUpgrade step0,validation: Check service neutron-server is running"
shell: /usr/bin/systemctl show 'neutron-server' --property ActiveState | grep '\bactive\b'
+ when: neutron_server_enabled.rc == 0
tags: step0,validation
- name: Stop neutron_api service
- tags: step2
+ tags: step1
+ when: neutron_server_enabled.rc == 0
service: name=neutron-server state=stopped
+ metadata_settings:
+ get_attr: [TLSProxyBase, role_data, metadata_settings]
diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml
index 43657bd9..55361939 100644
--- a/puppet/services/neutron-base.yaml
+++ b/puppet/services/neutron-base.yaml
@@ -24,7 +24,7 @@ parameters:
type: number
NeutronDhcpAgentsPerNetwork:
type: number
- default: 3
+ default: 0
description: The number of neutron dhcp agents to schedule per network
NeutronCorePlugin:
default: 'ml2'
@@ -72,24 +72,31 @@ parameters:
via parameter_defaults in the resource registry.
type: json
+conditions:
+ dhcp_agents_zero: {equals : [{get_param: NeutronDhcpAgentsPerNetwork}, 0]}
+
outputs:
role_data:
description: Role data for the Neutron base service.
value:
service_name: neutron_base
config_settings:
- neutron::rabbit_password: {get_param: RabbitPassword}
- neutron::rabbit_user: {get_param: RabbitUserName}
- neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- neutron::rabbit_port: {get_param: RabbitClientPort}
- neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
- neutron::core_plugin: {get_param: NeutronCorePlugin}
- neutron::service_plugins: {get_param: NeutronServicePlugins}
- neutron::debug: {get_param: Debug}
- neutron::purge_config: {get_param: EnableConfigPurge}
- neutron::allow_overlapping_ips: true
- neutron::rabbit_heartbeat_timeout_threshold: 60
- neutron::host: '%{::fqdn}'
- neutron::db::database_db_max_retries: -1
- neutron::db::database_max_retries: -1
- neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu}
+ map_merge:
+ - neutron::rabbit_password: {get_param: RabbitPassword}
+ neutron::rabbit_user: {get_param: RabbitUserName}
+ neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ neutron::rabbit_port: {get_param: RabbitClientPort}
+ neutron::core_plugin: {get_param: NeutronCorePlugin}
+ neutron::service_plugins: {get_param: NeutronServicePlugins}
+ neutron::debug: {get_param: Debug}
+ neutron::purge_config: {get_param: EnableConfigPurge}
+ neutron::allow_overlapping_ips: true
+ neutron::rabbit_heartbeat_timeout_threshold: 60
+ neutron::host: '%{::fqdn}'
+ neutron::db::database_db_max_retries: -1
+ neutron::db::database_max_retries: -1
+ neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu}
+ - if:
+ - dhcp_agents_zero
+ - {}
+ - tripleo::profile::base::neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
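With this change the NeutronDhcpAgentsPerNetwork default drops from 3 to 0,
and 0 now means "do not set the option at all" thanks to the dhcp_agents_zero
condition, so neutron's own default applies. Operators who relied on three
DHCP agents per network need to request it explicitly, for example:

    parameter_defaults:
      # restore the previous behaviour of scheduling 3 DHCP agents per network;
      # leaving this at 0 now defers to the neutron default instead
      NeutronDhcpAgentsPerNetwork: 3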
diff --git a/puppet/services/neutron-bgpvpn-api.yaml b/puppet/services/neutron-bgpvpn-api.yaml
new file mode 100644
index 00000000..f01cf6f1
--- /dev/null
+++ b/puppet/services/neutron-bgpvpn-api.yaml
@@ -0,0 +1,34 @@
+heat_template_version: ocata
+
+description: >
+ BGPVPN API service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ BgpvpnServiceProvider:
+ default: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
+ description: Backend to use as a service provider for BGPVPN
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the BGPVPN role.
+ value:
+ service_name: neutron_bgpvpn_api
+ config_settings:
+ neutron::services::bgpvpn::service_providers: {get_param: BgpvpnServiceProvider}
+ step_config: |
+ include ::tripleo::profile::base::neutron::bgpvpn
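The new neutron-bgpvpn-api template only takes effect once it is mapped in the
resource registry and included in a role's services. A hypothetical enabling
environment is sketched below; the registry key name follows the usual
OS::TripleO::Services convention but is an assumption here, since no
environment file is part of this diff:

    resource_registry:
      OS::TripleO::Services::NeutronBgpVpnApi: ../puppet/services/neutron-bgpvpn-api.yaml
    parameter_defaults:
      BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'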
diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml
index 062edaa4..fe7f9f31 100644
--- a/puppet/services/neutron-dhcp.yaml
+++ b/puppet/services/neutron-dhcp.yaml
@@ -80,9 +80,16 @@ outputs:
step_config: |
include tripleo::profile::base::neutron::dhcp
upgrade_tasks:
+ - name: Check if neutron_dhcp_agent is deployed
+ command: systemctl is-enabled neutron-dhcp-agent
+ tags: common
+ ignore_errors: True
+ register: neutron_dhcp_agent_enabled
- name: "PreUpgrade step0,validation: Check service neutron-dhcp-agent is running"
shell: /usr/bin/systemctl show 'neutron-dhcp-agent' --property ActiveState | grep '\bactive\b'
+ when: neutron_dhcp_agent_enabled.rc == 0
tags: step0,validation
- name: Stop neutron_dhcp service
- tags: step2
+ tags: step1
+ when: neutron_dhcp_agent_enabled.rc == 0
service: name=neutron-dhcp-agent state=stopped
diff --git a/puppet/services/neutron-l3-compute-dvr.yaml b/puppet/services/neutron-l3-compute-dvr.yaml
index 06927fe0..1d6a2371 100644
--- a/puppet/services/neutron-l3-compute-dvr.yaml
+++ b/puppet/services/neutron-l3-compute-dvr.yaml
@@ -22,10 +22,6 @@ parameters:
Debug:
type: string
default: ''
- NeutronExternalNetworkBridge:
- description: Name of bridge used for external network traffic.
- type: string
- default: 'br-ex'
MonitoringSubscriptionNeutronL3Dvr:
default: 'overcloud-neutron-l3-dvr'
type: string
@@ -35,6 +31,19 @@ parameters:
tag: openstack.neutron.agent.l3-compute
path: /var/log/neutron/l3-agent.log
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Pike cycle.
+ NeutronExternalNetworkBridge:
+    description: Name of bridge used for external network traffic. Usually the L2
+                 agent handles port wiring into the external bridge, and hence the
+                 parameter should be left unset.
+ type: string
+ default: ''
+
+conditions:
+
+ external_network_bridge_empty: {equals : [{get_param: NeutronExternalNetworkBridge}, "''"]}
+
resources:
NeutronBase:
@@ -56,7 +65,11 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
- neutron::agents::l3::agent_mode : 'dvr'
+ - neutron::agents::l3::agent_mode : 'dvr'
+ -
+ if:
+ - external_network_bridge_empty
+ - {}
+ - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
step_config: |
include tripleo::profile::base::neutron::l3
diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml
index 69803551..cd9870bd 100644
--- a/puppet/services/neutron-l3.yaml
+++ b/puppet/services/neutron-l3.yaml
@@ -21,10 +21,6 @@ parameters:
Debug:
type: string
default: ''
- NeutronExternalNetworkBridge:
- description: Name of bridge used for external network traffic.
- type: string
- default: 'br-ex'
NeutronL3AgentMode:
description: |
Agent mode for L3 agent. Must be one of legacy or dvr_snat.
@@ -43,6 +39,15 @@ parameters:
tag: openstack.neutron.agent.l3
path: /var/log/neutron/l3-agent.log
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Pike cycle.
+ NeutronExternalNetworkBridge:
+    description: Name of bridge used for external network traffic. Usually the L2
+                 agent handles port wiring into the external bridge, and hence the
+                 parameter should be left unset.
+ type: string
+ default: ''
+
conditions:
external_network_bridge_empty: {equals : [{get_param: NeutronExternalNetworkBridge}, "''"]}
@@ -80,9 +85,16 @@ outputs:
step_config: |
include tripleo::profile::base::neutron::l3
upgrade_tasks:
+ - name: Check if neutron_l3_agent is deployed
+ command: systemctl is-enabled neutron-l3-agent
+ tags: common
+ ignore_errors: True
+ register: neutron_l3_agent_enabled
- name: "PreUpgrade step0,validation: Check service neutron-l3-agent is running"
shell: /usr/bin/systemctl show 'neutron-l3-agent' --property ActiveState | grep '\bactive\b'
+ when: neutron_l3_agent_enabled.rc == 0
tags: step0,validation
- name: Stop neutron_l3 service
- tags: step2
+ tags: step1
+ when: neutron_l3_agent_enabled.rc == 0
service: name=neutron-l3-agent state=stopped
diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml
index 6f5debdd..32ef567c 100644
--- a/puppet/services/neutron-metadata.yaml
+++ b/puppet/services/neutron-metadata.yaml
@@ -76,9 +76,16 @@ outputs:
step_config: |
include tripleo::profile::base::neutron::metadata
upgrade_tasks:
+ - name: Check if neutron_metadata_agent is deployed
+ command: systemctl is-enabled neutron-metadata-agent
+ tags: common
+ ignore_errors: True
+ register: neutron_metadata_agent_enabled
- name: "PreUpgrade step0,validation: Check service neutron-metadata-agent is running"
shell: /usr/bin/systemctl show 'neutron-metadata-agent' --property ActiveState | grep '\bactive\b'
+ when: neutron_metadata_agent_enabled.rc == 0
tags: step0,validation
- name: Stop neutron_metadata service
- tags: step2
+ tags: step1
+ when: neutron_metadata_agent_enabled.rc == 0
service: name=neutron-metadata-agent state=stopped
diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml
index c27bb909..01471ba2 100644
--- a/puppet/services/neutron-ovs-agent.yaml
+++ b/puppet/services/neutron-ovs-agent.yaml
@@ -121,9 +121,16 @@ outputs:
step_config: |
include ::tripleo::profile::base::neutron::ovs
upgrade_tasks:
+ - name: Check if neutron_ovs_agent is deployed
+ command: systemctl is-enabled neutron-openvswitch-agent
+ tags: common
+ ignore_errors: True
+ register: neutron_ovs_agent_enabled
- name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
+ when: neutron_ovs_agent_enabled.rc == 0
tags: step0,validation
- name: Stop neutron_ovs_agent service
- tags: step2
+ tags: step1
+ when: neutron_ovs_agent_enabled.rc == 0
service: name=neutron-openvswitch-agent state=stopped
diff --git a/puppet/services/neutron-ovs-dpdk-agent.yaml b/puppet/services/neutron-ovs-dpdk-agent.yaml
index e25bc495..2c7ab57c 100644
--- a/puppet/services/neutron-ovs-dpdk-agent.yaml
+++ b/puppet/services/neutron-ovs-dpdk-agent.yaml
@@ -69,7 +69,10 @@ outputs:
service_name: neutron_ovs_dpdk_agent
config_settings:
map_merge:
- - get_attr: [NeutronOvsAgent, role_data, config_settings]
+ - map_replace:
+ - get_attr: [NeutronOvsAgent, role_data, config_settings]
+ - keys:
+ tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
- neutron::agents::ml2::ovs::enable_dpdk: true
neutron::agents::ml2::ovs::datapath_type: {get_param: NeutronDatapathType}
neutron::agents::ml2::ovs::vhostuser_socket_dir: {get_param: NeutronVhostuserSocketDir}
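Editor's note: map_replace is the Heat intrinsic doing the work here: given an input map and a keys mapping, it renames matching keys while leaving their values untouched. A small self-contained sketch (the rule content is invented for illustration):

    map_replace:
      - tripleo.neutron_ovs_agent.firewall_rules:
          '118 neutron vxlan networks': {proto: udp, dport: 4789}
        other_setting: 1
      - keys:
          tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
    # result: {tripleo.neutron_ovs_dpdk_agent.firewall_rules: {'118 neutron vxlan networks': {proto: udp, dport: 4789}},
    #          other_setting: 1}

This is what lets the DPDK service expose the inherited rules under its own service-scoped key so they are actually applied (see the fix-neutron-dpdk-firewall release note later in this diff).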
diff --git a/puppet/services/neutron-plugin-plumgrid.yaml b/puppet/services/neutron-plugin-plumgrid.yaml
index ad1dcfb0..f948dd07 100644
--- a/puppet/services/neutron-plugin-plumgrid.yaml
+++ b/puppet/services/neutron-plugin-plumgrid.yaml
@@ -100,8 +100,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ovs_neutron'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
neutron::plugins::plumgrid::controller_priv_host: {get_param: [EndpointMap, KeystoneInternal, host]}
neutron::plugins::plumgrid::admin_password: {get_param: AdminPassword}
neutron::plugins::plumgrid::metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index d18b5b48..473c24b4 100644
--- a/puppet/services/nova-api.yaml
+++ b/puppet/services/nova-api.yaml
@@ -110,8 +110,10 @@ outputs:
- 13774
- 8775
nova::keystone::authtoken::project_name: 'service'
+ nova::keystone::authtoken::user_domain_name: 'Default'
+ nova::keystone::authtoken::project_domain_name: 'Default'
nova::keystone::authtoken::password: {get_param: NovaPassword}
- nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
nova::api::enabled: true
nova::api::default_floating_pool: {get_param: NovaDefaultFloatingPool}
@@ -194,9 +196,6 @@ outputs:
tags: step0,pre-upgrade
when: is_bootstrap_node
command: nova-manage db online_data_migrations
- - name: update nova api
- tags: step2
- yum: name=openstack-nova-api state=latest
- name: Stop and disable nova_api service (pre-upgrade not under httpd)
tags: step2
service: name=openstack-nova-api state=stopped enabled=no
diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml
index d892c36d..ceacb0b2 100644
--- a/puppet/services/nova-base.yaml
+++ b/puppet/services/nova-base.yaml
@@ -150,6 +150,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/nova'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
nova::api_database_connection:
list_join:
- ''
@@ -159,6 +160,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/nova_api'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
nova::placement_database_connection:
list_join:
- ''
@@ -168,6 +170,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/nova_placement'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
nova::debug: {get_param: Debug}
nova::purge_config: {get_param: EnableConfigPurge}
nova::network::neutron::neutron_project_name: 'service'
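Editor's note: the recurring ?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo suffix (also applied to octavia, panko, sahara, tacker and the plumgrid plugin in this series) moves client-side options such as the bind address out of the URI and into an options file read by the MySQL client library. A hedged illustration only; the credentials and host are placeholders, and the file contents are an assumption about what the new MySQLClient service (added to every role in roles_data.yaml below) writes:

    # illustrative resulting connection URI:
    #   mysql+pymysql://nova:SECRET@192.0.2.10/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo
    # hypothetical /etc/my.cnf.d/tripleo.cnf on a node:
    #   [tripleo]
    #   bind-address = 192.0.2.11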
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index 9923e833..d208bede 100644
--- a/puppet/services/nova-compute.yaml
+++ b/puppet/services/nova-compute.yaml
@@ -152,7 +152,7 @@ outputs:
collectd::plugins::virt::connection: "qemu:///system"
upgrade_tasks:
- name: Stop nova-compute service
- tags: step2
+ tags: step1
service: name=openstack-nova-compute state=stopped
# If not already set by puppet (e.g a pre-ocata version), set the
# upgrade_level for compute to "auto"
diff --git a/puppet/services/nova-conductor.yaml b/puppet/services/nova-conductor.yaml
index 7b086536..4574cae8 100644
--- a/puppet/services/nova-conductor.yaml
+++ b/puppet/services/nova-conductor.yaml
@@ -67,15 +67,12 @@ outputs:
include tripleo::profile::base::nova::conductor
upgrade_tasks:
- name: Stop nova_conductor service
- tags: step2
+ tags: step1
service: name=openstack-nova-conductor state=stopped
- - name: update nova conductor
- tags: step2
- yum: name=openstack-nova-conductor state=latest
# If not already set by puppet (e.g a pre-ocata version), set the
# upgrade_level for compute to "auto"
- name: Set compute upgrade level to auto
- tags: step3
+ tags: step1
ini_file:
str_replace:
template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
diff --git a/puppet/services/nova-consoleauth.yaml b/puppet/services/nova-consoleauth.yaml
index b5a1312a..82f329bc 100644
--- a/puppet/services/nova-consoleauth.yaml
+++ b/puppet/services/nova-consoleauth.yaml
@@ -50,5 +50,5 @@ outputs:
include tripleo::profile::base::nova::consoleauth
upgrade_tasks:
- name: Stop nova_consoleauth service
- tags: step2
+ tags: step1
service: name=openstack-nova-consoleauth state=stopped
diff --git a/puppet/services/nova-ironic.yaml b/puppet/services/nova-ironic.yaml
index 5eb2170a..843f44c5 100644
--- a/puppet/services/nova-ironic.yaml
+++ b/puppet/services/nova-ironic.yaml
@@ -44,7 +44,7 @@ outputs:
nova::compute::vnc_enabled: false
nova::ironic::common::password: {get_param: IronicPassword}
nova::ironic::common::project_name: 'service'
- nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
nova::ironic::common::username: 'ironic'
nova::ironic::common::api_endpoint: {get_param: [EndpointMap, IronicInternal, uri]}
nova::network::neutron::dhcp_domain: ''
diff --git a/puppet/services/nova-placement.yaml b/puppet/services/nova-placement.yaml
index 5564c1b3..b59e2fc6 100644
--- a/puppet/services/nova-placement.yaml
+++ b/puppet/services/nova-placement.yaml
@@ -79,6 +79,10 @@ outputs:
dport:
- 8778
- 13778
+ nova::keystone::authtoken::project_name: 'service'
+ nova::keystone::authtoken::password: {get_param: NovaPassword}
+ nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
nova::wsgi::apache_placement::api_port: '8778'
nova::wsgi::apache_placement::ssl: {get_param: EnableInternalTLS}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
@@ -120,5 +124,10 @@ outputs:
- "%{hiera('mysql_bind_host')}"
upgrade_tasks:
- name: Stop nova_placement service (running under httpd)
- tags: step2
+ tags: step1
service: name=httpd state=stopped
+ # The nova placement API isn't installed in newton images, so install
+ # it on upgrade
+ - name: Install nova-placement packages on upgrade
+ tags: step3
+ yum: name=openstack-nova-placement-api state=latest
diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml
index 0e0b9d1e..e4b6bb43 100644
--- a/puppet/services/nova-scheduler.yaml
+++ b/puppet/services/nova-scheduler.yaml
@@ -65,8 +65,5 @@ outputs:
include tripleo::profile::base::nova::scheduler
upgrade_tasks:
- name: Stop nova_scheduler service
- tags: step2
+ tags: step1
service: name=openstack-nova-scheduler state=stopped
- - name: update nova scheduler
- tags: step2
- yum: name=openstack-nova-scheduler state=latest
diff --git a/puppet/services/nova-vnc-proxy.yaml b/puppet/services/nova-vnc-proxy.yaml
index f6cf9649..42335ade 100644
--- a/puppet/services/nova-vnc-proxy.yaml
+++ b/puppet/services/nova-vnc-proxy.yaml
@@ -66,5 +66,5 @@ outputs:
include tripleo::profile::base::nova::vncproxy
upgrade_tasks:
- name: Stop nova_vnc_proxy service
- tags: step2
+ tags: step1
service: name=openstack-nova-consoleauth state=stopped
diff --git a/puppet/services/octavia-api.yaml b/puppet/services/octavia-api.yaml
index 37ba1f73..909a3030 100644
--- a/puppet/services/octavia-api.yaml
+++ b/puppet/services/octavia-api.yaml
@@ -66,8 +66,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/octavia'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
octavia::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
octavia::keystone::authtoken::project_name: 'service'
octavia::keystone::authtoken::password: {get_param: OctaviaPassword}
diff --git a/puppet/services/octavia-base.yaml b/puppet/services/octavia-base.yaml
index b537a2bc..a3f616ff 100644
--- a/puppet/services/octavia-base.yaml
+++ b/puppet/services/octavia-base.yaml
@@ -56,7 +56,7 @@ outputs:
octavia::debug: {get_param: Debug}
octavia::purge_config: {get_param: EnableConfigPurge}
octavia::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- tripleo::profile::base::octavia::rabbit_user: {get_param: RabbitUserName}
- tripleo::profile::base::octavia::rabbit_password: {get_param: RabbitPassword}
- tripleo::profile::base::octavia::rabbit_port: {get_param: RabbitClientPort}
+ octavia::rabbit_userid: {get_param: RabbitUserName}
+ octavia::rabbit_password: {get_param: RabbitPassword}
+ octavia::rabbit_port: {get_param: RabbitClientPort}
diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml
index 0ed9d206..6882aeff 100644
--- a/puppet/services/opendaylight-api.yaml
+++ b/puppet/services/opendaylight-api.yaml
@@ -17,6 +17,10 @@ parameters:
type: string
description: The password for the opendaylight server.
hidden: true
+ OpenDaylightConnectionProtocol:
+ description: L7 protocol used for REST access
+ type: string
+ default: 'http'
OpenDaylightEnableDHCP:
description: Knob to enable/disable ODL DHCP Server
type: boolean
@@ -24,7 +28,7 @@ parameters:
OpenDaylightFeatures:
description: List of features to install with ODL
type: comma_delimited_list
- default: ["odl-netvirt-openstack","odl-netvirt-ui"]
+ default: ["odl-netvirt-openstack","odl-netvirt-ui","odl-jolokia"]
OpenDaylightApiVirtualIP:
type: string
default: ''
@@ -55,11 +59,36 @@ outputs:
opendaylight::extra_features: {get_param: OpenDaylightFeatures}
opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP}
opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
+ opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
tripleo.opendaylight_api.firewall_rules:
'137 opendaylight api':
dport:
- {get_param: OpenDaylightPort}
- 6640
- 6653
+ - 2550
step_config: |
include tripleo::profile::base::neutron::opendaylight
+ upgrade_tasks:
+ - name: Check if opendaylight is deployed
+ command: systemctl is-enabled opendaylight
+ tags: common
+ ignore_errors: True
+ register: opendaylight_enabled
+ - name: "PreUpgrade step0,validation: Check service opendaylight is running"
+ shell: /usr/bin/systemctl show 'opendaylight' --property ActiveState | grep '\bactive\b'
+ when: opendaylight_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop opendaylight service
+ tags: step1
+ when: opendaylight_enabled.rc == 0
+ service: name=opendaylight state=stopped
+ - name: Remove ODL snapshots, data and journal directories
+ file:
+ state: absent
+ path: /opt/opendaylight/{{item}}
+ tags: step2
+ with_items:
+ - snapshots
+ - data
+ - journal
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index cfec3c48..5cf416f3 100644
--- a/puppet/services/opendaylight-ovs.yaml
+++ b/puppet/services/opendaylight-ovs.yaml
@@ -60,11 +60,7 @@ outputs:
opendaylight_check_url: {get_param: OpenDaylightCheckURL}
opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
- neutron::plugins::ovs::opendaylight::provider_mappings:
- str_replace:
- template: MAPPINGS
- params:
- MAPPINGS: {get_param: OpenDaylightProviderMappings}
+ neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
tripleo.opendaylight_ovs.firewall_rules:
'118 neutron vxlan networks':
proto: 'udp'
@@ -73,3 +69,17 @@ outputs:
proto: 'gre'
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
+ upgrade_tasks:
+ - name: Check if openvswitch is deployed
+ command: systemctl is-enabled openvswitch
+ tags: common
+ ignore_errors: True
+ register: openvswitch_enabled
+ - name: "PreUpgrade step0,validation: Check service openvswitch is running"
+ shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b'
+ when: openvswitch_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop openvswitch service
+ tags: step1
+ when: openvswitch_enabled.rc == 0
+ service: name=openvswitch state=stopped
diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml
index ca21cfbe..762d0092 100644
--- a/puppet/services/pacemaker.yaml
+++ b/puppet/services/pacemaker.yaml
@@ -90,7 +90,7 @@ parameters:
PacemakerResources:
type: comma_delimited_list
description: List of resources managed by pacemaker
- default: ['rabbitmq','haproxy']
+ default: ['rabbitmq','haproxy','galera']
outputs:
role_data:
@@ -136,12 +136,14 @@ outputs:
tags: step0,validation
pacemaker_cluster: state=online check_and_fail=true
- name: Stop pacemaker cluster
- tags: step1
+ tags: step2
pacemaker_cluster: state=offline
- name: Start pacemaker cluster
tags: step4
pacemaker_cluster: state=online
- name: Check pacemaker resource
tags: step4
- pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=200
+ pacemaker_is_active:
+ resource: "{{ item }}"
+ max_wait: 500
with_items: {get_param: PacemakerResources}
diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml
index 03c2c83f..caada950 100644
--- a/puppet/services/pacemaker/rabbitmq.yaml
+++ b/puppet/services/pacemaker/rabbitmq.yaml
@@ -39,3 +39,34 @@ outputs:
- rabbitmq::service_manage: false
step_config: |
include ::tripleo::profile::pacemaker::rabbitmq
+ upgrade_tasks:
+ - name: get bootstrap nodeid
+ tags: common
+ command: hiera bootstrap_nodeid
+ register: bootstrap_node
+ - name: set is_bootstrap_node fact
+ tags: common
+ set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}}
+ - name: get rabbitmq policy
+ tags: common
+ shell: pcs resource show rabbitmq | grep -q -E "Attributes:.*\"ha-mode\":\"all\""
+ register: rabbit_ha_mode
+ when: is_bootstrap_node
+ ignore_errors: true
+ - name: set migrate_rabbit_ha_mode fact
+ tags: common
+ set_fact: migrate_rabbit_ha_mode={{rabbit_ha_mode.rc == 0}}
+ when: is_bootstrap_node
+ - name: Fixup for rabbitmq ha-queues LP#1668600
+ tags: step0,pre-upgrade
+ shell: |
+ nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
+ nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
+ if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
+ echo "ERROR: The nr. of HA queues during the rabbit upgrade is out of range: $nr_queues"
+ exit 1
+ fi
+ pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
+ when: is_bootstrap_node and migrate_rabbit_ha_mode
+ metadata_settings:
+ get_attr: [RabbitMQServiceBase, role_data, metadata_settings]
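Editor's note: a quick sanity check of the arithmetic in the fixup above: with three controllers, hiera controller_node_names contains two commas, so nr_controllers = 3 and nr_queues = 3/2 + (3 mod 2) = 1 + 1 = 2, which satisfies 0 < nr_queues <= 3; the rabbitmq resource is then updated with the policy {"ha-mode":"exactly","ha-params":2}.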
diff --git a/puppet/services/panko-api.yaml b/puppet/services/panko-api.yaml
index 4b74ad45..eed98257 100644
--- a/puppet/services/panko-api.yaml
+++ b/puppet/services/panko-api.yaml
@@ -84,3 +84,22 @@ outputs:
include tripleo::profile::base::panko::api
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: Check if httpd is deployed
+ command: systemctl is-enabled httpd
+ tags: common
+ ignore_errors: True
+ register: httpd_enabled
+ - name: "PreUpgrade step0,validation: Check if httpd is running"
+ shell: >
+ /usr/bin/systemctl show 'httpd' --property ActiveState |
+ grep '\bactive\b'
+ when: httpd_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop panko-api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: httpd_enabled.rc == 0
+ - name: Install openstack-panko-api package if it was not installed
+ tags: step3
+ yum: name=openstack-panko-api state=latest
diff --git a/puppet/services/panko-base.yaml b/puppet/services/panko-base.yaml
index 2c2586af..fda13450 100644
--- a/puppet/services/panko-base.yaml
+++ b/puppet/services/panko-base.yaml
@@ -46,13 +46,14 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/panko'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
panko::debug: {get_param: Debug}
panko::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::keystone::authtoken::project_name: 'service'
+ panko::keystone::authtoken::user_domain_name: 'Default'
+ panko::keystone::authtoken::project_domain_name: 'Default'
panko::keystone::authtoken::password: {get_param: PankoPassword}
- panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::auth::auth_password: {get_param: PankoPassword}
panko::auth::auth_region: 'regionOne'
diff --git a/puppet/services/rabbitmq-internal-tls-certmonger.yaml b/puppet/services/rabbitmq-internal-tls-certmonger.yaml
new file mode 100644
index 00000000..39d6b903
--- /dev/null
+++ b/puppet/services/rabbitmq-internal-tls-certmonger.yaml
@@ -0,0 +1,47 @@
+heat_template_version: ocata
+
+description: >
+ RabbitMQ configurations for using TLS via certmonger.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ # The following parameters are not needed by the template but are
+ # required to pass the pep8 tests
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: RabbitMQ configurations for using TLS via certmonger.
+ value:
+ service_name: rabbitmq_internal_tls_certmonger
+ config_settings:
+ generate_service_certificates: true
+ tripleo::profile::base::rabbitmq::certificate_specs:
+ service_certificate: '/etc/pki/tls/certs/rabbitmq.crt'
+ service_key: '/etc/pki/tls/private/rabbitmq.key'
+ hostname:
+ str_replace:
+ template: "%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ principal:
+ str_replace:
+ template: "rabbitmq/%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ metadata_settings:
+ - service: rabbitmq
+ network: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ type: node
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index 2c4ccbc9..92a0015a 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -48,6 +48,18 @@ parameters:
MonitoringSubscriptionRabbitmq:
default: 'overcloud-rabbitmq'
type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+resources:
+
+ RabbitMQTLS:
+ type: OS::TripleO::Services::RabbitMQTLS
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
@@ -56,51 +68,62 @@ outputs:
service_name: rabbitmq
monitoring_subscription: {get_param: MonitoringSubscriptionRabbitmq}
config_settings:
- rabbitmq::file_limit: {get_param: RabbitFDLimit}
- rabbitmq::default_user: {get_param: RabbitUserName}
- rabbitmq::default_pass: {get_param: RabbitPassword}
- rabbit_ipv6: {get_param: RabbitIPv6}
- tripleo.rabbitmq.firewall_rules:
- '109 rabbitmq':
- dport:
- - 4369
- - 5672
- - 25672
- rabbitmq::delete_guest_user: false
- rabbitmq::wipe_db_on_cookie_change: true
- rabbitmq::port: '5672'
- rabbitmq::package_provider: yum
- rabbitmq::package_source: undef
- rabbitmq::repos_ensure: false
- rabbitmq::tcp_keepalive: true
- rabbitmq_environment:
- NODE_PORT: ''
- NODE_IP_ADDRESS: ''
- RABBITMQ_NODENAME: "rabbit@%{::hostname}"
- RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
- 'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}"
- rabbitmq_kernel_variables:
- inet_dist_listen_min: '25672'
- inet_dist_listen_max: '25672'
- rabbitmq_config_variables:
- cluster_partition_handling: 'pause_minority'
- queue_master_locator: '<<"min-masters">>'
- loopback_users: '[]'
- rabbitmq::erlang_cookie:
- yaql:
- expression: $.data.passwords.where($ != '').first()
- data:
- passwords:
- - {get_param: RabbitCookie}
- - {get_param: [DefaultPasswords, rabbit_cookie]}
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
- # internal_api -> IP
- # internal_api_uri -> [IP]
- # internal_api_subnet - > IP/CIDR
- rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
- rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
+ map_merge:
+ - get_attr: [RabbitMQTLS, role_data, config_settings]
+ -
+ rabbitmq::file_limit: {get_param: RabbitFDLimit}
+ rabbitmq::default_user: {get_param: RabbitUserName}
+ rabbitmq::default_pass: {get_param: RabbitPassword}
+ rabbit_ipv6: {get_param: RabbitIPv6}
+ tripleo.rabbitmq.firewall_rules:
+ '109 rabbitmq':
+ dport:
+ - 4369
+ - 5672
+ - 25672
+ rabbitmq::delete_guest_user: false
+ rabbitmq::wipe_db_on_cookie_change: true
+ rabbitmq::port: '5672'
+ rabbitmq::package_provider: yum
+ rabbitmq::package_source: undef
+ rabbitmq::repos_ensure: false
+ rabbitmq::tcp_keepalive: true
+ rabbitmq_environment:
+ NODE_PORT: ''
+ NODE_IP_ADDRESS: ''
+ RABBITMQ_NODENAME: "rabbit@%{::hostname}"
+ RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
+ 'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}"
+ rabbitmq_kernel_variables:
+ inet_dist_listen_min: '25672'
+ inet_dist_listen_max: '25672'
+ rabbitmq_config_variables:
+ cluster_partition_handling: 'pause_minority'
+ queue_master_locator: '<<"min-masters">>'
+ loopback_users: '[]'
+ rabbitmq::erlang_cookie:
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: RabbitCookie}
+ - {get_param: [DefaultPasswords, rabbit_cookie]}
+ # NOTE: bind IP is found in Heat replacing the network name with the
+ # local node IP for the given network; replacement examples
+ # (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet - > IP/CIDR
+ rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
+ rabbitmq::ssl: {get_param: EnableInternalTLS}
+ rabbitmq::ssl_port: '5672'
+ rabbitmq::ssl_depth: 1
+ rabbitmq::ssl_only: {get_param: EnableInternalTLS}
+ rabbitmq::ssl_interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ # TODO(jaosorior): Remove this once we set a proper default in
+ # puppet-tripleo
+ tripleo::profile::base::rabbitmq::enable_internal_tls: {get_param: EnableInternalTLS}
step_config: |
include ::tripleo::profile::base::rabbitmq
upgrade_tasks:
@@ -110,4 +133,5 @@ outputs:
- name: Start rabbitmq service
tags: step4
service: name=rabbitmq-server state=started
-
+ metadata_settings:
+ get_attr: [RabbitMQTLS, role_data, metadata_settings]
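Editor's note: the RabbitMQTLS resource introduced above is a composable-service hook: it contributes certificate specs only when the OS::TripleO::Services::RabbitMQTLS alias is mapped to the new rabbitmq-internal-tls-certmonger.yaml template, and is otherwise expected to resolve to OS::Heat::None and contribute nothing. A hedged sketch of the environment wiring involved; the authoritative mapping lives in environments/enable-internal-tls.yaml, which this series also updates, and may differ in detail:

    resource_registry:
      OS::TripleO::Services::RabbitMQTLS: ../puppet/services/rabbitmq-internal-tls-certmonger.yaml
    parameter_defaults:
      EnableInternalTLS: true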
diff --git a/puppet/services/sahara-api.yaml b/puppet/services/sahara-api.yaml
index 8573ea81..96b3d6e3 100644
--- a/puppet/services/sahara-api.yaml
+++ b/puppet/services/sahara-api.yaml
@@ -92,5 +92,5 @@ outputs:
- "%{hiera('mysql_bind_host')}"
upgrade_tasks:
- name: Stop sahara_api service
- tags: step2
+ tags: step1
service: name=openstack-sahara-api state=stopped
diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml
index e2084186..d5131f61 100644
--- a/puppet/services/sahara-base.yaml
+++ b/puppet/services/sahara-base.yaml
@@ -64,19 +64,20 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/sahara'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
sahara::rabbit_password: {get_param: RabbitPassword}
sahara::rabbit_user: {get_param: RabbitUserName}
sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
sahara::rabbit_port: {get_param: RabbitClientPort}
sahara::debug: {get_param: Debug}
+ # Remove admin_password when https://review.openstack.org/442619 is merged.
sahara::admin_password: {get_param: SaharaPassword}
- sahara::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- sahara::identity_uri: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
sahara::use_neutron: true
sahara::plugins: {get_param: SaharaPlugins}
sahara::rpc_backend: rabbit
- sahara::admin_tenant_name: 'service'
sahara::db::database_db_max_retries: -1
sahara::db::database_max_retries: -1
+ sahara::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ sahara::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ sahara::keystone::authtoken::password: {get_param: SaharaPassword}
+ sahara::keystone::authtoken::project_name: 'service'
diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml
index 987fe25b..c0b6b3e6 100644
--- a/puppet/services/sahara-engine.yaml
+++ b/puppet/services/sahara-engine.yaml
@@ -51,8 +51,5 @@ outputs:
include ::tripleo::profile::base::sahara::engine
upgrade_tasks:
- name: Stop sahara_engine service
- tags: step2
+ tags: step1
service: name=openstack-sahara-engine state=stopped
- - name: Sync sahara_engine DB
- tags: step5
- command: sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
index 80da5352..a2286d16 100644
--- a/puppet/services/services.yaml
+++ b/puppet/services/services.yaml
@@ -52,11 +52,7 @@ outputs:
description: Combined Role data for this set of services.
value:
service_names:
- # Filter any null/None service_names which may be present due to mapping
- # of services to OS::Heat::None
- yaql:
- expression: list($.data.s_names.where($ != null))
- data: {s_names: {get_attr: [ServiceChain, role_data, service_name]}}
+ {get_attr: [ServiceChain, role_data, service_name]}
monitoring_subscriptions:
yaql:
expression: list($.data.role_data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
@@ -112,7 +108,7 @@ outputs:
yaql:
expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
data: {role_data: {get_attr: [ServiceChain, role_data]}}
- step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]}
+ step_config: {get_attr: [ServiceChain, role_data, step_config]}
upgrade_tasks:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g yum update for all services
diff --git a/puppet/services/snmp.yaml b/puppet/services/snmp.yaml
index fd6ed818..80c29f95 100644
--- a/puppet/services/snmp.yaml
+++ b/puppet/services/snmp.yaml
@@ -45,5 +45,5 @@ outputs:
include ::tripleo::profile::base::snmp
upgrade_tasks:
- name: Stop snmp service
- tags: step2
+ tags: step1
service: name=snmpd state=stopped
diff --git a/puppet/services/sshd.yaml b/puppet/services/sshd.yaml
index 41e144a0..12998c33 100644
--- a/puppet/services/sshd.yaml
+++ b/puppet/services/sshd.yaml
@@ -29,6 +29,6 @@ outputs:
value:
service_name: sshd
config_settings:
- BannerText: {get_param: BannerText}
+ tripleo::profile::base::sshd::bannertext: {get_param: BannerText}
step_config: |
include ::tripleo::profile::base::sshd
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index 526fa888..0c3cc1ec 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -31,9 +31,9 @@ parameters:
description: Timeout for requests going from swift-proxy to swift a/c/o services.
type: number
SwiftWorkers:
- default: 0
+ default: auto
description: Number of workers for Swift service.
- type: number
+ type: string
KeystoneRegion:
type: string
default: 'regionOne'
@@ -138,6 +138,7 @@ outputs:
- ''
- 'proxy-logging'
- 'proxy-server'
+ swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
swift::proxy::account_autocreate: true
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
@@ -166,5 +167,5 @@ outputs:
- ResellerAdmin
upgrade_tasks:
- name: Stop swift_proxy service
- tags: step2
+ tags: step1
service: name=openstack-swift-proxy state=stopped
diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml
index 247b23ff..261aadeb 100644
--- a/puppet/services/swift-storage.yaml
+++ b/puppet/services/swift-storage.yaml
@@ -103,7 +103,7 @@ outputs:
include ::tripleo::profile::base::swift::storage
upgrade_tasks:
- name: Stop swift storage services
- tags: step2
+ tags: step1
service: name={{ item }} state=stopped
with_items:
- openstack-swift-account-auditor
diff --git a/puppet/services/tacker.yaml b/puppet/services/tacker.yaml
index 5cf09a6d..a4c139b5 100644
--- a/puppet/services/tacker.yaml
+++ b/puppet/services/tacker.yaml
@@ -64,12 +64,8 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/tacker'
- - '?bind_address='
- - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
- tacker::keystone::auth::tenant: 'service'
- tacker::keystone::auth::password: {get_param: TackerPassword}
- tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
tacker::debug: {get_param: Debug}
tacker::rpc_backend: rabbit
tacker::rabbit_userid: {get_param: RabbitUserName}
@@ -78,6 +74,12 @@ outputs:
tacker::rabbit_port: {get_param: RabbitClientPort}
tacker::server::bind_host: {get_param: [ServiceNetMap, TackerApiNetwork]}
+ tacker::keystone::authtoken::project_name: 'service'
+ tacker::keystone::authtoken::user_domain_name: 'Default'
+ tacker::keystone::authtoken::project_domain_name: 'Default'
+ tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+
tacker::db::mysql::password: {get_param: TackerPassword}
tacker::db::mysql::user: tacker
tacker::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
@@ -86,6 +88,31 @@ outputs:
- '%'
- {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ service_config_settings:
+ keystone:
+ tacker::keystone::auth::tenant: 'service'
+ tacker::keystone::auth::password: {get_param: TackerPassword}
+ tacker::keystone::auth::public_url: {get_param: [EndpointMap, TackerPublic, uri]}
+ tacker::keystone::auth::internal_url: {get_param: [EndpointMap, TackerInternal, uri]}
+ tacker::keystone::auth::admin_url: {get_param: [EndpointMap, TackerAdmin, uri]}
step_config: |
include ::tripleo::profile::base::tacker
+ upgrade_tasks:
+ - name: Check if tacker is deployed
+ command: systemctl is-enabled openstack-tacker-server
+ tags: common
+ ignore_errors: True
+ register: tacker_enabled
+ - name: "PreUpgrade step0,validation: Check service openstack-tacker-server is running"
+ shell: /usr/bin/systemctl show 'openstack-tacker-server' --property ActiveState | grep '\bactive\b'
+ when: tacker_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop tacker service
+ tags: step1
+ when: tacker_enabled.rc == 0
+ service: name=openstack-tacker-server state=stopped
+ - name: Install openstack-tacker package if it was disabled
+ tags: step3
+ yum: name=openstack-tacker state=latest
+ when: tacker_enabled.rc != 0
diff --git a/puppet/services/vpp.yaml b/puppet/services/vpp.yaml
new file mode 100644
index 00000000..7c8f8a28
--- /dev/null
+++ b/puppet/services/vpp.yaml
@@ -0,0 +1,57 @@
+heat_template_version: ocata
+
+description: >
+ Vpp service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ VppCpuMainCore:
+ default: ''
+ description: VPP main thread core pinning.
+ type: string
+ VppCpuCorelistWorkers:
+ default: ''
+ description: List of cores for VPP worker thread pinning
+ type: string
+ MonitoringSubscriptionVpp:
+ default: 'overcloud-vpp'
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the Vpp role.
+ value:
+ service_name: vpp
+ monitoring_subscription: {get_param: MonitoringSubscriptionVpp}
+ config_settings:
+ fdio::vpp_cpu_main_core: {get_param: VppCpuMainCore}
+ fdio::vpp_cpu_corelist_workers: {get_param: VppCpuCorelistWorkers}
+ step_config: |
+ include ::tripleo::profile::base::vpp
+ upgrade_tasks:
+ - name: Check if vpp is deployed
+ command: systemctl is-enabled vpp
+ tags: common
+ ignore_errors: True
+ register: vpp_enabled
+ - name: "PreUpgrade step0,validation: Check service vpp is running"
+ shell: /usr/bin/systemctl show 'vpp' --property ActiveState | grep '\bactive\b'
+ when: vpp_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop vpp service
+ tags: step1
+ when: vpp_enabled.rc == 0
+ service: name=vpp state=stopped
diff --git a/puppet/services/zaqar.yaml b/puppet/services/zaqar.yaml
index cb860fa8..a320f694 100644
--- a/puppet/services/zaqar.yaml
+++ b/puppet/services/zaqar.yaml
@@ -64,3 +64,23 @@ outputs:
step_config: |
include ::tripleo::profile::base::zaqar
+ upgrade_tasks:
+ - name: Check if zaqar is deployed
+ command: systemctl is-enabled openstack-zaqar
+ tags: common
+ ignore_errors: True
+ register: zaqar_enabled
+ - name: "PreUpgrade step0,validation: Check if openstack-zaqar is running"
+ shell: >
+ /usr/bin/systemctl show 'openstack-zaqar' --property ActiveState |
+ grep '\bactive\b'
+ when: zaqar_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop zaqar service
+ tags: step1
+ when: zaqar_enabled.rc == 0
+ service: name=openstack-zaqar state=stopped
+ - name: Install openstack-zaqar package if it was disabled
+ tags: step3
+ yum: name=openstack-zaqar state=latest
+ when: zaqar_enabled.rc != 0
diff --git a/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml b/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml
index f9afb18d..9343d99e 100644
--- a/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml
+++ b/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml
@@ -64,6 +64,8 @@ features:
- Support for Octavia composable services for LBaaS with Neutron.
- Support for Collectd composable services for performance monitoring.
- Support for Tacker composable service for VNF management.
+ - Add the plan-environment.yaml file which will facilitate deployment plan
+ import and export.
upgrade:
- Update OpenDaylight deployment to use networking-odl v2 as a mechanism
driver.
diff --git a/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml b/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml
new file mode 100644
index 00000000..50b8167e
--- /dev/null
+++ b/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - Keystone's default token provider is now fernet instead of UUID
+upgrade:
+ - When upgrading, old tokens will not work anymore due to the provider
+ changing from UUID to fernet.
diff --git a/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml b/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml
new file mode 100644
index 00000000..2af6aa72
--- /dev/null
+++ b/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Add support for BGPVPN Neutron service plugin
diff --git a/releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml b/releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml
new file mode 100644
index 00000000..882ee4e5
--- /dev/null
+++ b/releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - Adds support for OpenDaylight HA clustering. Now when specifying
+ three or more ODL roles, ODL will be deployed in a cluster, and
+ use port 2550 for cluster communication.
diff --git a/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml b/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml
new file mode 100644
index 00000000..b3a62ced
--- /dev/null
+++ b/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - The relevant parameters have been added to deploy the heat APIs over httpd.
+ This means that the HeatWorkers parameter now affects httpd instead of the
+ heat APIs themselves, and that the apache hieradata will also be deployed
+ on the nodes where the heat APIs run.
diff --git a/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml b/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml
new file mode 100644
index 00000000..ec22942a
--- /dev/null
+++ b/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ NeutronDhcpAgents had a default value of 3 that, even though unused in
+ practice, was a bad default value. Changing the default value to a
+ sentinel value and making the hiera setting conditional allows deploy-time
+ logic in puppet to provide a default value based on the number of dhcp
+ agents being deployed.
diff --git a/releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml b/releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml
new file mode 100644
index 00000000..09067296
--- /dev/null
+++ b/releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml
@@ -0,0 +1,10 @@
+---
+upgrade:
+ - The ``NeutronExternalNetworkBridge`` parameter changed its default value
+ from ``br-ex`` to an empty string value. It means that by default Neutron
+ L3 agent will be able to serve multiple external networks. (It was always
+ the case for those who were using templates with the value of the parameter
+ overridden by an empty string value.)
+deprecations:
+ - The ``NeutronExternalNetworkBridge`` parameter is deprecated and will be
+ removed in a future release.
diff --git a/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml b/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml
new file mode 100644
index 00000000..da995949
--- /dev/null
+++ b/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml
@@ -0,0 +1,6 @@
+---
+security:
+ - |
+ Secure EtcdInitialClusterToken by removing the default value
+ and make the parameter hidden.
+ Fixes `bug 1673266 <https://bugs.launchpad.net/tripleo/+bug/1673266>`__.
diff --git a/releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml b/releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml
new file mode 100644
index 00000000..682171c1
--- /dev/null
+++ b/releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - Fixes an issue when using the CinderNfsServers
+ parameter_defaults setting. It now works using a
+ single share as well as a comma-separated list of
+ shares.
diff --git a/releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml b/releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml
new file mode 100644
index 00000000..bb18aed8
--- /dev/null
+++ b/releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - Fixes firewall rules from the neutron OVS agent not being
+ inherited correctly and applied in the neutron OVS DPDK
+ template.
diff --git a/releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml b/releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml
new file mode 100644
index 00000000..79cea05e
--- /dev/null
+++ b/releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - Fixes parsing of OpenDaylightProviderMappings as a
+ comma-delimited list.
diff --git a/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml b/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml
new file mode 100644
index 00000000..2f2513c9
--- /dev/null
+++ b/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - Deploy Gnocchi with Keystone v3 endpoints and make
+ sure it doesn't rely on Keystone v2 anymore.
diff --git a/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml b/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml
deleted file mode 100644
index edcc1250..00000000
--- a/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-deprecations:
- - The environments/puppet-pacemaker.yaml file is now deprecated and the HA
- deployment is now the default. In order to get the non-HA deployment use
- environments/nonha-arch.yaml explicitly.
diff --git a/releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml b/releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml
new file mode 100644
index 00000000..d2b2eb94
--- /dev/null
+++ b/releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - openstack-selinux is now installed by the deployed-server
+ bootstrap scripts. Previously, it was not installed, so
+ if SELinux was set to enforcing, all OpenStack policy
+ was missing.
diff --git a/releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml b/releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml
new file mode 100644
index 00000000..59f1fb99
--- /dev/null
+++ b/releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml
@@ -0,0 +1,11 @@
+---
+prelude: >
+ Support for Manila/CephFS with TripleO managed Ceph cluster
+features:
+ - |
+ It is now possible to configure Manila with CephFS to use a
+ TripleO managed Ceph cluster. When using the Heat environment
+ file at environments/manila-cephfsnative-config.yaml Manila
+ will be configured to use the TripleO managed Ceph cluster
+ if CephMDS is deployed as well, which can be done using the
+ file environments/services/ceph-mds.yaml
\ No newline at end of file
diff --git a/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml b/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml
new file mode 100644
index 00000000..c744e0f7
--- /dev/null
+++ b/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - Sahara is now deployed with keystone_authtoken parameters and moves
+ forward to Keystone v3.
diff --git a/releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml b/releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml
new file mode 100644
index 00000000..e9974a20
--- /dev/null
+++ b/releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ Swift rings created or updated on the overcloud nodes will now be
+ stored on the undercloud at the end of each deployment. They will be
+ retrieved before any deployment update, and by doing this the Swift
+ rings will be in a consistent state across the cluster all the time.
+ This makes it possible to add, remove or replace nodes without
+ manual operator interaction.
diff --git a/releasenotes/notes/vpp-84d35e51ff62a58c.yaml b/releasenotes/notes/vpp-84d35e51ff62a58c.yaml
new file mode 100644
index 00000000..b78df17d
--- /dev/null
+++ b/releasenotes/notes/vpp-84d35e51ff62a58c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - Add the ability to deploy VPP. Vector Packet Processing (VPP) is a
+ high-performance packet processing stack that runs in user space on Linux.
+ VPP is used as an alternative to the kernel networking stack for an
+ accelerated network data path.
diff --git a/roles_data.yaml b/roles_data.yaml
index 31b12986..130451ff 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -17,6 +17,10 @@
# disable_constraints: (boolean) optional, whether to disable Nova and Glance
# constraints for each role specified in the templates.
#
+# disable_upgrade_deployment: (boolean) optional, whether to disable the
+# ansible upgrade steps for all services that are deployed on the role. If set
+# to True, the operator will drive the upgrade for this role's nodes.
+#
# upgrade_batch_size: (number): batch size for upgrades where tasks are
# specified by services to run in batches vs all nodes at once.
# This defaults to 1, but larger batches may be specified here.
@@ -29,6 +33,7 @@
CountDefault: 1
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephMds
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
@@ -47,6 +52,8 @@
- OS::TripleO::Services::HeatApiCloudwatch
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronBgpvpnApi
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
@@ -120,6 +127,8 @@
- OS::TripleO::Services::OctaviaHealthManager
- OS::TripleO::Services::OctaviaHousekeeping
- OS::TripleO::Services::OctaviaWorker
+ - OS::TripleO::Services::Vpp
+ - OS::TripleO::Services::Docker
- name: Compute
CountDefault: 1
@@ -127,6 +136,7 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
@@ -149,10 +159,13 @@
- OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Collectd
+ - OS::TripleO::Services::Vpp
+ - OS::TripleO::Services::MySQLClient
- name: BlockStorage
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -165,11 +178,13 @@
- OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Collectd
+ - OS::TripleO::Services::MySQLClient
- name: ObjectStorage
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
@@ -183,10 +198,12 @@
- OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Collectd
+ - OS::TripleO::Services::MySQLClient
- name: CephStorage
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -199,3 +216,4 @@
- OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Collectd
+ - OS::TripleO::Services::MySQLClient
diff --git a/roles_data_undercloud.yaml b/roles_data_undercloud.yaml
index 2759429c..8e830711 100644
--- a/roles_data_undercloud.yaml
+++ b/roles_data_undercloud.yaml
@@ -26,6 +26,7 @@
- OS::TripleO::Services::MistralExecutor
- OS::TripleO::Services::IronicApi
- OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::IronicPxe
- OS::TripleO::Services::NovaIronic
- OS::TripleO::Services::Zaqar
- OS::TripleO::Services::NeutronServer
@@ -33,3 +34,11 @@
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::NeutronOvsAgent
- OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::AodhApi
+ - OS::TripleO::Services::AodhEvaluator
+ - OS::TripleO::Services::AodhNotifier
+ - OS::TripleO::Services::AodhListener
+ - OS::TripleO::Services::GnocchiApi
+ - OS::TripleO::Services::GnocchiMetricd
+ - OS::TripleO::Services::GnocchiStatsd
+ - OS::TripleO::Services::PankoApi
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index 0eacbc60..5ff6f134 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -23,6 +23,14 @@ envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
'tls-endpoints-public-ip.yaml',
'tls-everywhere-endpoints-dns.yaml']
ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
+REQUIRED_DOCKER_SECTIONS = ['service_name', 'docker_config', 'puppet_config',
+ 'config_settings', 'step_config']
+OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
+ 'service_config_settings', 'host_prep_tasks',
+ 'metadata_settings', 'kolla_config']
+DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'puppet_tags', 'step_config',
+ 'config_image']
+
def exit_usage():
print('Usage %s <yaml file or directory>' % sys.argv[0])
@@ -54,6 +62,22 @@ def validate_endpoint_map(base_map, env_map):
return sorted(base_map.keys()) == sorted(env_map.keys())
+def validate_hci_compute_services_default(env_filename, env_tpl):
+ env_services_list = env_tpl['parameter_defaults']['ComputeServices']
+ env_services_list.remove('OS::TripleO::Services::CephOSD')
+ roles_filename = os.path.join(os.path.dirname(env_filename),
+ '../roles_data.yaml')
+ roles_tpl = yaml.load(open(roles_filename).read())
+ for role in roles_tpl:
+ if role['name'] == 'Compute':
+ roles_services_list = role['ServicesDefault']
+ if sorted(env_services_list) != sorted(roles_services_list):
+ print('ERROR: ComputeServices in %s is different '
+ 'from ServicesDefault in roles_data.yaml' % env_filename)
+ return 1
+ return 0
+
+
def validate_mysql_connection(settings):
no_op = lambda *args: False
error_status = [0]
@@ -62,14 +86,12 @@ def validate_mysql_connection(settings):
return items == ['EndpointMap', 'MysqlInternal', 'protocol']
def client_bind_address(item):
- return 'bind_address' in item
+ return 'read_default_file' in item and \
+ 'read_default_group' in item
def validate_mysql_uri(key, items):
# Only consider a connection if it targets mysql
- # TODO(owalsh): skip nova mysql uris,temporary workaround for
- # tripleo/+bug/1662344
- if not key.startswith('nova') and \
- key.endswith('connection') and \
+ if key.endswith('connection') and \
search(items, mysql_protocol, no_op):
# Assume the "bind_address" option is one of
# the token that made up the uri
@@ -96,6 +118,55 @@ def validate_mysql_connection(settings):
return error_status[0]
+def validate_docker_service(filename, tpl):
+ if 'outputs' in tpl and 'role_data' in tpl['outputs']:
+ if 'value' not in tpl['outputs']['role_data']:
+ print('ERROR: invalid role_data for filename: %s'
+ % filename)
+ return 1
+ role_data = tpl['outputs']['role_data']['value']
+
+ for section_name in REQUIRED_DOCKER_SECTIONS:
+ if section_name not in role_data:
+ print('ERROR: %s is required in role_data for %s.'
+ % (section_name, filename))
+ return 1
+
+ for section_name in role_data.keys():
+ if section_name in REQUIRED_DOCKER_SECTIONS:
+ continue
+ else:
+ if section_name in OPTIONAL_DOCKER_SECTIONS:
+ continue
+ else:
+ print('ERROR: %s is extra in role_data for %s.'
+ % (section_name, filename))
+ return 1
+
+ if 'puppet_config' in role_data:
+ puppet_config = role_data['puppet_config']
+ for key in puppet_config:
+ if key in DOCKER_PUPPET_CONFIG_SECTIONS:
+ continue
+ else:
+ print('ERROR: %s should not be in puppet_config section.'
+ % key)
+ return 1
+ for key in DOCKER_PUPPET_CONFIG_SECTIONS:
+ if key not in puppet_config:
+ print('ERROR: %s is required in puppet_config for %s.'
+ % (key, filename))
+ return 1
+
+ if 'parameters' in tpl:
+ for param in required_params:
+ if param not in tpl['parameters']:
+ print('ERROR: parameter %s is required for %s.'
+ % (param, filename))
+ return 1
+ return 0
+
+
def validate_service(filename, tpl):
if 'outputs' in tpl and 'role_data' in tpl['outputs']:
if 'value' not in tpl['outputs']['role_data']:
@@ -145,6 +216,13 @@ def validate(filename):
filename != './puppet/services/services.yaml'):
retval = validate_service(filename, tpl)
+ if (filename.startswith('./docker/services/') and
+ filename != './docker/services/services.yaml'):
+ retval = validate_docker_service(filename, tpl)
+
+ if filename.endswith('hyperconverged-ceph.yaml'):
+ retval = validate_hci_compute_services_default(filename, tpl)
+
except Exception:
print(traceback.format_exc())
return 1
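Editor's note: as a reading aid for the new checks, here is an illustrative skeleton of a docker service template that validate_docker_service would accept: all of REQUIRED_DOCKER_SECTIONS present, nothing outside OPTIONAL_DOCKER_SECTIONS, and puppet_config carrying exactly the DOCKER_PUPPET_CONFIG_SECTIONS keys. Names and values are placeholders, not taken from any real service file:

    outputs:
      role_data:
        value:
          service_name: example_api
          config_settings: {}
          step_config: ''
          puppet_config:
            config_volume: example
            puppet_tags: example_config
            step_config: ''
            config_image: 'centos-binary-example:latest'
          docker_config:
            step_4:
              example_api:
                image: 'centos-binary-example:latest'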
diff --git a/validation-scripts/all-nodes.sh b/validation-scripts/all-nodes.sh
index 0b8b3523..f1f4cc11 100644
--- a/validation-scripts/all-nodes.sh
+++ b/validation-scripts/all-nodes.sh
@@ -67,5 +67,23 @@ function ping_default_gateways() {
echo "SUCCESS"
}
+# Verify the FQDN from the nova/ironic deployment matches
+# FQDN in the heat templates.
+function fqdn_check() {
+ HOSTNAME=$(hostname)
+ SHORT_NAME=$(hostname -s)
+ FQDN_FROM_HOSTS=$(awk '$3 == "'${SHORT_NAME}'"{print $2}' /etc/hosts)
+ echo -n "Checking hostname vs /etc/hosts entry..."
+ if [[ $HOSTNAME != $FQDN_FROM_HOSTS ]]; then
+ echo "FAILURE"
+ echo -e "System hostname: ${HOSTNAME}\nEntry from /etc/hosts: ${FQDN_FROM_HOSTS}\n"
+ exit 1
+ fi
+ echo "SUCCESS"
+}
+
ping_controller_ips "$ping_test_ips"
ping_default_gateways
+if [[ $validate_fqdn == "True" ]];then
+ fqdn_check
+fi
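Editor's note: fqdn_check reads /etc/hosts, takes the line whose third field is the short hostname, and requires the second field to match the output of hostname. An illustrative passing entry (address and names are placeholders):

    192.0.2.20   overcloud-controller-0.example.com   overcloud-controller-0

The check only runs when the validate_fqdn variable evaluates to the string "True", so deployments that do not opt in are unaffected.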