-rw-r--r--  README.rst | 10
-rw-r--r--  all-nodes-validation.yaml | 6
-rw-r--r--  bindep.txt | 2
-rw-r--r--  capabilities-map.yaml | 51
-rw-r--r--  ci/environments/multinode-3nodes.yaml | 4
-rw-r--r--  ci/environments/multinode-container-upgrade.yaml | 61
-rw-r--r--  ci/environments/multinode.yaml | 1
-rw-r--r--  ci/environments/multinode_major_upgrade.yaml | 1
-rw-r--r--  ci/environments/scenario001-multinode.yaml | 23
-rw-r--r--  ci/environments/scenario002-multinode.yaml | 1
-rw-r--r--  ci/environments/scenario003-multinode.yaml | 1
-rw-r--r--  ci/environments/scenario004-multinode.yaml | 5
-rw-r--r--  deployed-server/README.rst | 4
-rw-r--r--  deployed-server/deployed-server-bootstrap-centos.sh | 6
-rw-r--r--  deployed-server/deployed-server-bootstrap-rhel.sh | 6
-rw-r--r--  deployed-server/deployed-server-roles-data.yaml | 5
-rw-r--r--  deployed-server/deployed-server.yaml | 2
-rwxr-xr-x  deployed-server/scripts/get-occ-config.sh | 31
-rwxr-xr-x  docker/docker-puppet.py | 37
-rw-r--r--  docker/docker-steps.j2 | 357
-rwxr-xr-x  docker/firstboot/setup_docker_host.sh | 24
-rw-r--r--  docker/post-upgrade.j2.yaml | 4
-rw-r--r--  docker/post.j2.yaml | 335
-rw-r--r--  docker/services/README.rst | 45
-rw-r--r--  docker/services/aodh-api.yaml | 112
-rw-r--r--  docker/services/aodh-evaluator.yaml | 85
-rw-r--r--  docker/services/aodh-listener.yaml | 85
-rw-r--r--  docker/services/aodh-notifier.yaml | 85
-rw-r--r--  docker/services/containers-common.yaml | 16
-rw-r--r--  docker/services/database/mongodb.yaml | 53
-rw-r--r--  docker/services/database/mysql.yaml | 55
-rw-r--r--  docker/services/glance-api.yaml | 55
-rw-r--r--  docker/services/gnocchi-api.yaml | 111
-rw-r--r--  docker/services/gnocchi-metricd.yaml | 83
-rw-r--r--  docker/services/gnocchi-statsd.yaml | 83
-rw-r--r--  docker/services/heat-api-cfn.yaml | 59
-rw-r--r--  docker/services/heat-api.yaml | 59
-rw-r--r--  docker/services/heat-engine.yaml | 56
-rw-r--r--  docker/services/ironic-api.yaml | 64
-rw-r--r--  docker/services/ironic-conductor.yaml | 114
-rw-r--r--  docker/services/ironic-pxe.yaml | 124
-rw-r--r--  docker/services/keystone.yaml | 111
-rw-r--r--  docker/services/memcached.yaml | 34
-rw-r--r--  docker/services/mistral-api.yaml | 75
-rw-r--r--  docker/services/mistral-engine.yaml | 55
-rw-r--r--  docker/services/mistral-executor.yaml | 63
-rw-r--r--  docker/services/neutron-api.yaml | 68
-rw-r--r--  docker/services/neutron-dhcp.yaml | 63
-rw-r--r--  docker/services/neutron-l3.yaml | 54
-rw-r--r--  docker/services/neutron-ovs-agent.yaml | 52
-rw-r--r--  docker/services/neutron-plugin-ml2.yaml | 18
-rw-r--r--  docker/services/nova-api.yaml | 59
-rw-r--r--  docker/services/nova-compute.yaml | 62
-rw-r--r--  docker/services/nova-conductor.yaml | 55
-rw-r--r--  docker/services/nova-ironic.yaml | 61
-rw-r--r--  docker/services/nova-libvirt.yaml | 78
-rw-r--r--  docker/services/nova-metadata.yaml | 12
-rw-r--r--  docker/services/nova-placement.yaml | 63
-rw-r--r--  docker/services/nova-scheduler.yaml | 55
-rw-r--r--  docker/services/panko-api.yaml | 108
-rw-r--r--  docker/services/rabbitmq.yaml | 73
-rw-r--r--  docker/services/services.yaml | 22
-rw-r--r--  docker/services/swift-proxy.yaml | 51
-rw-r--r--  docker/services/swift-ringbuilder.yaml | 18
-rw-r--r--  docker/services/swift-storage.yaml | 342
-rw-r--r--  docker/services/zaqar.yaml | 62
-rw-r--r--  environments/cadf.yaml | 2
-rw-r--r--  environments/cinder-netapp-config.yaml | 2
-rw-r--r--  environments/contrail/contrail-net.yaml | 4
-rw-r--r--  environments/contrail/roles_data_contrail.yaml | 9
-rw-r--r--  environments/deployed-server-environment.j2.yaml | 11
-rw-r--r--  environments/deployed-server-environment.yaml | 4
-rw-r--r--  environments/docker-services-tls-everywhere.yaml | 28
-rw-r--r--  environments/docker.yaml | 27
-rw-r--r--  environments/enable-internal-tls.yaml | 6
-rw-r--r--  environments/external-loadbalancer-vip-v6.yaml | 7
-rw-r--r--  environments/external-loadbalancer-vip.yaml | 7
-rw-r--r--  environments/fixed-ip-vips-v6.yaml | 21
-rw-r--r--  environments/fixed-ip-vips.yaml | 21
-rw-r--r--  environments/hyperconverged-ceph.yaml | 3
-rw-r--r--  environments/logging-environment.yaml | 2
-rw-r--r--  environments/major-upgrade-all-in-one.yaml | 2
-rw-r--r--  environments/major-upgrade-composable-steps-docker.yaml | 11
-rw-r--r--  environments/major-upgrade-composable-steps.yaml | 2
-rw-r--r--  environments/major-upgrade-converge-docker.yaml | 8
-rw-r--r--  environments/major-upgrade-converge.yaml | 1
-rw-r--r--  environments/network-environment.yaml | 4
-rw-r--r--  environments/neutron-bgpvpn.yaml | 16
-rw-r--r--  environments/neutron-l2gw-api.yaml | 20
-rw-r--r--  environments/neutron-ml2-bigswitch.yaml | 13
-rw-r--r--  environments/neutron-ml2-cisco-n1kv.yaml | 4
-rw-r--r--  environments/neutron-nuage-config.yaml | 1
-rw-r--r--  environments/neutron-opendaylight.yaml | 1
-rw-r--r--  environments/nova-api-policy.yaml | 10
-rw-r--r--  environments/securetty.yaml | 12
-rw-r--r--  environments/services-docker/ironic.yaml | 5
-rw-r--r--  environments/services-docker/mistral.yaml | 4
-rw-r--r--  environments/services-docker/zaqar.yaml | 2
-rw-r--r--  environments/services/ceilometer-api.yaml | 6
-rw-r--r--  environments/services/disable-ceilometer-api.yaml | 2
-rw-r--r--  environments/services/keystone_domain_specific_ldap_backend.yaml | 18
-rw-r--r--  environments/services/panko.yaml | 2
-rw-r--r--  environments/services/qdr.yaml | 2
-rw-r--r--  environments/swift-external.yaml | 12
-rw-r--r--  environments/undercloud.yaml | 1
-rw-r--r--  environments/updates/update-from-192_0_2-subnet.yaml | 3
-rw-r--r--  extraconfig/nova_metadata/krb-service-principals.yaml | 6
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml | 37
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration | 49
-rw-r--r--  extraconfig/tasks/aodh_data_migration.sh | 19
-rw-r--r--  extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml | 62
-rwxr-xr-x  extraconfig/tasks/major_upgrade_check.sh | 109
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh | 36
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh | 177
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh | 68
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh | 17
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh | 8
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh | 15
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml | 175
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh | 200
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml | 25
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp | 103
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh | 9
-rwxr-xr-x  extraconfig/tasks/run_puppet.sh | 5
-rw-r--r--  extraconfig/tasks/ssh/host_public_key.yaml | 42
-rw-r--r--  extraconfig/tasks/ssh/known_hosts_config.yaml | 36
-rw-r--r--  extraconfig/tasks/swift-ring-deploy.yaml | 31
-rw-r--r--  extraconfig/tasks/swift-ring-update.yaml | 42
-rw-r--r--  extraconfig/tasks/tripleo_upgrade_node.sh | 14
-rwxr-xr-x  extraconfig/tasks/yum_update.sh | 49
-rw-r--r--  hosts-config.yaml | 2
-rw-r--r--  net-config-linux-bridge.yaml | 2
-rw-r--r--  network/endpoints/endpoint_data.yaml | 1
-rw-r--r--  network/endpoints/endpoint_map.yaml | 82
-rw-r--r--  network/service_net_map.j2.yaml | 1
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml | 22
-rw-r--r--  overcloud.j2.yaml | 71
-rw-r--r--  plan-environment.yaml | 13
-rw-r--r--  puppet/blockstorage-role.yaml | 39
-rw-r--r--  puppet/cephstorage-role.yaml | 39
-rw-r--r--  puppet/compute-role.yaml | 41
-rw-r--r--  puppet/config.role.j2.yaml | 2
-rw-r--r--  puppet/controller-role.yaml | 40
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml | 65
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml | 47
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml | 22
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml | 29
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml | 158
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml | 35
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml | 85
-rw-r--r--  puppet/major_upgrade_steps.j2.yaml | 57
-rw-r--r--  puppet/objectstorage-role.yaml | 39
-rw-r--r--  puppet/puppet-steps.j2 | 20
-rw-r--r--  puppet/role.role.j2.yaml | 40
-rw-r--r--  puppet/services/aodh-api.yaml | 7
-rw-r--r--  puppet/services/aodh-base.yaml | 4
-rw-r--r--  puppet/services/apache-internal-tls-certmonger.yaml | 75
-rw-r--r--  puppet/services/apache.yaml | 46
-rw-r--r--  puppet/services/barbican-api.yaml | 13
-rw-r--r--  puppet/services/ceilometer-agent-ipmi.yaml | 77
-rw-r--r--  puppet/services/ceilometer-api.yaml | 7
-rw-r--r--  puppet/services/ceilometer-base.yaml | 15
-rw-r--r--  puppet/services/ceph-rgw.yaml | 2
-rw-r--r--  puppet/services/certmonger-user.yaml | 28
-rw-r--r--  puppet/services/cinder-api.yaml | 15
-rw-r--r--  puppet/services/cinder-backend-netapp.yaml | 129
-rw-r--r--  puppet/services/cinder-backend-scaleio.yaml | 2
-rw-r--r--  puppet/services/cinder-volume.yaml | 6
-rw-r--r--  puppet/services/congress.yaml | 15
-rw-r--r--  puppet/services/database/mongodb.yaml | 5
-rw-r--r--  puppet/services/database/mysql-internal-tls-certmonger.yaml | 47
-rw-r--r--  puppet/services/database/mysql.yaml | 39
-rw-r--r--  puppet/services/database/redis-base.yaml | 1
-rw-r--r--  puppet/services/docker.yaml | 43
-rw-r--r--  puppet/services/ec2-api.yaml | 16
-rw-r--r--  puppet/services/etcd.yaml | 2
-rw-r--r--  puppet/services/external-swift-proxy.yaml | 70
-rw-r--r--  puppet/services/glance-api.yaml | 112
-rw-r--r--  puppet/services/glance-base.yaml | 126
-rw-r--r--  puppet/services/gnocchi-api.yaml | 15
-rw-r--r--  puppet/services/gnocchi-base.yaml | 7
-rw-r--r--  puppet/services/heat-api-cfn.yaml | 55
-rw-r--r--  puppet/services/heat-api-cloudwatch.yaml | 55
-rw-r--r--  puppet/services/heat-api.yaml | 60
-rw-r--r--  puppet/services/heat-base.yaml | 4
-rw-r--r--  puppet/services/heat-engine.yaml | 6
-rw-r--r--  puppet/services/horizon.yaml | 7
-rw-r--r--  puppet/services/ironic-api.yaml | 11
-rw-r--r--  puppet/services/ironic-conductor.yaml | 64
-rw-r--r--  puppet/services/kernel.yaml | 32
-rw-r--r--  puppet/services/keystone.yaml | 50
-rw-r--r--  puppet/services/manila-api.yaml | 4
-rw-r--r--  puppet/services/mistral-api.yaml | 7
-rw-r--r--  puppet/services/mistral-base.yaml | 6
-rw-r--r--  puppet/services/monitoring/sensu-base.yaml | 15
-rw-r--r--  puppet/services/monitoring/sensu-client.yaml | 2
-rw-r--r--  puppet/services/network/contrail-vrouter.yaml | 2
-rw-r--r--  puppet/services/neutron-api.yaml | 22
-rw-r--r--  puppet/services/neutron-base.yaml | 55
-rw-r--r--  puppet/services/neutron-bgpvpn-api.yaml | 34
-rw-r--r--  puppet/services/neutron-bigswitch-agent.yaml | 29
-rw-r--r--  puppet/services/neutron-compute-plugin-nuage.yaml | 10
-rw-r--r--  puppet/services/neutron-compute-plugin-ovn.yaml | 1
-rw-r--r--  puppet/services/neutron-l2gw-api.yaml | 54
-rw-r--r--  puppet/services/neutron-ovs-agent.yaml | 35
-rw-r--r--  puppet/services/neutron-ovs-dpdk-agent.yaml | 10
-rw-r--r--  puppet/services/neutron-plugin-ml2-odl.yaml | 45
-rw-r--r--  puppet/services/neutron-plugin-nuage.yaml | 7
-rw-r--r--  puppet/services/nova-api.yaml | 23
-rw-r--r--  puppet/services/nova-base.yaml | 22
-rw-r--r--  puppet/services/nova-compute.yaml | 16
-rw-r--r--  puppet/services/nova-ironic.yaml | 6
-rw-r--r--  puppet/services/nova-libvirt.yaml | 83
-rw-r--r--  puppet/services/octavia-api.yaml | 7
-rw-r--r--  puppet/services/octavia-base.yaml | 12
-rw-r--r--  puppet/services/opendaylight-api.yaml | 28
-rw-r--r--  puppet/services/opendaylight-ovs.yaml | 30
-rw-r--r--  puppet/services/openvswitch-upgrade.yaml | 50
-rw-r--r--  puppet/services/ovn-dbs.yaml | 6
-rw-r--r--  puppet/services/pacemaker.yaml | 20
-rw-r--r--  puppet/services/pacemaker/rabbitmq.yaml | 2
-rw-r--r--  puppet/services/panko-api.yaml | 7
-rw-r--r--  puppet/services/panko-base.yaml | 4
-rw-r--r--  puppet/services/qdr.yaml | 60
-rw-r--r--  puppet/services/rabbitmq.yaml | 133
-rw-r--r--  puppet/services/sahara-api.yaml | 7
-rw-r--r--  puppet/services/sahara-base.yaml | 8
-rw-r--r--  puppet/services/securetty.yaml | 36
-rw-r--r--  puppet/services/services.yaml | 11
-rw-r--r--  puppet/services/sshd.yaml | 2
-rw-r--r--  puppet/services/swift-proxy.yaml | 37
-rw-r--r--  puppet/services/swift-ringbuilder.yaml | 10
-rw-r--r--  puppet/services/tacker.yaml | 14
-rw-r--r--  puppet/services/tripleo-firewall.yaml | 6
-rw-r--r--  puppet/services/vpp.yaml | 12
-rw-r--r--  puppet/services/zaqar.yaml | 7
-rw-r--r--  releasenotes/notes/Enable-TLS-for-libvirt-0aab48cd8339da0f.yaml | 6
-rw-r--r--  releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml | 6
-rw-r--r--  releasenotes/notes/add-all-hosts-to-hostsentry-20a8ee8a1a210ce2.yaml | 9
-rw-r--r--  releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml | 3
-rw-r--r--  releasenotes/notes/add-ceilometer-agent-ipmi-2c86726d0373d354.yaml | 3
-rw-r--r--  releasenotes/notes/add-ipv6-diable-options-9aaee219bb87ac6a.yaml | 7
-rw-r--r--  releasenotes/notes/add-l2gw-api-support-2206d3d14f409088.yaml | 3
-rw-r--r--  releasenotes/notes/add-ldap-backend-0bda702fb0aa24bf.yaml | 5
-rw-r--r--  releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml | 5
-rw-r--r--  releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml | 6
-rw-r--r--  releasenotes/notes/add-qdr-99a27dffef42c13e.yaml | 8
-rw-r--r--  releasenotes/notes/add_db_sync_timeout-c9b2f401cca0b37d.yaml | 3
-rw-r--r--  releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml | 8
-rw-r--r--  releasenotes/notes/api-policy-4ca739519537f6f4.yaml | 13
-rw-r--r--  releasenotes/notes/big-switch-agent-4c743a2112251234.yaml | 5
-rw-r--r--  releasenotes/notes/deployed-server-firewall-purge-9d9fe73faf925056.yaml | 6
-rw-r--r--  releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml | 10
-rw-r--r--  releasenotes/notes/disable-ceilo-api-dfe5d0947563bbe0.yaml | 4
-rw-r--r--  releasenotes/notes/disable-core-dump-for-setuid-programs-e83a2a5da908b9c3.yaml | 12
-rw-r--r--  releasenotes/notes/disable-kernel-parameter-for-icmp-redirects-f325f91d71b58b5f.yaml | 19
-rw-r--r--  releasenotes/notes/docker-service-all-roles-5c22a018caeafcf0.yaml | 5
-rw-r--r--  releasenotes/notes/enable-logging-suspicious-packets-d5545586f917d2ca.yaml | 9
-rw-r--r--  releasenotes/notes/enable-support-for-external-swift-proxy-941917f8bcc63a5d.yaml | 5
-rw-r--r--  releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml | 6
-rw-r--r--  releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml | 6
-rw-r--r--  releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml | 5
-rw-r--r--  releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml | 4
-rw-r--r--  releasenotes/notes/get-occ-config-local-connector-5bbec3f591a9f311.yaml | 10
-rw-r--r--  releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml | 4
-rw-r--r--  releasenotes/notes/ha-by-default-55326e699ee8602c.yaml | 5
-rw-r--r--  releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml | 6
-rw-r--r--  releasenotes/notes/ironic-neutron-integration-76c4f9e0d10785e4.yaml | 9
-rw-r--r--  releasenotes/notes/leave-satellite-repo-enabled-8b60528bd5450c7b.yaml | 6
-rw-r--r--  releasenotes/notes/make-panko-default-8d0e824fc91cef56.yaml | 4
-rw-r--r--  releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml | 14
-rw-r--r--  releasenotes/notes/ovn-fcd4b0168e6745a8.yaml | 6
-rw-r--r--  releasenotes/notes/ovs-2.5-2.6-composable-upgrades-workaround-73f4e56127c910b4.yaml | 12
-rw-r--r--  releasenotes/notes/pluggable-server-type-per-role-314f38f8e5d4c84e.yaml | 8
-rw-r--r--  releasenotes/notes/replace-references-to-old-ctlplane-0df7f2ae8910559c.yaml | 20
-rw-r--r--  releasenotes/notes/restrict-access-to-kernel-message-buffer-809160674b92a073.yaml | 11
-rw-r--r--  releasenotes/notes/restrict-mongodb-memory-de7bf6754d7234d9.yaml | 3
-rw-r--r--  releasenotes/notes/role-tags-16ac2e9e8fcab218.yaml | 18
-rw-r--r--  releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml | 4
-rw-r--r--  releasenotes/notes/set-ceilometer-auth-flag-382f68ddb2cbcb6b.yaml | 5
-rw-r--r--  releasenotes/notes/sriov-pci-passthrough-8f28719b889bdaf7.yaml | 4
-rw-r--r--  releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml | 4
-rw-r--r--  releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml | 7
-rw-r--r--  releasenotes/notes/update-on-rhel-registration-afbef3ead983b08f.yaml | 6
-rw-r--r--  releasenotes/notes/update-plan-environment-4e164b57a801e2cb.yaml | 3
-rw-r--r--  releasenotes/source/conf.py | 4
-rw-r--r--  requirements.txt | 2
-rw-r--r--  roles_data.yaml | 36
-rw-r--r--  roles_data_undercloud.yaml | 13
-rwxr-xr-x  scripts/hosts-config.sh | 18
-rw-r--r--  setup.py | 2
-rwxr-xr-x  tools/yaml-validate.py | 70
-rw-r--r--  validation-scripts/all-nodes.sh | 18
293 files changed, 5829 insertions(+), 3586 deletions(-)
diff --git a/README.rst b/README.rst
index 68fdd0ec..4eed715e 100644
--- a/README.rst
+++ b/README.rst
@@ -66,7 +66,7 @@ and should be executed according to the following table:
+================+=============+=============+=============+=============+=================+
| keystone | X | X | X | X | X |
+----------------+-------------+-------------+-------------+-------------+-----------------+
-| glance | file | swift | file | file | swift |
+| glance | rbd | swift | file | swift + rbd | swift |
+----------------+-------------+-------------+-------------+-------------+-----------------+
| cinder | rbd | iscsi | | | iscsi |
+----------------+-------------+-------------+-------------+-------------+-----------------+
@@ -76,6 +76,8 @@ and should be executed according to the following table:
+----------------+-------------+-------------+-------------+-------------+-----------------+
| neutron | ovs | ovs | ovs | ovs | X |
+----------------+-------------+-------------+-------------+-------------+-----------------+
+| neutron-bgpvpn | | | | X | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
| rabbitmq | X | X | X | X | X |
+----------------+-------------+-------------+-------------+-------------+-----------------+
| mongodb | X | X | | | |
@@ -128,3 +130,9 @@ and should be executed according to the following table:
+----------------+-------------+-------------+-------------+-------------+-----------------+
| manila | | | | X | |
+----------------+-------------+-------------+-------------+-------------+-----------------+
+| collectd | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| fluentd | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| sensu-client | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
diff --git a/all-nodes-validation.yaml b/all-nodes-validation.yaml
index 65d01d0f..eea3e40a 100644
--- a/all-nodes-validation.yaml
+++ b/all-nodes-validation.yaml
@@ -10,6 +10,10 @@ parameters:
default: ''
description: A string containing a space separated list of IP addresses used to ping test each available network interface.
type: string
+ ValidateFqdn:
+ default: false
+ description: Optional validation to ensure FQDN as set by Nova matches the name set in /etc/hosts.
+ type: boolean
resources:
AllNodesValidationsImpl:
@@ -19,6 +23,8 @@ resources:
inputs:
- name: ping_test_ips
default: {get_param: PingTestIps}
+ - name: validate_fqdn
+ default: {get_param: ValidateFqdn}
config: {get_file: ./validation-scripts/all-nodes.sh}
outputs:
diff --git a/bindep.txt b/bindep.txt
new file mode 100644
index 00000000..4f9b4254
--- /dev/null
+++ b/bindep.txt
@@ -0,0 +1,2 @@
+# This is a cross-platform list tracking distribution packages needed by tests;
+# see http://docs.openstack.org/infra/bindep/ for additional information.
diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index 3028dcfe..e510d679 100644
--- a/capabilities-map.yaml
+++ b/capabilities-map.yaml
@@ -2,12 +2,6 @@
# repository for deployment using puppet. It groups configuration by topic,
# describes possible combinations of environments and resource capabilities.
-# root_template: identifies repository's root template
-# root_environment: identifies root_environment, this one is special in terms of
-# order in which the environments are merged before deploying. This one serves as
-# a base and it's parameters/resource_registry gets overridden by other environments
-# if used.
-
# topics:
# High Level grouping by purpose of environments
# Attributes:
@@ -38,8 +32,6 @@
# only when that given environment is used. (resource_type of that environment can
# be implemented using multiple templates).
-root_template: overcloud.yaml
-root_environment: overcloud-resource-registry-puppet.yaml
topics:
- title: Base Resources Configuration
description:
@@ -308,6 +300,11 @@ topics:
description: >
Enable various Neutron plugins and backends
environments:
+ - file: environments/neutron-bgpvpn.yaml
+ title: Neutron BGPVPN Service Plugin
+ description: Enables Neutron BGPVPN Service Plugin
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- file: environments/neutron-ml2-bigswitch.yaml
title: BigSwitch Extensions
description: >
@@ -340,11 +337,6 @@ topics:
description: Enables OpenDaylight
requires:
- overcloud-resource-registry-puppet.yaml
- - file: environments/neutron-opendaylight-l3.yaml
- title: OpenDaylight with L3 DVR
- description: Enables OpenDaylight with L3 DVR
- requires:
- - overcloud-resource-registry-puppet.yaml
- file: environments/neutron-ovs-dpdk.yaml
title: DPDK with OVS
description: Deploy DPDK with OVS
@@ -370,6 +362,11 @@ topics:
description: Enable FOS in the overcloud
requires:
- overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-l2gw.yaml
+ title: Neutron L2 gateway Service Plugin
+ description: Enables Neutron L2 gateway Service Plugin
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- title: Nova Extensions
description:
@@ -539,14 +536,6 @@ topics:
description:
requires:
- overcloud-resource-registry-puppet.yaml
- - title: Manage Firewall
- description:
- environments:
- - file: environments/manage-firewall.yaml
- title: Manage Firewall
- description:
- requires:
- - overcloud-resource-registry-puppet.yaml
- title: Operational Tools
description:
@@ -555,7 +544,7 @@ topics:
description: Enable monitoring agents
environments:
- file: environments/monitoring-environment.yaml
- title: enable monitoring agents
+ title: Enable monitoring agents
description:
requires:
- overcloud-resource-registry-puppet.yaml
@@ -567,6 +556,14 @@ topics:
description:
requires:
- overcloud-resource-registry-puppet.yaml
+ - title: Performance monitoring
+ description: Enable performance monitoring agents
+ environments:
+ - file: environments/collectd-environment.yaml
+ title: Enable performance monitoring agents
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- title: Security Options
description: Security Hardening Options
@@ -595,3 +592,13 @@ topics:
description:
requires:
- overcloud-resource-registry-puppet.yaml
+ - title: Keystone CADF auditing
+ description: Enable CADF notifications in Keystone for auditing
+ environments:
+ - file: environments/cadf.yaml
+ title: Keystone CADF auditing
+ - title: SecureTTY Values
+ description: Set values within /etc/securetty
+ environments:
+ - file: environments/securetty.yaml
+ title: SecureTTY Values
diff --git a/ci/environments/multinode-3nodes.yaml b/ci/environments/multinode-3nodes.yaml
index d6e2376a..56013adf 100644
--- a/ci/environments/multinode-3nodes.yaml
+++ b/ci/environments/multinode-3nodes.yaml
@@ -24,7 +24,7 @@
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
- - OS::TripleO::Services::Core
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
@@ -63,7 +63,7 @@
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderVolume
- - OS::TripleO::Services::Core
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
diff --git a/ci/environments/multinode-container-upgrade.yaml b/ci/environments/multinode-container-upgrade.yaml
new file mode 100644
index 00000000..44a0ce73
--- /dev/null
+++ b/ci/environments/multinode-container-upgrade.yaml
@@ -0,0 +1,61 @@
+# NOTE: This is an environment specific for containers upgrade
+# CI. Mainly we deploy non-pacemakerized overcloud, as at the time
+# being containerization of services managed by pacemaker is not
+# complete, so we deploy and upgrade the non-HA services for now.
+
+resource_registry:
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+
+parameter_defaults:
+ ControllerServices:
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Docker
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ ControllerExtraConfig:
+ nova::compute::libvirt::services::libvirt_virt_type: qemu
+ nova::compute::libvirt::libvirt_virt_type: qemu
+ # Required for Centos 7.3 and Qemu 2.6.0
+ nova::compute::libvirt::libvirt_cpu_mode: 'none'
+ #NOTE(gfidente): not great but we need this to deploy on ext4
+ #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
+ ceph::profile::params::osd_max_object_name_len: 256
+ ceph::profile::params::osd_max_object_namespace_len: 64
+ SwiftCeilometerPipelineEnabled: False
+ Debug: True
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
index c946ec8a..d0d6ba99 100644
--- a/ci/environments/multinode.yaml
+++ b/ci/environments/multinode.yaml
@@ -18,6 +18,7 @@ parameter_defaults:
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml
index 2251cc0c..c97080fb 100644
--- a/ci/environments/multinode_major_upgrade.yaml
+++ b/ci/environments/multinode_major_upgrade.yaml
@@ -14,6 +14,7 @@ resource_registry:
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml
index a6f35711..0282c385 100644
--- a/ci/environments/scenario001-multinode.yaml
+++ b/ci/environments/scenario001-multinode.yaml
@@ -4,7 +4,6 @@ resource_registry:
OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
OS::TripleO::Services::CephClient: ../../puppet/services/ceph-client.yaml
- OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml
OS::TripleO::Services::Collectd: ../../puppet/services/metrics/collectd.yaml
OS::TripleO::Services::Tacker: ../../puppet/services/tacker.yaml
OS::TripleO::Services::Congress: ../../puppet/services/congress.yaml
@@ -19,9 +18,12 @@ resource_registry:
OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
+ OS::TripleO::Services::FluentdClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/logging/fluentd-client.yaml
+ OS::TripleO::Services::SensuClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/monitoring/sensu-client.yaml
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
@@ -50,6 +52,7 @@ parameter_defaults:
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
@@ -63,6 +66,7 @@ parameter_defaults:
- OS::TripleO::Services::CeilometerCollector
- OS::TripleO::Services::CeilometerExpirer
- OS::TripleO::Services::CeilometerAgentCentral
+ - OS::TripleO::Services::CeilometerAgentIpmi
- OS::TripleO::Services::CeilometerAgentNotification
- OS::TripleO::Services::GnocchiApi
- OS::TripleO::Services::GnocchiMetricd
@@ -80,6 +84,9 @@ parameter_defaults:
- OS::TripleO::Services::Congress
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::SensuClient
+
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
@@ -113,3 +120,17 @@ parameter_defaults:
******************************************************************
CollectdExtraPlugins:
- rrdtool
+ LoggingServers:
+ - host: 127.0.0.1
+ port: 24224
+ MonitoringRabbitHost: 127.0.0.1
+ MonitoringRabbitPort: 5676
+ MonitoringRabbitPassword: sensu
+ TtyValues:
+ - console
+ - tty1
+ - tty2
+ - tty3
+ - tty4
+ - tty5
+ - tty6
diff --git a/ci/environments/scenario002-multinode.yaml b/ci/environments/scenario002-multinode.yaml
index cbcfa9b3..38d24ee1 100644
--- a/ci/environments/scenario002-multinode.yaml
+++ b/ci/environments/scenario002-multinode.yaml
@@ -17,6 +17,7 @@ resource_registry:
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
diff --git a/ci/environments/scenario003-multinode.yaml b/ci/environments/scenario003-multinode.yaml
index 6e926f74..5472b494 100644
--- a/ci/environments/scenario003-multinode.yaml
+++ b/ci/environments/scenario003-multinode.yaml
@@ -17,6 +17,7 @@ resource_registry:
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml
index dc05ab4e..25fad4bb 100644
--- a/ci/environments/scenario004-multinode.yaml
+++ b/ci/environments/scenario004-multinode.yaml
@@ -12,6 +12,7 @@ resource_registry:
OS::TripleO::Services::ManilaScheduler: ../../puppet/services/manila-scheduler.yaml
OS::TripleO::Services::ManilaShare: ../../puppet/services/pacemaker/manila-share.yaml
OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml
+ OS::TripleO::Services::NeutronBgpVpnApi: ../../puppet/services/neutron-bgpvpn-api.yaml
# These enable Pacemaker
OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
@@ -30,6 +31,7 @@ parameter_defaults:
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::CephRgw
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
@@ -39,6 +41,7 @@ parameter_defaults:
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronBgpVpnApi
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
@@ -83,3 +86,5 @@ parameter_defaults:
CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
SwiftCeilometerPipelineEnabled: false
+ NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
+ BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
diff --git a/deployed-server/README.rst b/deployed-server/README.rst
index e4d8299b..8638818b 100644
--- a/deployed-server/README.rst
+++ b/deployed-server/README.rst
@@ -67,11 +67,11 @@ example:
parameter_defaults:
ControlPlaneDefaultRoute: 192.168.122.130
ControlPlaneSubnetCidr: "24"
- EC2MetadataIp: "192.0.2.1"
+ EC2MetadataIp: "192.168.24.1"
In this example, 192.168.122.130 is the external management IP of an
undercloud, thus it is the default route for the configured local_ip value of
-192.0.2.1.
+192.168.24.1.
os-collect-config
diff --git a/deployed-server/deployed-server-bootstrap-centos.sh b/deployed-server/deployed-server-bootstrap-centos.sh
index 7266ca57..6f2bb124 100644
--- a/deployed-server/deployed-server-bootstrap-centos.sh
+++ b/deployed-server/deployed-server-bootstrap-centos.sh
@@ -8,9 +8,13 @@ yum install -y \
openstack-puppet-modules \
os-net-config \
openvswitch \
- python-heat-agent*
+ python-heat-agent* \
+ openstack-selinux
ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
+
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/iptables
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/ip6tables
diff --git a/deployed-server/deployed-server-bootstrap-rhel.sh b/deployed-server/deployed-server-bootstrap-rhel.sh
index 36ff0077..9e9e9b3b 100644
--- a/deployed-server/deployed-server-bootstrap-rhel.sh
+++ b/deployed-server/deployed-server-bootstrap-rhel.sh
@@ -8,6 +8,10 @@ yum install -y \
openstack-puppet-modules \
os-net-config \
openvswitch \
- python-heat-agent*
+ python-heat-agent* \
+ openstack-selinux
ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
+
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/iptables
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/ip6tables
diff --git a/deployed-server/deployed-server-roles-data.yaml b/deployed-server/deployed-server-roles-data.yaml
index 04da5565..084c2f8f 100644
--- a/deployed-server/deployed-server-roles-data.yaml
+++ b/deployed-server/deployed-server-roles-data.yaml
@@ -26,6 +26,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::CephRgw
@@ -109,6 +110,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
@@ -133,6 +135,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -147,6 +150,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
@@ -162,6 +166,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
diff --git a/deployed-server/deployed-server.yaml b/deployed-server/deployed-server.yaml
index 1e8afb25..afdb5d0c 100644
--- a/deployed-server/deployed-server.yaml
+++ b/deployed-server/deployed-server.yaml
@@ -81,6 +81,7 @@ resources:
InstanceIdDeployment:
type: OS::Heat::StructuredDeployment
properties:
+ name: InstanceIdDeployment
config: {get_resource: InstanceIdConfig}
server: {get_resource: deployed-server}
depends_on: UpgradeInitDeployment
@@ -103,6 +104,7 @@ resources:
HostsEntryDeployment:
type: OS::Heat::SoftwareDeployment
properties:
+ name: HostsEntryDeployment
config: {get_resource: HostsEntryConfig}
server: {get_resource: deployed-server}
diff --git a/deployed-server/scripts/get-occ-config.sh b/deployed-server/scripts/get-occ-config.sh
index 6c196f97..28f038ce 100755
--- a/deployed-server/scripts/get-occ-config.sh
+++ b/deployed-server/scripts/get-occ-config.sh
@@ -63,7 +63,7 @@ for role in $OVERCLOUD_ROLES; do
rg_stack=$(openstack stack resource show overcloud $role -c physical_resource_id -f value)
done
- stacks=$(openstack stack resource list $rg_stack -c physical_resource_id -f value)
+ stacks=$(openstack stack resource list $rg_stack -c resource_name -c physical_resource_id -f json | jq -r "sort_by(.resource_name) | .[] | .physical_resource_id")
i=0
@@ -89,16 +89,16 @@ for role in $OVERCLOUD_ROLES; do
done
echo "======================"
- echo "$role$i os-collect-config.conf configuration:"
+ echo "$role$i deployed-server.json configuration:"
- config="
-[DEFAULT]
-collectors=request
-command=os-refresh-config
-polling_interval=30
-
-[request]
-metadata_url=$deployed_server_metadata_url"
+ config="{
+ \"os-collect-config\": {
+ \"collectors\": [\"request\", \"local\"],
+ \"request\": {
+ \"metadata_url\": \"$deployed_server_metadata_url\"
+ }
+ }
+}"
echo "$config"
echo "======================"
@@ -108,12 +108,11 @@ metadata_url=$deployed_server_metadata_url"
host=
eval host=\${${role}_hosts_a[i]}
if [ -n "$host" ]; then
- # Delete the os-collect-config.conf template so our file won't get
- # overwritten
- ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo /bin/rm -f /usr/libexec/os-apply-config/templates/etc/os-collect-config.conf
- ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host "echo \"$config\" > os-collect-config.conf"
- ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo cp os-collect-config.conf /etc/os-collect-config.conf
- ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo systemctl restart os-collect-config
+ ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host "echo '$config' > deployed-server.json"
+ ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo mkdir -p -m 0700 /var/lib/os-collect-config/local-data/ || true
+ ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo cp deployed-server.json /var/lib/os-collect-config/local-data/deployed-server.json
+ ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo systemctl start os-collect-config
+ ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo systemctl enable os-collect-config
fi
let i+=1
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
index d9496af6..909a2c8a 100755
--- a/docker/docker-puppet.py
+++ b/docker/docker-puppet.py
@@ -61,7 +61,10 @@ def rm_container(name):
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
print(cmd_stdout)
- print(cmd_stderr)
+ if cmd_stderr and \
+ cmd_stderr != 'Error response from daemon: ' \
+ 'No such container: {}\n'.format(name):
+ print(cmd_stderr)
process_count = int(os.environ.get('PROCESS_COUNT',
multiprocessing.cpu_count()))
@@ -102,6 +105,9 @@ for service in (json_data or []):
config_image = service[3] or ''
volumes = service[4] if len(service) > 4 else []
+ if not manifest or not config_image:
+ continue
+
print('---------')
print('config_volume %s' % config_volume)
print('puppet_tags %s' % puppet_tags)
@@ -199,14 +205,26 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
'--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
'--volume', '/var/lib/config-data/:/var/lib/config-data/:rw',
'--volume', 'tripleo_logs:/var/log/tripleo/',
+ # OpenSSL trusted CA injection
+ '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro',
+ '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro',
+ '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro',
+ '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro',
+ # script injection
'--volume', '%s:%s:rw' % (sh_script, sh_script) ]
for volume in volumes:
- dcmd.extend(['--volume', volume])
+ if volume:
+ dcmd.extend(['--volume', volume])
dcmd.extend(['--entrypoint', sh_script])
env = {}
+ # NOTE(flaper87): Always copy the DOCKER_* environment variables as
+ # they contain the access data for the docker daemon.
+ for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
+ env[k] = os.environ.get(k)
+
if os.environ.get('NET_HOST', 'false') == 'true':
print('NET_HOST enabled')
dcmd.extend(['--net', 'host', '--volume',
@@ -238,9 +256,9 @@ for config_volume in configs:
volumes = service[4] if len(service) > 4 else []
if puppet_tags:
- puppet_tags = "file,file_line,concat,%s" % puppet_tags
+ puppet_tags = "file,file_line,concat,augeas,%s" % puppet_tags
else:
- puppet_tags = "file,file_line,concat"
+ puppet_tags = "file,file_line,concat,augeas"
process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])
@@ -250,4 +268,13 @@ for p in process_map:
# Fire off processes to perform each configuration. Defaults
# to the number of CPUs on the system.
p = multiprocessing.Pool(process_count)
-p.map(mp_puppet_config, process_map)
+returncodes = list(p.map(mp_puppet_config, process_map))
+config_volumes = [pm[0] for pm in process_map]
+success = True
+for returncode, config_volume in zip(returncodes, config_volumes):
+ if returncode != 0:
+ print('ERROR configuring %s' % config_volume)
+ success = False
+
+if not success:
+ sys.exit(1)
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
new file mode 100644
index 00000000..f0af8e25
--- /dev/null
+++ b/docker/docker-steps.j2
@@ -0,0 +1,357 @@
+# certain initialization steps (run in a container) will occur
+# on the role marked as primary controller or the first role listed
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+ {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+ {%- set _ = primary_role.pop() -%}
+ {%- set _ = primary_role.append(role) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# primary role is: {{primary_role_name}}
+heat_template_version: ocata
+
+description: >
+ Post-deploy configuration steps via puppet for all roles,
+ as defined in ../roles_data.yaml
+
+parameters:
+ servers:
+ type: json
+ description: Mapping of Role name e.g Controller to a list of servers
+ role_data:
+ type: json
+ description: Mapping of Role name e.g Controller to the per-role data
+ DeployIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting this to a unique value will re-run any deployment tasks which
+ perform configuration on a Heat stack-update.
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+ # These utility tasks use docker-puppet.py to execute tasks via puppet
+ # We only execute these on the first node in the primary role
+ {{primary_role_name}}DockerPuppetTasks:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ yaql:
+ expression:
+ dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1]))
+ data:
+ docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]}
+
+# BEGIN primary_role_name docker-puppet-tasks (run only on a single node)
+{% for step in range(1, 6) %}
+
+ {{primary_role_name}}DockerPuppetJsonConfig{{step}}:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json:
+ {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']}
+
+ {{primary_role_name}}DockerPuppetJsonDeployment{{step}}:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ server: {get_param: [servers, {{primary_role_name}}, '0']}
+ config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}}
+
+ {{primary_role_name}}DockerPuppetTasksConfig{{step}}:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: docker-puppet.py}
+ inputs:
+ - name: CONFIG
+ - name: NET_HOST
+ - name: NO_ARCHIVE
+ - name: STEP
+
+ {{primary_role_name}}DockerPuppetTasksDeployment{{step}}:
+ type: OS::Heat::SoftwareDeployment
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step}}
+ - {{dep.name}}ContainersDeployment_Step{{step}}
+ {% endfor %}
+ - {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
+ properties:
+ name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
+ server: {get_param: [servers, {{primary_role_name}}, '0']}
+ config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}}
+ input_values:
+ CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
+ NET_HOST: 'true'
+ NO_ARCHIVE: 'true'
+ STEP: {{step}}
+
+{% endfor %}
+# END primary_role_name docker-puppet-tasks
+
+{% for role in roles %}
+ # Post deployment steps for all roles
+ # A single config is re-applied with an incrementing step number
+ # {{role.name}} Role steps
+ {{role.name}}ArtifactsConfig:
+ type: ../puppet/deploy-artifacts.yaml
+
+ {{role.name}}ArtifactsDeploy:
+ type: OS::Heat::StructuredDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ArtifactsConfig}
+
+ {{role.name}}PreConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PreConfig
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}CreateConfigDir:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: create-config-dir.sh}
+
+ {{role.name}}CreateConfigDirDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}CreateConfigDir}
+
+ {{role.name}}HostPrepAnsible:
+ type: OS::Heat::Value
+ properties:
+ value:
+ str_replace:
+ template: CONFIG
+ params:
+ CONFIG:
+ - hosts: localhost
+ connection: local
+ tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
+
+ {{role.name}}HostPrepConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: ansible
+ options:
+ modulepath: /usr/share/ansible-modules
+ config: {get_attr: [{{role.name}}HostPrepAnsible, value]}
+
+ {{role.name}}HostPrepDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}HostPrepConfig}
+
+ # this creates a JSON config file for our docker-puppet.py script
+ {{role.name}}GenPuppetConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ /var/lib/docker-puppet/docker-puppet.json:
+ {get_param: [role_data, {{role.name}}, puppet_config]}
+
+ {{role.name}}GenPuppetDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}GenPuppetConfig}
+
+ {{role.name}}GenerateConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: docker-puppet.py}
+
+ {{role.name}}GenerateConfigDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment, {{role.name}}HostPrepDeployment]
+ properties:
+ name: {{role.name}}GenerateConfigDeployment
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}GenerateConfig}
+
+ {{role.name}}PuppetStepConfig:
+ type: OS::Heat::Value
+ properties:
+ type: string
+ value:
+ yaql:
+ expression:
+ # select 'step_config' only from services that do not have a docker_config
+ $.data.service_names.zip($.data.step_config, $.data.docker_config).where($[2] = null).where($[1] != null).select($[1]).join("\n")
+ data:
+ service_names: {get_param: [role_data, {{role.name}}, service_names]}
+ step_config: {get_param: [role_data, {{role.name}}, step_config]}
+ docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
+
+ {{role.name}}DockerConfig:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ yaql:
+ expression:
+ # select 'docker_config' only from services that have it
+ $.data.service_names.zip($.data.docker_config).where($[1] != null).select($[1]).reduce($1.mergeWith($2), {})
+ data:
+ service_names: {get_param: [role_data, {{role.name}}, service_names]}
+ docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
+
+ # Here we are dumping all the docker container startup configuration data
+ # so that we can have access to how they are started outside of heat
+ # and docker-cmd. This lets us create command line tools to start and
+ # test these containers.
+ {{role.name}}DockerConfigJsonStartupData:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ /var/lib/docker-container-startup-configs.json:
+ {get_attr: [{{role.name}}DockerConfig, value]}
+
+ {{role.name}}DockerConfigJsonStartupDataDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: {{role.name}}DockerConfigJsonStartupData}
+ servers: {get_param: [servers, {{role.name}}]}
+
+ {{role.name}}KollaJsonConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ {get_param: [role_data, {{role.name}}, kolla_config]}
+
+ {{role.name}}KollaJsonDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ name: {{role.name}}KollaJsonDeployment
+ config: {get_resource: {{role.name}}KollaJsonConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+
+ # BEGIN BAREMETAL CONFIG STEPS
+
+ {% if role.name == 'Controller' %}
+ ControllerPrePuppet:
+ type: OS::TripleO::Tasks::ControllerPrePuppet
+ properties:
+ servers: {get_param: [servers, Controller]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+ {% endif %}
+
+ {{role.name}}Config:
+ type: OS::TripleO::{{role.name}}Config
+ properties:
+ StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]}
+
+ {% for step in range(1, 6) %}
+
+ {{role.name}}Deployment_Step{{step}}:
+ type: OS::Heat::StructuredDeploymentGroup
+ {% if step == 1 %}
+ depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ {% else %}
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ - {{dep.name}}ContainersDeployment_Step{{step -1}}
+ {% endfor %}
+ - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
+ {% endif %}
+ properties:
+ name: {{role.name}}Deployment_Step{{step}}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: {{step}}
+ update_identifier: {get_param: DeployIdentifier}
+
+ {% endfor %}
+ # END BAREMETAL CONFIG STEPS
+
+ # BEGIN CONTAINER CONFIG STEPS
+ {% for step in range(1, 6) %}
+
+ {{role.name}}ContainersConfig_Step{{step}}:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: docker-cmd
+ config:
+ {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]}
+
+ {{role.name}}ContainersDeployment_Step{{step}}:
+ type: OS::Heat::StructuredDeploymentGroup
+ {% if step == 1 %}
+ depends_on:
+ - {{role.name}}PreConfig
+ - {{role.name}}KollaJsonDeployment
+ - {{role.name}}GenPuppetDeployment
+ - {{role.name}}GenerateConfigDeployment
+ {% else %}
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}ContainersDeployment_Step{{step -1}}
+ - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
+ {% endif %}
+ properties:
+ name: {{role.name}}ContainersDeployment_Step{{step}}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}}
+
+ {% endfor %}
+ # END CONTAINER CONFIG STEPS
+
+ {{role.name}}PostConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PostConfig
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ - {{primary_role_name}}DockerPuppetTasksDeployment5
+ {% endfor %}
+ properties:
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ # Note, this should come last, so use depends_on to ensure
+ # this is created after any other resources.
+ {{role.name}}ExtraConfigPost:
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}PostConfig
+ {% endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+
+ {% if role.name == 'Controller' %}
+ ControllerPostPuppet:
+ depends_on:
+ - ControllerExtraConfigPost
+ type: OS::TripleO::Tasks::ControllerPostPuppet
+ properties:
+ servers: {get_param: [servers, Controller]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+ {% endif %}
+
+{% endfor %}
diff --git a/docker/firstboot/setup_docker_host.sh b/docker/firstboot/setup_docker_host.sh
index b2287e91..8b4c6a03 100755
--- a/docker/firstboot/setup_docker_host.sh
+++ b/docker/firstboot/setup_docker_host.sh
@@ -1,26 +1,8 @@
#!/bin/bash
set -eux
-# TODO This would be better in puppet
+# This file contains setup steps that can't be or have not yet been moved to
+# puppet
-# TODO remove this when built image includes docker
-if [ ! -f "/usr/bin/docker" ]; then
- yum -y install docker
-fi
-
-# NOTE(mandre) $docker_namespace_is_registry is not a bash variable but is
-# a place holder for text replacement done via heat
-if [ "$docker_namespace_is_registry" = "True" ]; then
- /usr/bin/systemctl stop docker.service
- # if namespace is used with local registry, trim all namespacing
- trim_var=$docker_registry
- registry_host="${trim_var%%/*}"
- /bin/sed -i -r "s/^[# ]*INSECURE_REGISTRY *=.+$/INSECURE_REGISTRY='--insecure-registry $registry_host'/" /etc/sysconfig/docker
-fi
-
-# enable and start docker
-/usr/bin/systemctl enable docker.service
-/usr/bin/systemctl start docker.service
-
-# Disable libvirtd
+# Disable libvirtd since it conflicts with nova_libvirt container
/usr/bin/systemctl disable libvirtd.service
/usr/bin/systemctl stop libvirtd.service
diff --git a/docker/post-upgrade.j2.yaml b/docker/post-upgrade.j2.yaml
new file mode 100644
index 00000000..4477f868
--- /dev/null
+++ b/docker/post-upgrade.j2.yaml
@@ -0,0 +1,4 @@
+# Note the include here is the same as post.j2.yaml but the data used at
+# # the time of rendering is different if any roles disable upgrades
+{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% include 'docker-steps.j2' %}
diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml
index 65d0c4ee..fd956215 100644
--- a/docker/post.j2.yaml
+++ b/docker/post.j2.yaml
@@ -1,334 +1 @@
-# certain initialization steps (run in a container) will occur
-# on the first role listed in the roles file
-{% set primary_role_name = roles[0].name -%}
-
-heat_template_version: ocata
-
-description: >
- Post-deploy configuration steps via puppet for all roles,
- as defined in ../roles_data.yaml
-
-parameters:
- servers:
- type: json
- description: Mapping of Role name e.g Controller to a list of servers
- role_data:
- type: json
- description: Mapping of Role name e.g Controller to the per-role data
- DeployIdentifier:
- default: ''
- type: string
- description: >
- Setting this to a unique value will re-run any deployment tasks which
- perform configuration on a Heat stack-update.
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- # These utility tasks use docker-puppet.py to execute tasks via puppet
- # We only execute these on the first node in the primary role
- {{primary_role_name}}DockerPuppetTasks:
- type: OS::Heat::Value
- properties:
- type: json
- value:
- yaql:
- expression:
- dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1]))
- data:
- docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]}
-
-# BEGIN primary_role_name docker-puppet-tasks (run only on a single node)
-{% for step in range(1, 6) %}
-
- {{primary_role_name}}DockerPuppetJsonConfig{{step}}:
- type: OS::Heat::StructuredConfig
- properties:
- group: json-file
- config:
- /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json:
- {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']}
-
- {{primary_role_name}}DockerPuppetJsonDeployment{{step}}:
- type: OS::Heat::SoftwareDeployment
- properties:
- server: {get_param: [servers, {{primary_role_name}}, '0']}
- config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}}
-
- {{primary_role_name}}DockerPuppetTasksConfig{{step}}:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: docker-puppet.py}
- inputs:
- - name: CONFIG
- - name: NET_HOST
- - name: NO_ARCHIVE
- - name: STEP
-
- {{primary_role_name}}DockerPuppetTasksDeployment{{step}}:
- type: OS::Heat::SoftwareDeployment
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step{{step}}
- - {{dep.name}}ContainersDeployment_Step{{step}}
- {% endfor %}
- - {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
- properties:
- name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
- server: {get_param: [servers, {{primary_role_name}}, '0']}
- config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}}
- input_values:
- CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
- NET_HOST: 'true'
- NO_ARCHIVE: 'true'
- STEP: {{step}}
-
-{% endfor %}
-# END primary_role_name docker-puppet-tasks
-
-{% for role in roles %}
- # Post deployment steps for all roles
- # A single config is re-applied with an incrementing step number
- # {{role.name}} Role steps
- {{role.name}}ArtifactsConfig:
- type: ../puppet/deploy-artifacts.yaml
-
- {{role.name}}ArtifactsDeploy:
- type: OS::Heat::StructuredDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}ArtifactsConfig}
-
- {{role.name}}PreConfig:
- type: OS::TripleO::Tasks::{{role.name}}PreConfig
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}CreateConfigDir:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: create-config-dir.sh}
-
- {{role.name}}CreateConfigDirDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}CreateConfigDir}
-
- # this creates a JSON config file for our docker-puppet.py script
- {{role.name}}GenPuppetConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: json-file
- config:
- /var/lib/docker-puppet/docker-puppet.json:
- yaql:
- # select only services that have a non-null config_image with
- # a step_config as well
- expression:
- $.data.config_volume.zip($.data.puppet_tags, $.data.step_config, $.data.config_image).where($[3] != null and $[1] != null)
- data:
- config_volume: {get_param: [role_data, {{role.name}}, config_volume]}
- step_config: {get_param: [role_data, {{role.name}}, step_config]}
- puppet_tags: {get_param: [role_data, {{role.name}}, puppet_tags]}
- config_image: {get_param: [role_data, {{role.name}}, config_image]}
-
- {{role.name}}GenPuppetDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}GenPuppetConfig}
-
- {{role.name}}GenerateConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: docker-puppet.py}
-
- {{role.name}}GenerateConfigDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment]
- properties:
- name: {{role.name}}GenerateConfigDeployment
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}GenerateConfig}
-
- {{role.name}}PuppetStepConfig:
- type: OS::Heat::Value
- properties:
- type: string
- value:
- yaql:
- expression:
- # select 'step_config' only from services that do not have a docker_image
- $.data.service_names.zip($.data.step_config, $.data.docker_image).where($[2] = null).where($[1] != null).select($[1]).join("\n")
- data:
- service_names: {get_param: [role_data, {{role.name}}, service_names]}
- step_config: {get_param: [role_data, {{role.name}}, step_config]}
- docker_image: {get_param: [role_data, {{role.name}}, docker_image]}
-
- {{role.name}}DockerConfig:
- type: OS::Heat::Value
- properties:
- type: json
- value:
- yaql:
- expression:
- # select 'docker_config' only from services that have a docker_image
- $.data.service_names.zip($.data.docker_config, $.data.docker_image).where($[2] != null).select($[1]).reduce($1.mergeWith($2), {})
- data:
- service_names: {get_param: [role_data, {{role.name}}, service_names]}
- docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
- docker_image: {get_param: [role_data, {{role.name}}, docker_image]}
-
- # Here we are dumping all the docker container startup configuration data
- # so that we can have access to how they are started outside of heat
- # and docker-cmd. This lets us create command line tools to start and
- # test these containers.
- {{role.name}}DockerConfigJsonStartupData:
- type: OS::Heat::StructuredConfig
- properties:
- group: json-file
- config:
- /var/lib/docker-container-startup-configs.json:
- {get_attr: [{{role.name}}DockerConfig, value]}
-
- {{role.name}}DockerConfigJsonStartupDataDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: {{role.name}}DockerConfigJsonStartupData}
- servers: {get_param: [servers, {{role.name}}]}
-
- {{role.name}}KollaJsonConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: json-file
- config:
- {get_param: [role_data, {{role.name}}, kolla_config]}
-
- {{role.name}}KollaJsonDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- name: {{role.name}}KollaJsonDeployment
- config: {get_resource: {{role.name}}KollaJsonConfig}
- servers: {get_param: [servers, {{role.name}}]}
-
- # BEGIN BAREMETAL CONFIG STEPS
-
- {% if role.name == 'Controller' %}
- ControllerPrePuppet:
- type: OS::TripleO::Tasks::ControllerPrePuppet
- properties:
- servers: {get_param: [servers, Controller]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
- {% endif %}
-
- {{role.name}}Config:
- type: OS::TripleO::{{role.name}}Config
- properties:
- StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]}
-
- {% for step in range(1, 6) %}
-
- {{role.name}}Deployment_Step{{step}}:
- type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step{{step -1}}
- - {{dep.name}}ContainersDeployment_Step{{step -1}}
- {% endfor %}
- - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
- {% endif %}
- properties:
- name: {{role.name}}Deployment_Step{{step}}
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: {{step}}
- update_identifier: {get_param: DeployIdentifier}
-
- {% endfor %}
- # END BAREMETAL CONFIG STEPS
-
- # BEGIN CONTAINER CONFIG STEPS
- {% for step in range(1, 6) %}
-
- {{role.name}}ContainersConfig_Step{{step}}:
- type: OS::Heat::StructuredConfig
- properties:
- group: docker-cmd
- config:
- {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]}
-
- {{role.name}}ContainersDeployment_Step{{step}}:
- type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on:
- - {{role.name}}PreConfig
- - {{role.name}}KollaJsonDeployment
- - {{role.name}}GenPuppetDeployment
- - {{role.name}}GenerateConfigDeployment
- {% else %}
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}ContainersDeployment_Step{{step -1}}
- - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
- - {{dep.name}}Deployment_Step{{step -1}}
- {% endfor %}
- - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
- {% endif %}
- properties:
- name: {{role.name}}ContainersDeployment_Step{{step}}
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}}
-
- {% endfor %}
- # END CONTAINER CONFIG STEPS
-
- {{role.name}}PostConfig:
- type: OS::TripleO::Tasks::{{role.name}}PostConfig
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step5
- - {{primary_role_name}}DockerPuppetTasksDeployment5
- {% endfor %}
- properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- {{role.name}}ExtraConfigPost:
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}PostConfig
- {% endfor %}
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: [servers, {{role.name}}]}
-
- {% if role.name == 'Controller' %}
- ControllerPostPuppet:
- depends_on:
- - ControllerExtraConfigPost
- type: OS::TripleO::Tasks::ControllerPostPuppet
- properties:
- servers: {get_param: [servers, Controller]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
- {% endif %}
-
-{% endfor %}
+{% include 'docker-steps.j2' %}
diff --git a/docker/services/README.rst b/docker/services/README.rst
index 881a2a37..84ac842e 100644
--- a/docker/services/README.rst
+++ b/docker/services/README.rst
@@ -23,7 +23,7 @@ puppet (our configuration tool of choice) into the Kolla base images. The
undercloud nova-scheduler also requires openstack-tripleo-common to
provide custom filters.
-To build Kolla images for TripleO adjust your kolla config to build your
+To build Kolla images for TripleO, adjust your kolla config [*]_ to build your
centos base image with puppet using the example below:
.. code-block::
@@ -37,6 +37,10 @@ kolla-build --base centos --template-override template-overrides.j2
..
+.. [*] See the
+ `override file <https://github.com/openstack/tripleo-common/blob/master/contrib/tripleo_kolla_template_overrides.j2>`_
+ which can be used to build Kolla packages that work with TripleO, and an
+ `example build script <https://github.com/dprince/undercloud_containers/blob/master/build_kolla.sh>`_.
Docker settings
---------------
@@ -58,27 +62,34 @@ are re-asserted when applying latter ones.
the container itself at the /var/lib/kolla/config_files/config.json
location and drives how kolla's external config mechanisms work.
- * docker_image: The full name of the docker image that will be used.
-
* docker_config: Data that is passed to the docker-cmd hook to configure
   a container, or set of containers, at each step. See the available steps
below and the related docker-cmd hook documentation in the heat-agents
project.
- * puppet_tags: Puppet resource tag names that are used to generate config
- files with puppet. Only the named config resources are used to generate
- a config file. Any service that specifies tags will have the default
- tags of 'file,concat,file_line' appended to the setting.
- Example: keystone_config
-
- * config_volume: The name of the volume (directory) where config files
- will be generated for this service. Use this as the location to
- bind mount into the running Kolla container for configuration.
-
- * config_image: The name of the docker image that will be used for
- generating configuration files. This is often the same value as
- 'docker_image' above but some containers share a common set of
- config files which are generated in a common base container.
+ * puppet_config: This section is a nested set of key value pairs
+ that drive the creation of config files using puppet.
+ Required parameters include:
+
+ * puppet_tags: Puppet resource tag names that are used to generate config
+ files with puppet. Only the named config resources are used to generate
+ a config file. Any service that specifies tags will have the default
+ tags of 'file,concat,file_line,augeas' appended to the setting.
+ Example: keystone_config
+
+ * config_volume: The name of the volume (directory) where config files
+ will be generated for this service. Use this as the location to
+ bind mount into the running Kolla container for configuration.
+
+ * config_image: The name of the docker image that will be used for
+ generating configuration files. This is often the same container
+ that the runtime service uses. Some services share a common set of
+ config files which are generated in a common base container.
+
+ * step_config: This setting controls the manifest that is used to
+ create docker config files via puppet. The puppet tags above are
+ used along with this manifest to generate a config directory for
+ this container.
* docker_puppet_tasks: This section provides data to drive the
docker-puppet.py tool directly. The task is executed only once
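As a schematic illustration of the ``puppet_config`` keys described in the
README hunk above, a containerized service template emits something of this
shape in its ``role_data`` output (names below are placeholders; the real
service templates in this change, e.g. ``docker/services/aodh-api.yaml``,
follow the same pattern):

.. code-block:: yaml

    puppet_config:
      config_volume: example            # config files land under /var/lib/config-data/example
      puppet_tags: example_config       # 'file,concat,file_line,augeas' are appended automatically
      step_config: 'include ::tripleo::profile::base::example'
      config_image: tripleoupstream/centos-binary-example:latest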
diff --git a/docker/services/aodh-api.yaml b/docker/services/aodh-api.yaml
new file mode 100644
index 00000000..3181fad7
--- /dev/null
+++ b/docker/services/aodh-api.yaml
@@ -0,0 +1,112 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized aodh service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhApiImage:
+ description: image
+ default: 'centos-binary-aodh-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ AodhApiPuppetBase:
+ type: ../../puppet/services/aodh-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the aodh API role.
+ value:
+ service_name: {get_attr: [AodhApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhApiPuppetBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [AodhApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_api_paste_ini,aodh_config
+ step_config: *step_config
+ config_image: &aodh_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ docker_config:
+ step_3:
+ aodh_init_log:
+ start_order: 0
+ image: *aodh_image
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/aodh && chown aodh:aodh /var/log/aodh']
+ volumes:
+ - logs:/var/log
+ aodh_db_sync:
+ start_order: 1
+ image: *aodh_image
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ - logs:/var/log
+ command: /usr/bin/aodh-dbsync
+ step_4:
+ aodh_api:
+ image: *aodh_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/aodh-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ - /var/lib/config-data/aodh/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/aodh/var/www/:/var/www/:ro
+ - logs:/var/log
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable aodh service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped enabled=no
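A note on the ``&step_config`` / ``*step_config`` pair used throughout these
new service templates: it is plain YAML anchor/alias reuse, so the same
step_config value feeds both the baremetal ``role_data`` output and the
container's ``puppet_config``. A stripped-down sketch of the pattern
(placeholder names, not a real template):

.. code-block:: yaml

    outputs:
      role_data:
        value:
          step_config: &step_config
            get_attr: [ExamplePuppetBase, role_data, step_config]
          puppet_config:
            step_config: *step_config   # alias resolves to the same value as the anchor above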
diff --git a/docker/services/aodh-evaluator.yaml b/docker/services/aodh-evaluator.yaml
new file mode 100644
index 00000000..13d6cf21
--- /dev/null
+++ b/docker/services/aodh-evaluator.yaml
@@ -0,0 +1,85 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Aodh Evaluator service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhEvaluatorImage:
+ description: image
+ default: 'centos-binary-aodh-evaluator:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ AodhEvaluatorBase:
+ type: ../../puppet/services/aodh-evaluator.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Aodh Evaluator role.
+ value:
+ service_name: {get_attr: [AodhEvaluatorBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhEvaluatorBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [AodhEvaluatorBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhEvaluatorBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_config
+ step_config: *step_config
+ config_image: &aodh_evaluator_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhEvaluatorImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-evaluator.json:
+ command: /usr/bin/aodh-evaluator
+ docker_config:
+ step_4:
+ aodh_evaluator:
+ image: *aodh_evaluator_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/aodh-evaluator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable openstack-aodh-evaluator service
+ tags: step2
+ service: name=openstack-aodh-evaluator.service state=stopped enabled=no
diff --git a/docker/services/aodh-listener.yaml b/docker/services/aodh-listener.yaml
new file mode 100644
index 00000000..63c45aad
--- /dev/null
+++ b/docker/services/aodh-listener.yaml
@@ -0,0 +1,85 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Aodh Listener service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhListenerImage:
+ description: image
+ default: 'centos-binary-aodh-listener:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ AodhListenerBase:
+ type: ../../puppet/services/aodh-listener.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Aodh Listener role.
+ value:
+ service_name: {get_attr: [AodhListenerBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhListenerBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [AodhListenerBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhListenerBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_config
+ step_config: *step_config
+ config_image: &aodh_listener_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhListenerImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-listener.json:
+ command: /usr/bin/aodh-listener
+ docker_config:
+ step_4:
+ aodh_listener:
+ image: *aodh_listener_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/aodh-listener.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable openstack-aodh-listener service
+ tags: step2
+ service: name=openstack-aodh-listener.service state=stopped enabled=no
diff --git a/docker/services/aodh-notifier.yaml b/docker/services/aodh-notifier.yaml
new file mode 100644
index 00000000..dbe31b65
--- /dev/null
+++ b/docker/services/aodh-notifier.yaml
@@ -0,0 +1,85 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Aodh Notifier service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhNotifierImage:
+ description: image
+ default: 'centos-binary-aodh-notifier:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ AodhNotifierBase:
+ type: ../../puppet/services/aodh-notifier.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Aodh Notifier role.
+ value:
+ service_name: {get_attr: [AodhNotifierBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhNotifierBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [AodhNotifierBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhNotifierBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_config
+ step_config: *step_config
+ config_image: &aodh_notifier_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhNotifierImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-notifier.json:
+ command: /usr/bin/aodh-notifier
+ docker_config:
+ step_4:
+ aodh_notifier:
+ image: *aodh_notifier_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/aodh-notifier.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable openstack-aodh-notifier service
+ tags: step2
+ service: name=openstack-aodh-notifier.service state=stopped enabled=no
diff --git a/docker/services/containers-common.yaml b/docker/services/containers-common.yaml
new file mode 100644
index 00000000..d3561f6b
--- /dev/null
+++ b/docker/services/containers-common.yaml
@@ -0,0 +1,16 @@
+heat_template_version: ocata
+
+description: >
+ Contains a static list of common volume mounts needed by the containerized services
+
+outputs:
+ volumes:
+ description: Common volumes for the containers.
+ value:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ # OpenSSL trusted CAs
+ - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
+ - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
+ - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
+ - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
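Service templates pull this list in via the yaql ``concat`` expression used
throughout this change (``$.data.common.concat($.data.service)``); the result
is simply the common mounts above followed by the service-specific ones. For
example, the ``aodh_api`` container defined earlier ends up with a volume list
equivalent to:

.. code-block:: yaml

    volumes:
      - /etc/hosts:/etc/hosts:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
      - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
      - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
      - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
      - /var/lib/kolla/config_files/aodh-api.json:/var/lib/kolla/config_files/config.json:ro
      - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
      - /var/lib/config-data/aodh/etc/httpd/:/etc/httpd/:ro
      - /var/lib/config-data/aodh/var/www/:/var/www/:ro
      - logs:/var/log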
diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml
index 127f8839..7d2d1a15 100644
--- a/docker/services/database/mongodb.yaml
+++ b/docker/services/database/mongodb.yaml
@@ -45,32 +45,27 @@ outputs:
map_merge:
- get_attr: [MongodbPuppetBase, role_data, config_settings]
- mongodb::server::fork: false
- step_config:
+ step_config: &step_config
list_join:
- "\n"
- - "['Mongodb_database', 'Mongodb_user', 'Mongodb_replset'].each |String $val| { noop_resource($val) }"
- {get_attr: [MongodbPuppetBase, role_data, step_config]}
- upgrade_tasks: {get_attr: [MongodbPuppetBase, role_data, upgrade_tasks]}
# BEGIN DOCKER SETTINGS #
- docker_image: &mongodb_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
- puppet_tags: file # set this even though file is the default
- config_volume: mongodb
- config_image: *mongodb_image
+ puppet_config:
+ config_volume: mongodb
+ puppet_tags: file # set this even though file is the default
+ step_config: *step_config
+ config_image: &mongodb_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
kolla_config:
/var/lib/kolla/config_files/mongodb.json:
command: /usr/bin/mongod --unixSocketPrefix=/var/run/mongodb --config /etc/mongod.conf run
- config_files:
- - dest: /etc/mongod.conf
- source: /var/lib/kolla/config_files/src/etc/mongod.conf
- owner: mongodb
- perm: '0600'
- - dest: /etc/mongos.conf
- source: /var/lib/kolla/config_files/src/etc/mongos.conf
- owner: mongodb
- perm: '0600'
+ permissions:
+ - path: /var/lib/mongodb
+ owner: mongodb:mongodb
+ recurse: true
docker_config:
step_2:
mongodb:
@@ -79,10 +74,10 @@ outputs:
privileged: false
volumes: &mongodb_volumes
- /var/lib/kolla/config_files/mongodb.json:/var/lib/kolla/config_files/config.json
- - /var/lib/config-data/mongodb/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/mongodb/etc/:/etc/:ro
- /etc/localtime:/etc/localtime:ro
- logs:/var/log/kolla
- - mongodb:/var/lib/mongodb/
+ - /var/lib/mongodb:/var/lib/mongodb
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
docker_puppet_tasks:
@@ -91,10 +86,16 @@ outputs:
config_volume: 'mongodb_init_tasks'
puppet_tags: 'mongodb_database,mongodb_user,mongodb_replset'
step_config: 'include ::tripleo::profile::base::database::mongodb'
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
+ config_image: *mongodb_image
volumes:
- - "mongodb:/var/lib/mongodb/"
- - "logs:/var/log/kolla:ro"
+ - /var/lib/mongodb:/var/lib/mongodb
+ - logs:/var/log/kolla:ro
+ host_prep_tasks:
+ - name: create /var/lib/mongodb
+ file:
+ path: /var/lib/mongodb
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable mongodb service
+ tags: step2
+ service: name=mongod state=stopped enabled=no
diff --git a/docker/services/database/mysql.yaml b/docker/services/database/mysql.yaml
index 5809396e..cba2070d 100644
--- a/docker/services/database/mysql.yaml
+++ b/docker/services/database/mysql.yaml
@@ -54,32 +54,27 @@ outputs:
pid-file: /var/lib/mysql/mariadb.pid
mysqld_safe:
pid-file: /var/lib/mysql/mariadb.pid
- step_config:
+ step_config: &step_config
list_join:
- "\n"
- - "['Mysql_datadir', 'Mysql_user', 'Mysql_database', 'Mysql_grant', 'Mysql_plugin'].each |String $val| { noop_resource($val) }"
- {get_attr: [MysqlPuppetBase, role_data, step_config]}
- upgrade_tasks: {get_attr: [MysqlPuppetBase, role_data, upgrade_tasks]}
# BEGIN DOCKER SETTINGS #
- docker_image: &mysql_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
- puppet_tags: file # set this even though file is the default
- config_volume: mysql
- config_image: *mysql_image
+ puppet_config:
+ config_volume: mysql
+ puppet_tags: file # set this even though file is the default
+ step_config: *step_config
+ config_image: &mysql_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
kolla_config:
/var/lib/kolla/config_files/mysql.json:
command: /usr/bin/mysqld_safe
- config_files:
- - dest: /etc/mysql/my.cnf
- source: /var/lib/kolla/config_files/src/etc/my.cnf
- owner: mysql
- perm: '0644'
- - dest: /etc/my.cnf.d/galera.cnf
- source: /var/lib/kolla/config_files/src/etc/my.cnf.d/galera.cnf
- owner: mysql
- perm: '0644'
+ permissions:
+ - path: /var/lib/mysql
+ owner: mysql:mysql
+ recurse: true
docker_config:
step_2:
mysql_bootstrap:
@@ -87,12 +82,14 @@ outputs:
detach: false
image: *mysql_image
net: host
+ # Kolla bootstraps aren't idempotent, so explicitly check whether the bootstrap has already been done
+ command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start']
volumes: &mysql_volumes
- /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json
- - /var/lib/config-data/mysql/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/mysql/etc/:/etc/:ro
- /etc/localtime:/etc/localtime:ro
- /etc/hosts:/etc/hosts:ro
- - mariadb:/var/lib/mysql/
+ - /var/lib/mysql:/var/lib/mysql
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- KOLLA_BOOTSTRAP=True
@@ -123,10 +120,16 @@ outputs:
config_volume: 'mysql_init_tasks'
puppet_tags: 'mysql_database,mysql_grant,mysql_user'
step_config: 'include ::tripleo::profile::base::database::mysql'
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
+ config_image: *mysql_image
volumes:
- - "mariadb:/var/lib/mysql/:ro"
- - "/var/lib/config-data/mysql/root:/root:ro" #provides .my.cnf
+ - /var/lib/mysql:/var/lib/mysql/:ro
+ - /var/lib/config-data/mysql/root:/root:ro #provides .my.cnf
+ host_prep_tasks:
+ - name: create /var/lib/mysql
+ file:
+ path: /var/lib/mysql
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable mysql service
+ tags: step2
+ service: name=mariadb state=stopped enabled=no
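The pieces added to the MySQL template above work together to make re-running
the deployment safe: ``host_prep_tasks`` creates ``/var/lib/mysql`` on the
host, the container bind mounts it, and the ``mysql_bootstrap`` command only
invokes ``kolla_start`` when the datadir has not been initialized yet. The
same guard pattern could be applied to any non-idempotent kolla bootstrap; a
schematic sketch, with placeholder paths and container name:

.. code-block:: yaml

    example_bootstrap:
      detach: false
      # run the kolla bootstrap only on first deployment; on subsequent
      # stack updates the marker path already exists and the command is a no-op
      command: ['bash', '-c', 'test -e /var/lib/example/.bootstrapped || kolla_start']
      volumes:
        - /var/lib/example:/var/lib/example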
diff --git a/docker/services/glance-api.yaml b/docker/services/glance-api.yaml
index b8ab9622..0b4f81ed 100644
--- a/docker/services/glance-api.yaml
+++ b/docker/services/glance-api.yaml
@@ -29,6 +29,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
GlanceApiPuppetBase:
type: ../../puppet/services/glance-api.yaml
properties:
@@ -45,28 +48,21 @@ outputs:
map_merge:
- get_attr: [GlanceApiPuppetBase, role_data, config_settings]
- glance::api::sync_db: false
- step_config: {get_attr: [GlanceApiPuppetBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [GlanceApiPuppetBase, role_data, step_config]
service_config_settings: {get_attr: [GlanceApiPuppetBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS #
- docker_image: &glance_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
- puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config
- config_volume: glance_api
- config_image: *glance_image
+ puppet_config:
+ config_volume: glance_api
+ puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config
+ step_config: *step_config
+ config_image: &glance_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
kolla_config:
- /var/lib/kolla/config_files/glance-api.json:
- command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf
- config_files:
- - dest: /etc/glance/glance-api.conf
- owner: glance
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/glance/glance-api.conf
- - dest: /etc/glance/glance-swift.conf
- owner: glance
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/glance/glance-swift.conf
+ /var/lib/kolla/config_files/glance-api.json:
+ command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf
docker_config:
step_3:
glance_api_db_sync:
@@ -75,13 +71,16 @@ outputs:
privileged: false
detach: false
volumes: &glance_volumes
- - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /var/lib/config-data/glance_api/:/var/lib/kolla/config_files/src:ro
- - /run:/run
- - /dev:/dev
- - /etc/hosts:/etc/hosts:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/config-data/glance_api/etc/glance/:/etc/glance/:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /dev:/dev
environment:
- KOLLA_BOOTSTRAP=True
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
@@ -94,3 +93,7 @@ outputs:
volumes: *glance_volumes
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable glance_api service
+ tags: step2
+ service: name=openstack-glance-api state=stopped enabled=no
diff --git a/docker/services/gnocchi-api.yaml b/docker/services/gnocchi-api.yaml
new file mode 100644
index 00000000..1c61fa3e
--- /dev/null
+++ b/docker/services/gnocchi-api.yaml
@@ -0,0 +1,111 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized gnocchi service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGnocchiApiImage:
+ description: image
+ default: 'centos-binary-gnocchi-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ GnocchiApiPuppetBase:
+ type: ../../puppet/services/gnocchi-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the gnocchi API role.
+ value:
+ service_name: {get_attr: [GnocchiApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [GnocchiApiPuppetBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [GnocchiApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [GnocchiApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: gnocchi
+ puppet_tags: gnocchi_api_paste_ini,gnocchi_config
+ step_config: *step_config
+ config_image: &gnocchi_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/gnocchi-api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ docker_config:
+ step_3:
+ gnocchi_init_log:
+ start_order: 0
+ image: *gnocchi_image
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/gnocchi && chown gnocchi:gnocchi /var/log/gnocchi']
+ volumes:
+ - logs:/var/log
+ gnocchi_db_sync:
+ start_order: 1
+ image: *gnocchi_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ - logs:/var/log
+ command: ["/usr/bin/gnocchi-upgrade", "--skip-storage"]
+ step_4:
+ gnocchi_api:
+ image: *gnocchi_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/gnocchi-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ - /var/lib/config-data/gnocchi/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/gnocchi/var/www/:/var/www/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable httpd service
+ tags: step2
+ service: name=httpd state=stopped enabled=no
diff --git a/docker/services/gnocchi-metricd.yaml b/docker/services/gnocchi-metricd.yaml
new file mode 100644
index 00000000..5ce7e12a
--- /dev/null
+++ b/docker/services/gnocchi-metricd.yaml
@@ -0,0 +1,83 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Gnocchi Metricd service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGnocchiMetricdImage:
+ description: image
+ default: 'centos-binary-gnocchi-metricd:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ GnocchiMetricdBase:
+ type: ../../puppet/services/gnocchi-metricd.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Gnocchi Metricd role.
+ value:
+ service_name: {get_attr: [GnocchiMetricdBase, role_data, service_name]}
+ config_settings: {get_attr: [GnocchiMetricdBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [GnocchiMetricdBase, role_data, step_config]
+ service_config_settings: {get_attr: [GnocchiMetricdBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: gnocchi
+ puppet_tags: gnocchi_config
+ step_config: *step_config
+ config_image: &gnocchi_metricd_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiMetricdImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/gnocchi-metricd.json:
+ command: /usr/bin/gnocchi-metricd
+ docker_config:
+ step_4:
+ gnocchi_metricd:
+ image: *gnocchi_metricd_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/gnocchi-metricd.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable openstack-gnocchi-metricd service
+ tags: step2
+ service: name=openstack-gnocchi-metricd.service state=stopped enabled=no
diff --git a/docker/services/gnocchi-statsd.yaml b/docker/services/gnocchi-statsd.yaml
new file mode 100644
index 00000000..40023a60
--- /dev/null
+++ b/docker/services/gnocchi-statsd.yaml
@@ -0,0 +1,83 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Gnocchi Statsd service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGnocchiStatsdImage:
+ description: image
+ default: 'centos-binary-gnocchi-statsd:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ GnocchiStatsdBase:
+ type: ../../puppet/services/gnocchi-statsd.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Gnocchi Statsd role.
+ value:
+ service_name: {get_attr: [GnocchiStatsdBase, role_data, service_name]}
+ config_settings: {get_attr: [GnocchiStatsdBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [GnocchiStatsdBase, role_data, step_config]
+ service_config_settings: {get_attr: [GnocchiStatsdBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: gnocchi
+ puppet_tags: gnocchi_config
+ step_config: *step_config
+ config_image: &gnocchi_statsd_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiStatsdImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/gnocchi-statsd.json:
+ command: /usr/bin/gnocchi-statsd
+ docker_config:
+ step_4:
+ gnocchi_statsd:
+ image: *gnocchi_statsd_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/gnocchi-statsd.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable openstack-gnocchi-statsd service
+ tags: step2
+ service: name=openstack-gnocchi-statsd.service state=stopped enabled=no
diff --git a/docker/services/heat-api-cfn.yaml b/docker/services/heat-api-cfn.yaml
index 93632166..8f7bb144 100644
--- a/docker/services/heat-api-cfn.yaml
+++ b/docker/services/heat-api-cfn.yaml
@@ -13,7 +13,7 @@ parameters:
default: 'centos-binary-heat-api-cfn:latest'
type: string
# we configure all heat services in the same heat engine container
- DockerHeatEngineImage:
+ DockerHeatConfigImage:
description: image
default: 'centos-binary-heat-engine:latest'
type: string
@@ -35,6 +35,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
HeatBase:
type: ../../puppet/services/heat-api-cfn.yaml
properties:
@@ -51,40 +54,44 @@ outputs:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- apache::default_vhost: false
- step_config: {get_attr: [HeatBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [HeatBase, role_data, step_config]
service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &heat_api_cfn_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnImage} ]
- puppet_tags: heat_config,file,concat,file_line
- config_volume: heat
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+ puppet_config:
+ config_volume: heat
+ puppet_tags: heat_config,file,concat,file_line
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/heat_api_cfn.json:
- command: /usr/bin/heat-api-cfn --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
- config_files:
- - dest: /etc/heat/heat.conf
- owner: heat
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+ /var/lib/kolla/config_files/heat_api_cfn.json:
+ command: /usr/bin/heat-api-cfn --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
docker_config:
step_4:
heat_api_cfn:
- image: *heat_api_cfn_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnImage} ]
net: host
privileged: false
restart: always
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
+ - /dev:/dev
+ - /run:/run
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable heat_api_cfn service
+ tags: step2
+ service: name=openstack-heat-api-cfn state=stopped enabled=no
diff --git a/docker/services/heat-api.yaml b/docker/services/heat-api.yaml
index 2efabb61..0e668ce1 100644
--- a/docker/services/heat-api.yaml
+++ b/docker/services/heat-api.yaml
@@ -13,7 +13,7 @@ parameters:
default: 'centos-binary-heat-api:latest'
type: string
# we configure all heat services in the same heat engine container
- DockerHeatEngineImage:
+ DockerHeatConfigImage:
description: image
default: 'centos-binary-heat-engine:latest'
type: string
@@ -35,6 +35,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
HeatBase:
type: ../../puppet/services/heat-api.yaml
properties:
@@ -51,40 +54,44 @@ outputs:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- apache::default_vhost: false
- step_config: {get_attr: [HeatBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [HeatBase, role_data, step_config]
service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &heat_api_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiImage} ]
- puppet_tags: heat_config,file,concat,file_line
- config_volume: heat
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+ puppet_config:
+ config_volume: heat
+ puppet_tags: heat_config,file,concat,file_line
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/heat_api.json:
- command: /usr/bin/heat-api --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
- config_files:
- - dest: /etc/heat/heat.conf
- owner: heat
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+ /var/lib/kolla/config_files/heat_api.json:
+ command: /usr/bin/heat-api --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
docker_config:
step_4:
heat_api:
- image: *heat_api_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiImage} ]
net: host
privileged: false
restart: always
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
+ - /dev:/dev
+ - /run:/run
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable heat_api service
+ tags: step2
+ service: name=openstack-heat-api state=stopped enabled=no
diff --git a/docker/services/heat-engine.yaml b/docker/services/heat-engine.yaml
index db8c2be5..5a1f011d 100644
--- a/docker/services/heat-engine.yaml
+++ b/docker/services/heat-engine.yaml
@@ -30,6 +30,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
HeatBase:
type: ../../puppet/services/heat-engine.yaml
properties:
@@ -46,24 +49,21 @@ outputs:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- apache::default_vhost: false
- step_config: {get_attr: [HeatBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [HeatBase, role_data, step_config]
service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &heat_engine_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
- puppet_tags: heat_config,file,concat,file_line
- config_volume: heat
- config_image: *heat_engine_image
+ puppet_config:
+ config_volume: heat
+ puppet_tags: heat_config,file,concat,file_line
+ step_config: *step_config
+ config_image: &heat_engine_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
kolla_config:
- /var/lib/kolla/config_files/heat_engine.json:
- command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
- config_files:
- - dest: /etc/heat/heat.conf
- owner: heat
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+ /var/lib/kolla/config_files/heat_engine.json:
+ command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
docker_config:
step_3:
heat_engine_db_sync:
@@ -72,9 +72,12 @@ outputs:
privileged: false
detach: false
volumes:
- - /var/lib/config-data/heat/etc/heat:/etc/heat:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
command: ['heat-manage', 'db_sync']
step_4:
heat_engine:
@@ -83,10 +86,17 @@ outputs:
privileged: false
restart: always
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
+ - /run:/run
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable heat_engine service
+ tags: step2
+ service: name=openstack-heat-engine state=stopped enabled=no
diff --git a/docker/services/ironic-api.yaml b/docker/services/ironic-api.yaml
index 80120568..a019a61e 100644
--- a/docker/services/ironic-api.yaml
+++ b/docker/services/ironic-api.yaml
@@ -33,6 +33,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
IronicApiBase:
type: ../../puppet/services/ironic-api.yaml
properties:
@@ -48,40 +51,38 @@ outputs:
config_settings:
map_merge:
- get_attr: [IronicApiBase, role_data, config_settings]
- step_config: {get_attr: [IronicApiBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [IronicApiBase, role_data, step_config]
service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &ironic_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ]
- puppet_tags: ironic_config
- config_volume: ironic
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
+ puppet_config:
+ config_volume: ironic
+ puppet_tags: ironic_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/ironic_api.json:
- command: /usr/bin/ironic-api
- config_files:
- - dest: /etc/ironic/ironic.conf
- owner: ironic
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
+ /var/lib/kolla/config_files/ironic_api.json:
+ command: /usr/bin/ironic-api
docker_config:
step_3:
ironic_db_sync:
- image: *ironic_image
+ image: &ironic_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ]
net: host
privileged: false
detach: false
volumes:
- - /var/lib/config-data/ironic/etc/:/etc/:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/config-data/ironic/etc/:/etc/:ro
command: ['ironic-dbsync', '--config-file', '/etc/ironic/ironic.conf']
step_4:
ironic_api:
@@ -91,9 +92,16 @@ outputs:
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/etc/:/etc/:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable ironic_api service
+ tags: step2
+ service: name=openstack-ironic-api state=stopped enabled=no
diff --git a/docker/services/ironic-conductor.yaml b/docker/services/ironic-conductor.yaml
index 945ef3fc..1e1316f3 100644
--- a/docker/services/ironic-conductor.yaml
+++ b/docker/services/ironic-conductor.yaml
@@ -33,6 +33,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
IronicConductorBase:
type: ../../puppet/services/ironic-conductor.yaml
properties:
@@ -50,62 +53,91 @@ outputs:
- get_attr: [IronicConductorBase, role_data, config_settings]
# to avoid hard linking errors we store these on the same
# volume/device as the ironic master_path
+ # https://github.com/docker/docker/issues/7457
- ironic::drivers::pxe::tftp_root: /var/lib/ironic/tftpboot
- ironic::drivers::pxe::tftp_master_path: /var/lib/ironic/tftpboot/master_images
- ironic::pxe::tftp_root: /var/lib/ironic/tftpboot
- ironic::pxe::http_root: /var/lib/ironic/httpboot
- ironic::conductor::http_root: /var/lib/ironic/httpboot
- step_config: {get_attr: [IronicConductorBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [IronicConductorBase, role_data, step_config]
service_config_settings: {get_attr: [IronicConductorBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &ironic_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ]
- puppet_tags: ironic_config
- config_volume: ironic
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
+ puppet_config:
+ config_volume: ironic
+ puppet_tags: ironic_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/ironic_conductor.json:
- command: /usr/bin/ironic-conductor
- config_files:
- - dest: /etc/ironic/ironic.conf
- owner: ironic
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
- permissions:
- - path: /var/lib/ironic/httpboot
- owner: ironic:ironic
- recurse: true
- - path: /var/lib/ironic/tftpboot
- owner: ironic:ironic
- recurse: true
+ /var/lib/kolla/config_files/ironic_conductor.json:
+ command: /usr/bin/ironic-conductor
+ permissions:
+ - path: /var/lib/ironic
+ owner: ironic:ironic
+ recurse: true
docker_config:
step_4:
- ironic-init-dirs:
- image: *ironic_image
- user: root
- command: ['/bin/bash', '-c', 'mkdir /var/lib/ironic/httpboot && mkdir /var/lib/ironic/tftpboot']
- volumes:
- - ironic:/var/lib/ironic
ironic_conductor:
start_order: 80
- image: *ironic_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ]
net: host
privileged: true
restart: always
volumes:
- - /var/lib/kolla/config_files/ironic_conductor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /sys:/sys
- - /dev:/dev
- - /run:/run #shared?
- - ironic:/var/lib/ironic
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/ironic_conductor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
+ - /lib/modules:/lib/modules:ro
+ - /sys:/sys
+ - /dev:/dev
+ - /run:/run #shared?
+ - /var/lib/ironic:/var/lib/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create ironic persistent data directory
+ file:
+ path: /var/lib/ironic
+ state: directory
+ - name: stat /httpboot
+ stat: path=/httpboot
+ register: stat_httpboot
+ - name: stat /tftpboot
+ stat: path=/tftpboot
+ register: stat_tftpboot
+ - name: stat /var/lib/ironic/httpboot
+ stat: path=/var/lib/ironic/httpboot
+ register: stat_ironic_httpboot
+ - name: stat /var/lib/ironic/tftpboot
+ stat: path=/var/lib/ironic/tftpboot
+ register: stat_ironic_tftpboot
+      # cannot use the 'copy' module: with 'remote_src' it does not copy directories recursively
+ - name: migrate /httpboot to containerized (if applicable)
+ command: /bin/cp -R /httpboot /var/lib/ironic/httpboot
+ when: stat_httpboot.stat.exists and not stat_ironic_httpboot.stat.exists
+ - name: migrate /tftpboot to containerized (if applicable)
+ command: /bin/cp -R /tftpboot /var/lib/ironic/tftpboot
+ when: stat_tftpboot.stat.exists and not stat_ironic_tftpboot.stat.exists
+ # Even if there was nothing to copy from original locations,
+ # we need to create the dirs before starting the containers
+ - name: ensure ironic pxe directories exist
+ file:
+ path: /var/lib/ironic/{{ item }}
+ state: directory
+ with_items:
+ - httpboot
+ - tftpboot
+ upgrade_tasks:
+ - name: Stop and disable ironic_conductor service
+ tags: step2
+ service: name=openstack-ironic-conductor state=stopped enabled=no
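The host_prep_tasks above migrate any pre-containerization /httpboot and /tftpboot content into /var/lib/ironic before the containers start; the stat registers are needed because the copy module cannot recurse with remote_src. A more compact, purely illustrative sketch (not the template's code) could instead lean on the command module's creates/removes guards:

    - name: migrate /httpboot to containerized (sketch)
      command: /bin/cp -R /httpboot /var/lib/ironic/httpboot
      args:
        creates: /var/lib/ironic/httpboot   # skip once the target already exists
        removes: /httpboot                  # skip when there is nothing to migrate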
diff --git a/docker/services/ironic-pxe.yaml b/docker/services/ironic-pxe.yaml
index bc7b4677..6ec80397 100644
--- a/docker/services/ironic-pxe.yaml
+++ b/docker/services/ironic-pxe.yaml
@@ -31,87 +31,63 @@ parameters:
default: {}
type: json
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
outputs:
role_data:
description: Role data for the Ironic PXE role.
value:
service_name: ironic_pxe
config_settings: {}
- step_config: ''
+ step_config: &step_config ''
service_config_settings: {}
# BEGIN DOCKER SETTINGS
- docker_image: &ironic_pxe_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerIronicPxeImage} ]
- puppet_tags: ironic_config
- config_volume: ironic
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
+ puppet_config:
+ config_volume: ironic
+ puppet_tags: ironic_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/ironic_pxe_http.json:
- command: /usr/sbin/httpd -DFOREGROUND
- config_files:
- - dest: /etc/ironic/ironic.conf
- owner: ironic
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
- - dest: /etc/httpd/conf.d/10-ipxe_vhost.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-ipxe_vhost.conf
- - dest: /etc/httpd/conf/httpd.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
- - dest: /etc/httpd/conf/ports.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
- /var/lib/kolla/config_files/ironic_pxe_tftp.json:
- command: /usr/sbin/in.tftpd --foreground --user root --address 0.0.0.0:69 --map-file /var/lib/ironic/tftpboot/map-file /var/lib/ironic/tftpboot
- config_files:
- - dest: /etc/ironic/ironic.conf
- owner: ironic
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
- - dest: /var/lib/ironic/tftpboot/chain.c32
- owner: ironic
- perm: '0744'
- source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/chain.c32
- - dest: /var/lib/ironic/tftpboot/pxelinux.0
- owner: ironic
- perm: '0744'
- source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/pxelinux.0
- - dest: /var/lib/ironic/tftpboot/ipxe.efi
- owner: ironic
- perm: '0744'
- source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/ipxe.efi
- - dest: /var/lib/ironic/tftpboot/undionly.kpxe
- owner: ironic
- perm: '0744'
- source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/undionly.kpxe
- - dest: /var/lib/ironic/tftpboot/map-file
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/map-file
+ /var/lib/kolla/config_files/ironic_pxe_http.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ /var/lib/kolla/config_files/ironic_pxe_tftp.json:
+ command: /usr/sbin/in.tftpd --foreground --user root --address 0.0.0.0:69 --map-file /var/lib/ironic/tftpboot/map-file /var/lib/ironic/tftpboot
docker_config:
step_4:
ironic_pxe_tftp:
start_order: 90
- image: *ironic_pxe_image
+ image: &ironic_pxe_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicPxeImage} ]
net: host
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/ironic_pxe_tftp.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /dev/log:/dev/log
- - ironic:/var/lib/ironic/
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/ironic_pxe_tftp.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
+            # TODO(mandre) check how docker handles mounts inside a bind-mounted tree
+ # This directory may contain migrated data from BM
+ - /var/lib/ironic:/var/lib/ironic/
+ # These files were generated by puppet inside the config container
+ # TODO(mandre) check the mount permission (ro/rw)
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/chain.c32:/var/lib/ironic/tftpboot/chain.c32
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/pxelinux.0:/var/lib/ironic/tftpboot/pxelinux.0
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/ipxe.efi:/var/lib/ironic/tftpboot/ipxe.efi
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/undionly.kpxe:/var/lib/ironic/tftpboot/undionly.kpxe
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/map-file:/var/lib/ironic/tftpboot/map-file
+ - /dev/log:/dev/log
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
ironic_pxe_http:
@@ -121,11 +97,19 @@ outputs:
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/ironic/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - ironic:/var/lib/ironic/
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
+ - /var/lib/config-data/ironic/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/ironic:/var/lib/ironic/
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create ironic persistent data directory
+ file:
+ path: /var/lib/ironic
+ state: directory
diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml
index e09fd769..e7717ab0 100644
--- a/docker/services/keystone.yaml
+++ b/docker/services/keystone.yaml
@@ -30,9 +30,21 @@ parameters:
description: The password for the keystone admin account, used for monitoring, querying neutron etc.
type: string
hidden: true
+ KeystoneTokenProvider:
+ description: The keystone token format
+ type: string
+ default: 'fernet'
+ constraints:
+ - allowed_values: ['uuid', 'fernet']
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
KeystoneBase:
type: ../../puppet/services/keystone.yaml
properties:
@@ -40,6 +52,10 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
outputs:
role_data:
description: Role data for the Keystone API role.
@@ -49,60 +65,24 @@ outputs:
map_merge:
- get_attr: [KeystoneBase, role_data, config_settings]
- apache::default_vhost: false
- step_config:
+ step_config: &step_config
list_join:
- "\n"
- - "['Keystone_user', 'Keystone_endpoint', 'Keystone_domain', 'Keystone_tenant', 'Keystone_user_role', 'Keystone_role', 'Keystone_service'].each |String $val| { noop_resource($val) }"
- {get_attr: [KeystoneBase, role_data, step_config]}
service_config_settings: {get_attr: [KeystoneBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &keystone_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
- puppet_tags: keystone_config
- config_volume: keystone
- config_image: *keystone_image
+ puppet_config:
+ config_volume: keystone
+ puppet_tags: keystone_config
+ step_config: *step_config
+ config_image: &keystone_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
kolla_config:
- /var/lib/kolla/config_files/keystone.json:
- command: /usr/sbin/httpd -DFOREGROUND
- config_files:
- - dest: /etc/keystone/keystone.conf
- owner: keystone
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/keystone/keystone.conf
- - dest: /etc/keystone/credential-keys/0
- owner: keystone
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/0
- - dest: /etc/keystone/credential-keys/1
- owner: keystone
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/1
- - dest: /etc/httpd/conf.d/10-keystone_wsgi_admin.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_admin.conf
- - dest: /etc/httpd/conf.d/10-keystone_wsgi_main.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_main.conf
- - dest: /etc/httpd/conf/httpd.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
- - dest: /etc/httpd/conf/ports.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
- - dest: /var/www/cgi-bin/keystone/keystone-admin
- owner: keystone
- perm: '0644'
- source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-admin
- - dest: /var/www/cgi-bin/keystone/keystone-public
- owner: keystone
- perm: '0644'
- source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-public
+ /var/lib/kolla/config_files/keystone.json:
+ command: /usr/sbin/httpd -DFOREGROUND
docker_config:
step_3:
keystone-init-log:
@@ -119,12 +99,26 @@ outputs:
privileged: false
detach: false
volumes: &keystone_volumes
- - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/keystone/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/keystone/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - logs:/var/log
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/keystone/var/www/:/var/www/:ro
+ - /var/lib/config-data/keystone/etc/keystone/:/etc/keystone/:ro
+ - /var/lib/config-data/keystone/etc/httpd/:/etc/httpd/:ro
+ - logs:/var/log
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
environment:
- KOLLA_BOOTSTRAP=True
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
@@ -148,7 +142,10 @@ outputs:
config_volume: 'keystone_init_tasks'
puppet_tags: 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain'
step_config: 'include ::tripleo::profile::base::keystone'
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
+ config_image: *keystone_image
+ upgrade_tasks:
+ - name: Stop and disable keystone service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped enabled=no
+ metadata_settings:
+ get_attr: [KeystoneBase, role_data, metadata_settings]
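The two if: entries in the keystone volume list resolve against the internal_tls_enabled condition declared above, so the TLS certificate and key directories are only bind mounted when EnableInternalTLS is true. With TLS enabled, the service entries resolve to:

    - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
    - /var/lib/config-data/keystone/var/www/:/var/www/:ro
    - /var/lib/config-data/keystone/etc/keystone/:/etc/keystone/:ro
    - /var/lib/config-data/keystone/etc/httpd/:/etc/httpd/:ro
    - logs:/var/log
    - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
    - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
    # with TLS disabled, the last two entries resolve to '' instead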
diff --git a/docker/services/memcached.yaml b/docker/services/memcached.yaml
index d459c825..87b5f408 100644
--- a/docker/services/memcached.yaml
+++ b/docker/services/memcached.yaml
@@ -29,6 +29,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
MemcachedBase:
type: ../../puppet/services/memcached.yaml
properties:
@@ -42,16 +45,18 @@ outputs:
value:
service_name: {get_attr: [MemcachedBase, role_data, service_name]}
config_settings: {get_attr: [MemcachedBase, role_data, config_settings]}
- step_config: {get_attr: [MemcachedBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [MemcachedBase, role_data, step_config]
service_config_settings: {get_attr: [MemcachedBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &memcached_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
- puppet_tags: 'file'
- config_volume: 'memcached'
- config_image: *memcached_image
+ puppet_config:
+ config_volume: 'memcached'
+ puppet_tags: 'file'
+ step_config: *step_config
+ config_image: &memcached_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
kolla_config: {}
docker_config:
step_1:
@@ -61,9 +66,16 @@ outputs:
privileged: false
restart: always
volumes:
- - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS']
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable memcached service
+ tags: step2
+ service: name=memcached state=stopped enabled=no
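The memcached template shows the recurring refactor in its smallest form: step_config gains a YAML anchor so the same node can be aliased from the new puppet_config map, and the image expression moves under config_image. In plain YAML terms the anchor/alias pair is simple node reuse:

    # standalone illustration of the &step_config / *step_config mechanics used above
    role_data:
      step_config: &step_config
        get_attr: [MemcachedBase, role_data, step_config]
      puppet_config:
        step_config: *step_config   # alias: resolves to the identical node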
diff --git a/docker/services/mistral-api.yaml b/docker/services/mistral-api.yaml
index e535a817..7c2413dd 100644
--- a/docker/services/mistral-api.yaml
+++ b/docker/services/mistral-api.yaml
@@ -33,6 +33,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
MistralApiBase:
type: ../../puppet/services/mistral-api.yaml
properties:
@@ -48,41 +51,39 @@ outputs:
config_settings:
map_merge:
- get_attr: [MistralApiBase, role_data, config_settings]
- step_config: {get_attr: [MistralApiBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [MistralApiBase, role_data, step_config]
service_config_settings: {get_attr: [MistralApiBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &mistral_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ]
- puppet_tags: mistral_config
- config_volume: mistral
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
+ puppet_config:
+ config_volume: mistral
+ puppet_tags: mistral_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/mistral_api.json:
- command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api
- config_files:
- - dest: /etc/mistral/mistral.conf
- owner: mistral
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+ /var/lib/kolla/config_files/mistral_api.json:
+ command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api
docker_config:
step_3:
mistral_db_sync:
start_order: 1
- image: *mistral_image
+ image: &mistral_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ]
net: host
privileged: false
detach: false
volumes:
- - /var/lib/config-data/mistral/etc/:/etc/:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/config-data/mistral/etc/:/etc/:ro
command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'upgrade', 'head']
mistral_db_populate:
start_order: 2
@@ -91,11 +92,12 @@ outputs:
privileged: false
detach: false
volumes:
- - /var/lib/config-data/mistral/etc/:/etc/:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/config-data/mistral/etc/:/etc/:ro
# NOTE: dprince this requires that we install openstack-tripleo-common into
# the Mistral API image so that we get tripleo* actions
command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'populate']
@@ -107,9 +109,16 @@ outputs:
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/mistral_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/mistral_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable mistral_api service
+ tags: step2
+ service: name=openstack-mistral-api state=stopped enabled=no
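The upgrade_tasks added throughout these templates use Ansible's inline key=value shorthand for the service module; the expanded mapping form below is equivalent and is shown only for readability:

    upgrade_tasks:
      - name: Stop and disable mistral_api service
        tags: step2
        service:
          name: openstack-mistral-api
          state: stopped
          enabled: no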
diff --git a/docker/services/mistral-engine.yaml b/docker/services/mistral-engine.yaml
index be4c8af7..01ca3f0a 100644
--- a/docker/services/mistral-engine.yaml
+++ b/docker/services/mistral-engine.yaml
@@ -34,6 +34,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
MistralBase:
type: ../../puppet/services/mistral-engine.yaml
properties:
@@ -49,39 +52,43 @@ outputs:
config_settings:
map_merge:
- get_attr: [MistralBase, role_data, config_settings]
- step_config: {get_attr: [MistralBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [MistralBase, role_data, step_config]
service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &mistral_engine_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ]
- puppet_tags: mistral_config
- config_volume: mistral
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
+ puppet_config:
+ config_volume: mistral
+ puppet_tags: mistral_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/mistral_engine.json:
- command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/engine.log --server=engine
- config_files:
- - dest: /etc/mistral/mistral.conf
- owner: mistral
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+ /var/lib/kolla/config_files/mistral_engine.json:
+ command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/engine.log --server=engine
docker_config:
step_4:
mistral_engine:
- image: *mistral_engine_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ]
net: host
privileged: false
restart: always
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/mistral_engine.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /run:/run
+ - /var/lib/kolla/config_files/mistral_engine.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable mistral_engine service
+ tags: step2
+ service: name=openstack-mistral-engine state=stopped enabled=no
diff --git a/docker/services/mistral-executor.yaml b/docker/services/mistral-executor.yaml
index 33608a42..374b0be7 100644
--- a/docker/services/mistral-executor.yaml
+++ b/docker/services/mistral-executor.yaml
@@ -34,6 +34,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
MistralBase:
type: ../../puppet/services/mistral-executor.yaml
properties:
@@ -49,43 +52,47 @@ outputs:
config_settings:
map_merge:
- get_attr: [MistralBase, role_data, config_settings]
- step_config: {get_attr: [MistralBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [MistralBase, role_data, step_config]
service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &mistral_executor_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ]
- puppet_tags: mistral_config
- config_volume: mistral
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
+ puppet_config:
+ config_volume: mistral
+ puppet_tags: mistral_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/mistral_executor.json:
- command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/executor.log --server=executor
- config_files:
- - dest: /etc/mistral/mistral.conf
- owner: mistral
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+ /var/lib/kolla/config_files/mistral_executor.json:
+ command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/executor.log --server=executor
docker_config:
step_4:
mistral_executor:
- image: *mistral_executor_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ]
net: host
privileged: false
restart: always
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/mistral_executor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- # FIXME: this is required in order for Nova cells
- # initialization workflows on the Undercloud. Need to
- # exclude this on the overcloud for security reasons.
- - /var/lib/config-data/nova/etc/nova:/etc/nova:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/mistral_executor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
+ - /run:/run
+ # FIXME: this is required in order for Nova cells
+ # initialization workflows on the Undercloud. Need to
+ # exclude this on the overcloud for security reasons.
+ - /var/lib/config-data/nova/etc/nova:/etc/nova:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable mistral_executor service
+ tags: step2
+ service: name=openstack-mistral-executor state=stopped enabled=no
diff --git a/docker/services/neutron-api.yaml b/docker/services/neutron-api.yaml
index dfd1d5c0..00b1f857 100644
--- a/docker/services/neutron-api.yaml
+++ b/docker/services/neutron-api.yaml
@@ -34,6 +34,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NeutronBase:
type: ../../puppet/services/neutron-api.yaml
properties:
@@ -49,35 +52,28 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- step_config: {get_attr: [NeutronBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [NeutronBase, role_data, step_config]
service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &neutron_api_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
- puppet_tags: neutron_config,neutron_api_config
- config_volume: neutron
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ puppet_config:
+ config_volume: neutron
+ puppet_tags: neutron_config,neutron_api_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/neutron_api.json:
- command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini
- config_files:
- - dest: /etc/neutron/neutron.conf
- owner: neutron
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
- - dest: /etc/neutron/plugin.ini
- owner: neutron
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini
+ /var/lib/kolla/config_files/neutron_api.json:
+ command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini
docker_config:
step_3:
neutron_db_sync:
- image: *neutron_api_image
+ image: &neutron_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
net: host
privileged: false
detach: false
@@ -85,10 +81,13 @@ outputs:
# and run as neutron user
user: root
volumes:
- - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro
- - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro
+ - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro
command: ['neutron-db-manage', 'upgrade', 'heads']
step_4:
neutron_api:
@@ -97,9 +96,16 @@ outputs:
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable neutron_api service
+ tags: step2
+ service: name=neutron-server state=stopped enabled=no
diff --git a/docker/services/neutron-dhcp.yaml b/docker/services/neutron-dhcp.yaml
index fc13b3d1..e48f53b4 100644
--- a/docker/services/neutron-dhcp.yaml
+++ b/docker/services/neutron-dhcp.yaml
@@ -8,7 +8,7 @@ parameters:
description: namespace
default: 'tripleoupstream'
type: string
- DockerNeutronApiImage:
+ DockerNeutronDHCPImage:
description: image
default: 'centos-binary-neutron-dhcp-agent:latest'
type: string
@@ -34,6 +34,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NeutronBase:
type: ../../puppet/services/neutron-dhcp.yaml
properties:
@@ -49,45 +52,45 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- step_config: {get_attr: [NeutronBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [NeutronBase, role_data, step_config]
service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &neutron_dhcp_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
- puppet_tags: neutron_config,neutron_dhcp_agent_config
- config_volume: neutron
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ puppet_config:
+ config_volume: neutron
+ puppet_tags: neutron_config,neutron_dhcp_agent_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/neutron_dhcp.json:
- command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log
- config_files:
- - dest: /etc/neutron/neutron.conf
- owner: neutron
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
- - dest: /etc/neutron/dhcp_agent.ini
- owner: neutron
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/neutron/dhcp_agent.ini
+ /var/lib/kolla/config_files/neutron_dhcp.json:
+ command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log
docker_config:
step_4:
neutron_dhcp:
- image: *neutron_dhcp_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronDHCPImage} ]
net: host
pid: host
privileged: true
restart: always
volumes:
- - /var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro
- - /etc/localtime:/etc/localtime:ro
- - /etc/hosts:/etc/hosts:ro
- - /lib/modules:/lib/modules:ro
- - /run/:/run
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /lib/modules:/lib/modules:ro
+ - /run/:/run
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable neutron_dhcp service
+ tags: step2
+ service: name=neutron-dhcp-agent state=stopped enabled=no
diff --git a/docker/services/neutron-l3.yaml b/docker/services/neutron-l3.yaml
index c74ab4fe..90fe65f6 100644
--- a/docker/services/neutron-l3.yaml
+++ b/docker/services/neutron-l3.yaml
@@ -34,6 +34,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NeutronL3Base:
type: ../../puppet/services/neutron-l3.yaml
properties:
@@ -47,42 +50,39 @@ outputs:
value:
service_name: {get_attr: [NeutronL3Base, role_data, service_name]}
config_settings: {get_attr: [NeutronL3Base, role_data, config_settings]}
- step_config: {get_attr: [NeutronL3Base, role_data, step_config]}
- docker_image: &neutron_l3_agent_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronL3AgentImage} ]
- puppet_tags: neutron_config,neutron_l3_agent_config
- config_volume: neutron
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ step_config: &step_config
+ get_attr: [NeutronL3Base, role_data, step_config]
+ puppet_config:
+ puppet_tags: neutron_config,neutron_l3_agent_config
+ config_volume: neutron
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/neutron-l3-agent.json:
- command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
- config_files:
- - dest: /etc/neutron/neutron.conf
- owner: neutron
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
- - dest: /etc/neutron/l3_agent.ini
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/neutron/l3_agent.ini
+ command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
docker_config:
step_4:
neutronl3agent:
- image: *neutron_l3_agent_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronL3AgentImage} ]
net: host
pid: host
privileged: true
restart: always
volumes:
- - /var/lib/kolla/config_files/neutron-l3-agent.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/neutron:/var/lib/kolla/config_files/src:ro
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /run:/run
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/neutron-l3-agent.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/neutron-ovs-agent.yaml b/docker/services/neutron-ovs-agent.yaml
index ab99da5e..c40ef8bf 100644
--- a/docker/services/neutron-ovs-agent.yaml
+++ b/docker/services/neutron-ovs-agent.yaml
@@ -29,6 +29,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NeutronOvsAgentBase:
type: ../../puppet/services/neutron-ovs-agent.yaml
properties:
@@ -42,30 +45,19 @@ outputs:
value:
service_name: {get_attr: [NeutronOvsAgentBase, role_data, service_name]}
config_settings: {get_attr: [NeutronOvsAgentBase, role_data, config_settings]}
- step_config: {get_attr: [NeutronOvsAgentBase, role_data, step_config]}
- docker_image: &neutron_ovs_agent_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
- puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2
- config_volume: neutron
- config_image: *neutron_ovs_agent_image
+ step_config: &step_config
+ get_attr: [NeutronOvsAgentBase, role_data, step_config]
+ puppet_config:
+ config_volume: neutron
+ puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2
+ step_config: *step_config
+ config_image: &neutron_ovs_agent_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
kolla_config:
/var/lib/kolla/config_files/neutron-openvswitch-agent.json:
command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
- config_files:
- - dest: /etc/neutron/neutron.conf
- owner: neutron
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
- - dest: /etc/neutron/plugins/ml2/openvswitch_agent.ini
- owner: neutron
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/openvswitch_agent.ini
- - dest: /etc/neutron/plugins/ml2/ml2_conf.ini
- owner: neutron
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini
docker_config:
step_4:
neutronovsagent:
@@ -75,10 +67,18 @@ outputs:
privileged: true
restart: always
volumes:
- - /var/lib/kolla/config_files/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/neutron:/var/lib/kolla/config_files/src:ro
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /run:/run
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable neutron_ovs_agent service
+ tags: step2
+ service: name=neutron-openvswitch-agent state=stopped enabled=no
diff --git a/docker/services/neutron-plugin-ml2.yaml b/docker/services/neutron-plugin-ml2.yaml
index 37ab8db2..34864d3a 100644
--- a/docker/services/neutron-plugin-ml2.yaml
+++ b/docker/services/neutron-plugin-ml2.yaml
@@ -44,15 +44,17 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- step_config: {get_attr: [NeutronBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [NeutronBase, role_data, step_config]
service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &docker_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
- puppet_tags: ''
- config_volume: 'neutron'
- config_image: *docker_image
+ puppet_config:
+ config_volume: 'neutron'
+ puppet_tags: ''
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config: {}
docker_config: {}
diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml
index 0cf1b859..8621bb65 100644
--- a/docker/services/nova-api.yaml
+++ b/docker/services/nova-api.yaml
@@ -12,7 +12,7 @@ parameters:
description: image
default: 'centos-binary-nova-api:latest'
type: string
- DockerNovaBaseImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-base:latest'
type: string
@@ -33,6 +33,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NovaApiBase:
type: ../../puppet/services/nova-api.yaml
properties:
@@ -49,32 +52,32 @@ outputs:
map_merge:
- get_attr: [NovaApiBase, role_data, config_settings]
- apache::default_vhost: false
- step_config: {get_attr: [NovaApiBase, role_data, step_config]}
+ step_config: &step_config
+ list_join:
+ - "\n"
+ - - "['Nova_cell_v2'].each |String $val| { noop_resource($val) }"
+ - {get_attr: [NovaApiBase, role_data, step_config]}
service_config_settings: {get_attr: [NovaApiBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &nova_api_image
- list_join:
+ puppet_config:
+ config_volume: nova
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ]
- puppet_tags: nova_config
- config_volume: nova
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/nova_api.json:
- command: /usr/bin/nova-api
- config_files:
- - dest: /etc/nova/nova.conf
- owner: nova
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ /var/lib/kolla/config_files/nova_api.json:
+ command: /usr/bin/nova-api
docker_config:
step_3:
nova_api_db_sync:
start_order: 1
- image: *nova_api_image
+ image: &nova_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ]
net: host
detach: false
volumes: &nova_api_volumes
@@ -126,14 +129,18 @@ outputs:
privileged: true
restart: always
volumes:
- - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_5:
nova_api_discover_hosts:
- start_order: 3
+ start_order: 1
image: *nova_api_image
net: host
detach: false
@@ -142,3 +149,7 @@ outputs:
- '/usr/bin/nova-manage'
- 'cell_v2'
- 'discover_hosts'
+ upgrade_tasks:
+ - name: Stop and disable nova_api service
+ tags: step2
+ service: name=openstack-nova-api state=stopped enabled=no
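For nova-api the step_config anchor is no longer a plain get_attr: list_join with "\n" prepends a noop_resource guard to whatever manifest NovaApiBase returns, which turns any Nova_cell_v2 resources into no-ops when the manifest runs during config generation. Roughly, the anchor resolves to a string of the form (the second line stands in for the base manifest):

    step_config: |
      ['Nova_cell_v2'].each |String $val| { noop_resource($val) }
      <step_config returned by NovaApiBase>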
diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml
index 570df95f..c347b113 100644
--- a/docker/services/nova-compute.yaml
+++ b/docker/services/nova-compute.yaml
@@ -29,6 +29,8 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
NovaComputeBase:
type: ../../puppet/services/nova-compute.yaml
@@ -43,26 +45,19 @@ outputs:
value:
service_name: {get_attr: [NovaComputeBase, role_data, service_name]}
config_settings: {get_attr: [NovaComputeBase, role_data, config_settings]}
- step_config: {get_attr: [NovaComputeBase, role_data, step_config]}
- puppet_tags: nova_config,nova_paste_api_ini
- docker_image: &nova_compute_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
- config_volume: nova_libvirt
- config_image: *nova_compute_image
+ step_config: &step_config
+ get_attr: [NovaComputeBase, role_data, step_config]
+ puppet_config:
+ config_volume: nova_libvirt
+ puppet_tags: nova_config,nova_paste_api_ini
+ step_config: *step_config
+ config_image: &nova_compute_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
kolla_config:
/var/lib/kolla/config_files/nova-compute.json:
- command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
- config_files:
- - dest: /etc/nova/nova.conf
- owner: nova
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
- - dest: /etc/nova/rootwrap.conf
- owner: nova
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf
+ command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
docker_config:
# FIXME: run discover hosts here
step_4:
@@ -73,14 +68,27 @@ outputs:
user: root
restart: always
volumes:
- - /var/lib/kolla/config_files/nova-compute.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova_libvirt:/var/lib/kolla/config_files/src:ro
- - /dev:/dev
- - /etc/iscsi:/etc/iscsi
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /run:/run
- - /var/lib/nova:/var/lib/nova
- - libvirtd:/var/lib/libvirt
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/nova-compute.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_libvirt/etc/nova/:/etc/nova/:ro
+ - /dev:/dev
+ - /etc/iscsi:/etc/iscsi
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /var/lib/nova:/var/lib/nova
+ - /var/lib/libvirt:/var/lib/libvirt
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /var/lib/libvirt
+ file:
+ path: /var/lib/libvirt
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable nova-compute service
+ tags: step2
+ service: name=openstack-nova-compute state=stopped enabled=no
diff --git a/docker/services/nova-conductor.yaml b/docker/services/nova-conductor.yaml
index aa009b4f..e414b216 100644
--- a/docker/services/nova-conductor.yaml
+++ b/docker/services/nova-conductor.yaml
@@ -12,7 +12,7 @@ parameters:
description: image
default: 'centos-binary-nova-conductor:latest'
type: string
- DockerNovaBaseImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-base:latest'
type: string
@@ -34,6 +34,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NovaConductorBase:
type: ../../puppet/services/nova-conductor.yaml
properties:
@@ -47,39 +50,43 @@ outputs:
value:
service_name: {get_attr: [NovaConductorBase, role_data, service_name]}
config_settings: {get_attr: [NovaConductorBase, role_data, config_settings]}
- step_config: {get_attr: [NovaConductorBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [NovaConductorBase, role_data, step_config]
service_config_settings: {get_attr: [NovaConductorBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &nova_conductor_image
- list_join:
+ puppet_config:
+ config_volume: nova
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ]
- puppet_tags: nova_config
- config_volume: nova
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/nova_conductor.json:
- command: /usr/bin/nova-conductor
- config_files:
- - dest: /etc/nova/nova.conf
- owner: nova
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ /var/lib/kolla/config_files/nova_conductor.json:
+ command: /usr/bin/nova-conductor
docker_config:
step_4:
nova_conductor:
- image: *nova_conductor_image
+ image: &nova_conductor_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ]
net: host
privileged: false
restart: always
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /run:/run
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable nova_conductor service
+ tags: step2
+ service: name=openstack-nova-conductor state=stopped enabled=no
diff --git a/docker/services/nova-ironic.yaml b/docker/services/nova-ironic.yaml
index c1858ded..2f4da6c0 100644
--- a/docker/services/nova-ironic.yaml
+++ b/docker/services/nova-ironic.yaml
@@ -12,7 +12,7 @@ parameters:
description: image
default: 'centos-binary-nova-compute-ironic:latest'
type: string
- DockerNovaBaseImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-base:latest'
type: string
@@ -33,6 +33,8 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
NovaIronicBase:
type: ../../puppet/services/nova-ironic.yaml
@@ -45,44 +47,45 @@ outputs:
value:
service_name: {get_attr: [NovaIronicBase, role_data, service_name]}
config_settings: {get_attr: [NovaIronicBase, role_data, config_settings]}
- step_config: {get_attr: [NovaIronicBase, role_data, step_config]}
- puppet_tags: nova_config,nova_paste_api_ini
- docker_image: &nova_ironic_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
- config_volume: nova
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ step_config: &step_config
+ get_attr: [NovaIronicBase, role_data, step_config]
+ puppet_config:
+ config_volume: nova
+ puppet_tags: nova_config,nova_paste_api_ini
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova_ironic.json:
command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
- config_files:
- - dest: /etc/nova/nova.conf
- owner: nova
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
- - dest: /etc/nova/rootwrap.conf
- owner: nova
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf
docker_config:
step_5:
novacompute:
- image: *nova_ironic_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
net: host
privileged: true
user: root
restart: always
volumes:
- - /var/lib/kolla/config_files/nova_ironic.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova:/var/lib/kolla/config_files/src:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - /dev:/dev
- - /etc/iscsi:/etc/iscsi
- - nova_compute:/var/lib/nova/
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/nova_ironic.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /run:/run
+ - /dev:/dev
+ - /etc/iscsi:/etc/iscsi
+ - /var/lib/nova/:/var/lib/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable nova-compute service
+ tags: step2
+ service: name=openstack-nova-compute state=stopped enabled=no
diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml
index d6e7dc76..ba637605 100644
--- a/docker/services/nova-libvirt.yaml
+++ b/docker/services/nova-libvirt.yaml
@@ -14,7 +14,7 @@ parameters:
type: string
# we configure libvirt via the nova-compute container due to coupling
# in the puppet modules
- DockerNovaComputeImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-compute:latest'
type: string
@@ -35,6 +35,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NovaLibvirtBase:
type: ../../puppet/services/nova-libvirt.yaml
properties:
@@ -48,45 +51,58 @@ outputs:
value:
service_name: {get_attr: [NovaLibvirtBase, role_data, service_name]}
config_settings: {get_attr: [NovaLibvirtBase, role_data, config_settings]}
- step_config: {get_attr: [NovaLibvirtBase, role_data, step_config]}
- docker_image: &libvirt_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
- puppet_tags: nova_config
- config_volume: nova_libvirt
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+ step_config: &step_config
+ get_attr: [NovaLibvirtBase, role_data, step_config]
+ puppet_config:
+ config_volume: nova_libvirt
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova-libvirt.json:
- command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
- config_files:
- - dest: /etc/libvirt/libvirtd.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/libvirt/libvirtd.conf
+ command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
docker_config:
step_3:
nova_libvirt:
- image: *libvirt_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
net: host
pid: host
privileged: true
restart: always
volumes:
- - /var/lib/kolla/config_files/nova-libvirt.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova_libvirt:/var/lib/kolla/config_files/src:ro
- - /dev:/dev
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /run:/run
- - /sys/fs/cgroup:/sys/fs/cgroup
- - /var/lib/nova:/var/lib/nova
- # Needed to use host's virtlogd
- - /var/run/libvirt:/var/run/libvirt
- - libvirtd:/var/lib/libvirt
- - nova_libvirt_qemu:/etc/libvirt/qemu
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/nova-libvirt.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_libvirt/etc/libvirt/:/etc/libvirt/:ro
+ - /lib/modules:/lib/modules:ro
+ - /dev:/dev
+ - /run:/run
+ - /sys/fs/cgroup:/sys/fs/cgroup
+ - /var/lib/nova:/var/lib/nova
+ # Needed to use host's virtlogd
+ - /var/run/libvirt:/var/run/libvirt
+ - /var/lib/libvirt:/var/lib/libvirt
+ - /etc/libvirt/qemu:/etc/libvirt/qemu
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create libvirt persistent data directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /etc/libvirt/qemu
+ - /var/lib/libvirt
+ upgrade_tasks:
+ - name: Stop and disable libvirtd service
+ tags: step2
+ service: name=libvirtd state=stopped enabled=no
diff --git a/docker/services/nova-metadata.yaml b/docker/services/nova-metadata.yaml
index a4baaa27..b452c61b 100644
--- a/docker/services/nova-metadata.yaml
+++ b/docker/services/nova-metadata.yaml
@@ -37,12 +37,14 @@ outputs:
config_settings:
map_merge:
- get_attr: [NovaMetadataBase, role_data, config_settings]
- step_config: {get_attr: [NovaMetadataBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [NovaMetadataBase, role_data, step_config]
service_config_settings: {get_attr: [NovaMetadataBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: ''
- puppet_tags: ''
- config_volume: ''
- config_image: ''
+ puppet_config:
+ config_volume: ''
+ puppet_tags: ''
+ step_config: *step_config
+ config_image: ''
kolla_config: {}
docker_config: {}
diff --git a/docker/services/nova-placement.yaml b/docker/services/nova-placement.yaml
index f0f7d724..53460a83 100644
--- a/docker/services/nova-placement.yaml
+++ b/docker/services/nova-placement.yaml
@@ -29,6 +29,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NovaPlacementBase:
type: ../../puppet/services/nova-placement.yaml
properties:
@@ -45,43 +48,21 @@ outputs:
map_merge:
- get_attr: [NovaPlacementBase, role_data, config_settings]
- apache::default_vhost: false
- step_config: {get_attr: [NovaPlacementBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [NovaPlacementBase, role_data, step_config]
service_config_settings: {get_attr: [NovaPlacementBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &nova_placement_image
- list_join:
+ puppet_config:
+ config_volume: nova_placement
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image: &nova_placement_image
+ list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
- puppet_tags: nova_config
- config_volume: nova_placement
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
kolla_config:
- /var/lib/kolla/config_files/nova_placement.json:
- command: /usr/sbin/httpd -DFOREGROUND
- config_files:
- - dest: /etc/nova/nova.conf
- owner: nova
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
- - dest: /etc/httpd/conf.d/10-placement_wsgi.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-placement_wsgi.conf
- - dest: /etc/httpd/conf/httpd.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
- - dest: /etc/httpd/conf/ports.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
- - dest: /var/www/cgi-bin/nova/nova-placement-api
- owner: nova
- perm: '0644'
- source: /var/lib/kolla/config_files/src/var/www/cgi-bin/nova/nova-placement-api
+ /var/lib/kolla/config_files/nova_placement.json:
+ command: /usr/sbin/httpd -DFOREGROUND
docker_config:
# start this early so it is up before computes start reporting
step_3:
@@ -92,10 +73,18 @@ outputs:
user: root
restart: always
volumes:
- - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova_placement/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/nova_placement/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_placement/etc/nova/:/etc/nova/:ro
+ - /var/lib/config-data/nova_placement/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/nova_placement/var/www/:/var/www/:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable nova_placement service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped enabled=no
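The yaql block above simply concatenates the shared volume list exposed by containers-common.yaml with the per-service list. As a rough illustration — assuming the common list still provides the /etc/hosts and /etc/localtime bind mounts that each service previously repeated inline (the authoritative list lives in docker/services/containers-common.yaml) — the nova_placement container ends up with roughly:

volumes:
  - /etc/hosts:/etc/hosts:ro
  - /etc/localtime:/etc/localtime:ro
  - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
  - /var/lib/config-data/nova_placement/etc/nova/:/etc/nova/:ro
  - /var/lib/config-data/nova_placement/etc/httpd/:/etc/httpd/:ro
  - /var/lib/config-data/nova_placement/var/www/:/var/www/:ro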
diff --git a/docker/services/nova-scheduler.yaml b/docker/services/nova-scheduler.yaml
index a1a98b48..54f30abd 100644
--- a/docker/services/nova-scheduler.yaml
+++ b/docker/services/nova-scheduler.yaml
@@ -12,7 +12,7 @@ parameters:
description: image
default: 'centos-binary-nova-scheduler:latest'
type: string
- DockerNovaBaseImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-base:latest'
type: string
@@ -33,6 +33,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NovaSchedulerBase:
type: ../../puppet/services/nova-scheduler.yaml
properties:
@@ -46,39 +49,43 @@ outputs:
value:
service_name: {get_attr: [NovaSchedulerBase, role_data, service_name]}
config_settings: {get_attr: [NovaSchedulerBase, role_data, config_settings]}
- step_config: {get_attr: [NovaSchedulerBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [NovaSchedulerBase, role_data, step_config]
service_config_settings: {get_attr: [NovaSchedulerBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &nova_scheduler_image
- list_join:
+ puppet_config:
+ config_volume: nova
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaSchedulerImage} ]
- puppet_tags: nova_config
- config_volume: nova
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/nova_scheduler.json:
- command: /usr/bin/nova-scheduler
- config_files:
- - dest: /etc/nova/nova.conf
- owner: nova
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ /var/lib/kolla/config_files/nova_scheduler.json:
+ command: /usr/bin/nova-scheduler
docker_config:
step_4:
nova_scheduler:
- image: *nova_scheduler_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaSchedulerImage} ]
net: host
privileged: false
restart: always
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /run:/run
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable nova_scheduler service
+ tags: step2
+ service: name=openstack-nova-scheduler state=stopped enabled=no
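For reference, with the defaults declared in this template the list_join above resolves to a plain image reference; this is just the computed value, not additional template code:

# DockerNamespace: 'tripleoupstream'
# DockerNovaSchedulerImage: 'centos-binary-nova-scheduler:latest'
image: tripleoupstream/centos-binary-nova-scheduler:latest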
diff --git a/docker/services/panko-api.yaml b/docker/services/panko-api.yaml
new file mode 100644
index 00000000..61bdf7ac
--- /dev/null
+++ b/docker/services/panko-api.yaml
@@ -0,0 +1,108 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Panko service configured with docker
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerPankoApiImage:
+ description: image
+ default: 'centos-binary-panko-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ PankoApiPuppetBase:
+ type: ../../puppet/services/panko-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Panko API role.
+ value:
+ service_name: {get_attr: [PankoApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [PankoApiPuppetBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [PankoApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [PankoApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: panko
+ puppet_tags: panko_api_paste_ini,panko_config
+ step_config: *step_config
+ config_image: &panko_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerPankoApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/panko-api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ docker_config:
+ step_3:
+ panko-init-log:
+ start_order: 0
+ image: *panko_image
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/panko && chown panko:panko /var/log/panko']
+ volumes:
+ - logs:/var/log
+ panko_db_sync:
+ start_order: 1
+ image: *panko_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/config-data/panko/etc/panko:/etc/panko:ro
+ - logs:/var/log
+ command: /usr/bin/panko-dbsync
+ step_4:
+ panko_api:
+ start_order: 2
+ image: *panko_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/panko-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/panko/etc/panko/:/etc/panko/:ro
+ - /var/lib/config-data/panko/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/panko/var/www/:/var/www/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
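Like the other containerised services, this new template only takes effect once it is mapped in the resource registry; the environments/docker.yaml hunk later in this patch adds exactly this mapping:

resource_registry:
  OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml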
diff --git a/docker/services/rabbitmq.yaml b/docker/services/rabbitmq.yaml
index cea3d8a7..a04893e4 100644
--- a/docker/services/rabbitmq.yaml
+++ b/docker/services/rabbitmq.yaml
@@ -33,6 +33,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
RabbitmqBase:
type: ../../puppet/services/rabbitmq.yaml
properties:
@@ -46,36 +49,21 @@ outputs:
value:
service_name: {get_attr: [RabbitmqBase, role_data, service_name]}
config_settings: {get_attr: [RabbitmqBase, role_data, config_settings]}
- step_config: {get_attr: [RabbitmqBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [RabbitmqBase, role_data, step_config]
service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &rabbitmq_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
- puppet_tags: file
- config_volume: rabbitmq
- config_image: *rabbitmq_image
+ puppet_config:
+ config_volume: rabbitmq
+ puppet_tags: file
+ step_config: *step_config
+ config_image: &rabbitmq_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
kolla_config:
/var/lib/kolla/config_files/rabbitmq.json:
command: /usr/lib/rabbitmq/bin/rabbitmq-server
- config_files:
- - dest: /etc/rabbitmq/rabbitmq.config
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq.config
- - dest: /etc/rabbitmq/enabled_plugins
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/rabbitmq/enabled_plugins
- - dest: /etc/rabbitmq/rabbitmq-env.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq-env.conf
- - dest: /etc/rabbitmq/rabbitmqadmin.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmqadmin.conf
docker_config:
step_1:
rabbitmq_bootstrap:
@@ -84,11 +72,14 @@ outputs:
net: host
privileged: false
volumes:
- - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - rabbitmq:/var/lib/rabbitmq/
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
+ - /var/lib/rabbitmq:/var/lib/rabbitmq
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- KOLLA_BOOTSTRAP=True
@@ -110,10 +101,22 @@ outputs:
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - rabbitmq:/var/lib/rabbitmq/
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
+ - /var/lib/rabbitmq:/var/lib/rabbitmq
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /var/lib/rabbitmq
+ file:
+ path: /var/lib/rabbitmq
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable rabbitmq service
+ tags: step2
+ service: name=rabbitmq-server state=stopped enabled=no
diff --git a/docker/services/services.yaml b/docker/services/services.yaml
index cd9f4cb5..21387c9b 100644
--- a/docker/services/services.yaml
+++ b/docker/services/services.yaml
@@ -67,13 +67,27 @@ outputs:
{get_attr: [PuppetServices, role_data, global_config_settings]}
step_config:
{get_attr: [ServiceChain, role_data, step_config]}
- docker_image: {get_attr: [ServiceChain, role_data, docker_image]}
- puppet_tags: {get_attr: [ServiceChain, role_data, puppet_tags]}
- config_volume: {get_attr: [ServiceChain, role_data, config_volume]}
- config_image: {get_attr: [ServiceChain, role_data, config_image]}
+ puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]}
kolla_config:
map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
docker_config:
{get_attr: [ServiceChain, role_data, docker_config]}
docker_puppet_tasks:
{get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
+ host_prep_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks
+ expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+ upgrade_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
+ expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+ upgrade_batch_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
+ expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+ service_metadata_settings:
+ get_attr: [PuppetServices, role_data, service_metadata_settings]
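To make the aggregation above concrete, here is an illustrative (made-up) input and the value the upgrade_tasks expression would produce; none of these task names come from the patch:

data:                                   # per-service role_data (illustrative)
  - upgrade_tasks:
      - {name: Stop and disable svc_a, tags: step2}
      - {name: Update all packages, tags: step3}
  - upgrade_tasks:
      - {name: Update all packages, tags: step3}
  - {service_name: svc_c}               # no upgrade_tasks key; dropped by where($ != null)
# flatten() merges the remaining lists and distinct() keeps the shared
# "Update all packages" task only once:
result:
  - {name: Stop and disable svc_a, tags: step2}
  - {name: Update all packages, tags: step3}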
diff --git a/docker/services/swift-proxy.yaml b/docker/services/swift-proxy.yaml
index 09553319..6e8d6eb9 100644
--- a/docker/services/swift-proxy.yaml
+++ b/docker/services/swift-proxy.yaml
@@ -29,6 +29,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
SwiftProxyBase:
type: ../../puppet/services/swift-proxy.yaml
properties:
@@ -42,16 +45,18 @@ outputs:
value:
service_name: {get_attr: [SwiftProxyBase, role_data, service_name]}
config_settings: {get_attr: [SwiftProxyBase, role_data, config_settings]}
- step_config: {get_attr: [SwiftProxyBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [SwiftProxyBase, role_data, step_config]
service_config_settings: {get_attr: [SwiftProxyBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &swift_proxy_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
- puppet_tags: swift_proxy_config
- config_volume: swift
- config_image: *swift_proxy_image
+ puppet_config:
+ config_volume: swift
+ puppet_tags: swift_proxy_config
+ step_config: *step_config
+ config_image: &swift_proxy_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
kolla_config:
/var/lib/kolla/config_files/swift_proxy.json:
command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf
@@ -62,15 +67,27 @@ outputs:
net: host
user: swift
restart: always
- # I'm mounting /etc/swift as rw. Are the rings written to at all during runtime?
volumes:
- - /var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ # FIXME I'm mounting /etc/swift as rw. Are the rings written to
+ # at all during runtime?
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /srv/node
+ file:
+ path: /srv/node
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable swift_proxy service
+ tags: step2
+ service: name=openstack-swift-proxy state=stopped enabled=no
diff --git a/docker/services/swift-ringbuilder.yaml b/docker/services/swift-ringbuilder.yaml
index de91e7cf..21102505 100644
--- a/docker/services/swift-ringbuilder.yaml
+++ b/docker/services/swift-ringbuilder.yaml
@@ -66,15 +66,17 @@ outputs:
value:
service_name: {get_attr: [SwiftRingbuilderBase, role_data, service_name]}
config_settings: {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
- step_config: {get_attr: [SwiftRingbuilderBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [SwiftRingbuilderBase, role_data, step_config]
service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]}
- puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance
# BEGIN DOCKER SETTINGS
- docker_image: &docker_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
- config_volume: 'swift'
- config_image: *docker_image
+ puppet_config:
+ config_volume: 'swift'
+ puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
kolla_config: {}
docker_config: {}
diff --git a/docker/services/swift-storage.yaml b/docker/services/swift-storage.yaml
index 5b2ec6e6..5044c54c 100644
--- a/docker/services/swift-storage.yaml
+++ b/docker/services/swift-storage.yaml
@@ -41,6 +41,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
SwiftStorageBase:
type: ../../puppet/services/swift-storage.yaml
properties:
@@ -54,16 +57,18 @@ outputs:
value:
service_name: {get_attr: [SwiftStorageBase, role_data, service_name]}
config_settings: {get_attr: [SwiftStorageBase, role_data, config_settings]}
- step_config: {get_attr: [SwiftStorageBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [SwiftStorageBase, role_data, step_config]
service_config_settings: {get_attr: [SwiftStorageBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &swift_proxy_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
- puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config
- config_volume: swift
- config_image: *swift_proxy_image
+ puppet_config:
+ config_volume: swift
+ puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config
+ step_config: *step_config
+ config_image: &swift_proxy_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
kolla_config:
/var/lib/kolla/config_files/swift_account_auditor.json:
command: /usr/bin/swift-account-auditor /etc/swift/account-server.conf
@@ -97,90 +102,86 @@ outputs:
# volume during the configuration stage. We just need to create this
# directory and make sure it's owned by swift.
swift_setup_srv:
- image:
+ image: &swift_account_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
user: root
- command: ['/bin/bash', '-c', 'mkdir /srv/node && chown swift:swift /srv/node']
+ command: ['chown', '-R', 'swift:', '/srv/node']
volumes:
- - swift-srv:/srv
+ - /srv/node:/srv/node
step_4:
swift_account_auditor:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ image: *swift_account_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: &kolla_env
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
swift_account_reaper:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ image: *swift_account_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: *kolla_env
swift_account_replicator:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ image: *swift_account_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: *kolla_env
swift_account_server:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ image: *swift_account_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: *kolla_env
swift_container_auditor:
- image:
+ image: &swift_container_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
@@ -188,71 +189,70 @@ outputs:
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: *kolla_env
swift_container_replicator:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ image: *swift_container_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: *kolla_env
swift_container_updater:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ image: *swift_container_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: *kolla_env
swift_container_server:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ image: *swift_container_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: *kolla_env
swift_object_auditor:
- image:
+ image: &swift_object_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
@@ -260,84 +260,104 @@ outputs:
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: *kolla_env
swift_object_expirer:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ image: *swift_proxy_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: *kolla_env
swift_object_replicator:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ image: *swift_object_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: *kolla_env
swift_object_updater:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ image: *swift_object_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: *kolla_env
swift_object_server:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ image: *swift_object_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
environment: *kolla_env
+ host_prep_tasks:
+ - name: create /srv/node
+ file:
+ path: /srv/node
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable swift storage services
+ tags: step2
+ service: name={{ item }} state=stopped enabled=no
+ with_items:
+ - openstack-swift-account-auditor
+ - openstack-swift-account-reaper
+ - openstack-swift-account-replicator
+ - openstack-swift-account
+ - openstack-swift-container-auditor
+ - openstack-swift-container-replicator
+ - openstack-swift-container-updater
+ - openstack-swift-container
+ - openstack-swift-object-auditor
+ - openstack-swift-object-replicator
+ - openstack-swift-object-updater
+ - openstack-swift-object
diff --git a/docker/services/zaqar.yaml b/docker/services/zaqar.yaml
index 9f248ce1..fdb353bc 100644
--- a/docker/services/zaqar.yaml
+++ b/docker/services/zaqar.yaml
@@ -29,6 +29,9 @@ parameters:
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
ZaqarBase:
type: ../../puppet/services/zaqar.yaml
properties:
@@ -42,35 +45,23 @@ outputs:
value:
service_name: {get_attr: [ZaqarBase, role_data, service_name]}
config_settings: {get_attr: [ZaqarBase, role_data, config_settings]}
- step_config: {get_attr: [ZaqarBase, role_data, step_config]}
+ step_config: &step_config
+ get_attr: [ZaqarBase, role_data, step_config]
service_config_settings: {get_attr: [ZaqarBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &zaqar_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
- puppet_tags: zaqar_config
- config_volume: zaqar
- config_image: *zaqar_image
+ puppet_config:
+ config_volume: zaqar
+ puppet_tags: zaqar_config
+ step_config: *step_config
+ config_image: &zaqar_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
kolla_config:
/var/lib/kolla/config_files/zaqar.json:
command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf
- config_files:
- - dest: /etc/zaqar/zaqar.conf
- owner: zaqar
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf
/var/lib/kolla/config_files/zaqar_websocket.json:
command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf --config-file /etc/zaqar/1.conf
- config_files:
- - dest: /etc/zaqar/zaqar.conf
- owner: zaqar
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf
- - dest: /etc/zaqar/1.conf
- owner: zaqar
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/zaqar/1.conf
docker_config:
step_4:
zaqar:
@@ -79,10 +70,13 @@ outputs:
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
zaqar_websocket:
@@ -91,9 +85,17 @@ outputs:
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ yaql:
+ expression: $.data.common.concat($.data.service)
+ data:
+ common: {get_attr: [ContainersCommon, volumes]}
+ service:
+ - /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable zaqar service
+ tags: step2
+ service: name=openstack-zaqar.service state=stopped enabled=no
+
diff --git a/environments/cadf.yaml b/environments/cadf.yaml
new file mode 100644
index 00000000..af5c7fdf
--- /dev/null
+++ b/environments/cadf.yaml
@@ -0,0 +1,2 @@
+parameter_defaults:
+ KeystoneNotificationFormat: cadf
diff --git a/environments/cinder-netapp-config.yaml b/environments/cinder-netapp-config.yaml
index b9a84342..dfd15893 100644
--- a/environments/cinder-netapp-config.yaml
+++ b/environments/cinder-netapp-config.yaml
@@ -1,7 +1,7 @@
# A Heat environment file which can be used to enable a
# Cinder NetApp backend, configured via puppet
resource_registry:
- OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
+ OS::TripleO::Services::CinderBackendNetApp: ../puppet/services/cinder-backend-netapp.yaml
parameter_defaults:
CinderEnableNetappBackend: true
diff --git a/environments/contrail/contrail-net.yaml b/environments/contrail/contrail-net.yaml
index 1e64f91d..cca9beac 100644
--- a/environments/contrail/contrail-net.yaml
+++ b/environments/contrail/contrail-net.yaml
@@ -8,7 +8,7 @@ resource_registry:
parameter_defaults:
ControlPlaneSubnetCidr: '24'
- ControlPlaneDefaultRoute: 192.0.2.254
+ ControlPlaneDefaultRoute: 192.168.24.254
InternalApiNetCidr: 10.0.0.0/24
InternalApiAllocationPools: [{'start': '10.0.0.10', 'end': '10.0.0.200'}]
InternalApiDefaultRoute: 10.0.0.1
@@ -17,7 +17,7 @@ parameter_defaults:
ManagementInterfaceDefaultRoute: 10.1.0.1
ExternalNetCidr: 10.2.0.0/24
ExternalAllocationPools: [{'start': '10.2.0.10', 'end': '10.2.0.200'}]
- EC2MetadataIp: 192.0.2.1 # Generally the IP of the Undercloud
+ EC2MetadataIp: 192.168.24.1 # Generally the IP of the Undercloud
DnsServers: ["8.8.8.8","8.8.4.4"]
VrouterPhysicalInterface: eth1
VrouterGateway: 10.0.0.1
diff --git a/environments/contrail/roles_data_contrail.yaml b/environments/contrail/roles_data_contrail.yaml
index 5f6c4691..d6d6f291 100644
--- a/environments/contrail/roles_data_contrail.yaml
+++ b/environments/contrail/roles_data_contrail.yaml
@@ -29,6 +29,7 @@
CountDefault: 1
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephMds
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
@@ -115,6 +116,7 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
@@ -140,6 +142,7 @@
- name: BlockStorage
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -156,6 +159,7 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
@@ -173,6 +177,7 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -188,6 +193,7 @@
- name: ContrailController
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailConfig
- OS::TripleO::Services::ContrailControl
- OS::TripleO::Services::ContrailDatabase
@@ -203,6 +209,7 @@
- name: ContrailAnalytics
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailAnalytics
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -215,6 +222,7 @@
- name: ContrailAnalyticsDatabase
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailAnalyticsDatabase
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -227,6 +235,7 @@
- name: ContrailTsn
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailTsn
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
diff --git a/environments/deployed-server-environment.j2.yaml b/environments/deployed-server-environment.j2.yaml
new file mode 100644
index 00000000..327934da
--- /dev/null
+++ b/environments/deployed-server-environment.j2.yaml
@@ -0,0 +1,11 @@
+resource_registry:
+ OS::TripleO::Server: ../deployed-server/deployed-server.yaml
+ OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port
+ OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
+
+{% for role in roles %}
+ # Default nic config mappings
+ OS::TripleO::{{role.name}}::Net::SoftwareConfig: ../net-config-static.yaml
+{% endfor %}
+
+ OS::TripleO::ControllerDeployedServer::Net::SoftwareConfig: ../net-config-static-bridge.yaml
diff --git a/environments/deployed-server-environment.yaml b/environments/deployed-server-environment.yaml
deleted file mode 100644
index 7bc1bd9b..00000000
--- a/environments/deployed-server-environment.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-resource_registry:
- OS::TripleO::Server: ../deployed-server/deployed-server.yaml
- OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port
- OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
diff --git a/environments/docker-services-tls-everywhere.yaml b/environments/docker-services-tls-everywhere.yaml
new file mode 100644
index 00000000..ec39951b
--- /dev/null
+++ b/environments/docker-services-tls-everywhere.yaml
@@ -0,0 +1,28 @@
+# This environment contains the services that can work with TLS-everywhere.
+resource_registry:
+ # This can be used when you don't want to run puppet on the host,
+ # e.g. atomic, but it has been replaced with OS::TripleO::Services::Docker
+ # OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+ OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
+ # The compute node still needs extra initialization steps
+ OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+
+ # NOTE: add roles to be docker enabled as we support them.
+ OS::TripleO::Services::Keystone: ../docker/services/keystone.yaml
+
+ OS::TripleO::PostDeploySteps: ../docker/post.yaml
+ OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
+
+ OS::TripleO::Services: ../docker/services/services.yaml
+
+parameter_defaults:
+ # Defaults to 'tripleoupstream'. Specify a local docker registry
+ # Example: 192.168.24.1:8787/tripleoupstream
+ DockerNamespace: tripleoupstream
+ DockerNamespaceIsRegistry: false
+
+ ComputeServices:
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::ComputeNeutronOvsAgent
+ - OS::TripleO::Services::Docker
diff --git a/environments/docker.yaml b/environments/docker.yaml
index cb13c5c3..755e94c2 100644
--- a/environments/docker.yaml
+++ b/environments/docker.yaml
@@ -1,5 +1,10 @@
resource_registry:
- OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+ # This can be used when you don't want to run puppet on the host,
+ # e.g. atomic, but it has been replaced with OS::TripleO::Services::Docker
+ # OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+ OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
+ # The compute node still needs extra initialization steps
+ OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
#NOTE (dprince) add roles to be docker enabled as we support them
OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml
@@ -14,11 +19,6 @@ resource_registry:
OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml
OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml
OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml
- # FIXME: these need to go into a environments/services-docker dir?
- OS::TripleO::Services::NovaIronic: ../docker/services/nova-ironic.yaml
- OS::TripleO::Services::IronicApi: ../docker/services/ironic-api.yaml
- OS::TripleO::Services::IronicConductor: ../docker/services/ironic-conductor.yaml
- OS::TripleO::Services::IronicPxe: ../docker/services/ironic-pxe.yaml
OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
@@ -26,18 +26,24 @@ resource_registry:
OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
- OS::TripleO::Services::MistralApi: ../docker/services/mistral-api.yaml
- OS::TripleO::Services::MistralEngine: ../docker/services/mistral-engine.yaml
- OS::TripleO::Services::MistralExecutor: ../docker/services/mistral-executor.yaml
- OS::TripleO::Services::Zaqar: ../docker/services/zaqar.yaml
OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml
OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml
OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml
OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml
+ OS::TripleO::Services::GnocchiApi: ../docker/services/gnocchi-api.yaml
+ OS::TripleO::Services::GnocchiMetricd: ../docker/services/gnocchi-metricd.yaml
+ OS::TripleO::Services::GnocchiStatsd: ../docker/services/gnocchi-statsd.yaml
+ OS::TripleO::Services::AodhApi: ../docker/services/aodh-api.yaml
+ OS::TripleO::Services::AodhEvaluator: ../docker/services/aodh-evaluator.yaml
+ OS::TripleO::Services::AodhNotifier: ../docker/services/aodh-notifier.yaml
+ OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
+ OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
OS::TripleO::PostDeploySteps: ../docker/post.yaml
+ OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
+
OS::TripleO::Services: ../docker/services/services.yaml
parameter_defaults:
@@ -50,3 +56,4 @@ parameter_defaults:
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::ComputeNeutronOvsAgent
+ - OS::TripleO::Services::Docker
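Operational note: when the images are mirrored to a local registry instead of being pulled from the default 'tripleoupstream' namespace, the two namespace parameters can be overridden; the address below is only the example value quoted in the docker-services-tls-everywhere.yaml comments above:

parameter_defaults:
  DockerNamespace: 192.168.24.1:8787/tripleoupstream
  DockerNamespaceIsRegistry: true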
diff --git a/environments/enable-internal-tls.yaml b/environments/enable-internal-tls.yaml
index ff4ecfbe..2fdecb4f 100644
--- a/environments/enable-internal-tls.yaml
+++ b/environments/enable-internal-tls.yaml
@@ -2,15 +2,17 @@
# a TLS for the internal network via certmonger
parameter_defaults:
EnableInternalTLS: true
+ RabbitClientUseSSL: true
# Required for novajoin to enroll the overcloud nodes
ServerMetadata:
ipa_enroll: True
resource_registry:
+ OS::TripleO::Services::CertmongerUser: ../puppet/services/certmonger-user.yaml
+
OS::TripleO::Services::HAProxyInternalTLS: ../puppet/services/haproxy-internal-tls-certmonger.yaml
- OS::TripleO::Services::ApacheTLS: ../puppet/services/apache-internal-tls-certmonger.yaml
- OS::TripleO::Services::MySQLTLS: ../puppet/services/database/mysql-internal-tls-certmonger.yaml
+
# We use apache as a TLS proxy
OS::TripleO::Services::TLSProxyBase: ../puppet/services/apache.yaml
diff --git a/environments/external-loadbalancer-vip-v6.yaml b/environments/external-loadbalancer-vip-v6.yaml
index fbd1fb98..c8375fc7 100644
--- a/environments/external-loadbalancer-vip-v6.yaml
+++ b/environments/external-loadbalancer-vip-v6.yaml
@@ -1,4 +1,9 @@
resource_registry:
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external_v6.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api_v6.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage_v6.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt_v6.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip_v6.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool_v6.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool_v6.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool_v6.yaml
@@ -13,7 +18,7 @@ parameter_defaults:
# to control your VIPs (currently one per network)
# NOTE: we will eventually move to one VIP per service
#
- ControlFixedIPs: [{'ip_address':'192.0.2.251'}]
+ ControlFixedIPs: [{'ip_address':'192.168.24.251'}]
PublicVirtualFixedIPs: [{'ip_address':'2001:db8:fd00:1000:0000:0000:0000:0005'}]
InternalApiVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0005'}]
StorageVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:3000:0000:0000:0000:0005'}]
diff --git a/environments/external-loadbalancer-vip.yaml b/environments/external-loadbalancer-vip.yaml
index 1759c04c..33f145d9 100644
--- a/environments/external-loadbalancer-vip.yaml
+++ b/environments/external-loadbalancer-vip.yaml
@@ -1,4 +1,9 @@
resource_registry:
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
@@ -12,7 +17,7 @@ parameter_defaults:
# to control your VIPs (currently one per network)
# NOTE: we will eventually move to one VIP per service
#
- ControlFixedIPs: [{'ip_address':'192.0.2.251'}]
+ ControlFixedIPs: [{'ip_address':'192.168.24.251'}]
PublicVirtualFixedIPs: [{'ip_address':'10.0.0.251'}]
InternalApiVirtualFixedIPs: [{'ip_address':'172.16.2.251'}]
StorageVirtualFixedIPs: [{'ip_address':'172.16.1.251'}]
diff --git a/environments/fixed-ip-vips-v6.yaml b/environments/fixed-ip-vips-v6.yaml
new file mode 100644
index 00000000..c288d7b0
--- /dev/null
+++ b/environments/fixed-ip-vips-v6.yaml
@@ -0,0 +1,21 @@
+# This template allows the IPs to be preselected for each VIP. Note that
+# this template should be included after other templates which affect the
+# network such as network-isolation.yaml.
+
+resource_registry:
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external_v6.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api_v6.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage_v6.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt_v6.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+parameter_defaults:
+ # Set the IP addresses of the VIPs here.
+ # NOTE: we will eventually move to one VIP per service
+ #
+ ControlFixedIPs: [{'ip_address':'192.168.24.240'}]
+ PublicVirtualFixedIPs: [{'ip_address':'2001:db8:fd00:1000:0000:0000:0000:0005'}]
+ InternalApiVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0005'}]
+ StorageVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:3000:0000:0000:0000:0005'}]
+ StorageMgmtVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:4000:0000:0000:0000:0005'}]
+ RedisVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0006'}]
diff --git a/environments/fixed-ip-vips.yaml b/environments/fixed-ip-vips.yaml
new file mode 100644
index 00000000..3860f41d
--- /dev/null
+++ b/environments/fixed-ip-vips.yaml
@@ -0,0 +1,21 @@
+# This template allows the IPs to be preselected for each VIP. Note that
+# this template should be included after other templates which affect the
+# network such as network-isolation.yaml.
+
+resource_registry:
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+parameter_defaults:
+ # Set the IP addresses of the VIPs here.
+ # NOTE: we will eventually move to one VIP per service
+ #
+ ControlFixedIPs: [{'ip_address':'192.168.24.240'}]
+ PublicVirtualFixedIPs: [{'ip_address':'10.0.0.240'}]
+ InternalApiVirtualFixedIPs: [{'ip_address':'172.16.2.240'}]
+ StorageVirtualFixedIPs: [{'ip_address':'172.16.1.240'}]
+ StorageMgmtVirtualFixedIPs: [{'ip_address':'172.16.3.240'}]
+ RedisVirtualFixedIPs: [{'ip_address':'172.16.2.241'}]
diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml
index f59b0414..f1c90e2d 100644
--- a/environments/hyperconverged-ceph.yaml
+++ b/environments/hyperconverged-ceph.yaml
@@ -6,12 +6,14 @@ resource_registry:
parameter_defaults:
ComputeServices:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::Kernel
@@ -31,3 +33,4 @@ parameter_defaults:
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Vpp
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Docker
diff --git a/environments/logging-environment.yaml b/environments/logging-environment.yaml
index c583ca79..ae8bd7b9 100644
--- a/environments/logging-environment.yaml
+++ b/environments/logging-environment.yaml
@@ -18,7 +18,7 @@ resource_registry:
## (note the use of port 24284 for ssl connections)
#
# LoggingServers:
-# - host: 192.0.2.11
+# - host: 192.168.24.11
# port: 24284
# LoggingUsesSSL: true
# LoggingSharedKey: secret
diff --git a/environments/major-upgrade-all-in-one.yaml b/environments/major-upgrade-all-in-one.yaml
deleted file mode 100644
index 4283b212..00000000
--- a/environments/major-upgrade-all-in-one.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-resource_registry:
- OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
diff --git a/environments/major-upgrade-composable-steps-docker.yaml b/environments/major-upgrade-composable-steps-docker.yaml
new file mode 100644
index 00000000..40da726d
--- /dev/null
+++ b/environments/major-upgrade-composable-steps-docker.yaml
@@ -0,0 +1,11 @@
+resource_registry:
+ # FIXME(shardy) do we need to break major_upgrade_steps.yaml apart to
+ # enable docker-specific logic, or is just overriding PostUpgradeSteps
+ # enough (as we want to share the ansible tasks/steps etc.)
+ OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
+parameter_defaults:
+ EnableConfigPurge: false
+ UpgradeLevelNovaCompute: auto
+ UpgradeInitCommonCommand: |
+ #!/bin/bash
+ # Ocata to Pike, put any needed host-level workarounds here
diff --git a/environments/major-upgrade-composable-steps.yaml b/environments/major-upgrade-composable-steps.yaml
index 9ecc2251..8b1617f9 100644
--- a/environments/major-upgrade-composable-steps.yaml
+++ b/environments/major-upgrade-composable-steps.yaml
@@ -1,13 +1,13 @@
resource_registry:
OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
parameter_defaults:
+ EnableConfigPurge: true
UpgradeLevelNovaCompute: auto
UpgradeInitCommonCommand: |
#!/bin/bash
# Newton to Ocata, we need to remove old hiera hook data and
# install ansible heat agents and ansible-pacemaker
set -eu
- yum install -y openstack-heat-agents
yum install -y python-heat-agent-*
yum install -y ansible-pacemaker
rm -f /usr/libexec/os-apply-config/templates/etc/puppet/hiera.yaml
diff --git a/environments/major-upgrade-converge-docker.yaml b/environments/major-upgrade-converge-docker.yaml
new file mode 100644
index 00000000..a3816b50
--- /dev/null
+++ b/environments/major-upgrade-converge-docker.yaml
@@ -0,0 +1,8 @@
+# Use this to reset any mappings only used for upgrades after the
+# update of all nodes is completed
+resource_registry:
+ OS::TripleO::PostDeploySteps: ../docker/post.yaml
+parameter_defaults:
+ EnableConfigPurge: false
+ UpgradeLevelNovaCompute: ''
+ UpgradeInitCommonCommand: ''
diff --git a/environments/major-upgrade-converge.yaml b/environments/major-upgrade-converge.yaml
index f09fb20e..4e8bf46b 100644
--- a/environments/major-upgrade-converge.yaml
+++ b/environments/major-upgrade-converge.yaml
@@ -3,5 +3,6 @@
resource_registry:
OS::TripleO::PostDeploySteps: ../puppet/post.yaml
parameter_defaults:
+ EnableConfigPurge: false
UpgradeLevelNovaCompute: ''
UpgradeInitCommonCommand: ''
diff --git a/environments/network-environment.yaml b/environments/network-environment.yaml
index 210b6b03..3de5dba5 100644
--- a/environments/network-environment.yaml
+++ b/environments/network-environment.yaml
@@ -18,8 +18,8 @@ parameter_defaults:
# CIDR subnet mask length for provisioning network
ControlPlaneSubnetCidr: '24'
# Gateway router for the provisioning network (or Undercloud IP)
- ControlPlaneDefaultRoute: 192.0.2.254
- EC2MetadataIp: 192.0.2.1 # Generally the IP of the Undercloud
+ ControlPlaneDefaultRoute: 192.168.24.254
+ EC2MetadataIp: 192.168.24.1 # Generally the IP of the Undercloud
# Customize the IP subnets to match the local environment
InternalApiNetCidr: 172.17.0.0/24
StorageNetCidr: 172.18.0.0/24
diff --git a/environments/neutron-bgpvpn.yaml b/environments/neutron-bgpvpn.yaml
new file mode 100644
index 00000000..2a632480
--- /dev/null
+++ b/environments/neutron-bgpvpn.yaml
@@ -0,0 +1,16 @@
+# A Heat environment file that can be used to deploy Neutron BGPVPN service
+#
+# Currently there are four types of service providers for Neutron BGPVPN
+# The default option is a dummy driver that allows the API to be enabled.
+# In order to enable another backend, replace the content of BgpvpnServiceProvider
+#
+# - Bagpipe: BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe.BaGPipeBGPVPNDriver:default
+# - OpenContrail: BGPVPN:OpenContrail:networking_bgpvpn.neutron.services.service_drivers.opencontrail.opencontrail.OpenContrailBGPVPNDriver:default
+# - OpenDaylight: BGPVPN:OpenDaylight:networking_bgpvpn.neutron.services.service_drivers.opendaylight.odl.OpenDaylightBgpvpnDriver:default
+# - Nuage: BGPVPN:Nuage:nuage_neutron.bgpvpn.services.service_drivers.driver.NuageBGPVPNDriver:default
+resource_registry:
+ OS::TripleO::Services::NeutronBgpVpnApi: ../puppet/services/neutron-bgpvpn-api.yaml
+
+parameter_defaults:
+ NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
+ BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
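To illustrate the header comment about replacing BgpvpnServiceProvider, a minimal sketch that switches the dummy driver to the BaGPipe driver listed above (the override file name is only an example):

    # Sketch: override the default dummy BGPVPN driver with BaGPipe.
    cat > bgpvpn-bagpipe.yaml <<'EOF'
    parameter_defaults:
      BgpvpnServiceProvider: 'BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe.BaGPipeBGPVPNDriver:default'
    EOF
    openstack overcloud deploy --templates \
      -e environments/neutron-bgpvpn.yaml \
      -e bgpvpn-bagpipe.yaml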
diff --git a/environments/neutron-l2gw-api.yaml b/environments/neutron-l2gw-api.yaml
new file mode 100644
index 00000000..09894671
--- /dev/null
+++ b/environments/neutron-l2gw-api.yaml
@@ -0,0 +1,20 @@
+# A Heat environment file that can be used to deploy the Neutron L2 Gateway service
+#
+# Currently there are only two service providers for Neutron L2 Gateway.
+# The default option is a dummy driver that only enables the API.
+# To enable the other backend, replace the value of L2gwServiceProvider with:
+#
+# - L2 gateway agent: L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.rpc_l2gw.L2gwRpcDriver:default
+# - OpenDaylight: L2GW:OpenDaylight:networking_odl.l2gateway.driver.OpenDaylightL2gwDriver:default
+resource_registry:
+ OS::TripleO::Services::NeutronL2gwApi: ../puppet/services/neutron-l2gw-api.yaml
+
+parameter_defaults:
+ NeutronServicePlugins: "networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin"
+ L2gwServiceProvider: ["L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default"]
+
+ # Optional
+ # L2gwServiceDefaultInterfaceName:
+ # L2gwServiceDefaultDeviceName:
+ # L2gwServiceQuotaL2Gateway:
+ # L2gwServicePeriodicMonitoringInterval:
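Similarly, a hedged sketch of switching L2gwServiceProvider from the dummy driver to the L2 gateway agent driver listed in the header (the override file name is only an example):

    # Sketch: use the RPC-based L2 gateway agent driver instead of the dummy one.
    cat > l2gw-agent.yaml <<'EOF'
    parameter_defaults:
      L2gwServiceProvider: ['L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.rpc_l2gw.L2gwRpcDriver:default']
    EOF
    openstack overcloud deploy --templates \
      -e environments/neutron-l2gw-api.yaml \
      -e l2gw-agent.yaml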
diff --git a/environments/neutron-ml2-bigswitch.yaml b/environments/neutron-ml2-bigswitch.yaml
index 750d3c4e..8a4a144c 100644
--- a/environments/neutron-ml2-bigswitch.yaml
+++ b/environments/neutron-ml2-bigswitch.yaml
@@ -3,12 +3,17 @@
resource_registry:
OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
+ OS::TripleO::NeutronBigswitchAgent: ../puppet/services/neutron-bigswitch-agent.yaml
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
parameter_defaults:
# Required to fill in:
NeutronBigswitchRestproxyServers:
NeutronBigswitchRestproxyServerAuth:
- NeutronMechanismDrivers: bsn_ml2
+ NeutronMechanismDrivers: openvswitch,bsn_ml2
+ NeutronServicePlugins: bsn_l3,bsn_service_plugin
+ KeystoneNotificationDriver: messaging
# Optional:
# NeutronBigswitchRestproxyAutoSyncOnFailure:
@@ -19,3 +24,9 @@ parameter_defaults:
# NeutronBigswitchAgentEnabled:
# NeutronBigswitchLLDPEnabled:
+ ControllerExtraConfig:
+ neutron::agents::l3::enabled: false
+ neutron::agents::dhcp::enable_force_metadata: true
+ neutron::agents::dhcp::enable_isolated_metadata: true
+ neutron::agents::dhcp::enable_metadata_network: false
+ neutron::server::l3_ha: false
diff --git a/environments/neutron-ml2-cisco-n1kv.yaml b/environments/neutron-ml2-cisco-n1kv.yaml
index 651e9564..8d46e1ca 100644
--- a/environments/neutron-ml2-cisco-n1kv.yaml
+++ b/environments/neutron-ml2-cisco-n1kv.yaml
@@ -5,7 +5,7 @@ resource_registry:
OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
parameter_defaults:
- N1000vVSMIP: '192.0.2.50'
- N1000vMgmtGatewayIP: '192.0.2.1'
+ N1000vVSMIP: '192.168.24.50'
+ N1000vMgmtGatewayIP: '192.168.24.1'
N1000vVSMDomainID: '100'
N1000vVSMHostMgmtIntf: 'br-ex'
diff --git a/environments/neutron-nuage-config.yaml b/environments/neutron-nuage-config.yaml
index 74899246..601554a1 100644
--- a/environments/neutron-nuage-config.yaml
+++ b/environments/neutron-nuage-config.yaml
@@ -10,7 +10,6 @@ resource_registry:
OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-nuage.yaml
parameter_defaults:
- NeutronNuageOSControllerIp: '0.0.0.0'
NeutronNuageNetPartitionName: 'default_name'
NeutronNuageVSDIp: '0.0.0.0:0'
NeutronNuageVSDUsername: 'username'
diff --git a/environments/neutron-opendaylight.yaml b/environments/neutron-opendaylight.yaml
index ed7292b7..4644725d 100644
--- a/environments/neutron-opendaylight.yaml
+++ b/environments/neutron-opendaylight.yaml
@@ -3,6 +3,7 @@ resource_registry:
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+ OS::TripleO::Services::NeutronCorePlugin: ../puppet/services/neutron-plugin-ml2-odl.yaml
OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
diff --git a/environments/nova-api-policy.yaml b/environments/nova-api-policy.yaml
new file mode 100644
index 00000000..681bd010
--- /dev/null
+++ b/environments/nova-api-policy.yaml
@@ -0,0 +1,10 @@
+# A Heat environment file which can be used to configure access policies for
+# Nova API resources. It is provided as an example and covers only Nova,
+# not every service.
+# While editing policy.json files this way is supported, modifying the
+# policy can have unexpected side effects and is not encouraged.
+
+parameter_defaults:
+ # The target is "compute:get_all", the "list all instances" API of the Compute service.
+ # The rule is an empty string meaning "always". This policy allows anybody to list instances.
+ NovaApiPolicies: { nova-context_is_admin: { key: 'compute:get_all', value: '' } }
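As a hedged illustration of the NovaApiPolicies key/value format, the same mechanism can tighten a policy instead of opening it; the rule below is an example, not part of this change:

    # Sketch: restrict "list all instances" to the admin role rather than everyone.
    cat > nova-policy-admin-only.yaml <<'EOF'
    parameter_defaults:
      NovaApiPolicies: { nova-context_is_admin: { key: 'compute:get_all', value: 'role:admin' } }
    EOF
    # Pass it with -e nova-policy-admin-only.yaml on the deploy command line.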
diff --git a/environments/securetty.yaml b/environments/securetty.yaml
new file mode 100644
index 00000000..cdadf376
--- /dev/null
+++ b/environments/securetty.yaml
@@ -0,0 +1,12 @@
+resource_registry:
+ OS::TripleO::Services::Securetty: ../puppet/services/securetty.yaml
+
+parameter_defaults:
+ TtyValues:
+ - console
+ - tty1
+ - tty2
+ - tty3
+ - tty4
+ - tty5
+ - tty6
diff --git a/environments/services-docker/ironic.yaml b/environments/services-docker/ironic.yaml
new file mode 100644
index 00000000..e927ecb3
--- /dev/null
+++ b/environments/services-docker/ironic.yaml
@@ -0,0 +1,5 @@
+resource_registry:
+ OS::TripleO::Services::IronicApi: ../../docker/services/ironic-api.yaml
+ OS::TripleO::Services::IronicConductor: ../../docker/services/ironic-conductor.yaml
+ OS::TripleO::Services::IronicPxe: ../../docker/services/ironic-pxe.yaml
+ OS::TripleO::Services::NovaIronic: ../../docker/services/nova-ironic.yaml
diff --git a/environments/services-docker/mistral.yaml b/environments/services-docker/mistral.yaml
new file mode 100644
index 00000000..a215d2a0
--- /dev/null
+++ b/environments/services-docker/mistral.yaml
@@ -0,0 +1,4 @@
+resource_registry:
+ OS::TripleO::Services::MistralEngine: ../../docker/services/mistral-engine.yaml
+ OS::TripleO::Services::MistralApi: ../../docker/services/mistral-api.yaml
+ OS::TripleO::Services::MistralExecutor: ../../docker/services/mistral-executor.yaml
diff --git a/environments/services-docker/zaqar.yaml b/environments/services-docker/zaqar.yaml
new file mode 100644
index 00000000..ca0b3b15
--- /dev/null
+++ b/environments/services-docker/zaqar.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
diff --git a/environments/services/ceilometer-api.yaml b/environments/services/ceilometer-api.yaml
new file mode 100644
index 00000000..1e37e73b
--- /dev/null
+++ b/environments/services/ceilometer-api.yaml
@@ -0,0 +1,6 @@
+resource_registry:
+ OS::TripleO::Services::CeilometerApi: ../../puppet/services/ceilometer-api.yaml
+
+parameter_defaults:
+ CeilometerApiEndpoint: true
+
diff --git a/environments/services/disable-ceilometer-api.yaml b/environments/services/disable-ceilometer-api.yaml
deleted file mode 100644
index 94cd8d5d..00000000
--- a/environments/services/disable-ceilometer-api.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-resource_registry:
- OS::TripleO::Services::CeilometerApi: OS::Heat::None
diff --git a/environments/services/keystone_domain_specific_ldap_backend.yaml b/environments/services/keystone_domain_specific_ldap_backend.yaml
new file mode 100644
index 00000000..3cc9c7b7
--- /dev/null
+++ b/environments/services/keystone_domain_specific_ldap_backend.yaml
@@ -0,0 +1,18 @@
+# This is an example template showing how to configure keystone domain-specific
+# LDAP backends. It will configure a domain called tripleoldap with the
+# attributes specified.
+parameter_defaults:
+ KeystoneLDAPDomainEnable: true
+ KeystoneLDAPBackendConfigs:
+ tripleoldap:
+ url: ldap://192.168.24.251
+ user: cn=openstack,ou=Users,dc=tripleo,dc=example,dc=com
+ password: Secrete
+ suffix: dc=tripleo,dc=example,dc=com
+ user_tree_dn: ou=Users,dc=tripleo,dc=example,dc=com
+ user_filter: "(memberOf=cn=OSuser,ou=Groups,dc=tripleo,dc=example,dc=com)"
+ user_objectclass: person
+ user_id_attribute: cn
+ user_allow_create: false
+ user_allow_update: false
+ user_allow_delete: false
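Assuming the domain above deploys cleanly and the usual overcloudrc credentials file is available, a rough post-deploy check with python-openstackclient might look like:

    # Sketch: confirm the LDAP-backed domain exists and its users are readable.
    source overcloudrc
    openstack domain list
    openstack user list --domain tripleoldap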
diff --git a/environments/services/panko.yaml b/environments/services/panko.yaml
deleted file mode 100644
index 28bf99f6..00000000
--- a/environments/services/panko.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-resource_registry:
- OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml
diff --git a/environments/services/qdr.yaml b/environments/services/qdr.yaml
new file mode 100644
index 00000000..e4ad87bd
--- /dev/null
+++ b/environments/services/qdr.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::Qdr: ../../puppet/services/qdr.yaml
diff --git a/environments/swift-external.yaml b/environments/swift-external.yaml
new file mode 100644
index 00000000..0bf0d39e
--- /dev/null
+++ b/environments/swift-external.yaml
@@ -0,0 +1,12 @@
+resource_registry:
+ OS::TripleO::Services::ExternalSwiftProxy: ../puppet/services/external-swift-proxy.yaml
+ OS::TripleO::Services::SwiftProxy: OS::Heat::None
+ OS::TripleO::Services::SwiftStorage: OS::Heat::None
+ OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
+
+parameter_defaults:
+ ExternalPublicUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+ ExternalInternalUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+ ExternalAdminUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+ ExternalSwiftUserTenant: 'service'
+
diff --git a/environments/undercloud.yaml b/environments/undercloud.yaml
index 2540fbe5..7a2716da 100644
--- a/environments/undercloud.yaml
+++ b/environments/undercloud.yaml
@@ -11,6 +11,7 @@ parameter_defaults:
NeutronBridgeMappings: ctlplane:br-ctlplane
NeutronAgentExtensions: []
NeutronFlatNetworks: '*'
+ NeutronDnsDomain: ''
NovaSchedulerAvailableFilters: 'tripleo_common.filters.list.tripleo_filters'
NovaSchedulerDefaultFilters: ['RetryFilter', 'TripleOCapabilitiesFilter', 'ComputeCapabilitiesFilter', 'AvailabilityZoneFilter', 'RamFilter', 'DiskFilter', 'ComputeFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']
NeutronDhcpAgentsPerNetwork: 2
diff --git a/environments/updates/update-from-192_0_2-subnet.yaml b/environments/updates/update-from-192_0_2-subnet.yaml
new file mode 100644
index 00000000..1813e7be
--- /dev/null
+++ b/environments/updates/update-from-192_0_2-subnet.yaml
@@ -0,0 +1,3 @@
+parameter_defaults:
+ ControlPlaneDefaultRoute: 192.0.2.1
+ EC2MetadataIp: 192.0.2.1
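A short sketch of when this environment applies: clouds originally deployed with the old 192.0.2 provisioning defaults that are being updated against templates whose defaults now point at 192.168.24 (see network-environment.yaml above). The environment list is illustrative and would include whatever files the original deployment used:

    # Sketch: keep the original ctlplane addressing when updating an older cloud.
    openstack overcloud deploy --templates \
      -e environments/updates/update-from-192_0_2-subnet.yaml
    # ...plus the environments from the original deployment.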
diff --git a/extraconfig/nova_metadata/krb-service-principals.yaml b/extraconfig/nova_metadata/krb-service-principals.yaml
index c66e6460..56d3cbc0 100644
--- a/extraconfig/nova_metadata/krb-service-principals.yaml
+++ b/extraconfig/nova_metadata/krb-service-principals.yaml
@@ -46,7 +46,7 @@ resources:
# Filter null values and values that contain don't contain
# 'metadata_settings', get the values from that key and get the
# unique ones.
- expression: list($.data.where($ != null).where($.containsKey('metadata_settings')).metadata_settings.flatten().distinct())
+ expression: list(coalesce($.data, []).where($ != null).where($.containsKey('metadata_settings')).metadata_settings.flatten().distinct())
data: {get_param: RoleData}
# Generates entries for nova metadata with the following format:
@@ -57,7 +57,7 @@ resources:
properties:
value:
yaql:
- expression: let(fqdns => $.data.fqdns) -> dict($.data.metadata.where($ != null and $.type = 'vip').select([concat('managed_service_', $.service, $.network), concat($.service, '/', $fqdns.get($.network))]))
+ expression: let(fqdns => $.data.fqdns) -> dict(coalesce($.data.metadata, []).where($ != null and $.type = 'vip').select([concat('managed_service_', $.service, $.network), concat($.service, '/', $fqdns.get($.network))]))
data:
metadata: {get_attr: [IncomingMetadataSettings, value]}
fqdns:
@@ -72,7 +72,7 @@ resources:
properties:
value:
yaql:
- expression: dict($.data.where($ != null and $.type = 'node').select([$.service, $.network.replace('_', '')]).groupBy($[0], $[1]))
+ expression: dict(coalesce($.data, []).where($ != null and $.type = 'node').select([$.service, $.network.replace('_', '')]).groupBy($[0], $[1]))
data: {get_attr: [IncomingMetadataSettings, value]}
outputs:
diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
index e8316c53..30a83550 100644
--- a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
@@ -53,6 +53,12 @@ parameters:
type: string
rhel_reg_http_proxy_password:
type: string
+ UpdateOnRHELRegistration:
+ type: boolean
+ default: false
+ description: |
+ When enabled, the system will run a yum update after completing the
+ RHEL registration process.
resources:
@@ -134,6 +140,37 @@ resources:
input_values:
REG_METHOD: {get_param: rhel_reg_method}
+ YumUpdateConfigurationAfterRHELRegistration:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ set -x
+ num_updates=$(yum list -q updates | wc -l)
+ if [ "$num_updates" -eq "0" ]; then
+ echo "No packages require updating"
+ exit 0
+ fi
+ full_command="yum -q -y update"
+ echo "Running: $full_command"
+ result=$($full_command)
+ return_code=$?
+ echo "$result"
+ echo "yum return code: $return_code"
+ exit $return_code
+
+ UpdateDeploymentAfterRHELRegistration:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: RHELRegistrationDeployment
+ conditions:
+ update_requested: {get_param: UpdateOnRHELRegistration}
+ properties:
+ name: UpdateDeploymentAfterRHELRegistration
+ config: {get_resource: YumUpdateConfigurationAfterRHELRegistration}
+ server: {get_param: server}
+ actions: ['CREATE'] # Only do this on CREATE
+
outputs:
deploy_stdout:
description: Deployment reference, used to trigger puppet apply on changes
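A small sketch of opting into the new behaviour; the override file name is illustrative, and it would be passed alongside the usual rhel-registration environment files (not shown in this hunk):

    # Sketch: run a yum update right after RHEL registration completes.
    cat > rhel-reg-update.yaml <<'EOF'
    parameter_defaults:
      UpdateOnRHELRegistration: true
    EOF
    # Pass it with -e rhel-reg-update.yaml on the deploy command line.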
diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
index 6f83cc4b..d14ed73f 100644
--- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
+++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
@@ -11,7 +11,7 @@ if [ -e $OK ] ; then
exit 0
fi
-retryCount=0
+retry_max_count=10
opts=
config_opts=
attach_opts=
@@ -157,27 +157,41 @@ else
fi
function retry() {
- if [[ $retryCount < 3 ]]; then
- $@
- if ! [[ $? == 0 ]]; then
- retryCount=$(echo $retryCount + 1 | bc)
- echo "WARN: Failed to connect when running '$@', retrying..."
- retry $@
- else
- retryCount=0
+ # Inhibit -e since we want to retry without exiting.
+ set +e
+ # Retry delay (seconds)
+ retry_delay=2.0
+ retry_count=0
+ mycli="$@"
+ while [ $retry_count -lt ${retry_max_count} ]
+ do
+ echo "INFO: Sleeping ${retry_delay} ..."
+ sleep ${retry_delay}
+ echo "INFO: Executing '${mycli}' ..."
+ ${mycli}
+ if [ $? -eq 0 ]; then
+ echo "INFO: Ran '${mycli}' successfully, not retrying..."
+ break
+ else
+ echo "WARN: Failed to connect when running '${mycli}', retrying (attempt #$retry_count )..."
+ retry_count=$(echo $retry_count + 1 | bc)
+ fi
+ done
+
+ if [ $retry_count -ge ${retry_max_count} ]; then
+ echo "ERROR: Failed to connect after ${retry_max_count} attempts when running '${mycli}'"
+ exit 1
fi
- else
- echo "ERROR: Failed to connect after 3 attempts when running '$@'"
- exit 1
- fi
+ # Re-enable -e when exiting retry()
+ set -e
}
function detect_satellite_version {
ping_api=$REG_SAT_URL/katello/api/ping
- if curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
+ if curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
echo Satellite 6 detected at $REG_SAT_URL
satellite_version=6
- elif curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+ elif curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
echo Satellite 5 detected at $REG_SAT_URL
satellite_version=5
else
@@ -220,16 +234,15 @@ case "${REG_METHOD:-}" in
detect_satellite_version
if [ "$satellite_version" = "6" ]; then
repos="$repos --enable ${satellite_repo}"
- curl --retry 3 --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+ curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
retry subscription-manager register $opts
retry subscription-manager $repos
retry yum install -y katello-agent || true # needed for errata reporting to satellite6
katello-package-upload
- retry subscription-manager repos --disable ${satellite_repo}
else
pushd /usr/share/rhn/
- curl --retry 3 --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+ curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
popd
retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
fi
diff --git a/extraconfig/tasks/aodh_data_migration.sh b/extraconfig/tasks/aodh_data_migration.sh
deleted file mode 100644
index d4c29673..00000000
--- a/extraconfig/tasks/aodh_data_migration.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# This delivers the aodh data migration script to be invoked as part of the tripleo
-# major upgrade workflow to migrate all the alarm data from mongodb to mysql.
-# This needs to run post controller node upgrades so new aodh mysql db configured and
-# running.
-#
-set -eu
-
-#Get existing mongodb connection
-MONGO_DB_CONNECTION="$(crudini --get /etc/ceilometer/ceilometer.conf database connection)"
-
-# Get the aodh database string from hiera data
-MYSQL_DB_CONNECTION="$(crudini --get /etc/aodh/aodh.conf database connection)"
-
-#Run migration
-/usr/bin/aodh-data-migration --nosql-conn $MONGO_DB_CONNECTION --sql-conn $MYSQL_DB_CONNECTION
-
-
diff --git a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
deleted file mode 100644
index cf5d7a84..00000000
--- a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-heat_template_version: ocata
-
-description: >
- Software-config for ceilometer configuration under httpd during upgrades
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-resources:
- CeilometerWsgiMitakaNewtonPreUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- config:
- get_file: mitaka_to_newton_ceilometer_wsgi_upgrade.pp
-
- CeilometerWsgiMitakaNewtonUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - "#!/bin/bash\n\nset -e\n\n"
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - "disable_standalone_ceilometer_api\n\n"
-
- CeilometerWsgiMitakaNewtonPostUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: |
- #!/bin/bash
- set -e
- /usr/bin/systemctl reload httpd
-
- CeilometerWsgiMitakaNewtonPreUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- name: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonPreUpgradeConfig}
-
- CeilometerWsgiMitakaNewtonUpgradeConfigDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
- properties:
- name: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonUpgradeConfig}
-
- CeilometerWsgiMitakaNewtonPostUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
- properties:
- name: CeilometerWsgiMitakaNewtonPostUpgradeDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonPostUpgradeConfig}
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
deleted file mode 100755
index 8bdff5e7..00000000
--- a/extraconfig/tasks/major_upgrade_check.sh
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-check_cluster()
-{
- if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
- echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
- exit 1
- fi
-}
-
-check_pcsd()
-{
- if pcs status 2>&1 | grep -E 'Offline'; then
- echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
- exit 1
- fi
-}
-
-mysql_need_update()
-{
- # Shall we upgrade mysql data directory during the stack upgrade?
- if [ "$mariadb_do_major_upgrade" = "auto" ]; then
- ret=$(is_mysql_upgrade_needed)
- if [ $ret = "1" ]; then
- DO_MYSQL_UPGRADE=1
- else
- DO_MYSQL_UPGRADE=0
- fi
- echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
- elif [ "$mariadb_do_major_upgrade" = "no" ]; then
- DO_MYSQL_UPGRADE=0
- else
- DO_MYSQL_UPGRADE=1
- fi
-}
-
-check_disk_for_mysql_dump()
-{
- # Where to backup current database if mysql need to be upgraded
- MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
- MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
- # Spare disk ratio for extra safety
- MYSQL_BACKUP_SIZE_RATIO=1.2
-
- mysql_need_update
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-
- if [ -d "$MYSQL_BACKUP_DIR" ]; then
- echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
- exit 1
- fi
- mkdir "$MYSQL_BACKUP_DIR"
- if [ $? -ne 0 ]; then
- echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
- exit 1
- fi
-
- # the /root/.my.cnf is needed because we set the mysql root
- # password from liberty onwards
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- # While not ideal, this step allows us to calculate exactly how much space the dump
- # will need. Our main goal here is avoiding any chance of corruption due to disk space
- # exhaustion
- backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
- database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
- free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
- # we need at least space for a new mysql database + dump of the existing one,
- # times a small factor for additional safety room
- # note: bash doesn't do floating point math or floats in if statements,
- # so use python to apply the ratio and cast it back to integer
- required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
- if [ $required_space -ge $free_space ]; then
- echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
- exit 1
- fi
- fi
- fi
-}
-
-check_python_rpm()
-{
- # If for some reason rpm-python are missing we want to error out early enough
- if ! rpm -q rpm-python &> /dev/null; then
- echo_error "ERROR: upgrade cannot start without rpm-python installed"
- exit 1
- fi
-}
-
-check_clean_cluster()
-{
- if pcs status | grep -q Stopped:; then
- echo_error "ERROR: upgrade cannot start with stopped resources on the cluster. Make sure that all the resources are up and running."
- exit 1
- fi
-}
-
-check_galera_root_password()
-{
- # BZ: 1357112
- if [ ! -e /root/.my.cnf ]; then
- echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
- exit 1
- fi
-}
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
deleted file mode 100755
index 080831ab..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-check_cluster
-check_pcsd
-if [[ -n $(is_bootstrap_node) ]]; then
- check_clean_cluster
-fi
-check_python_rpm
-check_galera_root_password
-check_disk_for_mysql_dump
-
-# We want to disable fencing during the cluster --stop as it might fence
-# nodes where a service fails to stop, which could be fatal during an upgrade
-# procedure. So we remember the stonith state. If it was enabled we reenable it
-# at the end of this script
-if [[ -n $(is_bootstrap_node) ]]; then
- STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
- # We create this empty file if stonith was set to true so we can reenable stonith in step2
- rm -f /var/tmp/stonith-true
- if [ $STONITH_STATE == "true" ]; then
- touch /var/tmp/stonith-true
- fi
- pcs property set stonith-enabled=false
-fi
-
-# Migrate to HA NG and fix up rabbitmq queues
-# We fix up the rabbitmq ha queues after the migration because it will
-# restart the rabbitmq resource. Doing it after the migration means no other
-# services will be restart as there are no other constraints
-if [[ -n $(is_bootstrap_node) ]]; then
- migrate_full_to_ng_ha
- rabbitmq_newton_ocata_upgrade
-fi
-
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
deleted file mode 100755
index 8b900842..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-cluster_sync_timeout=1800
-
-# After migrating the cluster to HA-NG the services not under pacemaker's control
-# are still up and running. We need to stop them explicitely otherwise during the yum
-# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
-# is going to take a long time because rabbit is down. By having the service stopped
-# systemctl try-restart is a noop
-
-for service in $(services_to_migrate); do
- manage_systemd_service stop "${service%%-clone}"
- # So the reason for not reusing check_resource_systemd is that
- # I have observed systemctl is-active returning unknown with at least
- # one service that was stopped (See LP 1627254)
- timeout=600
- tstart=$(date +%s)
- tend=$(( $tstart + $timeout ))
- check_interval=3
- while (( $(date +%s) < $tend )); do
- if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
- echo "$service still active, sleeping $check_interval seconds."
- sleep $check_interval
- else
- # we do not care if it is inactive, unknown or failed as long as it is
- # not running
- break
- fi
-
- done
-done
-
-# In case the mysql package is updated, the database on disk must be
-# upgraded as well. This typically needs to happen during major
-# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
-#
-# Because in-place upgrades are not supported across 2+ major versions
-# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
-# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
-#
-# The default is to determine automatically if upgrade is needed based
-# on mysql package versionning, but this can be overriden manually
-# to support specific upgrade scenario
-
-# Calling this function will set the DO_MYSQL_UPGRADE variable which is used
-# later
-mysql_need_update
-
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
- cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
- fi
-
- pcs resource disable redis
- check_resource redis stopped 600
- pcs resource disable rabbitmq
- check_resource rabbitmq stopped 600
- pcs resource disable galera
- check_resource galera stopped 600
- pcs resource disable openstack-cinder-volume
- check_resource openstack-cinder-volume stopped 600
- # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
- pcs resource disable $vip
- check_resource $vip stopped 60
- done
- pcs cluster stop --all
-fi
-
-
-# Swift isn't controlled by pacemaker
-systemctl_swift stop
-
-tstart=$(date +%s)
-while systemctl is-active pacemaker; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_sync_timeout )) ; then
- echo_error "ERROR: cluster shutdown timed out"
- exit 1
- fi
-done
-
-# The reason we do an sql dump *and* we move the old dir out of
-# the way is because it gives us an extra level of safety in case
-# something goes wrong during the upgrade. Once the restore is
-# successful we go ahead and remove it. If the directory exists
-# we bail out as it means the upgrade process had issues in the last
-# run.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
- echo_error "ERROR: mysql backup dir already exist"
- exit 1
- fi
- mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-special_case_ovs_upgrade_if_needed
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y -q update
-
-# We need to ensure at least those two configuration settings, otherwise
-# mariadb 10.1+ won't activate galera replication.
-# wsrep_cluster_address must only be set though, its value does not
-# matter because it's overriden by the galera resource agent.
-cat >> /etc/my.cnf.d/galera.cnf <<EOF
-[mysqld]
-wsrep_on = ON
-wsrep_cluster_address = gcomm://localhost
-EOF
-
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- # Scripts run via heat have no HOME variable set and this confuses
- # mysqladmin
- export HOME=/root
-
- mkdir /var/lib/mysql || /bin/true
- chown mysql:mysql /var/lib/mysql
- chmod 0755 /var/lib/mysql
- restorecon -R /var/lib/mysql/
- mysql_install_db --datadir=/var/lib/mysql --user=mysql
- chown -R mysql:mysql /var/lib/mysql/
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- mysqld_safe --wsrep-new-cluster &
- # We have a populated /root/.my.cnf with root/password here so
- # we need to temporarily rename it because the newly created
- # db is empty and no root password is set
- mv /root/.my.cnf /root/.my.cnf.temporary
- timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
- mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
- mv /root/.my.cnf.temporary /root/.my.cnf
- mysqladmin -u root shutdown
- # The import was successful so we may remove the folder
- rm -r "$MYSQL_BACKUP_DIR"
- fi
-fi
-
-# If we reached here without error we can safely blow away the origin
-# mysql dir from every controller
-
-# TODO: What if the upgrade fails on the bootstrap node, but not on
-# this controller. Data may be lost.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Let's reset the stonith back to true if it was true, before starting the cluster
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ -f /var/tmp/stonith-true ]; then
- pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
- fi
- rm -f /var/tmp/stonith-true
-fi
-
-# Pin messages sent to compute nodes to kilo, these will be upgraded later
-crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
-# LP: 1615035, required only for M/N upgrade.
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
-# LP: 1627450, required only for M/N upgrade
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
-
-crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
-
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
deleted file mode 100755
index a3cbd945..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-cluster_form_timeout=600
-cluster_settle_timeout=1800
-galera_sync_timeout=600
-
-if [[ -n $(is_bootstrap_node) ]]; then
- pcs cluster start --all
-
- tstart=$(date +%s)
- while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_form_timeout )) ; then
- echo_error "ERROR: timed out forming the cluster"
- exit 1
- fi
- done
-
- if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
- echo_error "ERROR: timed out waiting for cluster to finish transition"
- exit 1
- fi
-
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
- pcs resource enable $vip
- check_resource_pacemaker $vip started 60
- done
-fi
-
-start_or_enable_service galera
-check_resource galera started 600
-start_or_enable_service redis
-check_resource redis started 600
-# We need mongod which is now a systemd service up and running before calling
-# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
-# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
-# we should be good.
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm
-systemctl start mongod
-check_resource mongod started 600
-
-if [[ -n $(is_bootstrap_node) ]]; then
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo_error "ERROR galera sync timed out"
- exit 1
- fi
- done
-
- # Run all the db syncs
- # TODO: check if this can be triggered in puppet and removed from here
- ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types
- cinder-manage db sync
- glance-manage db_sync
- heat-manage --config-file /etc/heat/heat.conf db_sync
- keystone-manage db_sync
- neutron-db-manage upgrade heads
- nova-manage db sync
- nova-manage api_db sync
- nova-manage db online_data_migrations
- sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
-fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
deleted file mode 100755
index d2cb9553..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-start_or_enable_service rabbitmq
-check_resource rabbitmq started 600
-start_or_enable_service redis
-check_resource redis started 600
-start_or_enable_service openstack-cinder-volume
-check_resource openstack-cinder-volume started 600
-
-# start httpd so keystone is available for gnocchi
-# upgrade to run.
-systemctl start httpd
-
-# Swift isn't controled by pacemaker
-systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
deleted file mode 100755
index fa95f1f8..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-if [[ -n $(is_bootstrap_node) ]]; then
- # run gnocchi upgrade
- gnocchi-upgrade
-fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
deleted file mode 100755
index d569084d..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-# We need to start the systemd services we explicitely stopped at step _1.sh
-# FIXME: Should we let puppet during the convergence step do the service enabling or
-# should we add it here?
-services=$(services_to_migrate)
-if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
- services=${services%%openstack-sahara*}
-fi
-for service in $services; do
- manage_systemd_service start "${service%%-clone}"
- check_resource_systemd "${service%%-clone}" started 600
-done
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
deleted file mode 100644
index 74d3be71..00000000
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ /dev/null
@@ -1,175 +0,0 @@
-heat_template_version: ocata
-description: 'Upgrade for Pacemaker deployments'
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-
- UpgradeLevelNovaCompute:
- type: string
- description: Nova Compute upgrade level
- default: ''
- MySqlMajorUpgrade:
- type: string
- description: Can be auto,yes,no and influences if the major upgrade should do or detect an automatic mysql upgrade
- constraints:
- - allowed_values: ['auto', 'yes', 'no']
- default: 'auto'
- KeepSaharaServicesOnUpgrade:
- type: boolean
- default: true
- description: Whether to keep Sahara services when upgrading controller nodes from mitaka to newton
-
-
-resources:
- # TODO(jistr): for Mitaka->Newton upgrades and further we can use
- # map_merge with input_values instead of feeding params into scripts
- # via str_replace on bash snippets
-
- ControllerPacemakerUpgradeConfig_Step1:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - str_replace:
- template: |
- #!/bin/bash
- mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
- params:
- MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_check.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_1.sh
-
- ControllerPacemakerUpgradeDeployment_Step1:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step2:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - str_replace:
- template: |
- #!/bin/bash
- mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
- params:
- MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_check.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_2.sh
-
- ControllerPacemakerUpgradeDeployment_Step2:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step1
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step3:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_3.sh
-
- ControllerPacemakerUpgradeDeployment_Step3:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step2
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step4:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_4.sh
-
- ControllerPacemakerUpgradeDeployment_Step4:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step3
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step4}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step5:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_5.sh
-
- ControllerPacemakerUpgradeDeployment_Step5:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step4
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step6:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- keep_sahara_services_on_upgrade='KEEP_SAHARA_SERVICES_ON_UPGRADE'
- params:
- KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_6.sh
-
- ControllerPacemakerUpgradeDeployment_Step6:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step5
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step6}
- input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
deleted file mode 100644
index ae22a1e7..00000000
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/bin/bash
-
-# Special pieces of upgrade migration logic go into this
-# file. E.g. Pacemaker cluster transitions for existing deployments,
-# matching changes to overcloud_controller_pacemaker.pp (Puppet
-# handles deployment, this file handles migrations).
-#
-# This file shouldn't execute any action on its own, all logic should
-# be wrapped into bash functions. Upgrade scripts will source this
-# file and call the functions defined in this file where appropriate.
-#
-# The migration functions should be idempotent. If the migration has
-# been already applied, it should be possible to call the function
-# again without damaging the deployment or failing the upgrade.
-
-# If the major version of mysql is going to change after the major
-# upgrade, the database must be upgraded on disk to avoid failures
-# due to internal incompatibilities between major mysql versions
-# https://bugs.launchpad.net/tripleo/+bug/1587449
-# This function detects whether a database upgrade is required
-# after a mysql package upgrade. It returns 0 when no major upgrade
-# has to take place, 1 otherwise.
-function is_mysql_upgrade_needed {
- # The name of the package which provides mysql might differ
- # after the upgrade. Consider the generic package name, which
- # should capture the major version change (e.g. 5.5 -> 10.1)
- local name="mariadb"
- local output
- local ret
- set +e
- output=$(yum -q check-update $name)
- ret=$?
- set -e
- if [ $ret -ne 100 ]; then
- # no updates so we exit
- echo "0"
- return
- fi
-
- local currentepoch=$(rpm -q --qf "%{epoch}" $name)
- local currentversion=$(rpm -q --qf "%{version}" $name | cut -d. -f-2)
- local currentrelease=$(rpm -q --qf "%{release}" $name)
- local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name)
- local newepoch=$(echo "$newoutput" | awk '{ print $1 }')
- local newversion=$(echo "$newoutput" | awk '{ print $2 }' | cut -d. -f-2)
- local newrelease=$(echo "$newoutput" | awk '{ print $3 }')
-
- # With this we trigger the dump restore/path if we change either epoch or
- # version in the package If only the release tag changes we do not do it
- # FIXME: we could refine this by trying to parse the mariadb version
- # into X.Y.Z and trigger the update only if X and/or Y change.
- output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc")
- if [ "$output" != "-1" ]; then
- echo "0"
- return
- fi
- echo "1"
-}
-
-# This function returns the list of services to be migrated away from pacemaker
-# and to systemd. The reason to have these services in a separate function is because
-# this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2}
-# and in the function to migrate the cluster from full HA to HA NG
-function services_to_migrate {
- # The following PCMK resources the ones the we are going to delete
- PCMK_RESOURCE_TODELETE="
- httpd-clone
- memcached-clone
- mongod-clone
- neutron-dhcp-agent-clone
- neutron-l3-agent-clone
- neutron-metadata-agent-clone
- neutron-netns-cleanup-clone
- neutron-openvswitch-agent-clone
- neutron-ovs-cleanup-clone
- neutron-server-clone
- openstack-aodh-evaluator-clone
- openstack-aodh-listener-clone
- openstack-aodh-notifier-clone
- openstack-ceilometer-central-clone
- openstack-ceilometer-collector-clone
- openstack-ceilometer-notification-clone
- openstack-cinder-api-clone
- openstack-cinder-scheduler-clone
- openstack-glance-api-clone
- openstack-gnocchi-metricd-clone
- openstack-gnocchi-statsd-clone
- openstack-heat-api-cfn-clone
- openstack-heat-api-clone
- openstack-heat-api-cloudwatch-clone
- openstack-heat-engine-clone
- openstack-nova-api-clone
- openstack-nova-conductor-clone
- openstack-nova-consoleauth-clone
- openstack-nova-novncproxy-clone
- openstack-nova-scheduler-clone
- openstack-sahara-api-clone
- openstack-sahara-engine-clone
- "
- echo $PCMK_RESOURCE_TODELETE
-}
-
-# This function will migrate a mitaka system where all the resources are managed
-# via pacemaker to a newton setup where only a few services will be managed by pacemaker
-# On a high-level it will operate as follows:
-# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
-# during the conversion
-# 2. Remove all the colocation constraints and then the ordering constraints, except the
-# ones related to haproxy/VIPs which exist in Newton as well
-# 3. Take the cluster out of maintenance-mode
-# 4. Remove all the resources that won't be managed by pacemaker in newton. The
-# outcome will be
-# that they are stopped and removed from pacemakers control
-# 5. Do a resource cleanup to make sure the cluster is in a clean state
-function migrate_full_to_ng_ha {
- if [[ -n $(pcmk_running) ]]; then
- pcs property set maintenance-mode=true
-
- # First we go through all the colocation constraints (except the ones
- # we want to keep, i.e. the haproxy/ip ones) and we remove those
- COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
- for constraint in $COL_CONSTRAINTS; do
- log_debug "Deleting colocation constraint $constraint from CIB"
- pcs constraint remove "$constraint"
- done
-
- # Now we kill all the ordering constraints (except the haproxy/ip ones)
- ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
- for constraint in $ORD_CONSTRAINTS; do
- log_debug "Deleting ordering constraint $constraint from CIB"
- pcs constraint remove "$constraint"
- done
- # At this stage all the pacemaker resources are removed from the CIB.
- # Once we remove the maintenance-mode those systemd resources will keep
- # on running. They shall be systemd enabled via the puppet converge
- # step later on
- pcs property set maintenance-mode=false
-
- # At this stage there are no constraints whatsoever except the haproxy/ip ones
- # which we want to keep. We now disable and then delete each resource
- # that will move to systemd.
- # We want the systemd resources be stopped before doing "yum update",
- # that way "systemctl try-restart <service>" is no-op because the
- # service was down already
- PCS_STATUS_OUTPUT="$(pcs status)"
- for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
- if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
- log_debug "Deleting $resource from the CIB"
- if ! pcs resource disable "$resource" --wait=600; then
- echo_error "ERROR: resource $resource failed to be disabled"
- exit 1
- fi
- pcs resource delete --force "$resource"
- else
- log_debug "Service $resource not found as a pacemaker resource, not trying to delete."
- fi
- done
-
- # We need to do a pcs resource cleanup here + crm_resource --wait to
- # make sure the cluster is in a clean state before we stop everything,
- # upgrade and restart everything
- pcs resource cleanup
- # We are making sure here that the cluster is stable before proceeding
- if ! timeout -k 10 600 crm_resource --wait; then
- echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
- exit 1
- fi
- fi
-}
-
-function disable_standalone_ceilometer_api {
- if [[ -n $(is_bootstrap_node) ]]; then
- if [[ -n $(is_pacemaker_managed openstack-ceilometer-api) ]]; then
- # Disable pacemaker resources for ceilometer-api
- manage_pacemaker_service disable openstack-ceilometer-api
- check_resource_pacemaker openstack-ceilometer-api stopped 600
- pcs resource delete openstack-ceilometer-api --wait=600
- fi
- fi
-}
-
-
-# This function will make sure that the rabbitmq ha policies are converted from mitaka to newton
-# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}"
-# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}"
-# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances)
-# Note that changing an attribute like this makes the rabbitmq resource restart
-function rabbitmq_newton_ocata_upgrade {
- if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then
- # Number of controller is obtained by counting how many hostnames we
- # have in controller_node_names hiera key
- nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
- nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
- if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
- echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues"
- exit 1
- fi
- pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
- fi
-}
diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
deleted file mode 100644
index 45933fb7..00000000
--- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-heat_template_version: ocata
-
-description: >
- Software-config for performing aodh data migration
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-resources:
-
- AodhMysqlMigrationScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: aodh_data_migration.sh}
-
- AodhMysqlMigrationScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: AodhMysqlMigrationScriptConfig}
- input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
deleted file mode 100644
index a8d43663..00000000
--- a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This puppet manifest is to be used only during a Mitaka->Newton upgrade
-# It configures ceilometer to be run under httpd but it makes sure to not
-# restart any services. This snippet needs to be called before init as a
-# pre upgrade migration.
-
-Service <|
- tag == 'ceilometer-service'
-|> {
- hasrestart => true,
- restart => '/bin/true',
- start => '/bin/true',
- stop => '/bin/true',
-}
-
-if $::hostname == downcase(hiera('bootstrap_nodeid')) {
- $pacemaker_master = true
- $sync_db = true
-} else {
- $pacemaker_master = false
- $sync_db = false
-}
-
-include ::tripleo::packages
-
-
-if str2bool(hiera('mongodb::server::ipv6', false)) {
- $mongo_node_ips_with_port_prefixed = prefix(hiera('mongodb_node_ips'), '[')
- $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
-} else {
- $mongo_node_ips_with_port = suffix(hiera('mongodb_node_ips'), ':27017')
-}
-$mongodb_replset = hiera('mongodb::server::replset')
-$mongo_node_string = join($mongo_node_ips_with_port, ',')
-$database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
-
-$rabbit_hosts = hiera('rabbitmq_node_ips', undef)
-$rabbit_port = hiera('ceilometer::rabbit_port', 5672)
-$rabbit_endpoints = suffix(any2array(normalize_ip_for_uri($rabbit_hosts)), ":${rabbit_port}")
-
-class { '::ceilometer' :
- rabbit_hosts => $rabbit_endpoints,
-}
-
-class {'::ceilometer::db':
- database_connection => $database_connection,
-}
-
-if $sync_db {
- include ::ceilometer::db::sync
-}
-
-include ::ceilometer::config
-
-class { '::ceilometer::api':
- enabled => true,
- service_name => 'httpd',
- keystone_password => hiera('ceilometer::keystone::auth::password'),
- identity_uri => hiera('ceilometer::keystone::authtoken::auth_url'),
- auth_uri => hiera('ceilometer::keystone::authtoken::auth_uri'),
- keystone_tenant => hiera('ceilometer::keystone::authtoken::project_name'),
-}
-
-class { '::apache' :
- service_enable => false,
- service_manage => true,
- service_restart => '/bin/true',
- purge_configs => false,
- purge_vhost_dir => false,
-}
-
-# To ensure existing ports are not overridden
-class { '::aodh::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-class { '::gnocchi::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-
-class { '::keystone::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-class { '::ceilometer::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index aae4a2de..4480f74d 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -299,9 +299,10 @@ function systemctl_swift {
}
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+# Update condition and add --notriggerun for +bug/1669714
function special_case_ovs_upgrade_if_needed {
- if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
+ if rpm -qa | grep "^openvswitch-2.5.0-14" || rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart" ; then
+ echo "Manual upgrade of openvswitch - ovs-2.5.0-14 or restart in postun detected"
rm -rf OVS_UPGRADE
mkdir OVS_UPGRADE && pushd OVS_UPGRADE
echo "Attempting to downloading latest openvswitch with yumdownloader"
@@ -310,8 +311,8 @@ function special_case_ovs_upgrade_if_needed {
if rpm -U --test $pkg 2>&1 | grep "already installed" ; then
echo "Looks like newer version of $pkg is already installed, skipping"
else
- echo "Updating $pkg with nopostun option"
- rpm -U --replacepkgs --nopostun $pkg
+ echo "Updating $pkg with --nopostun --notriggerun"
+ rpm -U --replacepkgs --nopostun --notriggerun $pkg
fi
done
popd
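
The updated check treats either the known-bad openvswitch-2.5.0-14 build or a try-restart in the package's postuninstall scriptlet as a reason to take the manual upgrade path. A minimal standalone sketch of that detection, assuming only a shell with rpm available (the pinned version string is taken from the hunk above and is specific to this bug):

    #!/bin/bash
    # Detect whether the OVS upgrade workaround applies on this host.
    if rpm -qa | grep -q "^openvswitch-2.5.0-14" \
       || rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"; then
        echo "workaround needed: reinstall openvswitch with --nopostun --notriggerun"
    else
        echo "plain yum update is safe for openvswitch"
    fi
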
diff --git a/extraconfig/tasks/run_puppet.sh b/extraconfig/tasks/run_puppet.sh
index b7771e33..e3f6c493 100755
--- a/extraconfig/tasks/run_puppet.sh
+++ b/extraconfig/tasks/run_puppet.sh
@@ -10,7 +10,10 @@ function run_puppet {
export FACTER_deploy_config_name="${role}Deployment_Step${step}"
if [ -e "/etc/puppet/hieradata/heat_config_${FACTER_deploy_config_name}.json" ]; then
set +e
- puppet apply --detailed-exitcodes "${manifest}"
+ puppet apply --detailed-exitcodes \
+ --modulepath \
+ /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
+ "${manifest}"
rc=$?
echo "puppet apply exited with exit code $rc"
else
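
With the added --modulepath, puppet resolves modules from all three directories instead of relying on the default path. A sketch of the resulting invocation using the paths added above; the manifest name is illustrative only:

    puppet apply --detailed-exitcodes \
      --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
      /root/Controller_puppet_config.pp
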
diff --git a/extraconfig/tasks/ssh/host_public_key.yaml b/extraconfig/tasks/ssh/host_public_key.yaml
new file mode 100644
index 00000000..847c8772
--- /dev/null
+++ b/extraconfig/tasks/ssh/host_public_key.yaml
@@ -0,0 +1,42 @@
+heat_template_version: ocata
+
+description: >
+ This is a template which will fetch the ssh host public key.
+
+parameters:
+ server:
+ description: ID of the node to apply this config to
+ type: string
+
+resources:
+ SshHostPubKeyConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ outputs:
+ - name: rsa
+ - name: ecdsa
+ - name: ed25519
+ config: |
+ #!/bin/sh -x
+ test -e '/etc/ssh/ssh_host_rsa_key.pub' && cat /etc/ssh/ssh_host_rsa_key.pub > $heat_outputs_path.rsa
+ test -e '/etc/ssh/ssh_host_ecdsa_key.pub' && cat /etc/ssh/ssh_host_ecdsa_key.pub > $heat_outputs_path.ecdsa
+ test -e '/etc/ssh/ssh_host_ed25519_key.pub' && cat /etc/ssh/ssh_host_ed25519_key.pub > $heat_outputs_path.ed25519
+
+ SshHostPubKeyDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ config: {get_resource: SshHostPubKeyConfig}
+ server: {get_param: server}
+
+
+outputs:
+ ecdsa:
+ description: Host ssh public key (ecdsa)
+ value: {get_attr: [SshHostPubKeyDeployment, ecdsa]}
+ rsa:
+ description: Host ssh public key (rsa)
+ value: {get_attr: [SshHostPubKeyDeployment, rsa]}
+ ed25519:
+ description: Host ssh public key (ed25519)
+ value: {get_attr: [SshHostPubKeyDeployment, ed25519]}
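
The embedded script simply copies whichever host key files exist into the deployment's outputs. A hedged way to cross-check the collected ecdsa key from another machine, assuming network reachability (the node address is a placeholder):

    ssh-keyscan -t ecdsa <node-ip> 2>/dev/null      # key as seen over the network
    cat /etc/ssh/ssh_host_ecdsa_key.pub             # key on the node itself; the two should match
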
diff --git a/extraconfig/tasks/ssh/known_hosts_config.yaml b/extraconfig/tasks/ssh/known_hosts_config.yaml
new file mode 100644
index 00000000..2ebcb63c
--- /dev/null
+++ b/extraconfig/tasks/ssh/known_hosts_config.yaml
@@ -0,0 +1,36 @@
+heat_template_version: ocata
+description: 'SSH Known Hosts Config'
+
+parameters:
+ known_hosts:
+ type: string
+
+resources:
+
+ SSHKnownHostsConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: known_hosts
+ default: {get_param: known_hosts}
+ config: |
+ #!/bin/bash
+ set -eux
+ set -o pipefail
+
+ echo "Creating ssh known hosts file"
+
+ if [ ! -z "${known_hosts}" ]; then
+ echo "${known_hosts}"
+ echo -ne "${known_hosts}" > /etc/ssh/ssh_known_hosts
+ chmod 0644 /etc/ssh/ssh_known_hosts
+ else
+ rm -f /etc/ssh/ssh_known_hosts
+ echo "No ssh known hosts"
+ fi
+
+outputs:
+ OS::stack_id:
+ description: The SSHKnownHostsConfig resource.
+    value: {get_resource: SSHKnownHostsConfig}
\ No newline at end of file
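
Once the aggregated string is written, every node shares one system-wide /etc/ssh/ssh_known_hosts. A quick spot check, assuming a typical node name (the hostname here is illustrative):

    ssh-keygen -F overcloud-controller-0 -f /etc/ssh/ssh_known_hosts
    # non-empty output means host-key verification succeeds without prompting
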
diff --git a/extraconfig/tasks/swift-ring-deploy.yaml b/extraconfig/tasks/swift-ring-deploy.yaml
deleted file mode 100644
index d17f78ae..00000000
--- a/extraconfig/tasks/swift-ring-deploy.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-heat_template_version: ocata
-
-parameters:
- servers:
- type: json
- SwiftRingGetTempurl:
- default: ''
- description: A temporary Swift URL to download rings from.
- type: string
-
-resources:
- SwiftRingDeployConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- inputs:
- - name: swift_ring_get_tempurl
- config: |
- #!/bin/sh
- pushd /
- curl --insecure --silent "${swift_ring_get_tempurl}" | tar xz || true
- popd
-
- SwiftRingDeploy:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: SwiftRingDeploy
- config: {get_resource: SwiftRingDeployConfig}
- servers: {get_param: servers}
- input_values:
- swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl}
diff --git a/extraconfig/tasks/swift-ring-update.yaml b/extraconfig/tasks/swift-ring-update.yaml
deleted file mode 100644
index 440c6883..00000000
--- a/extraconfig/tasks/swift-ring-update.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-heat_template_version: ocata
-
-parameters:
- servers:
- type: json
- SwiftRingPutTempurl:
- default: ''
- description: A temporary Swift URL to upload rings to.
- type: string
-
-resources:
- SwiftRingUpdateConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- inputs:
- - name: swift_ring_put_tempurl
- config: |
- #!/bin/sh
- TMP_DATA=$(mktemp -d)
- function cleanup {
- rm -Rf "$TMP_DATA"
- }
- trap cleanup EXIT
- # sanity check in case rings are not consistent within cluster
- swift-recon --md5 | grep -q "doesn't match" && exit 1
- pushd ${TMP_DATA}
- tar -cvzf swift-rings.tar.gz /etc/swift/*.builder /etc/swift/*.ring.gz /etc/swift/backups/*
- resp=`curl --insecure --silent -X PUT "${swift_ring_put_tempurl}" --write-out "%{http_code}" --data-binary @swift-rings.tar.gz`
- popd
- if [ "$resp" != "201" ]; then
- exit 1
- fi
-
- SwiftRingUpdate:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: SwiftRingUpdate
- config: {get_resource: SwiftRingUpdateConfig}
- servers: {get_param: servers}
- input_values:
- swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl}
diff --git a/extraconfig/tasks/tripleo_upgrade_node.sh b/extraconfig/tasks/tripleo_upgrade_node.sh
index c2565410..a5a312dc 100644
--- a/extraconfig/tasks/tripleo_upgrade_node.sh
+++ b/extraconfig/tasks/tripleo_upgrade_node.sh
@@ -28,37 +28,43 @@ SCRIPT_NAME=$(basename $0)
$(declare -f log_debug)
$(declare -f manage_systemd_service)
$(declare -f systemctl_swift)
+$(declare -f special_case_ovs_upgrade_if_needed)
# pin nova messaging +-1 for the nova-compute service
if [[ -n \$NOVA_COMPUTE ]]; then
crudini --set /etc/nova/nova.conf upgrade_levels compute auto
fi
-$(declare -f special_case_ovs_upgrade_if_needed)
special_case_ovs_upgrade_if_needed
-yum -y install python-zaqarclient # needed for os-collect-config
if [[ -n \$SWIFT_STORAGE ]]; then
systemctl_swift stop
fi
+
yum -y update
+
if [[ -n \$SWIFT_STORAGE ]]; then
systemctl_swift start
fi
# Due to bug#1640177 we need to restart compute agent
if [[ -n \$NOVA_COMPUTE ]]; then
- echo "Restarting openstack ceilometer agent compute"
+ log_debug "Restarting openstack ceilometer agent compute"
systemctl restart openstack-ceilometer-compute
fi
# Apply puppet manifest to converge just right after the ${ROLE} upgrade
$(declare -f run_puppet)
for step in 1 2 3 4 5 6; do
+ log_debug "Running puppet step \$step for ${ROLE}"
if ! run_puppet /root/${ROLE}_puppet_config.pp ${ROLE} \${step}; then
- echo "Puppet failure at step \${step}"
+ log_debug "Puppet failure at step \${step}"
exit 1
fi
+ log_debug "Completed puppet step \$step"
done
+
+log_debug "TripleO upgrade run completed."
+
ENDOFCAT
# ensure the permissions are OK
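
For compute nodes the generated script pins nova RPC compatibility before packages are updated. A small verification sketch, assuming crudini is installed and the stock nova.conf path used above:

    crudini --set /etc/nova/nova.conf upgrade_levels compute auto
    crudini --get /etc/nova/nova.conf upgrade_levels compute   # prints "auto" once the pin is in place
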
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index 6bf415b2..20a5b658 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -40,9 +40,17 @@ touch "$timestamp_file"
command_arguments=${command_arguments:-}
-list_updates=$(yum list updates)
-
-if [[ "$list_updates" == "" ]]; then
+# yum check-update exits 100 if updates are available
+set +e
+check_update=$(yum check-update 2>&1)
+check_update_exit=$?
+set -e
+
+if [[ "$check_update_exit" == "1" ]]; then
+ echo "Failed to check for package updates"
+ echo "$check_update"
+ exit 1
+elif [[ "$check_update_exit" != "100" ]]; then
echo "No packages require updating"
exit 0
fi
@@ -70,7 +78,7 @@ if [[ "$pacemaker_status" == "active" && \
fi
fi
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+# special case https://bugs.launchpad.net/tripleo/+bug/1635205 +bug/1669714
special_case_ovs_upgrade_if_needed
if [[ "$pacemaker_status" == "active" ]] ; then
@@ -100,17 +108,6 @@ return_code=$?
echo "$result"
echo "yum return code: $return_code"
-# Writes any changes caused by alterations to os-net-config and bounces the
-# interfaces *before* restarting the cluster.
-os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
-RETVAL=$?
-if [[ $RETVAL == 2 ]]; then
- echo "os-net-config: interface configuration files updated successfully"
-elif [[ $RETVAL != 0 ]]; then
- echo "ERROR: os-net-config configuration failed"
- exit $RETVAL
-fi
-
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Starting cluster node"
pcs cluster start
@@ -127,15 +124,19 @@ if [[ "$pacemaker_status" == "active" ]] ; then
fi
done
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo "ERROR galera sync timed out"
- exit 1
- fi
- done
+ RETVAL=$( pcs resource show galera-master | grep wsrep_cluster_address | grep -q `crm_node -n` ; echo $? )
+
+ if [[ $RETVAL -eq 0 && -e /etc/sysconfig/clustercheck ]]; then
+ tstart=$(date +%s)
+ while ! clustercheck; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > galera_sync_timeout )) ; then
+ echo "ERROR galera sync timed out"
+ exit 1
+ fi
+ done
+ fi
echo "Waiting for pacemaker cluster to settle"
if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
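
The switch from yum list updates to yum check-update relies on the latter's exit codes: 100 means updates are available, 0 means nothing to do, and other codes (commonly 1) indicate an error. A standalone sketch of that handling:

    #!/bin/bash
    set +e
    check_update=$(yum check-update 2>&1)
    check_update_exit=$?
    set -e
    case "$check_update_exit" in
      100) echo "updates available, proceeding" ;;
      0)   echo "no packages require updating"; exit 0 ;;
      *)   echo "failed to check for package updates"; echo "$check_update"; exit 1 ;;
    esac
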
diff --git a/hosts-config.yaml b/hosts-config.yaml
index 5a211716..c02c4208 100644
--- a/hosts-config.yaml
+++ b/hosts-config.yaml
@@ -31,7 +31,7 @@ outputs:
The content that should be appended to your /etc/hosts if you want to get
hostname-based access to the deployed nodes (useful for testing without
setting up a DNS).
- value: {get_attr: [hostsConfigImpl, config, hosts]}
+ value: {get_param: hosts}
OS::stack_id:
description: The ID of the hostsConfigImpl resource.
value: {get_resource: hostsConfigImpl}
diff --git a/net-config-linux-bridge.yaml b/net-config-linux-bridge.yaml
index 04664818..a544d547 100644
--- a/net-config-linux-bridge.yaml
+++ b/net-config-linux-bridge.yaml
@@ -33,7 +33,7 @@ parameters:
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
- default: 192.0.2.1
+ default: 192.168.24.1
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
index 277bd676..c92ce377 100644
--- a/network/endpoints/endpoint_data.yaml
+++ b/network/endpoints/endpoint_data.yaml
@@ -225,7 +225,6 @@ Keystone:
net_param: KeystonePublicApi
uri_suffixes:
'': /v2.0
- EC2: /v2.0/ec2tokens
V3: /v3
names:
EC2: KeystoneEC2
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index fecac0af..b4fcbb17 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -6012,88 +6012,6 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, KeystoneAdmin, port]
- KeystoneEC2:
- host:
- str_replace:
- template:
- get_param: [EndpointMap, KeystoneInternal, host]
- params:
- CLOUDNAME:
- get_param:
- - CloudEndpoints
- - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- IP_ADDRESS:
- get_param:
- - NetIpMap
- - str_replace:
- params:
- NETWORK:
- get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- template: NETWORK_uri
- host_nobrackets:
- str_replace:
- template:
- get_param: [EndpointMap, KeystoneInternal, host]
- params:
- CLOUDNAME:
- get_param:
- - CloudEndpoints
- - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- IP_ADDRESS:
- get_param:
- - NetIpMap
- - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- port:
- get_param: [EndpointMap, KeystoneInternal, port]
- protocol:
- get_param: [EndpointMap, KeystoneInternal, protocol]
- uri:
- list_join:
- - ''
- - - get_param: [EndpointMap, KeystoneInternal, protocol]
- - ://
- - str_replace:
- template:
- get_param: [EndpointMap, KeystoneInternal, host]
- params:
- CLOUDNAME:
- get_param:
- - CloudEndpoints
- - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- IP_ADDRESS:
- get_param:
- - NetIpMap
- - str_replace:
- params:
- NETWORK:
- get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- template: NETWORK_uri
- - ':'
- - get_param: [EndpointMap, KeystoneInternal, port]
- - /v2.0/ec2tokens
- uri_no_suffix:
- list_join:
- - ''
- - - get_param: [EndpointMap, KeystoneInternal, protocol]
- - ://
- - str_replace:
- template:
- get_param: [EndpointMap, KeystoneInternal, host]
- params:
- CLOUDNAME:
- get_param:
- - CloudEndpoints
- - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- IP_ADDRESS:
- get_param:
- - NetIpMap
- - str_replace:
- params:
- NETWORK:
- get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- template: NETWORK_uri
- - ':'
- - get_param: [EndpointMap, KeystoneInternal, port]
KeystoneInternal:
host:
str_replace:
diff --git a/network/service_net_map.j2.yaml b/network/service_net_map.j2.yaml
index a1042ebb..7fb9420c 100644
--- a/network/service_net_map.j2.yaml
+++ b/network/service_net_map.j2.yaml
@@ -67,6 +67,7 @@ parameters:
HorizonNetwork: internal_api
MemcachedNetwork: internal_api
RabbitmqNetwork: internal_api
+ QdrNetwork: internal_api
RedisNetwork: internal_api
MysqlNetwork: internal_api
CephClusterNetwork: storage_mgmt
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 2834f907..04e33442 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -5,15 +5,14 @@ resource_registry:
OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
+ OS::TripleO::Ssh::HostPubKey: extraconfig/tasks/ssh/host_public_key.yaml
+ OS::TripleO::Ssh::KnownHostsConfig: extraconfig/tasks/ssh/known_hosts_config.yaml
OS::TripleO::DefaultPasswords: default_passwords.yaml
# Tasks (for internal TripleO usage)
OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
- OS::TripleO::Tasks::SwiftRingDeploy: extraconfig/tasks/swift-ring-deploy.yaml
- OS::TripleO::Tasks::SwiftRingUpdate: extraconfig/tasks/swift-ring-update.yaml
-
{% for role in roles %}
OS::TripleO::{{role.name}}::PreNetworkConfig: OS::Heat::None
OS::TripleO::{{role.name}}PostDeploySteps: puppet/post.yaml
@@ -43,6 +42,9 @@ resource_registry:
OS::TripleO::ServiceServerMetadataHook: OS::Heat::None
OS::TripleO::Server: OS::Nova::Server
+{% for role in roles %}
+ OS::TripleO::{{role.name}}Server: OS::TripleO::Server
+{% endfor %}
# This creates the "heat-admin" user for all OS images by default
# To disable, replace with firstboot/userdata_default.yaml
@@ -111,7 +113,6 @@ resource_registry:
# services
OS::TripleO::Services: puppet/services/services.yaml
OS::TripleO::Services::Apache: puppet/services/apache.yaml
- OS::TripleO::Services::ApacheTLS: OS::Heat::None
OS::TripleO::Services::CACerts: puppet/services/ca-certs.yaml
OS::TripleO::Services::CephMds: OS::Heat::None
OS::TripleO::Services::CephMon: OS::Heat::None
@@ -135,8 +136,9 @@ resource_registry:
OS::TripleO::Services::HeatEngine: puppet/services/heat-engine.yaml
OS::TripleO::Services::Kernel: puppet/services/kernel.yaml
OS::TripleO::Services::MySQL: puppet/services/database/mysql.yaml
- OS::TripleO::Services::MySQLTLS: OS::Heat::None
+ OS::TripleO::Services::NeutronBgpVpnApi: OS::Heat::None
OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml
+ OS::TripleO::Services::NeutronL2gwApi: OS::Heat::None
OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml
OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml
# FIXME(shardy) the duplicate NeutronServer line can be removed when we've updated
@@ -160,6 +162,7 @@ resource_registry:
OS::TripleO::Services::PacemakerRemote: OS::Heat::None
OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None
OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml
+ OS::TripleO::Services::Qdr: OS::Heat::None
OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml
OS::TripleO::Services::HAProxyPublicTLS: OS::Heat::None
OS::TripleO::Services::HAProxyInternalTLS: OS::Heat::None
@@ -168,6 +171,7 @@ resource_registry:
OS::TripleO::Services::SaharaApi: OS::Heat::None
OS::TripleO::Services::SaharaEngine: OS::Heat::None
OS::TripleO::Services::Sshd: OS::Heat::None
+ OS::TripleO::Services::Securetty: OS::Heat::None
OS::TripleO::Services::Redis: puppet/services/database/redis.yaml
OS::TripleO::Services::NovaConductor: puppet/services/nova-conductor.yaml
OS::TripleO::Services::MongoDb: puppet/services/database/mongodb.yaml
@@ -181,6 +185,7 @@ resource_registry:
OS::TripleO::Services::NovaLibvirt: puppet/services/nova-libvirt.yaml
OS::TripleO::Services::Ntp: puppet/services/time/ntp.yaml
OS::TripleO::Services::SwiftProxy: puppet/services/swift-proxy.yaml
+ OS::TripleO::Services::ExternalSwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: puppet/services/swift-storage.yaml
OS::TripleO::Services::SwiftRingBuilder: puppet/services/swift-ringbuilder.yaml
OS::TripleO::Services::Snmp: puppet/services/snmp.yaml
@@ -192,6 +197,7 @@ resource_registry:
OS::TripleO::Services::CeilometerAgentCentral: puppet/services/ceilometer-agent-central.yaml
OS::TripleO::Services::CeilometerAgentNotification: puppet/services/ceilometer-agent-notification.yaml
OS::TripleO::Services::ComputeCeilometerAgent: puppet/services/ceilometer-agent-compute.yaml
+ OS::TripleO::Services::CeilometerAgentIpmi: puppet/services/ceilometer-agent-ipmi.yaml
OS::TripleO::Services::Horizon: puppet/services/horizon.yaml
#Gnocchi services
OS::TripleO::Services::GnocchiApi: puppet/services/gnocchi-api.yaml
@@ -230,6 +236,10 @@ resource_registry:
OS::TripleO::Services::Zaqar: OS::Heat::None
OS::TripleO::Services::NeutronML2FujitsuCfab: OS::Heat::None
OS::TripleO::Services::NeutronML2FujitsuFossw: OS::Heat::None
+ OS::TripleO::Services::CinderBackendDellPs: OS::Heat::None
+ OS::TripleO::Services::CinderBackendDellSc: OS::Heat::None
+ OS::TripleO::Services::CinderBackendNetApp: OS::Heat::None
+ OS::TripleO::Services::CinderBackendScaleIO: OS::Heat::None
OS::TripleO::Services::CinderHPELeftHandISCSI: OS::Heat::None
OS::TripleO::Services::Etcd: OS::Heat::None
OS::TripleO::Services::Ec2Api: OS::Heat::None
@@ -240,6 +250,8 @@ resource_registry:
OS::TripleO::Services::OctaviaWorker: OS::Heat::None
OS::TripleO::Services::MySQLClient: puppet/services/database/mysql-client.yaml
OS::TripleO::Services::Vpp: OS::Heat::None
+ OS::TripleO::Services::Docker: OS::Heat::None
+ OS::TripleO::Services::CertmongerUser: OS::Heat::None
parameter_defaults:
EnablePackageInstall: false
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index e99f770f..54092fa2 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -1,4 +1,12 @@
-{% set primary_role_name = roles[0].name -%}
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+ {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+ {%- set _ = primary_role.pop() -%}
+ {%- set _ = primary_role.append(role) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# primary role is: {{primary_role_name}}
heat_template_version: ocata
description: >
@@ -43,7 +51,9 @@ parameters:
type: string
ControlFixedIPs:
default: []
- description: Should be used for arbitrary ips.
+ description: >
+ Control the IP allocation for the ControlVirtualIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
type: json
InternalApiVirtualFixedIPs:
default: []
@@ -114,6 +124,11 @@ parameters:
description: What interface to add to the HypervisorNeutronPhysicalBridge.
type: string
+ NodeCreateBatchSize:
+ default: 30
+    description: Maximum batch size for creating nodes
+ type: number
+
# Jinja loop for Role in role_data.yaml
{% for role in roles %}
# Parameters generated for {{role.name}} Role
@@ -249,6 +264,16 @@ resources:
type: json
value: {get_attr: [EndpointMap, endpoint_map]}
+ SshKnownHostsConfig:
+ type: OS::TripleO::Ssh::KnownHostsConfig
+ properties:
+ known_hosts:
+ list_join:
+ - ''
+ {% for role in roles %}
+ - {get_attr: [{{role.name}}, known_hosts_entry]}
+ {% endfor %}
+
# Jinja loop for Role in roles_data.yaml
{% for role in roles %}
# Resources generated for {{role.name}} Role
@@ -280,6 +305,13 @@ resources:
config: {get_attr: [hostsConfig, config_id]}
servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ {{role.name}}SshKnownHostsDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ name: {{role.name}}SshKnownHostsDeployment
+ config: {get_resource: SshKnownHostsConfig}
+ servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+
{{role.name}}AllNodesDeployment:
type: OS::Heat::StructuredDeployments
depends_on:
@@ -339,6 +371,9 @@ resources:
{{role.name}}:
type: OS::Heat::ResourceGroup
depends_on: Networks
+ update_policy:
+ batch_create:
+ max_batch_size: {get_param: NodeCreateBatchSize}
properties:
count: {get_param: {{role.name}}Count}
removal_policies: {get_param: {{role.name}}RemovalPolicies}
@@ -398,7 +433,7 @@ resources:
-
{% for role in roles %}
- list_join:
- - "\n"
+ - ""
- {get_attr: [{{role.name}}, hosts_entry]}
{% endfor %}
@@ -579,12 +614,24 @@ resources:
PingTestIps:
list_join:
- ' '
- - - {get_attr: [{{primary_role_name}}, resource.0.external_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.internal_api_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.storage_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.storage_mgmt_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.tenant_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.management_ip_address]}
+ - - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, external_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, internal_api_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, storage_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, storage_mgmt_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, tenant_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, management_ip_address]}
UpdateWorkflow:
type: OS::TripleO::Tasks::UpdateWorkflow
@@ -672,3 +719,9 @@ outputs:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
{% endfor %}
+ RoleNetIpMap:
+ description: Mapping of each network to a list of IPs for each role
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}IpListMap, net_ip_map]}
+{% endfor %}
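
The new NodeCreateBatchSize parameter caps how many servers each role's ResourceGroup creates per batch. One way to override it, assuming the usual parameter_defaults mechanism and deploy command (the file name is arbitrary):

    cat > node-batch.yaml <<'EOF'
    parameter_defaults:
      NodeCreateBatchSize: 10
    EOF
    openstack overcloud deploy --templates -e node-batch.yaml
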
diff --git a/plan-environment.yaml b/plan-environment.yaml
index f629eff3..1f9c8211 100644
--- a/plan-environment.yaml
+++ b/plan-environment.yaml
@@ -1,5 +1,8 @@
-version: 1.0
-
-template: overcloud.yaml
-environments:
-- path: overcloud-resource-registry-puppet.yaml
+version: 1.0
+
+name: overcloud
+description: >
+ Default Deployment plan
+template: overcloud.yaml
+environments:
+ - path: overcloud-resource-registry-puppet.yaml
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index 51f9abac..b9e5c6fe 100644
--- a/puppet/blockstorage-role.yaml
+++ b/puppet/blockstorage-role.yaml
@@ -126,7 +126,7 @@ parameters:
resources:
BlockStorage:
- type: OS::TripleO::Server
+ type: OS::TripleO::BlockStorageServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
@@ -457,6 +457,12 @@ resources:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: BlockStorageDeployment
+ properties:
+ server: {get_resource: BlockStorage}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
@@ -504,6 +510,37 @@ outputs:
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [BlockStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the block storage server
value:
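
Each role now publishes one known_hosts line listing every IP address and hostname alias of the server followed by its ecdsa host key; the per-role entries are concatenated by the SshKnownHostsConfig resource above. Once written out, the collected keys can be fingerprinted in one pass (path taken from the known-hosts script):

    ssh-keygen -lf /etc/ssh/ssh_known_hosts
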
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index d7d7f478..075f42ba 100644
--- a/puppet/cephstorage-role.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -132,7 +132,7 @@ parameters:
resources:
CephStorage:
- type: OS::TripleO::Server
+ type: OS::TripleO::CephStorageServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
@@ -468,6 +468,12 @@ resources:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: CephStorageDeployment
+ properties:
+ server: {get_resource: CephStorage}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
@@ -515,6 +521,37 @@ outputs:
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [CephStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the ceph storage server
value:
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index ebdd762d..351b3823 100644
--- a/puppet/compute-role.yaml
+++ b/puppet/compute-role.yaml
@@ -145,7 +145,7 @@ parameters:
resources:
NovaCompute:
- type: OS::TripleO::Server
+ type: OS::TripleO::ComputeServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
@@ -492,6 +492,12 @@ resources:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: NovaComputeDeployment
+ properties:
+ server: {get_resource: NovaCompute}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
@@ -559,7 +565,38 @@ outputs:
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [NovaCompute, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
- {get_resource: NovaCompute}
+      {get_resource: NovaCompute}
\ No newline at end of file
diff --git a/puppet/config.role.j2.yaml b/puppet/config.role.j2.yaml
index 7337d062..cdbc76f0 100644
--- a/puppet/config.role.j2.yaml
+++ b/puppet/config.role.j2.yaml
@@ -38,7 +38,7 @@ resources:
- ''
- list_join:
- ','
- - ['file,concat,file_line', {get_param: PuppetTags}]
+ - ['file,concat,file_line,augeas', {get_param: PuppetTags}]
outputs:
- name: result
inputs:
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index 2f4f583c..92eb70ad 100644
--- a/puppet/controller-role.yaml
+++ b/puppet/controller-role.yaml
@@ -165,7 +165,7 @@ parameter_groups:
resources:
Controller:
- type: OS::TripleO::Server
+ type: OS::TripleO::ControllerServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
@@ -467,7 +467,6 @@ resources:
- all_nodes # provided by allNodesConfig
- vip_data # provided by allNodesConfig
- '"%{::osfamily}"'
- - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
- neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
- neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
- cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
@@ -532,6 +531,12 @@ resources:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: ControllerDeployment
+ properties:
+ server: {get_resource: Controller}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
@@ -599,6 +604,37 @@ outputs:
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [Controller, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
index 3daf3fd3..b6d1239a 100644
--- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
@@ -53,41 +53,40 @@ resources:
NetworkMidoNetConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- midonet_data:
- mapped_data:
- enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController}
- enable_cassandra_on_controller: {get_param: EnableCassandraOnController}
- midonet_tunnelzone_name: {get_param: TunnelZoneName}
- midonet_tunnelzone_type: {get_param: TunnelZoneType}
- midonet_libvirt_qemu_data: |
- user = "root"
- group = "root"
- cgroup_device_acl = [
- "/dev/null", "/dev/full", "/dev/zero",
- "/dev/random", "/dev/urandom",
- "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
- "/dev/rtc","/dev/hpet", "/dev/vfio/vfio",
- "/dev/net/tun"
- ]
- tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort}
- tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort}
- tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort}
- tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift}
- tripleo::haproxy::midonet_api: true
- # Missed Neutron Puppet data
- neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver'
- neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver'
- neutron::plugins::midonet::midonet_api_port: 8081
- neutron::params::midonet_server_package: 'python-networking-midonet'
+ datafiles:
+ midonet_data:
+ mapped_data:
+ enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController}
+ enable_cassandra_on_controller: {get_param: EnableCassandraOnController}
+ midonet_tunnelzone_name: {get_param: TunnelZoneName}
+ midonet_tunnelzone_type: {get_param: TunnelZoneType}
+ midonet_libvirt_qemu_data: |
+ user = "root"
+ group = "root"
+ cgroup_device_acl = [
+ "/dev/null", "/dev/full", "/dev/zero",
+ "/dev/random", "/dev/urandom",
+ "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+ "/dev/rtc","/dev/hpet", "/dev/vfio/vfio",
+ "/dev/net/tun"
+ ]
+ tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort}
+ tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort}
+ tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort}
+ tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift}
+ tripleo::haproxy::midonet_api: true
+ # Missed Neutron Puppet data
+ neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver'
+ neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver'
+ neutron::plugins::midonet::midonet_api_port: 8081
+ neutron::params::midonet_server_package: 'python-networking-midonet'
- # Make sure the l3 agent does not run
- l3_agent_service: false
- neutron::agents::l3::manage_service: false
- neutron::agents::l3::enabled: false
+ # Make sure the l3 agent does not run
+ l3_agent_service: false
+ neutron::agents::l3::manage_service: false
+ neutron::agents::l3::enabled: false
NetworkMidonetDeploymentControllers:
diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
index 9b900bc4..b05fa636 100644
--- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
@@ -101,31 +101,30 @@ resources:
NetworkCiscoConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- neutron_cisco_data:
- mapped_data:
- neutron::plugins::ml2::cisco::ucsm::ucsm_ip: {get_input: UCSM_ip}
- neutron::plugins::ml2::cisco::ucsm::ucsm_username: {get_input: UCSM_username}
- neutron::plugins::ml2::cisco::ucsm::ucsm_password: {get_input: UCSM_password}
- neutron::plugins::ml2::cisco::ucsm::ucsm_host_list: {get_input: UCSM_host_list}
- neutron::plugins::ml2::cisco::ucsm::supported_pci_devs: {get_input: UCSMSupportedPciDevs}
- neutron::plugins::ml2::cisco::nexus::nexus_config: {get_input: NexusConfig}
- neutron::plugins::ml2::cisco::nexus::managed_physical_network: {get_input: NexusManagedPhysicalNetwork}
- neutron::plugins::ml2::cisco::nexus::vlan_name_prefix: {get_input: NexusVlanNamePrefix}
- neutron::plugins::ml2::cisco::nexus::svi_round_robin: {get_input: NexusSviRoundRobin}
- neutron::plugins::ml2::cisco::nexus::provider_vlan_name_prefix: {get_input: NexusProviderVlanNamePrefix}
- neutron::plugins::ml2::cisco::nexus::persistent_switch_config: {get_input: NexusPersistentSwitchConfig}
- neutron::plugins::ml2::cisco::nexus::switch_heartbeat_time: {get_input: NexusSwitchHeartbeatTime}
- neutron::plugins::ml2::cisco::nexus::switch_replay_count: {get_input: NexusSwitchReplayCount}
- neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_create: {get_input: NexusProviderVlanAutoCreate}
- neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_trunk: {get_input: NexusProviderVlanAutoTrunk}
- neutron::plugins::ml2::cisco::nexus::vxlan_global_config: {get_input: NexusVxlanGlobalConfig}
- neutron::plugins::ml2::cisco::nexus::host_key_checks: {get_input: NexusHostKeyChecks}
- neutron::plugins::ml2::cisco::type_nexus_vxlan::vni_ranges: {get_input: NexusVxlanVniRanges}
- neutron::plugins::ml2::cisco::type_nexus_vxlan::mcast_ranges: {get_input: NexusVxlanMcastRanges}
+ datafiles:
+ neutron_cisco_data:
+ mapped_data:
+ neutron::plugins::ml2::cisco::ucsm::ucsm_ip: {get_input: UCSM_ip}
+ neutron::plugins::ml2::cisco::ucsm::ucsm_username: {get_input: UCSM_username}
+ neutron::plugins::ml2::cisco::ucsm::ucsm_password: {get_input: UCSM_password}
+ neutron::plugins::ml2::cisco::ucsm::ucsm_host_list: {get_input: UCSM_host_list}
+ neutron::plugins::ml2::cisco::ucsm::supported_pci_devs: {get_input: UCSMSupportedPciDevs}
+ neutron::plugins::ml2::cisco::nexus::nexus_config: {get_input: NexusConfig}
+ neutron::plugins::ml2::cisco::nexus::managed_physical_network: {get_input: NexusManagedPhysicalNetwork}
+ neutron::plugins::ml2::cisco::nexus::vlan_name_prefix: {get_input: NexusVlanNamePrefix}
+ neutron::plugins::ml2::cisco::nexus::svi_round_robin: {get_input: NexusSviRoundRobin}
+ neutron::plugins::ml2::cisco::nexus::provider_vlan_name_prefix: {get_input: NexusProviderVlanNamePrefix}
+ neutron::plugins::ml2::cisco::nexus::persistent_switch_config: {get_input: NexusPersistentSwitchConfig}
+ neutron::plugins::ml2::cisco::nexus::switch_heartbeat_time: {get_input: NexusSwitchHeartbeatTime}
+ neutron::plugins::ml2::cisco::nexus::switch_replay_count: {get_input: NexusSwitchReplayCount}
+ neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_create: {get_input: NexusProviderVlanAutoCreate}
+ neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_trunk: {get_input: NexusProviderVlanAutoTrunk}
+ neutron::plugins::ml2::cisco::nexus::vxlan_global_config: {get_input: NexusVxlanGlobalConfig}
+ neutron::plugins::ml2::cisco::nexus::host_key_checks: {get_input: NexusHostKeyChecks}
+ neutron::plugins::ml2::cisco::type_nexus_vxlan::vni_ranges: {get_input: NexusVxlanVniRanges}
+ neutron::plugins::ml2::cisco::type_nexus_vxlan::mcast_ranges: {get_input: NexusVxlanMcastRanges}
NetworkCiscoDeployment:
type: OS::Heat::StructuredDeployments
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
index 7fe2a842..e3f4cce6 100644
--- a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
@@ -20,14 +20,22 @@ resources:
NeutronBigswitchConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- neutron_bigswitch_data:
- mapped_data:
- neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
- neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+ datafiles:
+ neutron_bigswitch_data:
+ mapped_data:
+ neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+ neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+ # NOTE(aschultz): required for the puppet module but we don't
+ # actually want them defined on the compute nodes so we're
+ # relying on the puppet module's handling of <SERVICE DEFAULT>
+ # to just not set these but still accept that they were defined.
+              # This should be fixed in puppet-neutron and removed here,
+ # but for backportability, we need to define something.
+ neutron::plugins::ml2::bigswitch::restproxy::servers: '<SERVICE DEFAULT>'
+ neutron::plugins::ml2::bigswitch::restproxy::server_auth: '<SERVICE DEFAULT>'
+
NeutronBigswitchDeployment:
type: OS::Heat::StructuredDeployment
diff --git a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
index 47c782c7..1d16e909 100644
--- a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
@@ -50,22 +50,21 @@ resources:
NovaNuageConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- nova_nuage_data:
- mapped_data:
- nuage::vrs::active_controller: {get_input: ActiveController}
- nuage::vrs::standby_controller: {get_input: StandbyController}
- nuage::metadataagent::metadata_port: {get_input: MetadataPort}
- nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort}
- nuage::metadataagent::metadata_secret: {get_input: SharedSecret}
- nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion}
- nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername}
- nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs}
- nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType}
- nuage::metadataagent::nova_region_name: {get_input: NovaRegionName}
+ datafiles:
+ nova_nuage_data:
+ mapped_data:
+ nuage::vrs::active_controller: {get_input: ActiveController}
+ nuage::vrs::standby_controller: {get_input: StandbyController}
+ nuage::metadataagent::metadata_port: {get_input: MetadataPort}
+ nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort}
+ nuage::metadataagent::metadata_secret: {get_input: SharedSecret}
+ nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion}
+ nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername}
+ nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs}
+ nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType}
+ nuage::metadataagent::nova_region_name: {get_input: NovaRegionName}
NovaNuageDeployment:
type: OS::Heat::StructuredDeployment
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
deleted file mode 100644
index 763ae39a..00000000
--- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
+++ /dev/null
@@ -1,158 +0,0 @@
-heat_template_version: ocata
-
-description: Configure hieradata for Cinder Netapp configuration
-
-parameters:
- server:
- description: ID of the controller node to apply this config to
- type: string
-
- # Config specific parameters, to be provided via parameter_defaults
- CinderEnableNetappBackend:
- type: boolean
- default: true
- CinderNetappBackendName:
- type: string
- default: 'tripleo_netapp'
- CinderNetappLogin:
- type: string
- CinderNetappPassword:
- type: string
- hidden: true
- CinderNetappServerHostname:
- type: string
- CinderNetappServerPort:
- type: string
- default: '80'
- CinderNetappSizeMultiplier:
- type: string
- default: '1.2'
- CinderNetappStorageFamily:
- type: string
- default: 'ontap_cluster'
- CinderNetappStorageProtocol:
- type: string
- default: 'nfs'
- CinderNetappTransportType:
- type: string
- default: 'http'
- CinderNetappVfiler:
- type: string
- default: ''
- CinderNetappVolumeList:
- type: string
- default: ''
- CinderNetappVserver:
- type: string
- default: ''
- CinderNetappPartnerBackendName:
- type: string
- default: ''
- CinderNetappNfsShares:
- type: string
- default: ''
- CinderNetappNfsSharesConfig:
- type: string
- default: '/etc/cinder/shares.conf'
- CinderNetappNfsMountOptions:
- type: string
- default: ''
- CinderNetappCopyOffloadToolPath:
- type: string
- default: ''
- CinderNetappControllerIps:
- type: string
- default: ''
- CinderNetappSaPassword:
- type: string
- default: ''
- hidden: true
- CinderNetappStoragePools:
- type: string
- default: ''
- CinderNetappHostType:
- type: string
- default: ''
- CinderNetappWebservicePath:
- type: string
- default: '/devmgr/v2'
- # DEPRECATED options for compatibility with older versions
- CinderNetappEseriesHostType:
- type: string
- default: 'linux_dm_mp'
-
-parameter_groups:
-- label: deprecated
- description: Do not use deprecated params, they will be removed.
- parameters:
- - CinderNetappEseriesHostType
-
-resources:
- CinderNetappConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- hiera:
- datafiles:
- cinder_netapp_data:
- mapped_data:
- tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend}
- cinder::backend::netapp::title: {get_input: NetappBackendName}
- cinder::backend::netapp::netapp_login: {get_input: NetappLogin}
- cinder::backend::netapp::netapp_password: {get_input: NetappPassword}
- cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname}
- cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort}
- cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier}
- cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily}
- cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol}
- cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType}
- cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler}
- cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList}
- cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver}
- cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName}
- cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares}
- cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig}
- cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions}
- cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath}
- cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps}
- cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword}
- cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools}
- cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType}
- cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath}
-
- CinderNetappDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- name: CinderNetappDeployment
- config: {get_resource: CinderNetappConfig}
- server: {get_param: server}
- input_values:
- EnableNetappBackend: {get_param: CinderEnableNetappBackend}
- NetappBackendName: {get_param: CinderNetappBackendName}
- NetappLogin: {get_param: CinderNetappLogin}
- NetappPassword: {get_param: CinderNetappPassword}
- NetappServerHostname: {get_param: CinderNetappServerHostname}
- NetappServerPort: {get_param: CinderNetappServerPort}
- NetappSizeMultiplier: {get_param: CinderNetappSizeMultiplier}
- NetappStorageFamily: {get_param: CinderNetappStorageFamily}
- NetappStorageProtocol: {get_param: CinderNetappStorageProtocol}
- NetappTransportType: {get_param: CinderNetappTransportType}
- NetappVfiler: {get_param: CinderNetappVfiler}
- NetappVolumeList: {get_param: CinderNetappVolumeList}
- NetappVserver: {get_param: CinderNetappVserver}
- NetappPartnerBackendName: {get_param: CinderNetappPartnerBackendName}
- NetappNfsShares: {get_param: CinderNetappNfsShares}
- NetappNfsSharesConfig: {get_param: CinderNetappNfsSharesConfig}
- NetappNfsMountOptions: {get_param: CinderNetappNfsMountOptions}
- NetappCopyOffloadToolPath: {get_param: CinderNetappCopyOffloadToolPath}
- NetappControllerIps: {get_param: CinderNetappControllerIps}
- NetappSaPassword: {get_param: CinderNetappSaPassword}
- NetappStoragePools: {get_param: CinderNetappStoragePools}
- NetappHostType: {get_param: CinderNetappHostType}
- NetappWebservicePath: {get_param: CinderNetappWebservicePath}
-
-outputs:
- deploy_stdout:
- description: Deployment reference, used to trigger puppet apply on changes
- value: {get_attr: [CinderNetappDeployment, deploy_stdout]}
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
index 0f4806db..e7d0b830 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
@@ -6,6 +6,14 @@ parameters:
server:
description: ID of the controller node to apply this config to
type: string
+ NeutronBigswitchAgentEnabled:
+ description: The state of the neutron-bsn-agent service.
+ type: boolean
+ default: true
+ NeutronBigswitchLLDPEnabled:
+ description: The state of the neutron-bsn-lldp service.
+ type: boolean
+ default: false
NeutronBigswitchRestproxyServers:
description: 'Big Switch controllers ("IP:port,IP:port")'
type: string
@@ -38,19 +46,20 @@ resources:
NeutronBigswitchConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- neutron_bigswitch_data:
- mapped_data:
- neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
- neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
- neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
- neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval}
- neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id}
- neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl}
- neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory}
+ datafiles:
+ neutron_bigswitch_data:
+ mapped_data:
+ neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+ neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+ neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
+ neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
+ neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
+ neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval}
+ neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id}
+ neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl}
+ neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory}
NeutronBigswitchDeployment:
type: OS::Heat::StructuredDeployment
@@ -59,6 +68,8 @@ resources:
config: {get_resource: NeutronBigswitchConfig}
server: {get_param: server}
input_values:
+ neutron_enable_bigswitch_agent: {get_param: NeutronBigswitchAgentEnabled}
+ neutron_enable_bigswitch_lldp: {get_param: NeutronBigswitchLLDPEnabled}
restproxy_servers: {get_param: NeutronBigswitchRestproxyServers}
restproxy_server_auth: {get_param: NeutronBigswitchRestproxyServerAuth }
restproxy_auto_sync_on_failure: {get_param: NeutronBigswitchRestproxyAutoSyncOnFailure}
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
index 6eae812f..40b407bc 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
@@ -10,7 +10,7 @@ parameters:
# Config specific parameters, to be provided via parameter_defaults
N1000vVSMIP:
type: string
- default: '192.0.2.50'
+ default: '192.168.24.50'
N1000vVSMDomainID:
type: number
default: 100
@@ -62,7 +62,7 @@ parameters:
default: '255.255.255.0'
N1000vMgmtGatewayIP:
type: string
- default: '192.0.2.1'
+ default: '192.168.24.1'
N1000vPacemakerControl:
type: boolean
default: true
@@ -96,48 +96,47 @@ resources:
CiscoN1kvConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- cisco_n1kv_data:
- mapped_data:
- #enable_cisco_n1kv: {get_input: EnableCiscoN1kv}
- # VEM Parameters
- n1kv_vem_source: {get_input: n1kv_vem_source}
- n1kv_vem_version: {get_input: n1kv_vem_version}
- neutron::agents::n1kv_vem::n1kv_vsm_ip: {get_input: n1kv_vsm_ip}
- neutron::agents::n1kv_vem::n1kv_vsm_domain_id: {get_input: n1kv_vsm_domain_id}
- neutron::agents::n1kv_vem::n1kv_vsm_ip_v6: {get_input: n1kv_vsm_ip_v6}
- neutron::agents::n1kv_vem::host_mgmt_intf: {get_input: n1kv_vem_host_mgmt_intf}
- neutron::agents::n1kv_vem::uplink_profile: {get_input: n1kv_vem_uplink_profile}
- neutron::agents::n1kv_vem::vtep_config: {get_input: n1kv_vem_vtep_config}
- neutron::agents::n1kv_vem::portdb: {get_input: n1kv_vem_portdb}
- neutron::agents::n1kv_vem::vteps_in_same_subnet: {get_input: n1kv_vem_vteps_in_same_subnet}
- neutron::agents::n1kv_vem::fastpath_flood: {get_input: n1kv_vem_fastpath_flood}
- #VSM Parameter
- n1kv_vsm_source: {get_input: n1kv_vsm_source}
- n1kv_vsm_version: {get_input: n1kv_vsm_version}
- n1k_vsm::phy_if_bridge: {get_input: n1kv_vsm_host_mgmt_intf}
- n1k_vsm::vsm_role: {get_input: n1kv_vsm_role}
- n1k_vsm::pacemaker_control: {get_input: n1kv_vsm_pacemaker_ctrl}
- n1k_vsm::existing_bridge: {get_input: n1kv_vsm_existing_br}
- n1k_vsm::vsm_admin_passwd: {get_input: n1kv_vsm_password}
- n1k_vsm::vsm_domain_id: {get_input: n1kv_vsm_domain_id}
- n1k_vsm::vsm_mgmt_ip: {get_input: n1kv_vsm_ip}
- n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask}
- n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip}
- n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip}
- n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan}
- # Cisco N1KV driver Parameters
- neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip}
- neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username}
- neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_password: {get_input: n1kv_vsm_password}
- neutron::plugins::ml2::cisco::nexus1000v::poll_duration: {get_input: n1kv_vsm_poll_duration}
- neutron::plugins::ml2::cisco::nexus1000v::http_pool_size: {get_input: n1kv_vsm_http_pool_size}
- neutron::plugins::ml2::cisco::nexus1000v::http_timeout: {get_input: n1kv_vsm_http_timeout}
- neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_sync_interval: {get_input: n1kv_vsm_sync_interval}
- neutron::plugins::ml2::cisco::nexus1000v::max_vsm_retries: {get_input: n1kv_max_vsm_retries}
+ datafiles:
+ cisco_n1kv_data:
+ mapped_data:
+ #enable_cisco_n1kv: {get_input: EnableCiscoN1kv}
+ # VEM Parameters
+ n1kv_vem_source: {get_input: n1kv_vem_source}
+ n1kv_vem_version: {get_input: n1kv_vem_version}
+ neutron::agents::n1kv_vem::n1kv_vsm_ip: {get_input: n1kv_vsm_ip}
+ neutron::agents::n1kv_vem::n1kv_vsm_domain_id: {get_input: n1kv_vsm_domain_id}
+ neutron::agents::n1kv_vem::n1kv_vsm_ip_v6: {get_input: n1kv_vsm_ip_v6}
+ neutron::agents::n1kv_vem::host_mgmt_intf: {get_input: n1kv_vem_host_mgmt_intf}
+ neutron::agents::n1kv_vem::uplink_profile: {get_input: n1kv_vem_uplink_profile}
+ neutron::agents::n1kv_vem::vtep_config: {get_input: n1kv_vem_vtep_config}
+ neutron::agents::n1kv_vem::portdb: {get_input: n1kv_vem_portdb}
+ neutron::agents::n1kv_vem::vteps_in_same_subnet: {get_input: n1kv_vem_vteps_in_same_subnet}
+ neutron::agents::n1kv_vem::fastpath_flood: {get_input: n1kv_vem_fastpath_flood}
+ #VSM Parameter
+ n1kv_vsm_source: {get_input: n1kv_vsm_source}
+ n1kv_vsm_version: {get_input: n1kv_vsm_version}
+ n1k_vsm::phy_if_bridge: {get_input: n1kv_vsm_host_mgmt_intf}
+ n1k_vsm::vsm_role: {get_input: n1kv_vsm_role}
+ n1k_vsm::pacemaker_control: {get_input: n1kv_vsm_pacemaker_ctrl}
+ n1k_vsm::existing_bridge: {get_input: n1kv_vsm_existing_br}
+ n1k_vsm::vsm_admin_passwd: {get_input: n1kv_vsm_password}
+ n1k_vsm::vsm_domain_id: {get_input: n1kv_vsm_domain_id}
+ n1k_vsm::vsm_mgmt_ip: {get_input: n1kv_vsm_ip}
+ n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask}
+ n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip}
+ n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip}
+ n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan}
+ # Cisco N1KV driver Parameters
+ neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip}
+ neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username}
+ neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_password: {get_input: n1kv_vsm_password}
+ neutron::plugins::ml2::cisco::nexus1000v::poll_duration: {get_input: n1kv_vsm_poll_duration}
+ neutron::plugins::ml2::cisco::nexus1000v::http_pool_size: {get_input: n1kv_vsm_http_pool_size}
+ neutron::plugins::ml2::cisco::nexus1000v::http_timeout: {get_input: n1kv_vsm_http_timeout}
+ neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_sync_interval: {get_input: n1kv_vsm_sync_interval}
+ neutron::plugins::ml2::cisco::nexus1000v::max_vsm_retries: {get_input: n1kv_max_vsm_retries}
CiscoN1kvDeployment:
type: OS::Heat::StructuredDeployment
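Note: the N1000v defaults now track the 192.168.24.0/24 ctlplane default; deployments on a different management network can still override them via parameter_defaults (addresses illustrative):

parameter_defaults:
  N1000vVSMIP: '10.20.0.50'
  N1000vMgmtGatewayIP: '10.20.0.1'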
diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml
index 6f2dd684..28092773 100644
--- a/puppet/major_upgrade_steps.j2.yaml
+++ b/puppet/major_upgrade_steps.j2.yaml
@@ -32,20 +32,6 @@ parameters:
type: string
hidden: true
-conditions:
- # Conditions to disable any steps where the task list is empty
-{%- for role in roles %}
- {{role.name}}UpgradeBatchConfigEnabled:
- not:
- equals:
- - {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
- - []
- {{role.name}}UpgradeConfigEnabled:
- not:
- equals:
- - {get_param: [role_data, {{role.name}}, upgrade_tasks]}
- - []
-{%- endfor %}
resources:
@@ -65,18 +51,22 @@ resources:
- " crudini --set /etc/nova/nova.conf placement project_domain_name Default\n\n"
- " crudini --set /etc/nova/nova.conf placement user_domain_name Default\n\n"
- " crudini --set /etc/nova/nova.conf placement project_name service\n\n"
- - " systemctl restart openstack-nova-compute\n\n"
- - "fi\n\n"
+ - " crudini --set /etc/nova/nova.conf placement os_interface internal\n\n"
- str_replace:
template: |
crudini --set /etc/nova/nova.conf placement password 'SERVICE_PASSWORD'
- crudini --set /etc/nova/nova.conf placement region_name 'REGION_NAME'
+ crudini --set /etc/nova/nova.conf placement os_region_name 'REGION_NAME'
crudini --set /etc/nova/nova.conf placement auth_url 'AUTH_URL'
- ROLE='ROLE_NAME'
params:
SERVICE_PASSWORD: { get_param: NovaPassword }
REGION_NAME: { get_param: KeystoneRegion }
AUTH_URL: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ - " systemctl restart openstack-nova-compute\n\n"
+ - "fi\n\n"
+ - str_replace:
+ template: |
+ ROLE='ROLE_NAME'
+ params:
ROLE_NAME: {{role.name}}
- get_file: ../extraconfig/tasks/pacemaker_common_functions.sh
- get_file: ../extraconfig/tasks/run_puppet.sh
@@ -100,12 +90,11 @@ resources:
{{role.name}}UpgradeBatchConfig_Step{{step}}:
type: OS::TripleO::UpgradeConfig
{%- if step > 0 %}
- condition: {{role.name}}UpgradeBatchConfigEnabled
- {% if role.name in enabled_roles %}
+ {%- if role in enabled_roles %}
depends_on:
- {{role.name}}UpgradeBatch_Step{{step -1}}
{%- endif %}
- {% else %}
+ {%- else %}
{% for role in roles if role.disable_upgrade_deployment|default(false) %}
{% if deliver_script.update({'deliver': True}) %} {% endif %}
{% endfor %}
@@ -125,13 +114,11 @@ resources:
{%- for role in enabled_roles %}
{{role.name}}UpgradeBatch_Step{{step}}:
type: OS::Heat::SoftwareDeploymentGroup
- condition: {{role.name}}UpgradeBatchConfigEnabled
{%- if step > 0 %}
depends_on:
- - {{role.name}}UpgradeBatch_Step{{step -1}}
- {% else %}
- depends_on:
- - {{role.name}}UpgradeBatchConfig_Step{{step}}
+ {%- for role_inside in enabled_roles %}
+ - {{role_inside.name}}UpgradeBatch_Step{{step -1}}
+ {%- endfor %}
{%- endif %}
update_policy:
batch_create:
@@ -185,11 +172,10 @@ resources:
# do, and there should be minimal performance hit (creating the
# config is cheap compared to the time to apply the deployment).
{%- if step > 0 %}
- condition: {{role.name}}UpgradeConfigEnabled
- {% if role.name in enabled_roles %}
+ {%- if role in enabled_roles %}
depends_on:
- {{role.name}}Upgrade_Step{{step -1}}
- {% endif %}
+ {%- endif %}
{%- endif %}
properties:
UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]}
@@ -201,9 +187,18 @@ resources:
{{role.name}}Upgrade_Step{{step}}:
type: OS::Heat::SoftwareDeploymentGroup
{%- if step > 0 %}
- condition: {{role.name}}UpgradeConfigEnabled
+    # Make sure we wait until all roles have finished their own
+    # previous step before moving on to the next one, so we can
+    # guarantee state at each step.
depends_on:
- - {{role.name}}Upgrade_Step{{step -1}}
+ {%- for role_inside in enabled_roles %}
+ - {{role_inside.name}}Upgrade_Step{{step -1}}
+ {%- endfor %}
+ {%- else %}
+ depends_on:
+ {%- for role_inside in enabled_roles %}
+ - {{role_inside.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+ {%- endfor %}
{%- endif %}
properties:
name: {{role.name}}Upgrade_Step{{step}}
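Note: with the per-role *UpgradeConfigEnabled conditions removed, every upgrade step now waits on the previous step of all enabled roles. For enabled_roles = [Controller, Compute], the rendered Heat would look roughly like this (a sketch of the generated output, not a literal excerpt):

  ControllerUpgrade_Step1:
    type: OS::Heat::SoftwareDeploymentGroup
    depends_on:
      - ControllerUpgrade_Step0
      - ComputeUpgrade_Step0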
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 6ee06d78..84b646a2 100644
--- a/puppet/objectstorage-role.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -127,7 +127,7 @@ parameters:
resources:
SwiftStorage:
- type: OS::Nova::Server
+ type: OS::Nova::ObjectStorageServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
@@ -455,6 +455,12 @@ resources:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: SwiftStorageHieraDeploy
+ properties:
+ server: {get_resource: SwiftStorage}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
@@ -502,6 +508,37 @@ outputs:
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [SwiftStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the swift storage server
value:
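Note: the new known_hosts_entry output flattens every per-network IP/hostname pair plus the collected ECDSA host key into a single known_hosts line; a rendered entry would look roughly like the following (addresses, names and key purely illustrative and shortened):

192.168.24.13,overcloud-objectstorage-0.localdomain,overcloud-objectstorage-0,172.16.1.13,overcloud-objectstorage-0.storage.localdomain,overcloud-objectstorage-0.storage ecdsa-sha2-nistp256 AAAAE2Vj...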
diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2
index 581c4f0d..782a32c9 100644
--- a/puppet/puppet-steps.j2
+++ b/puppet/puppet-steps.j2
@@ -30,19 +30,12 @@
input_values:
update_identifier: {get_param: DeployIdentifier}
- {% if role.name in ['Controller', 'ObjectStorage'] %}
- {{role.name}}SwiftRingDeploy:
- type: OS::TripleO::Tasks::SwiftRingDeploy
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- {% endif %}
-
# Step through a series of configuration steps
{% for step in range(1, 6) %}
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
{% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ depends_on: [{{role.name}}PrePuppet, {{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
{% else %}
depends_on:
{% for dep in roles %}
@@ -88,15 +81,4 @@
servers: {get_param: [servers, {{role.name}}]}
input_values:
update_identifier: {get_param: DeployIdentifier}
-
- {% if role.name in ['Controller', 'ObjectStorage'] %}
- {{role.name}}SwiftRingUpdate:
- type: OS::TripleO::Tasks::SwiftRingUpdate
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step5
- {% endfor %}
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- {% endif %}
{% endfor %}
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 1f68f41f..960f0d58 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -148,7 +148,7 @@ parameters:
resources:
{{role}}:
- type: OS::TripleO::Server
+ type: OS::TripleO::{{role.name}}Server
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
@@ -483,12 +483,19 @@ resources:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
properties:
+ name: UpdateDeployment
config: {get_resource: UpdateConfig}
server: {get_resource: {{role}}}
input_values:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: {{role}}Deployment
+ properties:
+ server: {get_resource: {{role}}}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
@@ -536,6 +543,37 @@ outputs:
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role}}HostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [{{role}}, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for {{role}} server
value:
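Note: because the generic role template now instantiates OS::TripleO::{{role.name}}Server, each role's server implementation can be swapped independently through the resource registry; a hypothetical override for a custom role named Database (mapping name and target are assumptions, not shown in this change):

resource_registry:
  OS::TripleO::DatabaseServer: deployed-server/deployed-server.yaml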
diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml
index d7c87b61..7cc6e4c6 100644
--- a/puppet/services/aodh-api.yaml
+++ b/puppet/services/aodh-api.yaml
@@ -24,6 +24,12 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ AodhApiPolicies:
+ description: |
+ A hash of policies to configure for Aodh API.
+ e.g. { aodh-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
AodhBase:
@@ -61,6 +67,7 @@ outputs:
aodh::wsgi::apache::wsgi_process_display_name: 'aodh_wsgi'
aodh::api::service_name: 'httpd'
aodh::api::enable_proxy_headers_parsing: true
+ aodh::policy::policies: {get_param: AodhApiPolicies}
tripleo.aodh_api.firewall_rules:
'128 aodh-api':
dport:
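Note: the *Policies parameters added throughout this change (Aodh, Barbican, Ceilometer, Cinder, Congress, EC2-API, Glance) all accept the same hash format given in the parameter description; for example, via an environment file:

parameter_defaults:
  AodhApiPolicies:
    aodh-context_is_admin:
      key: context_is_admin
      value: 'role:admin'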
diff --git a/puppet/services/aodh-base.yaml b/puppet/services/aodh-base.yaml
index c2c2d023..48a2aecd 100644
--- a/puppet/services/aodh-base.yaml
+++ b/puppet/services/aodh-base.yaml
@@ -77,8 +77,10 @@ outputs:
aodh::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
aodh::rabbit_port: {get_param: RabbitClientPort}
aodh::keystone::authtoken::project_name: 'service'
+ aodh::keystone::authtoken::user_domain_name: 'Default'
+ aodh::keystone::authtoken::project_domain_name: 'Default'
aodh::keystone::authtoken::password: {get_param: AodhPassword}
- aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::auth::auth_password: {get_param: AodhPassword}
aodh::auth::auth_region: 'regionOne'
diff --git a/puppet/services/apache-internal-tls-certmonger.yaml b/puppet/services/apache-internal-tls-certmonger.yaml
deleted file mode 100644
index 4c94f440..00000000
--- a/puppet/services/apache-internal-tls-certmonger.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-heat_template_version: ocata
-
-description: >
- Apache service TLS configurations.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- # The following parameters are not needed by the template but are
- # required to pass the pep8 tests
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- ApacheNetworks:
- type: OS::Heat::Value
- properties:
- value:
- # NOTE(jaosorior) Get unique network names to create
- # certificates for those. We skip the tenant network since
- # we don't need a certificate for that, and the external
- # network will be handled in another template.
- yaql:
- expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
- data:
- map:
- get_param: ServiceNetMap
-
-outputs:
- role_data:
- description: Role data for the Apache role.
- value:
- service_name: apache_internal_tls_certmonger
- config_settings:
- generate_service_certificates: true
- apache_certificates_specs:
- map_merge:
- repeat:
- template:
- httpd-NETWORK:
- service_certificate: '/etc/pki/tls/certs/httpd-NETWORK.crt'
- service_key: '/etc/pki/tls/private/httpd-NETWORK.key'
- hostname: "%{hiera('fqdn_NETWORK')}"
- principal: "HTTP/%{hiera('fqdn_NETWORK')}"
- for_each:
- NETWORK: {get_attr: [ApacheNetworks, value]}
- metadata_settings:
- repeat:
- template:
- - service: HTTP
- network: $NETWORK
- type: node
- for_each:
- $NETWORK: {get_attr: [ApacheNetworks, value]}
- upgrade_tasks:
- - name: Check if httpd is deployed
- command: systemctl is-enabled httpd
- tags: common
- ignore_errors: True
- register: httpd_enabled
- - name: "PreUpgrade step0,validation: Check service httpd is running"
- shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b'
- when: httpd_enabled.rc == 0
- tags: step0,validation
diff --git a/puppet/services/apache.yaml b/puppet/services/apache.yaml
index 2d950151..6e53b1f7 100644
--- a/puppet/services/apache.yaml
+++ b/puppet/services/apache.yaml
@@ -31,13 +31,25 @@ parameters:
type: boolean
default: false
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
- ApacheTLS:
- type: OS::TripleO::Services::ApacheTLS
+ ApacheNetworks:
+ type: OS::Heat::Value
properties:
- ServiceNetMap: {get_param: ServiceNetMap}
+ value:
+ # NOTE(jaosorior) Get unique network names to create
+ # certificates for those. We skip the tenant network since
+ # we don't need a certificate for that, and the external
+ # is for HAProxy so it isn't used for apache either.
+ yaql:
+ expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
+ data:
+ map:
+ get_param: ServiceNetMap
outputs:
role_data:
@@ -46,7 +58,6 @@ outputs:
service_name: apache
config_settings:
map_merge:
- - get_attr: [ApacheTLS, role_data, config_settings]
-
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
@@ -64,8 +75,33 @@ outputs:
apache::mod::prefork::serverlimit: { get_param: ApacheServerLimit }
apache::mod::remoteip::proxy_ips:
- "%{hiera('apache_remote_proxy_ips_network')}"
+ -
+ generate_service_certificates: true
+ tripleo::certmonger::apache_dirs::certificate_dir: '/etc/pki/tls/certs/httpd'
+ tripleo::certmonger::apache_dirs::key_dir: '/etc/pki/tls/private/httpd'
+ apache_certificates_specs:
+ map_merge:
+ repeat:
+ template:
+ httpd-NETWORK:
+ service_certificate: '/etc/pki/tls/certs/httpd/httpd-NETWORK.crt'
+ service_key: '/etc/pki/tls/private/httpd/httpd-NETWORK.key'
+ hostname: "%{hiera('fqdn_NETWORK')}"
+ principal: "HTTP/%{hiera('fqdn_NETWORK')}"
+ for_each:
+ NETWORK: {get_attr: [ApacheNetworks, value]}
metadata_settings:
- get_attr: [ApacheTLS, role_data, metadata_settings]
+ if:
+ - internal_tls_enabled
+ -
+ repeat:
+ template:
+ - service: HTTP
+ network: $NETWORK
+ type: node
+ for_each:
+ $NETWORK: {get_attr: [ApacheNetworks, value]}
+ - null
upgrade_tasks:
- name: Check if httpd is deployed
command: systemctl is-enabled httpd
diff --git a/puppet/services/barbican-api.yaml b/puppet/services/barbican-api.yaml
index cba92415..91a5b01c 100644
--- a/puppet/services/barbican-api.yaml
+++ b/puppet/services/barbican-api.yaml
@@ -55,6 +55,12 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ BarbicanPolicies:
+ description: |
+ A hash of policies to configure for Barbican.
+ e.g. { barbican-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
@@ -74,9 +80,10 @@ outputs:
map_merge:
- get_attr: [ApacheServiceBase, role_data, config_settings]
- barbican::keystone::authtoken::password: {get_param: BarbicanPassword}
- barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
barbican::keystone::authtoken::project_name: 'service'
+ barbican::policy::policies: {get_param: BarbicanPolicies}
barbican::api::host_href: {get_param: [EndpointMap, BarbicanPublic, uri]}
barbican::api::db_auto_create: false
barbican::api::enabled_certificate_plugins: ['simple_certificate']
@@ -135,14 +142,14 @@ outputs:
nova::compute::barbican_endpoint:
get_param: [EndpointMap, BarbicanInternal, uri]
nova::compute::barbican_auth_endpoint:
- get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix]
+ get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]
cinder_api:
cinder::api::keymgr_api_class: >
castellan.key_manager.barbican_key_manager.BarbicanKeyManager
cinder::api::keymgr_encryption_api_url:
get_param: [EndpointMap, BarbicanInternal, uri]
cinder::api::keymgr_encryption_auth_url:
- get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix]
+ get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
diff --git a/puppet/services/ceilometer-agent-ipmi.yaml b/puppet/services/ceilometer-agent-ipmi.yaml
new file mode 100644
index 00000000..26647dfd
--- /dev/null
+++ b/puppet/services/ceilometer-agent-ipmi.yaml
@@ -0,0 +1,77 @@
+heat_template_version: ocata
+
+description: >
+  OpenStack Ceilometer IPMI Agent service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ RedisPassword:
+ description: The password for the redis service account.
+ type: string
+ hidden: true
+ MonitoringSubscriptionCeilometerIpmi:
+ default: 'overcloud-ceilometer-agent-ipmi'
+ type: string
+ CeilometerAgentIpmiLoggingSource:
+ type: json
+ default:
+ tag: openstack.ceilometer.agent.ipmi
+ path: /var/log/ceilometer/ipmi.log
+
+resources:
+ CeilometerServiceBase:
+ type: ./ceilometer-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Agent Ipmi role.
+ value:
+ service_name: ceilometer_agent_ipmi
+ monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerIpmi}
+ logging_source: {get_param: CeilometerAgentIpmiLoggingSource}
+ logging_groups:
+ - ceilometer
+ config_settings:
+ map_merge:
+ - get_attr: [CeilometerServiceBase, role_data, config_settings]
+ - ceilometer_redis_password: {get_param: RedisPassword}
+ ipmi_namespace: true
+ step_config: |
+ include ::tripleo::profile::base::ceilometer::agent::polling
+ upgrade_tasks:
+ - name: Check if ceilometer-agent-ipmi is deployed
+ command: systemctl is-enabled openstack-ceilometer-ipmi
+ tags: common
+ ignore_errors: True
+ register: ceilometer_ipmi_enabled
+ - name: "PreUpgrade step0,validation: Check if openstack-ceilometer-ipmi is running"
+ shell: >
+ /usr/bin/systemctl show 'openstack-ceilometer-ipmi' --property ActiveState |
+ grep '\bactive\b'
+ when: ceilometer_ipmi_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop openstack-ceilometer-ipmi service
+ tags: step1
+ when: ceilometer_ipmi_enabled.rc == 0
+ service: name=openstack-ceilometer-ipmi state=stopped
+ - name: Install openstack-ceilometer-ipmi package if it was disabled
+ tags: step3
+ yum: name=openstack-ceilometer-ipmi state=latest
+ when: ceilometer_ipmi_enabled.rc != 0
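Note: as with any composable service, the new IPMI agent only runs once it is mapped in the resource registry and included in a role's services list; a hedged sketch (the registry key name is an assumption, it does not appear in this diff):

resource_registry:
  OS::TripleO::Services::CeilometerAgentIpmi: ../puppet/services/ceilometer-agent-ipmi.yaml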
diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml
index f5ee9d40..ba94b451 100644
--- a/puppet/services/ceilometer-api.yaml
+++ b/puppet/services/ceilometer-api.yaml
@@ -29,6 +29,12 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ CeilometerApiPolicies:
+ description: |
+ A hash of policies to configure for Ceilometer API.
+ e.g. { ceilometer-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
CeilometerServiceBase:
@@ -78,6 +84,7 @@ outputs:
"%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
+ ceilometer::policy::policies: {get_param: CeilometerApiPolicies}
ceilometer::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
ceilometer::wsgi::apache::ssl: {get_param: EnableInternalTLS}
ceilometer::wsgi::apache::servername:
diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml
index 874c6893..e1613720 100644
--- a/puppet/services/ceilometer-base.yaml
+++ b/puppet/services/ceilometer-base.yaml
@@ -37,7 +37,7 @@ parameters:
constraints:
- allowed_values: ['gnocchi', 'database']
CeilometerEventDispatcher:
- default: ['gnocchi']
+ default: ['panko', 'gnocchi']
description: Comma-separated list of Dispatchers to process events data
type: comma_delimited_list
constraints:
@@ -76,6 +76,11 @@ parameters:
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
+ CeilometerApiEndpoint:
+ default: false
+    description: Whether to create the Ceilometer API endpoint in Keystone.
+                 Set this to false if the Ceilometer API service is disabled.
+ type: boolean
outputs:
role_data:
@@ -83,6 +88,7 @@ outputs:
value:
service_name: ceilometer_base
config_settings:
+ ceilometer_auth_enabled: true
ceilometer::debug: {get_param: Debug}
ceilometer::db::database_connection:
list_join:
@@ -98,14 +104,18 @@ outputs:
# we include db_sync class in puppet-tripleo
ceilometer::db::sync_db: false
ceilometer::keystone::authtoken::project_name: 'service'
+ ceilometer::keystone::authtoken::user_domain_name: 'Default'
+ ceilometer::keystone::authtoken::project_domain_name: 'Default'
ceilometer::keystone::authtoken::password: {get_param: CeilometerPassword}
- ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers}
ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion}
ceilometer::agent::auth::auth_tenant_name: 'service'
+ ceilometer::agent::auth::auth_user_domain_name: 'Default'
+ ceilometer::agent::auth::auth_project_domain_name: 'Default'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
ceilometer::collector::event_dispatcher: {get_param: CeilometerEventDispatcher}
@@ -129,6 +139,7 @@ outputs:
ceilometer::keystone::auth::password: {get_param: CeilometerPassword}
ceilometer::keystone::auth::region: {get_param: KeystoneRegion}
ceilometer::keystone::auth::tenant: 'service'
+ ceilometer::keystone::auth::configure_endpoint: {get_param: CeilometerApiEndpoint}
mysql:
ceilometer::db::mysql::password: {get_param: CeilometerPassword}
ceilometer::db::mysql::user: ceilometer
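Note: CeilometerApiEndpoint defaults to false, so the Keystone endpoint for the Ceilometer API is skipped unless explicitly requested, e.g.:

parameter_defaults:
  CeilometerApiEndpoint: true   # keep registering the Ceilometer API endpoint in Keystone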
diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml
index 01531971..49856115 100644
--- a/puppet/services/ceph-rgw.yaml
+++ b/puppet/services/ceph-rgw.yaml
@@ -73,7 +73,7 @@ outputs:
ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]}
ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]}
ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion}
- ceph::rgw::keystone::auth::roles: [ 'admin', 'member', '_member_' ]
+ ceph::rgw::keystone::auth::roles: [ 'admin', 'Member', '_member_' ]
ceph::rgw::keystone::auth::tenant: service
ceph::rgw::keystone::auth::user: swift
ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
diff --git a/puppet/services/certmonger-user.yaml b/puppet/services/certmonger-user.yaml
new file mode 100644
index 00000000..af9802b0
--- /dev/null
+++ b/puppet/services/certmonger-user.yaml
@@ -0,0 +1,28 @@
+heat_template_version: ocata
+
+description: >
+ Requests certificates using certmonger through Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the certmonger-user service
+ value:
+ service_name: certmonger_user
+ step_config: |
+ include ::tripleo::profile::base::certmonger_user
diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml
index c0ea7aaa..c1e6b0b0 100644
--- a/puppet/services/cinder-api.yaml
+++ b/puppet/services/cinder-api.yaml
@@ -46,6 +46,12 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ CinderApiPolicies:
+ description: |
+ A hash of policies to configure for Cinder API.
+ e.g. { cinder-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
conditions:
cinder_workers_zero: {equals : [{get_param: CinderWorkers}, 0]}
@@ -80,10 +86,13 @@ outputs:
map_merge:
- get_attr: [CinderBase, role_data, config_settings]
- get_attr: [ApacheServiceBase, role_data, config_settings]
- - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
cinder::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
cinder::keystone::authtoken::password: {get_param: CinderPassword}
cinder::keystone::authtoken::project_name: 'service'
+ cinder::keystone::authtoken::user_domain_name: 'Default'
+ cinder::keystone::authtoken::project_domain_name: 'Default'
+ cinder::policy::policies: {get_param: CinderApiPolicies}
cinder::api::enable_proxy_headers_parsing: true
cinder::api::nova_catalog_info: 'compute:nova:internalURL'
@@ -161,13 +170,13 @@ outputs:
tags: step0,validation
- name: check for cinder running under apache (post upgrade)
tags: step1
- shell: "apachectl -t -D DUMP_VHOSTS | grep -q cinder"
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q cinder"
register: cinder_apache
ignore_errors: true
- name: Stop cinder_api service (running under httpd)
tags: step1
service: name=httpd state=stopped
- when: "cinder_apache.rc == 0"
+ when: cinder_apache.rc == 0
- name: Stop and disable cinder_api service (pre-upgrade not under httpd)
tags: step1
when: cinder_api_enabled.rc == 0
diff --git a/puppet/services/cinder-backend-netapp.yaml b/puppet/services/cinder-backend-netapp.yaml
new file mode 100644
index 00000000..29a0ce1b
--- /dev/null
+++ b/puppet/services/cinder-backend-netapp.yaml
@@ -0,0 +1,129 @@
+heat_template_version: ocata
+
+description: OpenStack Cinder NetApp backend
+
+parameters:
+ CinderEnableNetappBackend:
+ type: boolean
+ default: true
+ CinderNetappBackendName:
+ type: string
+ default: 'tripleo_netapp'
+ CinderNetappLogin:
+ type: string
+ CinderNetappPassword:
+ type: string
+ hidden: true
+ CinderNetappServerHostname:
+ type: string
+ CinderNetappServerPort:
+ type: string
+ default: '80'
+ CinderNetappSizeMultiplier:
+ type: string
+ default: '1.2'
+ CinderNetappStorageFamily:
+ type: string
+ default: 'ontap_cluster'
+ CinderNetappStorageProtocol:
+ type: string
+ default: 'nfs'
+ CinderNetappTransportType:
+ type: string
+ default: 'http'
+ CinderNetappVfiler:
+ type: string
+ default: ''
+ CinderNetappVolumeList:
+ type: string
+ default: ''
+ CinderNetappVserver:
+ type: string
+ default: ''
+ CinderNetappPartnerBackendName:
+ type: string
+ default: ''
+ CinderNetappNfsShares:
+ type: string
+ default: ''
+ CinderNetappNfsSharesConfig:
+ type: string
+ default: '/etc/cinder/shares.conf'
+ CinderNetappNfsMountOptions:
+ type: string
+ default: ''
+ CinderNetappCopyOffloadToolPath:
+ type: string
+ default: ''
+ CinderNetappControllerIps:
+ type: string
+ default: ''
+ CinderNetappSaPassword:
+ type: string
+ default: ''
+ hidden: true
+ CinderNetappStoragePools:
+ type: string
+ default: ''
+ CinderNetappHostType:
+ type: string
+ default: ''
+ CinderNetappWebservicePath:
+ type: string
+ default: '/devmgr/v2'
+ # DEPRECATED options for compatibility with older versions
+ CinderNetappEseriesHostType:
+ type: string
+ default: 'linux_dm_mp'
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+
+parameter_groups:
+- label: deprecated
+  description: Do not use deprecated params; they will be removed.
+ parameters:
+ - CinderNetappEseriesHostType
+
+outputs:
+ role_data:
+ description: Role data for the Cinder NetApp backend.
+ value:
+ service_name: cinder_backend_netapp
+ config_settings:
+ tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_param: CinderEnableNetappBackend}
+ cinder::backend::netapp::title: {get_param: CinderNetappBackendName}
+ cinder::backend::netapp::netapp_login: {get_param: CinderNetappLogin}
+ cinder::backend::netapp::netapp_password: {get_param: CinderNetappPassword}
+ cinder::backend::netapp::netapp_server_hostname: {get_param: CinderNetappServerHostname}
+ cinder::backend::netapp::netapp_server_port: {get_param: CinderNetappServerPort}
+ cinder::backend::netapp::netapp_size_multiplier: {get_param: CinderNetappSizeMultiplier}
+ cinder::backend::netapp::netapp_storage_family: {get_param: CinderNetappStorageFamily}
+ cinder::backend::netapp::netapp_storage_protocol: {get_param: CinderNetappStorageProtocol}
+ cinder::backend::netapp::netapp_transport_type: {get_param: CinderNetappTransportType}
+ cinder::backend::netapp::netapp_vfiler: {get_param: CinderNetappVfiler}
+ cinder::backend::netapp::netapp_volume_list: {get_param: CinderNetappVolumeList}
+ cinder::backend::netapp::netapp_vserver: {get_param: CinderNetappVserver}
+ cinder::backend::netapp::netapp_partner_backend_name: {get_param: CinderNetappPartnerBackendName}
+ cinder::backend::netapp::nfs_shares: {get_param: CinderNetappNfsShares}
+ cinder::backend::netapp::nfs_shares_config: {get_param: CinderNetappNfsSharesConfig}
+ cinder::backend::netapp::nfs_mount_options: {get_param: CinderNetappNfsMountOptions}
+ cinder::backend::netapp::netapp_copyoffload_tool_path: {get_param: CinderNetappCopyOffloadToolPath}
+ cinder::backend::netapp::netapp_controller_ips: {get_param: CinderNetappControllerIps}
+ cinder::backend::netapp::netapp_sa_password: {get_param: CinderNetappSaPassword}
+ cinder::backend::netapp::netapp_storage_pools: {get_param: CinderNetappStoragePools}
+ cinder::backend::netapp::netapp_host_type: {get_param: CinderNetappHostType}
+ cinder::backend::netapp::netapp_webservice_path: {get_param: CinderNetappWebservicePath}
+ step_config: |
+ include ::tripleo::profile::base::cinder::volume
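Note: only the login, password and server hostname have no defaults, so a minimal environment enabling this backend could look like the following (values illustrative; the registry mapping is an assumption, not shown here):

resource_registry:
  OS::TripleO::Services::CinderBackendNetApp: ../puppet/services/cinder-backend-netapp.yaml
parameter_defaults:
  CinderNetappLogin: admin
  CinderNetappPassword: secretpassword
  CinderNetappServerHostname: netapp.example.com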
diff --git a/puppet/services/cinder-backend-scaleio.yaml b/puppet/services/cinder-backend-scaleio.yaml
index eb709cd5..c4e4aa3d 100644
--- a/puppet/services/cinder-backend-scaleio.yaml
+++ b/puppet/services/cinder-backend-scaleio.yaml
@@ -106,6 +106,6 @@ outputs:
cinder::backend::scaleio::sio_round_volume_capacity: {get_param: CinderScaleIORoundVolumeCapacity}
cinder::backend::scaleio::sio_unmap_volume_before_deletion: {get_param: CinderScaleIOUnmapVolumeBeforeDeletion}
cinder::backend::scaleio::sio_max_over_subscription_ratio: {get_param: CinderScaleIOMaxOverSubscriptionRatio}
- cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOThinProvision}
+ cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOSanThinProvision}
step_config: |
include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml
index b52955ef..26f1a96f 100644
--- a/puppet/services/cinder-volume.yaml
+++ b/puppet/services/cinder-volume.yaml
@@ -94,11 +94,7 @@ outputs:
tripleo::profile::base::cinder::volume::cinder_enable_nfs_backend: {get_param: CinderEnableNfsBackend}
tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
- tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers:
- str_replace:
- template: SERVERS
- params:
- SERVERS: {get_param: CinderNfsServers}
+ tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: {get_param: CinderNfsServers}
tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
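Note: with the str_replace wrapper gone, CinderNfsServers flows straight through to hiera, so it can be supplied as a plain list (share path illustrative):

parameter_defaults:
  CinderEnableNfsBackend: true
  CinderNfsServers: ['192.168.122.1:/export/cinder']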
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
diff --git a/puppet/services/congress.yaml b/puppet/services/congress.yaml
index 8bc9f2e3..5f6b5657 100644
--- a/puppet/services/congress.yaml
+++ b/puppet/services/congress.yaml
@@ -47,6 +47,12 @@ parameters:
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
+ CongressPolicies:
+ description: |
+ A hash of policies to configure for Congress.
+ e.g. { congress-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
outputs:
role_data:
@@ -73,9 +79,12 @@ outputs:
congress::rabbit_port: {get_param: RabbitClientPort}
congress::server::bind_host: {get_param: [ServiceNetMap, CongressApiNetwork]}
+ congress::keystone::authtoken::password: {get_param: CongressPassword}
congress::keystone::authtoken::project_name: 'service'
- congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ congress::keystone::authtoken::user_domain_name: 'Default'
+ congress::keystone::authtoken::project_domain_name: 'Default'
+ congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
congress::db::mysql::password: {get_param: CongressPassword}
congress::db::mysql::user: congress
@@ -84,10 +93,12 @@ outputs:
congress::db::mysql::allowed_hosts:
- '%'
- {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ congress::policy::policies: {get_param: CongressPolicies}
service_config_settings:
keystone:
congress::keystone::auth::tenant: 'service'
+ congress::keystone::auth::region: {get_param: KeystoneRegion}
congress::keystone::auth::password: {get_param: CongressPassword}
congress::keystone::auth::public_url: {get_param: [EndpointMap, CongressPublic, uri]}
congress::keystone::auth::internal_url: {get_param: [EndpointMap, CongressInternal, uri]}
diff --git a/puppet/services/database/mongodb.yaml b/puppet/services/database/mongodb.yaml
index 63ec4446..50597216 100644
--- a/puppet/services/database/mongodb.yaml
+++ b/puppet/services/database/mongodb.yaml
@@ -19,6 +19,10 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MongodbMemoryLimit:
+ default: '20G'
+ description: Limit the amount of memory mongodb uses with systemd.
+ type: string
MongoDbLoggingSource:
type: json
description: Fluentd logging configuration for mongodb.
@@ -49,6 +53,7 @@ outputs:
map_merge:
- get_attr: [MongoDbBase, role_data, config_settings]
- tripleo::profile::base::database::mongodb::mongodb_replset: {get_attr: [MongoDbBase, aux_parameters, rplset_name]}
+ tripleo::profile::base::database::mongodb::memory_limit: {get_param: MongodbMemoryLimit}
mongodb::server::service_manage: True
tripleo.mongodb.firewall_rules:
'101 mongodb_config':
diff --git a/puppet/services/database/mysql-internal-tls-certmonger.yaml b/puppet/services/database/mysql-internal-tls-certmonger.yaml
deleted file mode 100644
index 9f7eaf57..00000000
--- a/puppet/services/database/mysql-internal-tls-certmonger.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-heat_template_version: ocata
-
-description: >
- MySQL configurations for using TLS via certmonger.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- # The following parameters are not needed by the template but are
- # required to pass the pep8 tests
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-outputs:
- role_data:
- description: MySQL configurations for using TLS via certmonger.
- value:
- service_name: mysql_internal_tls_certmonger
- config_settings:
- generate_service_certificates: true
- tripleo::profile::base::database::mysql::certificate_specs:
- service_certificate: '/etc/pki/tls/certs/mysql.crt'
- service_key: '/etc/pki/tls/private/mysql.key'
- hostname:
- str_replace:
- template: "%{hiera('cloud_name_NETWORK')}"
- params:
- NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
- principal:
- str_replace:
- template: "mysql/%{hiera('cloud_name_NETWORK')}"
- params:
- NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
- metadata_settings:
- - service: mysql
- network: {get_param: [ServiceNetMap, MysqlNetwork]}
- type: vip
diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml
index 808f1353..da55da3c 100644
--- a/puppet/services/database/mysql.yaml
+++ b/puppet/services/database/mysql.yaml
@@ -23,6 +23,10 @@ parameters:
description: Configures MySQL max_connections config setting
type: number
default: 4096
+ MysqlIncreaseFileLimit:
+ description: Flag to increase MySQL open-files-limit to 16384
+ type: boolean
+ default: true
MysqlRootPassword:
type: string
hidden: true
@@ -38,13 +42,13 @@ parameters:
description: The password for the nova db account
type: string
hidden: true
+ EnableInternalTLS:
+ type: boolean
+ default: false
-resources:
+conditions:
- MySQLTLS:
- type: OS::TripleO::Services::MySQLTLS
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
outputs:
role_data:
@@ -53,7 +57,6 @@ outputs:
service_name: mysql
config_settings:
map_merge:
- - get_attr: [MySQLTLS, role_data, config_settings]
-
# The Galera package should work in cluster and
# non-cluster modes based on the config file.
@@ -96,10 +99,32 @@ outputs:
$NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
tripleo::profile::base::database::mysql::client_bind_address:
{get_param: [ServiceNetMap, MysqlNetwork]}
+ tripleo::profile::base::database::mysql::generate_dropin_file_limit:
+ {get_param: MysqlIncreaseFileLimit}
+ - generate_service_certificates: true
+ tripleo::profile::base::database::mysql::certificate_specs:
+ service_certificate: '/etc/pki/tls/certs/mysql.crt'
+ service_key: '/etc/pki/tls/private/mysql.key'
+ hostname:
+ str_replace:
+ template: "%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ principal:
+ str_replace:
+ template: "mysql/%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
step_config: |
include ::tripleo::profile::base::database::mysql
metadata_settings:
- get_attr: [MySQLTLS, role_data, metadata_settings]
+ if:
+ - internal_tls_enabled
+ -
+ - service: mysql
+ network: {get_param: [ServiceNetMap, MysqlNetwork]}
+ type: vip
+ - null
upgrade_tasks:
- name: Check for galera root password
tags: step0
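Note: both new mysql toggles are simple booleans that can be overridden per deployment, for instance:

parameter_defaults:
  MysqlIncreaseFileLimit: false   # keep the distribution default open-files-limit
  EnableInternalTLS: true         # enables the internal_tls_enabled condition used for metadata_settings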
diff --git a/puppet/services/database/redis-base.yaml b/puppet/services/database/redis-base.yaml
index 2b7dd430..af89ffb1 100644
--- a/puppet/services/database/redis-base.yaml
+++ b/puppet/services/database/redis-base.yaml
@@ -42,3 +42,4 @@ outputs:
redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
+ redis::sentinel::sentinel_bind: {get_param: [ServiceNetMap, RedisNetwork]}
diff --git a/puppet/services/docker.yaml b/puppet/services/docker.yaml
new file mode 100644
index 00000000..e7da2383
--- /dev/null
+++ b/puppet/services/docker.yaml
@@ -0,0 +1,43 @@
+heat_template_version: ocata
+
+description: >
+ Configures docker on the host
+
+parameters:
+ DockerNamespace:
+    description: Docker namespace (registry/organization prefix) used when pulling container images.
+ default: tripleoupstream
+ type: string
+ DockerNamespaceIsRegistry:
+ type: boolean
+ default: false
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the docker service
+ value:
+ service_name: docker
+ config_settings:
+ tripleo::profile::base::docker::docker_namespace: {get_param: DockerNamespace}
+ tripleo::profile::base::docker::insecure_registry: {get_param: DockerNamespaceIsRegistry}
+ step_config: |
+ include ::tripleo::profile::base::docker
+ upgrade_tasks:
+ - name: Install docker packages on upgrade if missing
+ tags: step3
+ yum: name=docker state=latest
+
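Note: pointing the docker profile at a local, insecure registry is then a matter of two parameters (hostname illustrative):

parameter_defaults:
  DockerNamespace: 192.168.24.1:8787/tripleoupstream
  DockerNamespaceIsRegistry: true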
diff --git a/puppet/services/ec2-api.yaml b/puppet/services/ec2-api.yaml
index 70821396..d1adefe5 100644
--- a/puppet/services/ec2-api.yaml
+++ b/puppet/services/ec2-api.yaml
@@ -42,6 +42,12 @@ parameters:
default: 'false'
description: Set to true to enable package installation via Puppet
type: boolean
+ Ec2ApiPolicies:
+ description: |
+ A hash of policies to configure for EC2-API.
+ e.g. { ec2api-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
conditions:
@@ -67,18 +73,19 @@ outputs:
ec2api::keystone::authtoken::password: {get_param: Ec2ApiPassword}
ec2api::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
ec2api::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ec2api::policy::policies: {get_param: Ec2ApiPolicies}
ec2api::api::enabled: true
ec2api::package_manage: {get_param: EnablePackageInstall}
ec2api::api::ec2api_listen:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, Ec2ApiNetwork]}
ec2api::metadata::metadata_listen:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, Ec2ApiMetadataNetwork]}
ec2api::db::database_connection:
@@ -91,6 +98,11 @@ outputs:
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ec2_api'
- '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ ec2api::api::keystone_ec2_tokens_url:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, KeystoneV3Internal, uri]}
+ - '/ec2tokens'
-
if:
- nova_workers_zero
diff --git a/puppet/services/etcd.yaml b/puppet/services/etcd.yaml
index 7cdd8451..5db8bec0 100644
--- a/puppet/services/etcd.yaml
+++ b/puppet/services/etcd.yaml
@@ -19,9 +19,9 @@ parameters:
via parameter_defaults in the resource registry.
type: json
EtcdInitialClusterToken:
- default: 'etcd-tripleo'
description: Initial cluster token for the etcd cluster during bootstrap.
type: string
+ hidden: true
MonitoringSubscriptionEtcd:
default: 'overcloud-etcd'
type: string
diff --git a/puppet/services/external-swift-proxy.yaml b/puppet/services/external-swift-proxy.yaml
new file mode 100644
index 00000000..75f5b6a0
--- /dev/null
+++ b/puppet/services/external-swift-proxy.yaml
@@ -0,0 +1,70 @@
+heat_template_version: ocata
+
+description: >
+ External Swift Proxy endpoint configured with Puppet
+
+parameters:
+ ExternalPublicUrl:
+ description: Public endpoint url for the external swift proxy
+ type: string
+ ExternalInternalUrl:
+ description: Internal endpoint url for the external swift proxy
+ type: string
+ ExternalAdminUrl:
+    description: Admin endpoint url for the external swift proxy
+ type: string
+ ExternalSwiftUserTenant:
+ description: Tenant where swift user will be set as admin
+ type: string
+ default: 'service'
+ SwiftPassword:
+ description: The password for the swift service account, used by the swift proxy services.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+outputs:
+ role_data:
+ description: Role data for External Swift proxy.
+ value:
+ service_name: external_swift_proxy
+ config_settings:
+
+ step_config:
+
+ service_config_settings:
+ keystone:
+ swift::keystone::auth::public_url: {get_param: ExternalPublicUrl}
+ swift::keystone::auth::internal_url: {get_param: ExternalInternalUrl}
+ swift::keystone::auth::admin_url: {get_param: ExternalAdminUrl}
+ swift::keystone::auth::public_url_s3: ''
+ swift::keystone::auth::internal_url_s3: ''
+ swift::keystone::auth::admin_url_s3: ''
+ swift::keystone::auth::password: {get_param: SwiftPassword}
+ swift::keystone::auth::region: {get_param: KeystoneRegion}
+ swift::keystone::auth::tenant: {get_param: ExternalSwiftUserTenant}
+ swift::keystone::auth::configure_s3_endpoint: false
+ swift::keystone::auth::operator_roles:
+ - admin
+ - swiftoperator
+ - ResellerAdmin
+
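Note: the external proxy service only registers Keystone endpoints, so enabling it mostly amounts to supplying the three URLs (values illustrative):

parameter_defaults:
  ExternalPublicUrl: 'https://swift.example.com:8080/v1/AUTH_%(tenant_id)s'
  ExternalInternalUrl: 'http://192.168.2.10:8080/v1/AUTH_%(tenant_id)s'
  ExternalAdminUrl: 'http://192.168.2.10:8080'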
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index ce389dc1..f61e6154 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -48,6 +48,74 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ CephClientUserName:
+ default: openstack
+ type: string
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ GlanceNotifierStrategy:
+ description: Strategy to use for Glance notification queue
+ type: string
+ default: noop
+ GlanceLogFile:
+ description: The filepath of the file to use for logging messages from Glance.
+ type: string
+ default: ''
+ GlanceBackend:
+ default: swift
+ description: The short name of the Glance backend to use. Should be one
+ of swift, rbd, or file
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd']
+ GlanceNfsEnabled:
+ default: false
+ description: >
+ When using GlanceBackend 'file', mount NFS share for image storage.
+ type: boolean
+ GlanceNfsShare:
+ default: ''
+ description: >
+ NFS share to mount for image storage (when GlanceNfsEnabled is true)
+ type: string
+ GlanceNfsOptions:
+ default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
+ description: >
+ NFS mount options for image storage (when GlanceNfsEnabled is true)
+ type: string
+ GlanceRbdPoolName:
+ default: images
+ type: string
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ GlanceApiPolicies:
+ description: |
+ A hash of policies to configure for Glance API.
+ e.g. { glance-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
conditions:
use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
@@ -62,13 +130,6 @@ resources:
EndpointMap: {get_param: EndpointMap}
EnableInternalTLS: {get_param: EnableInternalTLS}
- GlanceBase:
- type: ./glance-base.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
outputs:
role_data:
description: Role data for the Glance API role.
@@ -80,7 +141,6 @@ outputs:
- glance
config_settings:
map_merge:
- - get_attr: [GlanceBase, role_data, config_settings]
- get_attr: [TLSProxyBase, role_data, config_settings]
- glance::api::database_connection:
list_join:
@@ -101,6 +161,7 @@ outputs:
glance::api::enable_proxy_headers_parsing: true
glance::api::debug: {get_param: Debug}
glance::api::workers: {get_param: GlanceWorkers}
+ glance::policy::policies: {get_param: GlanceApiPolicies}
tripleo.glance_api.firewall_rules:
'112 glance_api':
dport:
@@ -132,10 +193,41 @@ outputs:
- use_tls_proxy
- 'localhost'
- {get_param: [ServiceNetMap, GlanceApiNetwork]}
+ glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
+ glance_log_file: {get_param: GlanceLogFile}
+ glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ glance::backend::swift::swift_store_user: service:glance
+ glance::backend::swift::swift_store_key: {get_param: GlancePassword}
+ glance::backend::swift::swift_store_create_container_on_put: true
+ glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+ glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
+ glance_backend: {get_param: GlanceBackend}
+ glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
+ glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
+ glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
+ glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ glance::notify::rabbitmq::notification_driver: messagingv2
+ tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
+ tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
+ tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
+ service_config_settings:
+ keystone:
+ glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
+ glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
+ glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
+ glance::keystone::auth::password: {get_param: GlancePassword }
+ glance::keystone::auth::region: {get_param: KeystoneRegion}
+ glance::keystone::auth::tenant: 'service'
+ mysql:
+ glance::db::mysql::password: {get_param: GlancePassword}
+ glance::db::mysql::user: glance
+ glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ glance::db::mysql::dbname: glance
+ glance::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
step_config: |
include ::tripleo::profile::base::glance::api
- service_config_settings:
- get_attr: [GlanceBase, role_data, service_config_settings]
upgrade_tasks:
- name: Check if glance_api is deployed
command: systemctl is-enabled openstack-glance-api
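[Editor's note] With glance-base.yaml folded into glance-api.yaml (the base file is deleted below), the backend, notification and policy parameters above are now consumed directly by the API template. A minimal environment-file sketch using only parameter names introduced in this hunk; every value is an illustrative placeholder, not a recommendation:

  parameter_defaults:
    GlanceBackend: file
    GlanceNfsEnabled: true
    GlanceNfsShare: 192.168.122.1:/export/images   # placeholder share
    GlanceApiPolicies:
      glance-context_is_admin:
        key: context_is_admin
        value: 'role:admin'
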
diff --git a/puppet/services/glance-base.yaml b/puppet/services/glance-base.yaml
deleted file mode 100644
index f5548982..00000000
--- a/puppet/services/glance-base.yaml
+++ /dev/null
@@ -1,126 +0,0 @@
-heat_template_version: ocata
-
-description: >
- OpenStack Glance Common settings with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- CephClientUserName:
- default: openstack
- type: string
- Debug:
- default: ''
- description: Set to True to enable debugging on all services.
- type: string
- GlanceNotifierStrategy:
- description: Strategy to use for Glance notification queue
- type: string
- default: noop
- GlanceLogFile:
- description: The filepath of the file to use for logging messages from Glance.
- type: string
- default: ''
- GlancePassword:
- description: The password for the glance service and db account, used by the glance services.
- type: string
- hidden: true
- GlanceBackend:
- default: swift
- description: The short name of the Glance backend to use. Should be one
- of swift, rbd, or file
- type: string
- constraints:
- - allowed_values: ['swift', 'file', 'rbd']
- GlanceNfsEnabled:
- default: false
- description: >
- When using GlanceBackend 'file', mount NFS share for image storage.
- type: boolean
- GlanceNfsShare:
- default: ''
- description: >
- NFS share to mount for image storage (when GlanceNfsEnabled is true)
- type: string
- GlanceNfsOptions:
- default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
- description: >
- NFS mount options for image storage (when GlanceNfsEnabled is true)
- type: string
- GlanceRbdPoolName:
- default: images
- type: string
- RabbitPassword:
- description: The password for RabbitMQ
- type: string
- hidden: true
- RabbitUserName:
- default: guest
- description: The username for RabbitMQ
- type: string
- RabbitClientPort:
- default: 5672
- description: Set rabbit subscriber port, change this if using SSL
- type: number
- RabbitClientUseSSL:
- default: false
- description: >
- Rabbit client subscriber parameter to specify
- an SSL connection to the RabbitMQ host.
- type: string
- KeystoneRegion:
- type: string
- default: 'regionOne'
- description: Keystone region for endpoint
-
-outputs:
- role_data:
- description: Role data for the Glance common role.
- value:
- service_name: glance_base
- config_settings:
- glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
- glance_log_file: {get_param: GlanceLogFile}
- glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
- glance::backend::swift::swift_store_user: service:glance
- glance::backend::swift::swift_store_key: {get_param: GlancePassword}
- glance::backend::swift::swift_store_create_container_on_put: true
- glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
- glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
- glance_backend: {get_param: GlanceBackend}
- glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
- glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
- glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
- glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- glance::notify::rabbitmq::notification_driver: messagingv2
- tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
- tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
- tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
- service_config_settings:
- keystone:
- glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
- glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
- glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
- glance::keystone::auth::password: {get_param: GlancePassword }
- glance::keystone::auth::region: {get_param: KeystoneRegion}
- glance::keystone::auth::tenant: 'service'
- mysql:
- glance::db::mysql::password: {get_param: GlancePassword}
- glance::db::mysql::user: glance
- glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- glance::db::mysql::dbname: glance
- glance::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml
index 08a939a6..cd323703 100644
--- a/puppet/services/gnocchi-api.yaml
+++ b/puppet/services/gnocchi-api.yaml
@@ -44,6 +44,12 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ GnocchiApiPolicies:
+ description: |
+ A hash of policies to configure for Gnocchi API.
+ e.g. { gnocchi-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
@@ -83,10 +89,13 @@ outputs:
gnocchi::api::enabled: true
gnocchi::api::enable_proxy_headers_parsing: true
gnocchi::api::service_name: 'httpd'
- gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ gnocchi::policy::policies: {get_param: GnocchiApiPolicies}
+ gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword}
gnocchi::keystone::authtoken::project_name: 'service'
+ gnocchi::keystone::authtoken::user_domain_name: 'Default'
+ gnocchi::keystone::authtoken::project_domain_name: 'Default'
gnocchi::wsgi::apache::ssl: {get_param: EnableInternalTLS}
gnocchi::wsgi::apache::servername:
str_replace:
@@ -103,10 +112,6 @@ outputs:
# internal_api_subnet - > IP/CIDR
gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
gnocchi::wsgi::apache::wsgi_process_display_name: 'gnocchi_wsgi'
-
- gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
- gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri]}
step_config: |
include ::tripleo::profile::base::gnocchi::api
service_config_settings:
diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml
index c6310056..dc6daece 100644
--- a/puppet/services/gnocchi-base.yaml
+++ b/puppet/services/gnocchi-base.yaml
@@ -32,10 +32,6 @@ parameters:
CephClientUserName:
default: openstack
type: string
- KeystoneRegion:
- type: string
- default: 'regionOne'
- description: Keystone region for endpoint
RedisPassword:
description: The password for the redis service account.
type: string
@@ -70,8 +66,9 @@ outputs:
- '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
gnocchi::db::sync::extra_opts: '--skip-storage'
gnocchi::storage::swift::swift_user: 'service:gnocchi'
- gnocchi::storage::swift::swift_auth_version: 2
+ gnocchi::storage::swift::swift_auth_version: 3
gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword}
+ gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneV3Internal, uri]}
gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName}
gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName}
gnocchi::storage::ceph::ceph_keyring:
diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml
index 483f0a45..c4d44853 100644
--- a/puppet/services/heat-api-cfn.yaml
+++ b/puppet/services/heat-api-cfn.yaml
@@ -38,8 +38,23 @@ parameters:
default:
tag: openstack.heat.api.cfn
path: /var/log/heat/heat-api-cfn.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]}
resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
HeatBase:
type: ./heat-base.yaml
properties:
@@ -59,19 +74,32 @@ outputs:
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- - heat::api_cfn::workers: {get_param: HeatWorkers}
- tripleo.heat_api_cfn.firewall_rules:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.heat_api_cfn.firewall_rules:
'125 heat_cfn':
dport:
- 8000
- 13800
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
+ heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+ heat::wsgi::apache_api_cfn::ssl: {get_param: EnableInternalTLS}
+ heat::api_cfn::service_name: 'httpd'
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+ heat::wsgi::apache_api_cfn::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+ -
+ if:
+ - heat_workers_zero
+ - {}
+ - heat::wsgi::apache_api_cfn::workers: {get_param: HeatWorkers}
step_config: |
include ::tripleo::profile::base::heat::api_cfn
service_config_settings:
@@ -94,7 +122,16 @@ outputs:
shell: /usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\bactive\b'
when: heat_api_cfn_enabled.rc == 0
tags: step0,validation
- - name: Stop heat_api_cfn service
+ - name: check for heat_api_cfn running under apache (post upgrade)
tags: step1
- when: heat_api_cfn_enabled.rc == 0
- service: name=openstack-heat-api-cfn state=stopped
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_cfn_wsgi"
+ register: heat_api_cfn_apache
+ ignore_errors: true
+ - name: Stop heat_api_cfn service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: heat_api_cfn_apache.rc == 0
+ - name: Stop and disable heat_api_cfn service (pre-upgrade not under httpd)
+ tags: step1
+          when: heat_api_cfn_enabled.rc == 0
+ service: name=openstack-heat-api-cfn state=stopped enabled=no
diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml
index 8879bcb2..7f8fa1fe 100644
--- a/puppet/services/heat-api-cloudwatch.yaml
+++ b/puppet/services/heat-api-cloudwatch.yaml
@@ -30,8 +30,23 @@ parameters:
default:
tag: openstack.heat.api.cloudwatch
path: /var/log/heat/heat-api-cloudwatch.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]}
resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
HeatBase:
type: ./heat-base.yaml
properties:
@@ -51,19 +66,34 @@ outputs:
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- - heat::api_cloudwatch::workers: {get_param: HeatWorkers}
- tripleo.heat_api_cloudwatch.firewall_rules:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.heat_api_cloudwatch.firewall_rules:
'125 heat_cloudwatch':
dport:
- 8003
- 13003
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
+ heat::api_cloudwatch::bind_host:
+ get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]
+ heat::wsgi::apache_api_cloudwatch::ssl: {get_param: EnableInternalTLS}
+ heat::api_cloudwatch::service_name: 'httpd'
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- heat::api_cloudwatch::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api_cloudwatch::bind_host:
+ get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]
+ heat::wsgi::apache_api_cloudwatch::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]}
+ -
+ if:
+ - heat_workers_zero
+ - {}
+ - heat::wsgi::apache_api_cloudwatch::workers: {get_param: HeatWorkers}
step_config: |
include ::tripleo::profile::base::heat::api_cloudwatch
upgrade_tasks:
@@ -76,7 +106,16 @@ outputs:
shell: /usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\bactive\b'
when: heat_api_cloudwatch_enabled.rc == 0
tags: step0,validation
- - name: Stop heat_api_cloudwatch service
+ - name: check for heat_api_cloudwatch running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_cloudwatch_wsgi"
+ register: heat_api_cloudwatch_apache
+ ignore_errors: true
+ - name: Stop heat_api_cloudwatch service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: heat_api_cloudwatch_apache.rc == 0
+ - name: Stop and disable heat_api_cloudwatch service (pre-upgrade not under httpd)
tags: step1
when: heat_api_cloudwatch_enabled.rc == 0
- service: name=openstack-heat-api-cloudwatch state=stopped
+ service: name=openstack-heat-api-cloudwatch state=stopped enabled=no
diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml
index 2464011b..f8128bb8 100644
--- a/puppet/services/heat-api.yaml
+++ b/puppet/services/heat-api.yaml
@@ -38,8 +38,29 @@ parameters:
default:
tag: openstack.heat.api
path: /var/log/heat/heat-api.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ HeatApiPolicies:
+ description: |
+ A hash of policies to configure for Heat API.
+ e.g. { heat-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
+
+conditions:
+ heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]}
resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
HeatBase:
type: ./heat-base.yaml
properties:
@@ -59,19 +80,33 @@ outputs:
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- - heat::api::workers: {get_param: HeatWorkers}
- tripleo.heat_api.firewall_rules:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.heat_api.firewall_rules:
'125 heat_api':
dport:
- 8004
- 13004
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
+ heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
+ heat::policy::policies: {get_param: HeatApiPolicies}
+ heat::api::service_name: 'httpd'
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ -
+ if:
+ - heat_workers_zero
+ - {}
+ - heat::wsgi::apache_api::workers: {get_param: HeatWorkers}
step_config: |
include ::tripleo::profile::base::heat::api
service_config_settings:
@@ -94,7 +129,16 @@ outputs:
shell: /usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\bactive\b'
when: heat_api_enabled.rc == 0
tags: step0,validation
- - name: Stop heat_api service
+ - name: check for heat_api running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_wsgi"
+ register: heat_api_apache
+ ignore_errors: true
+ - name: Stop heat_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: heat_api_apache.rc == 0
+ - name: Stop and disable heat_api service (pre-upgrade not under httpd)
tags: step1
when: heat_api_enabled.rc == 0
- service: name=openstack-heat-api state=stopped
+ service: name=openstack-heat-api state=stopped enabled=no
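[Editor's note] The three Heat API hunks above move heat-api, heat-api-cfn and heat-api-cloudwatch under httpd/mod_wsgi and only set the Apache worker count when HeatWorkers is non-zero (the heat_workers_zero condition); when it is 0, the puppet module's own default applies. A hedged environment sketch using the parameters added here, values illustrative:

  parameter_defaults:
    HeatWorkers: 4
    EnableInternalTLS: false
    HeatApiPolicies:
      heat-context_is_admin:
        key: context_is_admin
        value: 'role:admin'
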
diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml
index e83a9edd..6ada9c25 100644
--- a/puppet/services/heat-base.yaml
+++ b/puppet/services/heat-base.yaml
@@ -125,7 +125,9 @@ outputs:
value: 'role:admin'
heat::rabbit_heartbeat_timeout_threshold: 60
heat::keystone::authtoken::project_name: 'service'
- heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ heat::keystone::authtoken::user_domain_name: 'Default'
+ heat::keystone::authtoken::project_domain_name: 'Default'
+ heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
heat::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
heat::keystone::authtoken::password: {get_param: HeatPassword}
heat::keystone::domain::domain_name: 'heat_stack'
diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml
index a166f3a7..98dac4c9 100644
--- a/puppet/services/heat-engine.yaml
+++ b/puppet/services/heat-engine.yaml
@@ -112,7 +112,11 @@ outputs:
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/heat'
- '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
- heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
+ heat::keystone_ec2_uri:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, KeystoneV3Internal, uri]}
+ - '/ec2tokens'
heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
heat::engine::auth_encryption_key:
yaql:
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index 60b009a8..8fb13c16 100644
--- a/puppet/services/horizon.yaml
+++ b/puppet/services/horizon.yaml
@@ -40,6 +40,10 @@ parameters:
type: string
hidden: true
default: ''
+ HorizonSecureCookies:
+ description: Set CSRF_COOKIE_SECURE / SESSION_COOKIE_SECURE in Horizon
+ type: boolean
+ default: true
MemcachedIPv6:
default: false
description: Enable IPv6 features in Memcached.
@@ -78,7 +82,7 @@ outputs:
access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
options: ['FollowSymLinks','MultiViews']
horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]}
- horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
horizon::password_validator: {get_param: [HorizonPasswordValidator]}
horizon::password_validator_help: {get_param: [HorizonPasswordValidatorHelp]}
horizon::secret_key:
@@ -88,6 +92,7 @@ outputs:
passwords:
- {get_param: HorizonSecret}
- {get_param: [DefaultPasswords, horizon_secret]}
+ horizon::secure_cookies: {get_param: [HorizonSecureCookies]}
memcached_ipv6: {get_param: MemcachedIPv6}
-
if:
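[Editor's note] HorizonSecureCookies defaults to true, which sets CSRF_COOKIE_SECURE / SESSION_COOKIE_SECURE and therefore assumes Horizon is reached over HTTPS. For a plain-HTTP test deployment the flag would need to be relaxed, e.g. (illustrative only):

  parameter_defaults:
    HorizonSecureCookies: false
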
diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml
index 7aab6f8d..1f18cb1b 100644
--- a/puppet/services/ironic-api.yaml
+++ b/puppet/services/ironic-api.yaml
@@ -29,6 +29,12 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ IronicApiPolicies:
+ description: |
+ A hash of policies to configure for Ironic API.
+ e.g. { ironic-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
IronicBase:
@@ -49,8 +55,10 @@ outputs:
- get_attr: [IronicBase, role_data, config_settings]
- ironic::api::authtoken::password: {get_param: IronicPassword}
ironic::api::authtoken::project_name: 'service'
+ ironic::api::authtoken::user_domain_name: 'Default'
+ ironic::api::authtoken::project_domain_name: 'Default'
ironic::api::authtoken::username: 'ironic'
- ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ironic::api::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
@@ -62,6 +70,7 @@ outputs:
ironic::api::port: {get_param: [EndpointMap, IronicInternal, port]}
# This is used to build links in responses
ironic::api::public_endpoint: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
+ ironic::policy::policies: {get_param: IronicApiPolicies}
tripleo.ironic_api.firewall_rules:
'133 ironic api':
dport:
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index f9547bef..be910d10 100644
--- a/puppet/services/ironic-conductor.yaml
+++ b/puppet/services/ironic-conductor.yaml
@@ -32,6 +32,15 @@ parameters:
created yet) and should be changed to an actual UUID in
a post-deployment stack update.
type: string
+ IronicDefaultNetworkInterface:
+ default: 'flat'
+ description: Network interface implementation to use by default.
+ Set to "flat" (the default) to use one flat provider network.
+ Set to "neutron" to make Ironic interact with the Neutron
+ ML2 driver to enable other network types and certain
+ advances networking features. Requires
+                 advanced networking features. Requires
+ type: string
IronicEnabledDrivers:
default: ['pxe_ipmitool', 'pxe_drac', 'pxe_ilo']
description: Enabled Ironic drivers
@@ -44,6 +53,19 @@ parameters:
default: 8088
description: Port to use for serving images when iPXE is used.
type: string
+ IronicPassword:
+ description: The password for the Ironic service and db account, used by the Ironic services
+ type: string
+ hidden: true
+ IronicProvisioningNetwork:
+ default: 'provisioning'
+ description: Name or UUID of the *overcloud* network used for provisioning
+ of bare metal nodes, if IronicDefaultNetworkInterface is
+ set to "neutron". The default value of "provisioning" can be
+ left during the initial deployment (when no networks are
+ created yet) and should be changed to an actual UUID in
+ a post-deployment stack update.
+ type: string
MonitoringSubscriptionIronicConductor:
default: 'overcloud-ironic-conductor'
type: string
@@ -65,11 +87,10 @@ outputs:
config_settings:
map_merge:
- get_attr: [IronicBase, role_data, config_settings]
- # FIXME: I have no idea why neutron_url is in "api" manifest
- - ironic::api::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
- ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
+ - ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase}
ironic::conductor::cleaning_network: {get_param: IronicCleaningNetwork}
+ ironic::conductor::provisioning_network: {get_param: IronicProvisioningNetwork}
ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers}
# We need an endpoint containing a real IP, not a VIP here
ironic_conductor_http_host: {get_param: [ServiceNetMap, IronicNetwork]}
@@ -91,6 +112,8 @@ outputs:
# NOTE(dtantsur): UEFI only works with iPXE currently for us
ironic::drivers::pxe::uefi_pxe_config_template: '$pybasedir/drivers/modules/ipxe_config.template'
ironic::drivers::pxe::uefi_pxe_bootfile_name: 'ipxe.efi'
+ ironic::drivers::interfaces::enabled_network_interfaces: ['flat', 'neutron']
+ ironic::drivers::interfaces::default_network_interface: {get_param: IronicDefaultNetworkInterface}
tripleo.ironic_conductor.firewall_rules:
'134 ironic conductor TFTP':
dport: 69
@@ -104,7 +127,40 @@ outputs:
# the VIP, but rather a real IP of the host.
ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
-
+ # Credentials to access other services
+ ironic::glance::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::glance::username: 'ironic'
+ ironic::glance::password: {get_param: IronicPassword}
+ ironic::glance::project_name: 'service'
+ ironic::glance::user_domain_name: 'Default'
+ ironic::glance::project_domain_name: 'Default'
+ ironic::neutron::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::neutron::username: 'ironic'
+ ironic::neutron::password: {get_param: IronicPassword}
+ ironic::neutron::project_name: 'service'
+ ironic::neutron::user_domain_name: 'Default'
+ ironic::neutron::project_domain_name: 'Default'
+ ironic::service_catalog::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::service_catalog::username: 'ironic'
+ ironic::service_catalog::password: {get_param: IronicPassword}
+ ironic::service_catalog::project_name: 'service'
+ ironic::service_catalog::user_domain_name: 'Default'
+ ironic::service_catalog::project_domain_name: 'Default'
+ ironic::swift::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::swift::username: 'ironic'
+ ironic::swift::password: {get_param: IronicPassword}
+ ironic::swift::project_name: 'service'
+ ironic::swift::user_domain_name: 'Default'
+ ironic::swift::project_domain_name: 'Default'
+ # ironic-inspector support is not implemented, but let's configure
+ # the credentials for consistency.
+ ironic::drivers::inspector::enabled: false
+ ironic::drivers::inspector::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::drivers::inspector::username: 'ironic'
+ ironic::drivers::inspector::password: {get_param: IronicPassword}
+ ironic::drivers::inspector::project_name: 'service'
+ ironic::drivers::inspector::user_domain_name: 'Default'
+ ironic::drivers::inspector::project_domain_name: 'Default'
step_config: |
include ::tripleo::profile::base::ironic::conductor
upgrade_tasks:
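[Editor's note] The conductor hunk enables the 'flat' and 'neutron' network interfaces and wires service credentials (glance, neutron, swift, service catalog, inspector) for the conductor. A sketch of how the new parameters might be set once the overcloud provisioning network exists; the UUIDs below are placeholders and, per the parameter descriptions, are expected to be filled in by a post-deployment stack update:

  parameter_defaults:
    IronicDefaultNetworkInterface: neutron
    IronicProvisioningNetwork: 11111111-2222-3333-4444-555555555555   # placeholder UUID
    IronicCleaningNetwork: 11111111-2222-3333-4444-555555555555       # placeholder UUID
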
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
index fec455d1..2a335b67 100644
--- a/puppet/services/kernel.yaml
+++ b/puppet/services/kernel.yaml
@@ -22,6 +22,10 @@ parameters:
default: 1048576
description: Configures sysctl kernel.pid_max key
type: number
+ KernelDisableIPv6:
+ default: 0
+ description: Configures sysctl net.ipv6.{default/all}.disable_ipv6 keys
+ type: number
outputs:
role_data:
@@ -31,7 +35,7 @@ outputs:
config_settings:
kernel_modules:
nf_conntrack: {}
- ip_conntrack_proto_sctp: {}
+ nf_conntrack_proto_sctp: {}
sysctl_settings:
net.ipv4.tcp_keepalive_intvl:
value: 1
@@ -39,10 +43,28 @@ outputs:
value: 5
net.ipv4.tcp_keepalive_time:
value: 5
+ net.ipv4.conf.default.send_redirects:
+ value: 0
+ net.ipv4.conf.all.send_redirects:
+ value: 0
+ net.ipv4.conf.default.accept_redirects:
+ value: 0
+ net.ipv4.conf.default.secure_redirects:
+ value: 0
+ net.ipv4.conf.all.secure_redirects:
+ value: 0
+ net.ipv4.conf.default.log_martians:
+ value: 1
+ net.ipv4.conf.all.log_martians:
+ value: 1
net.nf_conntrack_max:
value: 500000
net.netfilter.nf_conntrack_max:
value: 500000
+ net.ipv6.conf.default.disable_ipv6:
+ value: {get_param: KernelDisableIPv6}
+ net.ipv6.conf.all.disable_ipv6:
+ value: {get_param: KernelDisableIPv6}
# prevent neutron bridges from autoconfiguring ipv6 addresses
net.ipv6.conf.all.accept_ra:
value: 0
@@ -52,9 +74,17 @@ outputs:
value: 0
net.ipv6.conf.default.autoconf:
value: 0
+ net.ipv6.conf.default.accept_redirects:
+ value: 0
+ net.ipv6.conf.all.accept_redirects:
+ value: 0
net.core.netdev_max_backlog:
value: 10000
kernel.pid_max:
value: {get_param: KernelPidMax}
+ kernel.dmesg_restrict:
+ value: 1
+ fs.suid_dumpable:
+ value: 0
step_config: |
include ::tripleo::profile::base::kernel
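[Editor's note] KernelDisableIPv6 feeds both net.ipv6.conf.default.disable_ipv6 and net.ipv6.conf.all.disable_ipv6 above. A one-line sketch for IPv4-only nodes, assuming nothing on the node relies on IPv6:

  parameter_defaults:
    KernelDisableIPv6: 1
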
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index f40c8d99..8a0e750d 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -35,7 +35,7 @@ parameters:
KeystoneTokenProvider:
description: The keystone token format
type: string
- default: 'uuid'
+ default: 'fernet'
constraints:
- allowed_values: ['uuid', 'fernet']
ServiceNetMap:
@@ -119,27 +119,27 @@ parameters:
Cron to purge expired tokens - Ensure
default: 'present'
KeystoneCronTokenFlushMinute:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Minute
default: '1'
KeystoneCronTokenFlushHour:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Hour
- default: '0'
+ default: '*'
KeystoneCronTokenFlushMonthday:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Month Day
default: '*'
KeystoneCronTokenFlushMonth:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Month
default: '*'
KeystoneCronTokenFlushWeekday:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Week Day
default: '*'
@@ -158,6 +158,22 @@ parameters:
description: >
Cron to purge expired tokens - User
default: 'keystone'
+ KeystonePolicies:
+ description: |
+ A hash of policies to configure for Keystone.
+ e.g. { keystone-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
+ KeystoneLDAPDomainEnable:
+ description: Trigger to call ldap_backend puppet keystone define.
+ type: boolean
+ default: False
+ KeystoneLDAPBackendConfigs:
+ description: Hash containing the configurations for the LDAP backends
+ configured in keystone.
+ type: json
+ default: {}
+ hidden: true
resources:
@@ -171,6 +187,7 @@ resources:
conditions:
keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]}
+ keystone_ldap_domain_enabled: {equals: [{get_param: KeystoneLDAPDomainEnable}, True]}
outputs:
role_data:
@@ -197,6 +214,7 @@ outputs:
keystone::admin_token: {get_param: AdminToken}
keystone::admin_password: {get_param: AdminPassword}
keystone::roles::admin::password: {get_param: AdminPassword}
+ keystone::policy::policies: {get_param: KeystonePolicies}
keystone_ssl_certificate: {get_param: KeystoneSSLCertificate}
keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
keystone::token_provider: {get_param: KeystoneTokenProvider}
@@ -232,7 +250,7 @@ outputs:
keystone::cron::token_flush::maxdelay: 3600
keystone::roles::admin::service_tenant: 'service'
keystone::roles::admin::admin_tenant: 'admin'
- keystone::cron::token_flush::destination: '/dev/null'
+ keystone::cron::token_flush::destination: '/var/log/keystone/keystone-tokenflush.log'
keystone::config::keystone_config:
ec2/driver:
value: 'keystone.contrib.ec2.backends.sql.Ec2'
@@ -293,6 +311,15 @@ outputs:
keystone::cron::token_flush::maxdelay: {get_param: KeystoneCronTokenFlushMaxDelay}
keystone::cron::token_flush::destination: {get_param: KeystoneCronTokenFlushDestination}
keystone::cron::token_flush::user: {get_param: KeystoneCronTokenFlushUser}
+ -
+ if:
+ - keystone_ldap_domain_enabled
+ -
+ tripleo::profile::base::keystone::ldap_backend_enable: True
+ keystone::using_domain_config: True
+ tripleo::profile::base::keystone::ldap_backends_config:
+ get_param: KeystoneLDAPBackendConfigs
+ - {}
step_config: |
include ::tripleo::profile::base::keystone
@@ -305,6 +332,13 @@ outputs:
keystone::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ horizon:
+ if:
+ - keystone_ldap_domain_enabled
+ -
+ horizon::keystone_multidomain_support: true
+ horizon::keystone_default_domain: 'Default'
+ - {}
# Ansible tasks to handle upgrade
upgrade_tasks:
- name: Stop keystone service (running under httpd)
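[Editor's note] KeystoneLDAPDomainEnable drives the keystone_ldap_domain_enabled condition above: it turns on keystone::using_domain_config, passes KeystoneLDAPBackendConfigs through to the ldap_backend define, and switches Horizon to multi-domain support. A hedged sketch of a single LDAP-backed domain; the domain name, URL, DNs and password are placeholders, and the option keys follow keystone's [ldap] section rather than anything shown in this patch:

  parameter_defaults:
    KeystoneLDAPDomainEnable: true
    KeystoneLDAPBackendConfigs:
      tripleoldap:                                # illustrative domain name
        url: ldap://ldap.example.com              # placeholder
        user: cn=readonly,dc=example,dc=com       # placeholder bind DN
        password: ReadOnlyPassword                # placeholder
        suffix: dc=example,dc=com
        user_tree_dn: ou=Users,dc=example,dc=com
        user_objectclass: person
        user_id_attribute: cn
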
diff --git a/puppet/services/manila-api.yaml b/puppet/services/manila-api.yaml
index 7b78c82e..4061ca28 100644
--- a/puppet/services/manila-api.yaml
+++ b/puppet/services/manila-api.yaml
@@ -48,9 +48,11 @@ outputs:
map_merge:
- get_attr: [ManilaBase, role_data, config_settings]
- manila::keystone::authtoken::password: {get_param: ManilaPassword}
- manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ manila::keystone::authtoken::auth_uri: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
manila::keystone::authtoken::project_name: 'service'
+ manila::keystone::authtoken::user_domain_name: 'Default'
+ manila::keystone::authtoken::project_domain_name: 'Default'
tripleo.manila_api.firewall_rules:
'150 manila':
dport:
diff --git a/puppet/services/mistral-api.yaml b/puppet/services/mistral-api.yaml
index 1c7d6bd3..02c69392 100644
--- a/puppet/services/mistral-api.yaml
+++ b/puppet/services/mistral-api.yaml
@@ -22,6 +22,12 @@ parameters:
default: 1
description: The number of workers for the mistral-api.
type: number
+ MistralApiPolicies:
+ description: |
+ A hash of policies to configure for Mistral API.
+ e.g. { mistral-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
MistralBase:
@@ -41,6 +47,7 @@ outputs:
- get_attr: [MistralBase, role_data, config_settings]
- mistral::api::api_workers: {get_param: MistralWorkers}
mistral::api::bind_host: {get_param: [ServiceNetMap, MistralApiNetwork]}
+ mistral::policy::policies: {get_param: MistralApiPolicies}
tripleo.mistral_api.firewall_rules:
'133 mistral':
dport:
diff --git a/puppet/services/mistral-base.yaml b/puppet/services/mistral-base.yaml
index e1030346..d5c21694 100644
--- a/puppet/services/mistral-base.yaml
+++ b/puppet/services/mistral-base.yaml
@@ -74,7 +74,11 @@ outputs:
mistral::keystone_password: {get_param: MistralPassword}
mistral::keystone_tenant: 'service'
mistral::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- mistral::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
+ mistral::keystone_ec2_uri:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, KeystoneV3Internal, uri]}
+ - '/ec2tokens'
mistral::identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
service_config_settings:
keystone:
diff --git a/puppet/services/monitoring/sensu-base.yaml b/puppet/services/monitoring/sensu-base.yaml
index a8303a59..2fa1569c 100644
--- a/puppet/services/monitoring/sensu-base.yaml
+++ b/puppet/services/monitoring/sensu-base.yaml
@@ -29,7 +29,18 @@ parameters:
default: false
description: >
RabbitMQ client subscriber parameter to specify an SSL connection
- to the RabbitMQ host.
+      to the RabbitMQ host. Setting MonitoringRabbitUseSSL to true without
+      providing a private key or cert chain enables SSL transport without
+      certificate-based authentication.
+ type: string
+ MonitoringRabbitSSLPrivateKey:
+ default: ''
+ description: Private key to be used by Sensu to connect to RabbitMQ host.
+ type: string
+ MonitoringRabbitSSLCertChain:
+ default: ''
+ description: >
+ Private SSL cert chain to be used by Sensu to connect to RabbitMQ host.
type: string
MonitoringRabbitPassword:
description: The RabbitMQ password used for monitoring purposes.
@@ -71,6 +82,8 @@ outputs:
sensu::rabbitmq_password: {get_param: MonitoringRabbitPassword}
sensu::rabbitmq_port: {get_param: MonitoringRabbitPort}
sensu::rabbitmq_ssl: {get_param: MonitoringRabbitUseSSL}
+ sensu::rabbitmq_ssl_private_key: {get_param: MonitoringRabbitSSLPrivateKey}
+ sensu::rabbitmq_ssl_cert_chain: {get_param: MonitoringRabbitSSLCertChain}
sensu::rabbitmq_user: {get_param: MonitoringRabbitUserName}
sensu::rabbitmq_vhost: {get_param: MonitoringRabbitVhost}
sensu::redact: {get_param: SensuRedactVariables}
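[Editor's note] Per the updated MonitoringRabbitUseSSL description, SSL transport can now be enabled with or without client-certificate authentication. A sketch with certificate auth; the values are placeholders (depending on the puppet-sensu release they may be file paths or inline PEM content):

  parameter_defaults:
    MonitoringRabbitUseSSL: true
    MonitoringRabbitSSLPrivateKey: /etc/sensu/ssl/key.pem    # placeholder
    MonitoringRabbitSSLCertChain: /etc/sensu/ssl/cert.pem    # placeholder
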
diff --git a/puppet/services/monitoring/sensu-client.yaml b/puppet/services/monitoring/sensu-client.yaml
index aba2b1ed..4b5f36ac 100644
--- a/puppet/services/monitoring/sensu-client.yaml
+++ b/puppet/services/monitoring/sensu-client.yaml
@@ -81,4 +81,4 @@ outputs:
- name: Install sensu package if it was disabled
tags: step3
yum: name=sensu state=latest
- when: sensu_client.rc != 0
+ when: sensu_client_enabled.rc != 0
diff --git a/puppet/services/network/contrail-vrouter.yaml b/puppet/services/network/contrail-vrouter.yaml
index db9f0836..0cd1f829 100644
--- a/puppet/services/network/contrail-vrouter.yaml
+++ b/puppet/services/network/contrail-vrouter.yaml
@@ -27,7 +27,7 @@ parameters:
description: vRouter physical interface
type: string
ContrailVrouterGateway:
- default: '192.0.2.1'
+ default: '192.168.24.1'
description: vRouter default gateway
type: string
ContrailVrouterNetmask:
diff --git a/puppet/services/neutron-api.yaml b/puppet/services/neutron-api.yaml
index bb191ff0..9b9d1c72 100644
--- a/puppet/services/neutron-api.yaml
+++ b/puppet/services/neutron-api.yaml
@@ -57,6 +57,15 @@ parameters:
default:
tag: openstack.neutron.api
path: /var/log/neutron/server.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ NeutronApiPolicies:
+ description: |
+ A hash of policies to configure for Neutron API.
+ e.g. { neutron-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
# DEPRECATED: the following options are deprecated and are currently maintained
# for backwards compatibility. They will be removed in the Ocata cycle.
@@ -71,10 +80,6 @@ parameters:
removed in Ocata. Future releases will enable L3 HA by default if it is
appropriate for the deployment type. Alternate mechanisms will be
available to override.
- EnableInternalTLS:
- type: boolean
- default: false
-
parameter_groups:
- label: deprecated
description: |
@@ -128,18 +133,21 @@ outputs:
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ovs_neutron'
- '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
- neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ neutron::policy::policies: {get_param: NeutronApiPolicies}
+ neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
neutron::server::api_workers: {get_param: NeutronWorkers}
neutron::server::rpc_workers: {get_param: NeutronWorkers}
neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
neutron::server::enable_proxy_headers_parsing: true
neutron::keystone::authtoken::password: {get_param: NeutronPassword}
- neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] }
+ neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneInternal, uri_no_suffix ] }
neutron::server::notifications::tenant_name: 'service'
neutron::server::notifications::project_name: 'service'
neutron::server::notifications::password: {get_param: NovaPassword}
neutron::keystone::authtoken::project_name: 'service'
+ neutron::keystone::authtoken::user_domain_name: 'Default'
+ neutron::keystone::authtoken::project_domain_name: 'Default'
neutron::server::sync_db: true
tripleo.neutron_api.firewall_rules:
'114 neutron api':
@@ -202,3 +210,5 @@ outputs:
tags: step1
when: neutron_server_enabled.rc == 0
service: name=neutron-server state=stopped
+ metadata_settings:
+ get_attr: [TLSProxyBase, role_data, metadata_settings]
diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml
index 43657bd9..b41cb3cc 100644
--- a/puppet/services/neutron-base.yaml
+++ b/puppet/services/neutron-base.yaml
@@ -22,10 +22,18 @@ parameters:
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
+ DatabaseSyncTimeout:
+ default: 300
+    description: Timeout, in seconds, for the database sync operation
+ type: number
NeutronDhcpAgentsPerNetwork:
type: number
- default: 3
+ default: 0
description: The number of neutron dhcp agents to schedule per network
+ NeutronDnsDomain:
+ type: string
+ default: openstacklocal
+ description: Domain to use for building the hostnames.
NeutronCorePlugin:
default: 'ml2'
description: |
@@ -44,10 +52,10 @@ parameters:
description: Set to True to enable debugging on all services.
EnableConfigPurge:
type: boolean
- default: true
+ default: false
description: >
- Remove configuration that is not generated by TripleO. Setting
- to false may result in configuration remnants after updates/upgrades.
+ Remove configuration that is not generated by TripleO. Used to avoid
+ configuration remnants after upgrades.
NeutronGlobalPhysnetMtu:
type: number
default: 1500
@@ -72,24 +80,33 @@ parameters:
via parameter_defaults in the resource registry.
type: json
+conditions:
+ dhcp_agents_zero: {equals : [{get_param: NeutronDhcpAgentsPerNetwork}, 0]}
+
outputs:
role_data:
description: Role data for the Neutron base service.
value:
service_name: neutron_base
config_settings:
- neutron::rabbit_password: {get_param: RabbitPassword}
- neutron::rabbit_user: {get_param: RabbitUserName}
- neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- neutron::rabbit_port: {get_param: RabbitClientPort}
- neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
- neutron::core_plugin: {get_param: NeutronCorePlugin}
- neutron::service_plugins: {get_param: NeutronServicePlugins}
- neutron::debug: {get_param: Debug}
- neutron::purge_config: {get_param: EnableConfigPurge}
- neutron::allow_overlapping_ips: true
- neutron::rabbit_heartbeat_timeout_threshold: 60
- neutron::host: '%{::fqdn}'
- neutron::db::database_db_max_retries: -1
- neutron::db::database_max_retries: -1
- neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu}
+ map_merge:
+ - neutron::rabbit_password: {get_param: RabbitPassword}
+ neutron::rabbit_user: {get_param: RabbitUserName}
+ neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ neutron::rabbit_port: {get_param: RabbitClientPort}
+ neutron::core_plugin: {get_param: NeutronCorePlugin}
+ neutron::service_plugins: {get_param: NeutronServicePlugins}
+ neutron::debug: {get_param: Debug}
+ neutron::purge_config: {get_param: EnableConfigPurge}
+ neutron::allow_overlapping_ips: true
+ neutron::dns_domain: {get_param: NeutronDnsDomain}
+ neutron::rabbit_heartbeat_timeout_threshold: 60
+ neutron::host: '%{::fqdn}'
+ neutron::db::database_db_max_retries: -1
+ neutron::db::database_max_retries: -1
+ neutron::db::sync::db_sync_timeout: {get_param: DatabaseSyncTimeout}
+ neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu}
+ - if:
+ - dhcp_agents_zero
+ - {}
+ - tripleo::profile::base::neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
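[Editor's note] NeutronDhcpAgentsPerNetwork now defaults to 0 and, via the dhcp_agents_zero condition, the hiera key tripleo::profile::base::neutron::dhcp_agents_per_network is simply not set in that case, leaving the Neutron default in effect. Operators wanting the previous behaviour set it explicitly, e.g. (values illustrative):

  parameter_defaults:
    NeutronDhcpAgentsPerNetwork: 3
    NeutronDnsDomain: example.internal   # illustrative domain
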
diff --git a/puppet/services/neutron-bgpvpn-api.yaml b/puppet/services/neutron-bgpvpn-api.yaml
new file mode 100644
index 00000000..f01cf6f1
--- /dev/null
+++ b/puppet/services/neutron-bgpvpn-api.yaml
@@ -0,0 +1,34 @@
+heat_template_version: ocata
+
+description: >
+ BGPVPN API service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ BgpvpnServiceProvider:
+ default: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
+ description: Backend to use as a service provider for BGPVPN
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the BGPVPN role.
+ value:
+ service_name: neutron_bgpvpn_api
+ config_settings:
+ neutron::services::bgpvpn::service_providers: {get_param: BgpvpnServiceProvider}
+ step_config: |
+ include ::tripleo::profile::base::neutron::bgpvpn
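[Editor's note] This new service only takes effect once it is mapped in the resource registry and added to a role; the registry key below is an assumption and is not defined anywhere in this patch. Illustrative sketch:

  resource_registry:
    # key name assumed -- confirm against overcloud-resource-registry-puppet.j2.yaml
    OS::TripleO::Services::NeutronBgpVpnApi: ../puppet/services/neutron-bgpvpn-api.yaml
  parameter_defaults:
    # replace the Dummy driver with the real backend's service provider entry point
    BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
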
diff --git a/puppet/services/neutron-bigswitch-agent.yaml b/puppet/services/neutron-bigswitch-agent.yaml
new file mode 100644
index 00000000..8f56e0a9
--- /dev/null
+++ b/puppet/services/neutron-bigswitch-agent.yaml
@@ -0,0 +1,29 @@
+heat_template_version: ocata
+
+description: >
+ Installs bigswitch agent and enables the services
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+
+outputs:
+ role_data:
+ description: Configure the bigswitch agent services
+ value:
+ service_name: neutron_bigswitch_agent
+ step_config: |
+ include ::tripleo::profile::base::neutron::agents::bigswitch
diff --git a/puppet/services/neutron-compute-plugin-nuage.yaml b/puppet/services/neutron-compute-plugin-nuage.yaml
index 04431e28..ea717690 100644
--- a/puppet/services/neutron-compute-plugin-nuage.yaml
+++ b/puppet/services/neutron-compute-plugin-nuage.yaml
@@ -22,6 +22,10 @@ parameters:
description: The password for the nova service account, used by nova-api.
type: string
hidden: true
+ NuageMetadataPort:
+    description: TCP port on which to listen for metadata server requests
+ type: string
+ default: '9697'
outputs:
role_data:
@@ -32,5 +36,11 @@ outputs:
tripleo::profile::base::neutron::agents::nuage::nova_os_tenant_name: 'service'
tripleo::profile::base::neutron::agents::nuage::nova_os_password: {get_param: NovaPassword}
tripleo::profile::base::neutron::agents::nuage::nova_auth_ip: {get_param: [EndpointMap, KeystoneInternal, host]}
+ tripleo.neutron_compute_plugin_nuage.firewall_rules:
+ '118 neutron vxlan networks':
+ proto: 'udp'
+ dport: 4789
+ '100 metadata agent':
+ dport: {get_param: NuageMetadataPort}
step_config: |
include ::tripleo::profile::base::neutron::agents::nuage
diff --git a/puppet/services/neutron-compute-plugin-ovn.yaml b/puppet/services/neutron-compute-plugin-ovn.yaml
index e3a4da99..0dca29ab 100644
--- a/puppet/services/neutron-compute-plugin-ovn.yaml
+++ b/puppet/services/neutron-compute-plugin-ovn.yaml
@@ -48,6 +48,7 @@ outputs:
ovn::controller::ovn_encap_type: {get_param: OVNTunnelEncapType}
ovn::controller::ovn_encap_ip: {get_param: [ServiceNetMap, NeutronApiNetwork]}
ovn::controller::ovn_bridge_mappings: {get_param: NeutronBridgeMappings}
+ nova::compute::force_config_drive: true
tripleo.neutron_compute_plugin_ovn.firewall_rules:
'118 neutron vxlan networks':
proto: 'udp'
diff --git a/puppet/services/neutron-l2gw-api.yaml b/puppet/services/neutron-l2gw-api.yaml
new file mode 100644
index 00000000..b6f0d281
--- /dev/null
+++ b/puppet/services/neutron-l2gw-api.yaml
@@ -0,0 +1,54 @@
+heat_template_version: ocata
+
+description: >
+ L2 Gateway service plugin configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ L2gwServiceDefaultInterfaceName:
+ default: 'FortyGigE1/0/1'
+ description: default interface name of the L2 gateway
+ type: string
+ L2gwServiceDefaultDeviceName:
+ default: 'Switch1'
+ description: default device name of the L2 gateway
+ type: string
+ L2gwServiceQuotaL2Gateway:
+ default: 5
+ description: quota of the L2 gateway
+ type: number
+ L2gwServicePeriodicMonitoringInterval:
+ default: 5
+    description: The periodic interval at which the plugin checks for the monitoring L2 gateway agent
+ type: number
+ L2gwServiceProvider:
+ default: ["L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default"]
+ description: Backend to use as a service provider for L2 Gateway
+ type: comma_delimited_list
+
+outputs:
+ role_data:
+ description: Role data for the L2 Gateway role.
+ value:
+ service_name: neutron_l2gw_api
+ config_settings:
+ neutron::services::l2gw::default_interface_name: {get_param: L2gwServiceDefaultInterfaceName}
+ neutron::services::l2gw::default_device_name: {get_param: L2gwServiceDefaultDeviceName}
+ neutron::services::l2gw::quota_l2_gateway: {get_param: L2gwServiceQuotaL2Gateway}
+ neutron::services::l2gw::periodic_monitoring_interval: {get_param: L2gwServicePeriodicMonitoringInterval}
+ neutron::services::l2gw::service_providers: {get_param: L2gwServiceProvider}
+ step_config: |
+ include tripleo::profile::base::neutron::l2gw
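[Editor's note] Same caveat as for the BGPVPN service above: the template must be wired into the resource registry and a role before these parameters do anything. A parameter sketch built only from the defaults declared in this file; the device and interface names are whatever the physical L2 gateway actually exposes:

  parameter_defaults:
    L2gwServiceDefaultDeviceName: Switch1
    L2gwServiceDefaultInterfaceName: FortyGigE1/0/1
    L2gwServiceQuotaL2Gateway: 10
    L2gwServiceProvider: ['L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default']
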
diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml
index 01471ba2..ef2485d4 100644
--- a/puppet/services/neutron-ovs-agent.yaml
+++ b/puppet/services/neutron-ovs-agent.yaml
@@ -82,6 +82,9 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ OpenVswitchUpgrade:
+ type: ./openvswitch-upgrade.yaml
+
outputs:
role_data:
description: Role data for the Neutron OVS agent service.
@@ -121,16 +124,22 @@ outputs:
step_config: |
include ::tripleo::profile::base::neutron::ovs
upgrade_tasks:
- - name: Check if neutron_ovs_agent is deployed
- command: systemctl is-enabled neutron-openvswitch-agent
- tags: common
- ignore_errors: True
- register: neutron_ovs_agent_enabled
- - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
- shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
- when: neutron_ovs_agent_enabled.rc == 0
- tags: step0,validation
- - name: Stop neutron_ovs_agent service
- tags: step1
- when: neutron_ovs_agent_enabled.rc == 0
- service: name=neutron-openvswitch-agent state=stopped
+ yaql:
+ expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
+ data:
+ ovs_upgrade:
+ get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ neutron_ovs_upgrade:
+ - name: Check if neutron_ovs_agent is deployed
+ command: systemctl is-enabled neutron-openvswitch-agent
+ tags: common
+ ignore_errors: True
+ register: neutron_ovs_agent_enabled
+ - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
+ when: neutron_ovs_agent_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop neutron_ovs_agent service
+ tags: step1
+ when: neutron_ovs_agent_enabled.rc == 0
+ service: name=neutron-openvswitch-agent state=stopped
diff --git a/puppet/services/neutron-ovs-dpdk-agent.yaml b/puppet/services/neutron-ovs-dpdk-agent.yaml
index e25bc495..80516fe6 100644
--- a/puppet/services/neutron-ovs-dpdk-agent.yaml
+++ b/puppet/services/neutron-ovs-dpdk-agent.yaml
@@ -62,6 +62,9 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ OpenVswitchUpgrade:
+ type: ./openvswitch-upgrade.yaml
+
outputs:
role_data:
description: Role data for the Neutron OVS DPDK Agent service.
@@ -69,7 +72,10 @@ outputs:
service_name: neutron_ovs_dpdk_agent
config_settings:
map_merge:
- - get_attr: [NeutronOvsAgent, role_data, config_settings]
+ - map_replace:
+ - get_attr: [NeutronOvsAgent, role_data, config_settings]
+ - keys:
+ tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
- neutron::agents::ml2::ovs::enable_dpdk: true
neutron::agents::ml2::ovs::datapath_type: {get_param: NeutronDatapathType}
neutron::agents::ml2::ovs::vhostuser_socket_dir: {get_param: NeutronVhostuserSocketDir}
@@ -79,3 +85,5 @@ outputs:
vswitch::dpdk::socket_mem: {get_param: NeutronDpdkSocketMemory}
vswitch::dpdk::driver_type: {get_param: NeutronDpdkDriverType}
step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]}
+ upgrade_tasks:
+ get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
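[Editor's note] map_replace with a keys argument renames map keys without touching their values; here it keeps the inherited OVS agent settings but files the firewall rules under the DPDK agent's own key so the two services do not collide. A standalone illustration of the rename, not taken from the patch:

  heat_template_version: ocata
  description: Minimal demo of the map_replace key rename used above
  outputs:
    renamed:
      value:
        map_replace:
          - tripleo.neutron_ovs_agent.firewall_rules:
              '118 neutron vxlan networks': {proto: 'udp', dport: 4789}
          - keys:
              tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
  # resolves to:
  #   tripleo.neutron_ovs_dpdk_agent.firewall_rules:
  #     '118 neutron vxlan networks': {proto: 'udp', dport: 4789}
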
diff --git a/puppet/services/neutron-plugin-ml2-odl.yaml b/puppet/services/neutron-plugin-ml2-odl.yaml
new file mode 100644
index 00000000..acacadfa
--- /dev/null
+++ b/puppet/services/neutron-plugin-ml2-odl.yaml
@@ -0,0 +1,45 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Neutron ML2/OpenDaylight plugin configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OpenDaylightPortBindingController:
+ description: OpenDaylight port binding controller
+ type: string
+ default: 'network-topology'
+
+resources:
+
+ NeutronMl2Base:
+ type: ./neutron-plugin-ml2.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron ML2/ODL plugin.
+ value:
+ service_name: neutron_plugin_ml2_odl
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronMl2Base, role_data, config_settings]
+ - neutron::plugins::ml2::opendaylight::port_binding_controller: {get_param: OpenDaylightPortBindingController}
+ step_config: |
+ include ::tripleo::profile::base::neutron::plugins::ml2
diff --git a/puppet/services/neutron-plugin-nuage.yaml b/puppet/services/neutron-plugin-nuage.yaml
index e09cd704..6229a3f1 100644
--- a/puppet/services/neutron-plugin-nuage.yaml
+++ b/puppet/services/neutron-plugin-nuage.yaml
@@ -19,10 +19,6 @@ parameters:
via parameter_defaults in the resource registry.
type: json
# Config specific parameters, to be provided via parameter_defaults
- NeutronNuageOSControllerIp:
- description: IP address of the OpenStack Controller
- type: string
-
NeutronNuageNetPartitionName:
description: Specifies the title that you will see on the VSD
type: string
@@ -76,8 +72,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- - neutron::plugins::nuage::nuage_oscontroller_ip: {get_param: NeutronNuageOSControllerIp}
- neutron::plugins::nuage::nuage_net_partition_name: {get_param: NeutronNuageNetPartitionName}
+ - neutron::plugins::nuage::nuage_net_partition_name: {get_param: NeutronNuageNetPartitionName}
neutron::plugins::nuage::nuage_vsd_ip: {get_param: NeutronNuageVSDIp}
neutron::plugins::nuage::nuage_vsd_username: {get_param: NeutronNuageVSDUsername}
neutron::plugins::nuage::nuage_vsd_password: {get_param: NeutronNuageVSDPassword}
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index f27b53f2..21910cc4 100644
--- a/puppet/services/nova-api.yaml
+++ b/puppet/services/nova-api.yaml
@@ -62,6 +62,12 @@ parameters:
default: 300
description: Timeout for Nova db sync
type: number
+ NovaApiPolicies:
+ description: |
+ A hash of policies to configure for Nova API.
+ e.g. { nova-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
conditions:
nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
@@ -110,8 +116,10 @@ outputs:
- 13774
- 8775
nova::keystone::authtoken::project_name: 'service'
+ nova::keystone::authtoken::user_domain_name: 'Default'
+ nova::keystone::authtoken::project_domain_name: 'Default'
nova::keystone::authtoken::password: {get_param: NovaPassword}
- nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
nova::api::enabled: true
nova::api::default_floating_pool: {get_param: NovaDefaultFloatingPool}
@@ -143,6 +151,7 @@ outputs:
nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
nova::api::instance_name_template: {get_param: InstanceNameTemplate}
nova_enable_db_purge: {get_param: NovaEnableDBPurge}
+ nova::policy::policies: {get_param: NovaApiPolicies}
-
if:
- nova_workers_zero
@@ -218,14 +227,14 @@ outputs:
- name: Run puppet apply to set transport_url in nova.conf
tags: step5
when: is_bootstrap_node
- command: puppet apply --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+ command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
register: puppet_apply_nova_api_upgrade
failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
changed_when: puppet_apply_nova_api_upgrade.rc == 2
- name: Setup cell_v2 (map cell0)
tags: step5
when: is_bootstrap_node
- command: nova-manage cell_v2 map_cell0
+ shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
- name: Setup cell_v2 (create default cell)
tags: step5
when: is_bootstrap_node
@@ -241,15 +250,15 @@ outputs:
command: nova-manage db sync
async: {get_param: NovaDbSyncTimeout}
poll: 10
- - name: Setup cell_v2 (migrate hosts)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage cell_v2 map_cell_and_hosts
- name: Setup cell_v2 (get cell uuid)
tags: step5
when: is_bootstrap_node
shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
register: nova_api_cell_uuid
+ - name: Setup cell_v2 (migrate hosts)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
- name: Setup cell_v2 (migrate instances)
tags: step5
when: is_bootstrap_node
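
The NovaApiPolicies hash is consumed exactly as its description suggests; a minimal parameter_defaults sketch mirroring that example:

parameter_defaults:
  NovaApiPolicies:
    nova-context_is_admin:
      key: context_is_admin
      value: 'role:admin'
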
diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml
index ceacb0b2..9e7f0145 100644
--- a/puppet/services/nova-base.yaml
+++ b/puppet/services/nova-base.yaml
@@ -52,16 +52,20 @@ parameters:
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
+ DatabaseSyncTimeout:
+ default: 300
+     description: Timeout for database sync operations
+ type: number
Debug:
type: string
default: ''
description: Set to True to enable debugging on all services.
EnableConfigPurge:
type: boolean
- default: true
+ default: false
description: >
- Remove configuration that is not generated by TripleO. Setting
- to false may result in configuration remnants after updates/upgrades.
+ Remove configuration that is not generated by TripleO. Used to avoid
+ configuration remnants after upgrades.
NovaIPv6:
default: false
description: Enable IPv6 features in Nova
@@ -151,6 +155,16 @@ outputs:
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/nova'
- '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ nova::cell0_database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://nova:'
+ - {get_param: NovaPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/nova_cell0'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
nova::api_database_connection:
list_join:
- ''
@@ -188,6 +202,8 @@ outputs:
nova::network::neutron::neutron_auth_type: 'v3password'
nova::db::database_db_max_retries: -1
nova::db::database_max_retries: -1
+ nova::db::sync::db_sync_timeout: {get_param: DatabaseSyncTimeout}
+ nova::db::sync_api::db_sync_timeout: {get_param: DatabaseSyncTimeout}
nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
nova::use_ipv6: {get_param: NovaIPv6}
nova::network::neutron::neutron_ovs_bridge: {get_param: NovaOVSBridge}
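
The new DatabaseSyncTimeout feeds both nova::db::sync::db_sync_timeout and nova::db::sync_api::db_sync_timeout, so a slow database only needs one override (the value is illustrative):

parameter_defaults:
  # Allow the nova db syncs up to 15 minutes instead of the 300 default.
  DatabaseSyncTimeout: 900
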
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index d208bede..b1711436 100644
--- a/puppet/services/nova-compute.yaml
+++ b/puppet/services/nova-compute.yaml
@@ -52,7 +52,7 @@ parameters:
For different formats, refer to the nova.conf documentation for
pci_passthrough_whitelist configuration
type: json
- default: {}
+ default: ''
NovaVcpuPinSet:
description: >
A list or range of physical CPU cores to reserve for virtual machine
@@ -79,6 +79,13 @@ parameters:
type: string
description: Nova Compute upgrade level
default: auto
+ MigrationSshKey:
+ type: json
+ description: >
+ SSH key for migration.
+ Expects a dictionary with keys 'public_key' and 'private_key'.
+      Values should match the contents of the SSH public/private key files.
+ default: {}
resources:
NovaBase:
@@ -101,12 +108,17 @@ outputs:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- nova::compute::libvirt::manage_libvirt_services: false
- nova::compute::pci_passthrough: {get_param: NovaPCIPassthrough}
+ nova::compute::pci_passthrough:
+ str_replace:
+ template: "JSON_PARAM"
+ params:
+ JSON_PARAM: {get_param: NovaPCIPassthrough}
nova::compute::vcpu_pin_set: {get_param: NovaVcpuPinSet}
nova::compute::reserved_host_memory: {get_param: NovaReservedHostMemory}
# we manage migration in nova common puppet profile
nova::compute::libvirt::migration_support: false
tripleo::profile::base::nova::manage_migration: true
+ tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey}
tripleo::profile::base::nova::nova_compute_enabled: true
nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
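
MigrationSshKey follows the dictionary shape spelled out in its description and is typically generated by the deployment tooling rather than set by hand; supplied manually it would look roughly like this (key material is a placeholder):

parameter_defaults:
  MigrationSshKey:
    public_key: 'ssh-rsa AAAA<placeholder> migration'
    private_key: |
      -----BEGIN RSA PRIVATE KEY-----
      <placeholder>
      -----END RSA PRIVATE KEY-----
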
diff --git a/puppet/services/nova-ironic.yaml b/puppet/services/nova-ironic.yaml
index 5eb2170a..f1d8dff7 100644
--- a/puppet/services/nova-ironic.yaml
+++ b/puppet/services/nova-ironic.yaml
@@ -44,10 +44,14 @@ outputs:
nova::compute::vnc_enabled: false
nova::ironic::common::password: {get_param: IronicPassword}
nova::ironic::common::project_name: 'service'
- nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
nova::ironic::common::username: 'ironic'
nova::ironic::common::api_endpoint: {get_param: [EndpointMap, IronicInternal, uri]}
nova::network::neutron::dhcp_domain: ''
nova::scheduler::filter::scheduler_host_manager: 'ironic_host_manager'
step_config: |
include tripleo::profile::base::nova::compute::ironic
+ upgrade_tasks:
+ - name: Stop openstack-nova-compute service
+ tags: step1
+ service: name=openstack-nova-compute state=stopped enabled=no
diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml
index faf1ae48..21a5e78a 100644
--- a/puppet/services/nova-libvirt.yaml
+++ b/puppet/services/nova-libvirt.yaml
@@ -32,6 +32,36 @@ parameters:
MonitoringSubscriptionNovaLibvirt:
default: 'overcloud-nova-libvirt'
type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ UseTLSTransportForLiveMigration:
+ type: boolean
+ default: true
+ description: If set to true and if EnableInternalTLS is enabled, it will
+ set the libvirt URI's transport to tls and configure the
+ relevant keys for libvirt.
+ LibvirtCACert:
+ type: string
+ default: '/etc/ipa/ca.crt'
+ description: This specifies the CA certificate to use for TLS in libvirt.
+ This file will be symlinked to the default CA path in libvirt,
+                 which is /etc/pki/CA/cacert.pem. Note that due to limitations in
+ GNU TLS, which is the TLS backend for libvirt, the file must
+ be less than 65K (so we can't use the system's CA bundle). The
+ current default reflects TripleO's default CA, which is
+ FreeIPA. It will only be used if internal TLS is enabled.
+
+conditions:
+
+ use_tls_for_live_migration:
+ and:
+ - equals:
+ - {get_param: EnableInternalTLS}
+ - true
+ - equals:
+ - {get_param: UseTLSTransportForLiveMigration}
+ - true
resources:
NovaBase:
@@ -66,10 +96,61 @@ outputs:
tripleo.nova_libvirt.firewall_rules:
'200 nova_libvirt':
dport:
- - 16509
- 16514
- '49152-49215'
- '5900-5999'
+ -
+ if:
+ - use_tls_for_live_migration
+ -
+ generate_service_certificates: true
+ tripleo::profile::base::nova::libvirt_tls: true
+ nova::migration::libvirt::live_migration_inbound_addr:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ tripleo::certmonger::ca::libvirt::origin_ca_pem:
+ get_param: LibvirtCACert
+ tripleo::certmonger::libvirt_dirs::certificate_dir: '/etc/pki/libvirt'
+ tripleo::certmonger::libvirt_dirs::key_dir: '/etc/pki/libvirt/private'
+ libvirt_certificates_specs:
+ libvirt-server-cert:
+ service_certificate: '/etc/pki/libvirt/servercert.pem'
+ service_key: '/etc/pki/libvirt/private/serverkey.pem'
+ hostname:
+ str_replace:
+ template: "%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ principal:
+ str_replace:
+ template: "libvirt/%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ libvirt-client-cert:
+ service_certificate: '/etc/pki/libvirt/clientcert.pem'
+ service_key: '/etc/pki/libvirt/private/clientkey.pem'
+ hostname:
+ str_replace:
+ template: "%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ principal:
+ str_replace:
+ template: "libvirt/%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ - {}
step_config: |
include tripleo::profile::base::nova::libvirt
+ metadata_settings:
+ if:
+ - use_tls_for_live_migration
+ -
+ - service: libvirt
+ network: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ type: node
+ - null
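
Given the use_tls_for_live_migration condition, the certmonger and live-migration hieradata above only materializes when both flags are true; a sketch of the controlling parameters, with LibvirtCACert shown at its default:

parameter_defaults:
  EnableInternalTLS: true
  # Defaults to true; listed for clarity.
  UseTLSTransportForLiveMigration: true
  # Must stay under ~65K because of the GNU TLS limitation noted above.
  LibvirtCACert: '/etc/ipa/ca.crt'
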
diff --git a/puppet/services/octavia-api.yaml b/puppet/services/octavia-api.yaml
index 909a3030..2f898a67 100644
--- a/puppet/services/octavia-api.yaml
+++ b/puppet/services/octavia-api.yaml
@@ -34,6 +34,12 @@ parameters:
default:
tag: openstack.octavia.api
path: /var/log/octavia/api.log
+ OctaviaApiPolicies:
+ description: |
+ A hash of policies to configure for Octavia API.
+ e.g. { octavia-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
@@ -57,6 +63,7 @@ outputs:
map_merge:
- get_attr: [OctaviaBase, role_data, config_settings]
- octavia::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ octavia::policy::policies: {get_param: OctaviaApiPolicies}
octavia::db::database_connection:
list_join:
- ''
diff --git a/puppet/services/octavia-base.yaml b/puppet/services/octavia-base.yaml
index b537a2bc..db15aa15 100644
--- a/puppet/services/octavia-base.yaml
+++ b/puppet/services/octavia-base.yaml
@@ -24,10 +24,10 @@ parameters:
description: Set to True to enable debugging on all services.
EnableConfigPurge:
type: boolean
- default: true
+ default: false
description: >
- Remove configuration that is not generated by TripleO. Setting
- to false may result in configuration remnants after updates/upgrades.
+ Remove configuration that is not generated by TripleO. Used to avoid
+ configuration remnants after upgrades.
RabbitPassword:
description: The password for RabbitMQ
type: string
@@ -56,7 +56,7 @@ outputs:
octavia::debug: {get_param: Debug}
octavia::purge_config: {get_param: EnableConfigPurge}
octavia::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- tripleo::profile::base::octavia::rabbit_user: {get_param: RabbitUserName}
- tripleo::profile::base::octavia::rabbit_password: {get_param: RabbitPassword}
- tripleo::profile::base::octavia::rabbit_port: {get_param: RabbitClientPort}
+ octavia::rabbit_userid: {get_param: RabbitUserName}
+ octavia::rabbit_password: {get_param: RabbitPassword}
+ octavia::rabbit_port: {get_param: RabbitClientPort}
diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml
index 1e7aa479..6882aeff 100644
--- a/puppet/services/opendaylight-api.yaml
+++ b/puppet/services/opendaylight-api.yaml
@@ -28,7 +28,7 @@ parameters:
OpenDaylightFeatures:
description: List of features to install with ODL
type: comma_delimited_list
- default: ["odl-netvirt-openstack","odl-netvirt-ui"]
+ default: ["odl-netvirt-openstack","odl-netvirt-ui","odl-jolokia"]
OpenDaylightApiVirtualIP:
type: string
default: ''
@@ -59,12 +59,36 @@ outputs:
opendaylight::extra_features: {get_param: OpenDaylightFeatures}
opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP}
opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
- opendaylight::nb_connection_protocol: {get_param: OpenDayLightConnectionProtocol}
+ opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
tripleo.opendaylight_api.firewall_rules:
'137 opendaylight api':
dport:
- {get_param: OpenDaylightPort}
- 6640
- 6653
+ - 2550
step_config: |
include tripleo::profile::base::neutron::opendaylight
+ upgrade_tasks:
+ - name: Check if opendaylight is deployed
+ command: systemctl is-enabled opendaylight
+ tags: common
+ ignore_errors: True
+ register: opendaylight_enabled
+ - name: "PreUpgrade step0,validation: Check service opendaylight is running"
+ shell: /usr/bin/systemctl show 'opendaylight' --property ActiveState | grep '\bactive\b'
+ when: opendaylight_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop opendaylight service
+ tags: step1
+ when: opendaylight_enabled.rc == 0
+ service: name=opendaylight state=stopped
+ - name: Removes ODL snapshots, data, journal directories
+ file:
+ state: absent
+ path: /opt/opendaylight/{{item}}
+ tags: step2
+ with_items:
+ - snapshots
+ - data
+ - journal
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index cfec3c48..ed572b4d 100644
--- a/puppet/services/opendaylight-ovs.yaml
+++ b/puppet/services/opendaylight-ovs.yaml
@@ -48,6 +48,10 @@ parameters:
default: {}
type: json
+resources:
+ OpenVswitchUpgrade:
+ type: ./openvswitch-upgrade.yaml
+
outputs:
role_data:
description: Role data for the OpenDaylight service.
@@ -60,11 +64,7 @@ outputs:
opendaylight_check_url: {get_param: OpenDaylightCheckURL}
opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
- neutron::plugins::ovs::opendaylight::provider_mappings:
- str_replace:
- template: MAPPINGS
- params:
- MAPPINGS: {get_param: OpenDaylightProviderMappings}
+ neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
tripleo.opendaylight_ovs.firewall_rules:
'118 neutron vxlan networks':
proto: 'udp'
@@ -73,3 +73,23 @@ outputs:
proto: 'gre'
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
+ upgrade_tasks:
+ yaql:
+ expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade
+ data:
+ ovs_upgrade:
+ get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ opendaylight_upgrade:
+ - name: Check if openvswitch is deployed
+ command: systemctl is-enabled openvswitch
+ tags: common
+ ignore_errors: True
+ register: openvswitch_enabled
+ - name: "PreUpgrade step0,validation: Check service openvswitch is running"
+ shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b'
+ when: openvswitch_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop openvswitch service
+ tags: step1
+ when: openvswitch_enabled.rc == 0
+ service: name=openvswitch state=stopped
diff --git a/puppet/services/openvswitch-upgrade.yaml b/puppet/services/openvswitch-upgrade.yaml
new file mode 100644
index 00000000..fea1ba96
--- /dev/null
+++ b/puppet/services/openvswitch-upgrade.yaml
@@ -0,0 +1,50 @@
+heat_template_version: ocata
+
+description: >
+ Openvswitch package special handling for upgrade.
+
+outputs:
+ role_data:
+ description: Upgrade task for special handling of Openvswitch (OVS) upgrade.
+ value:
+ service_name: openvswitch_upgrade
+ upgrade_tasks:
+ - name: Check openvswitch version.
+ tags: step2
+ register: ovs_version
+ ignore_errors: true
+ shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
+ - name: Check openvswitch packaging.
+ tags: step2
+ shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
+ register: ovs_packaging_issue
+ ignore_errors: true
+ - block:
+ - name: "Ensure empty directory: emptying."
+ file:
+ state: absent
+ path: /root/OVS_UPGRADE
+ - name: "Ensure empty directory: creating."
+ file:
+ state: directory
+ path: /root/OVS_UPGRADE
+ owner: root
+ group: root
+ mode: 0750
+ - name: Download OVS packages.
+ command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
+ - name: Get rpm list for manual upgrade of OVS.
+ shell: ls -1 /root/OVS_UPGRADE/*.rpm
+ register: ovs_list_of_rpms
+ - name: Manual upgrade of OVS
+ shell: |
+ rpm -U --test {{item}} 2>&1 | grep "already installed" || \
+ rpm -U --replacepkgs --notriggerun --nopostun {{item}};
+ args:
+ chdir: /root/OVS_UPGRADE
+ with_items:
+ - "{{ovs_list_of_rpms.stdout_lines}}"
+ tags: step2
+ when: "'2.5.0-14' in '{{ovs_version.stdout}}'
+ or
+ ovs_packaging_issue|succeeded"
diff --git a/puppet/services/ovn-dbs.yaml b/puppet/services/ovn-dbs.yaml
index 7f81afde..6b8be77c 100644
--- a/puppet/services/ovn-dbs.yaml
+++ b/puppet/services/ovn-dbs.yaml
@@ -36,5 +36,11 @@ outputs:
ovn::northbound::port: {get_param: OVNNorthboundServerPort}
ovn::southbound::port: {get_param: OVNSouthboundServerPort}
ovn::northd::dbs_listen_ip: {get_param: [ServiceNetMap, OvnDbsNetwork]}
+ tripleo.ovn_dbs.firewall_rules:
+ '121 OVN DB server ports':
+ proto: 'tcp'
+ dport:
+ - {get_param: OVNNorthboundServerPort}
+ - {get_param: OVNSouthboundServerPort}
step_config: |
include ::tripleo::profile::base::neutron::ovn_northd
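
Because the new '121 OVN DB server ports' rule reuses the same port parameters, overriding the ports keeps the firewall in sync automatically; the values below are the conventional OVN NB/SB ports, shown only as an example:

parameter_defaults:
  OVNNorthboundServerPort: 6641
  OVNSouthboundServerPort: 6642
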
diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml
index 5be58c18..f7a0edf8 100644
--- a/puppet/services/pacemaker.yaml
+++ b/puppet/services/pacemaker.yaml
@@ -87,10 +87,16 @@ parameters:
\[(?<pid>[^ ]*)\]
(?<host>[^ ]*)
(?<message>.*)$/
+
+ EnableLoadBalancer:
+ default: true
+ description: Whether to deploy a LoadBalancer on the Controller
+ type: boolean
+
PacemakerResources:
type: comma_delimited_list
description: List of resources managed by pacemaker
- default: ['rabbitmq','haproxy']
+ default: ['rabbitmq', 'galera']
outputs:
role_data:
@@ -135,6 +141,8 @@ outputs:
- name: Check pacemaker cluster running before upgrade
tags: step0,validation
pacemaker_cluster: state=online check_and_fail=true
+ async: 30
+ poll: 4
- name: Stop pacemaker cluster
tags: step2
pacemaker_cluster: state=offline
@@ -143,5 +151,13 @@ outputs:
pacemaker_cluster: state=online
- name: Check pacemaker resource
tags: step4
- pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=500
+ pacemaker_is_active:
+ resource: "{{ item }}"
+ max_wait: 500
with_items: {get_param: PacemakerResources}
+ - name: Check pacemaker haproxy resource
+ tags: step4
+ pacemaker_is_active:
+ resource: haproxy
+ max_wait: 500
+ when: {get_param: EnableLoadBalancer}
diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml
index b018df35..caada950 100644
--- a/puppet/services/pacemaker/rabbitmq.yaml
+++ b/puppet/services/pacemaker/rabbitmq.yaml
@@ -68,3 +68,5 @@ outputs:
fi
pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
when: is_bootstrap_node and migrate_rabbit_ha_mode
+ metadata_settings:
+ get_attr: [RabbitMQServiceBase, role_data, metadata_settings]
diff --git a/puppet/services/panko-api.yaml b/puppet/services/panko-api.yaml
index eed98257..43e7aa18 100644
--- a/puppet/services/panko-api.yaml
+++ b/puppet/services/panko-api.yaml
@@ -24,6 +24,12 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ PankoApiPolicies:
+ description: |
+ A hash of policies to configure for Panko API.
+ e.g. { panko-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
PankoBase:
@@ -58,6 +64,7 @@ outputs:
"%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, PankoApiNetwork]}
+ panko::policy::policies: {get_param: PankoApiPolicies}
panko::api::service_name: 'httpd'
panko::api::enable_proxy_headers_parsing: true
tripleo.panko_api.firewall_rules:
diff --git a/puppet/services/panko-base.yaml b/puppet/services/panko-base.yaml
index 998e64ee..fda13450 100644
--- a/puppet/services/panko-base.yaml
+++ b/puppet/services/panko-base.yaml
@@ -50,8 +50,10 @@ outputs:
panko::debug: {get_param: Debug}
panko::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::keystone::authtoken::project_name: 'service'
+ panko::keystone::authtoken::user_domain_name: 'Default'
+ panko::keystone::authtoken::project_domain_name: 'Default'
panko::keystone::authtoken::password: {get_param: PankoPassword}
- panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::auth::auth_password: {get_param: PankoPassword}
panko::auth::auth_region: 'regionOne'
diff --git a/puppet/services/qdr.yaml b/puppet/services/qdr.yaml
new file mode 100644
index 00000000..f8746cec
--- /dev/null
+++ b/puppet/services/qdr.yaml
@@ -0,0 +1,60 @@
+heat_template_version: ocata
+
+description: >
+ Qpid dispatch router service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ RabbitUserName:
+ default: guest
+ description: The username for Qdr
+ type: string
+ RabbitPassword:
+ description: The password for Qdr
+ type: string
+ hidden: true
+ RabbitClientPort:
+ description: Listening port for Qdr
+ default: 5672
+ type: number
+ MonitoringSubscriptionQdr:
+ default: 'overcloud-qdr'
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the Qdr role.
+ value:
+ service_name: rabbitmq
+ monitoring_subscription: {get_param: MonitoringSubscriptionQdr}
+ global_config_settings:
+ messaging_notify_service_name: 'amqp'
+ messaging_rpc_service_name: 'amqp'
+ keystone::messaging::amqp::amqp_pre_settled: 'notify'
+ config_settings:
+ tripleo.rabbitmq.firewall_rules:
+ '109 qdr':
+ dport:
+ - {get_param: RabbitClientPort}
+ qdr::listener_addr: {get_param: [ServiceNetMap, QdrNetwork]}
+ # cannot pass qdr::listener_port directly because it needs to be a string
+ # we do the conversion in the puppet layer
+ tripleo::profile::base::qdr::qdr_listener_port: {get_param: RabbitClientPort}
+ tripleo::profile::base::qdr::qdr_username: {get_param: RabbitUserName}
+ tripleo::profile::base::qdr::qdr_password: {get_param: RabbitPassword}
+
+ step_config: |
+ include ::tripleo::profile::base::qdr
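
Since qdr.yaml reports service_name: rabbitmq, it is intended to be swapped in for the RabbitMQ service via the resource registry. A hedged sketch of such an environment file; the registry key and relative path follow the usual tripleo-heat-templates conventions and are assumptions, not part of this change:

resource_registry:
  # Alias the RabbitMQ service to the Qdr template so the AMQP 1.0
  # dispatch router provides the messaging backend.
  OS::TripleO::Services::RabbitMQ: ../puppet/services/qdr.yaml
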
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index 2c4ccbc9..47479783 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -48,6 +48,12 @@ parameters:
MonitoringSubscriptionRabbitmq:
default: 'overcloud-rabbitmq'
type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
outputs:
role_data:
@@ -56,51 +62,79 @@ outputs:
service_name: rabbitmq
monitoring_subscription: {get_param: MonitoringSubscriptionRabbitmq}
config_settings:
- rabbitmq::file_limit: {get_param: RabbitFDLimit}
- rabbitmq::default_user: {get_param: RabbitUserName}
- rabbitmq::default_pass: {get_param: RabbitPassword}
- rabbit_ipv6: {get_param: RabbitIPv6}
- tripleo.rabbitmq.firewall_rules:
- '109 rabbitmq':
- dport:
- - 4369
- - 5672
- - 25672
- rabbitmq::delete_guest_user: false
- rabbitmq::wipe_db_on_cookie_change: true
- rabbitmq::port: '5672'
- rabbitmq::package_provider: yum
- rabbitmq::package_source: undef
- rabbitmq::repos_ensure: false
- rabbitmq::tcp_keepalive: true
- rabbitmq_environment:
- NODE_PORT: ''
- NODE_IP_ADDRESS: ''
- RABBITMQ_NODENAME: "rabbit@%{::hostname}"
- RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
- 'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}"
- rabbitmq_kernel_variables:
- inet_dist_listen_min: '25672'
- inet_dist_listen_max: '25672'
- rabbitmq_config_variables:
- cluster_partition_handling: 'pause_minority'
- queue_master_locator: '<<"min-masters">>'
- loopback_users: '[]'
- rabbitmq::erlang_cookie:
- yaql:
- expression: $.data.passwords.where($ != '').first()
- data:
- passwords:
- - {get_param: RabbitCookie}
- - {get_param: [DefaultPasswords, rabbit_cookie]}
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
- # internal_api -> IP
- # internal_api_uri -> [IP]
- # internal_api_subnet - > IP/CIDR
- rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
- rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
+ map_merge:
+ -
+ rabbitmq::file_limit: {get_param: RabbitFDLimit}
+ rabbitmq::default_user: {get_param: RabbitUserName}
+ rabbitmq::default_pass: {get_param: RabbitPassword}
+ rabbit_ipv6: {get_param: RabbitIPv6}
+ tripleo.rabbitmq.firewall_rules:
+ '109 rabbitmq':
+ dport:
+ - 4369
+ - 5672
+ - 25672
+ rabbitmq::delete_guest_user: false
+ rabbitmq::wipe_db_on_cookie_change: true
+ rabbitmq::port: '5672'
+ rabbitmq::package_provider: yum
+ rabbitmq::package_source: undef
+ rabbitmq::repos_ensure: false
+ rabbitmq::tcp_keepalive: true
+ rabbitmq_environment:
+ NODE_PORT: ''
+ NODE_IP_ADDRESS: ''
+ RABBITMQ_NODENAME: "rabbit@%{::hostname}"
+ RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
+ 'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}"
+ rabbitmq_kernel_variables:
+ inet_dist_listen_min: '25672'
+ inet_dist_listen_max: '25672'
+ rabbitmq_config_variables:
+ cluster_partition_handling: 'pause_minority'
+ queue_master_locator: '<<"min-masters">>'
+ loopback_users: '[]'
+ rabbitmq::erlang_cookie:
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: RabbitCookie}
+ - {get_param: [DefaultPasswords, rabbit_cookie]}
+ # NOTE: bind IP is found in Heat replacing the network name with the
+ # local node IP for the given network; replacement examples
+ # (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet - > IP/CIDR
+ rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
+ rabbitmq::ssl: {get_param: EnableInternalTLS}
+ rabbitmq::ssl_port: '5672'
+ rabbitmq::ssl_depth: 1
+ rabbitmq::ssl_only: {get_param: EnableInternalTLS}
+ rabbitmq::ssl_interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ # TODO(jaosorior): Remove this once we set a proper default in
+ # puppet-tripleo
+ tripleo::profile::base::rabbitmq::enable_internal_tls: {get_param: EnableInternalTLS}
+ -
+ if:
+ - internal_tls_enabled
+ - generate_service_certificates: true
+ tripleo::profile::base::rabbitmq::certificate_specs:
+ service_certificate: '/etc/pki/tls/certs/rabbitmq.crt'
+ service_key: '/etc/pki/tls/private/rabbitmq.key'
+ hostname:
+ str_replace:
+ template: "%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ principal:
+ str_replace:
+ template: "rabbitmq/%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ - {}
step_config: |
include ::tripleo::profile::base::rabbitmq
upgrade_tasks:
@@ -110,4 +144,11 @@ outputs:
- name: Start rabbitmq service
tags: step4
service: name=rabbitmq-server state=started
-
+ metadata_settings:
+ if:
+ - internal_tls_enabled
+ -
+ - service: rabbitmq
+ network: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ type: node
+ - null
diff --git a/puppet/services/sahara-api.yaml b/puppet/services/sahara-api.yaml
index 96b3d6e3..d9f2115a 100644
--- a/puppet/services/sahara-api.yaml
+++ b/puppet/services/sahara-api.yaml
@@ -38,6 +38,12 @@ parameters:
default:
tag: openstack.sahara.api
path: /var/log/sahara/sahara-api.log
+ SaharaApiPolicies:
+ description: |
+ A hash of policies to configure for Sahara API.
+ e.g. { sahara-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
SaharaBase:
@@ -60,6 +66,7 @@ outputs:
map_merge:
- get_attr: [SaharaBase, role_data, config_settings]
- sahara::port: {get_param: [EndpointMap, SaharaInternal, port]}
+ sahara::policy::policies: {get_param: SaharaApiPolicies}
sahara::service::api::api_workers: {get_param: SaharaWorkers}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml
index 224989be..d5131f61 100644
--- a/puppet/services/sahara-base.yaml
+++ b/puppet/services/sahara-base.yaml
@@ -70,12 +70,14 @@ outputs:
sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
sahara::rabbit_port: {get_param: RabbitClientPort}
sahara::debug: {get_param: Debug}
+ # Remove admin_password when https://review.openstack.org/442619 is merged.
sahara::admin_password: {get_param: SaharaPassword}
- sahara::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- sahara::identity_uri: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
sahara::use_neutron: true
sahara::plugins: {get_param: SaharaPlugins}
sahara::rpc_backend: rabbit
- sahara::admin_tenant_name: 'service'
sahara::db::database_db_max_retries: -1
sahara::db::database_max_retries: -1
+ sahara::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ sahara::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ sahara::keystone::authtoken::password: {get_param: SaharaPassword}
+ sahara::keystone::authtoken::project_name: 'service'
diff --git a/puppet/services/securetty.yaml b/puppet/services/securetty.yaml
new file mode 100644
index 00000000..6d32fe82
--- /dev/null
+++ b/puppet/services/securetty.yaml
@@ -0,0 +1,36 @@
+heat_template_version: ocata
+
+description: >
+ Configure securetty values
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ TtyValues:
+ default: {}
+ description: Configures console values in securetty
+ type: json
+ constraints:
+ - length: { min: 1}
+
+outputs:
+ role_data:
+ description: Console data for the securetty
+ value:
+ service_name: securetty
+ config_settings:
+ tripleo::profile::base::securetty::tty_list: {get_param: TtyValues}
+ step_config: |
+ include ::tripleo::profile::base::securetty
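
TtyValues carries a min-length constraint, so enabling the securetty service implies passing a non-empty console list; the entries below are illustrative:

parameter_defaults:
  TtyValues:
    - console
    - tty1
    - tty2
    - tty3
    - tty4
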
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
index a2286d16..9820b431 100644
--- a/puppet/services/services.yaml
+++ b/puppet/services/services.yaml
@@ -90,14 +90,11 @@ outputs:
# fluentd user.
yaql:
expression: >
- set($.data.groups.flatten()).where($)
+ set(($.data.default + $.data.extra + $.data.role_data.where($ != null).select($.get('logging_groups'))).flatten()).where($)
data:
- groups:
- - [{get_attr: [LoggingConfiguration, LoggingDefaultGroups]}]
- - yaql:
- expression: list($.data.role_data.where($ != null).select($.get('logging_groups')).where($ != null))
- data: {role_data: {get_attr: [ServiceChain, role_data]}}
- - [{get_attr: [LoggingConfiguration, LoggingExtraGroups]}]
+ default: {get_attr: [LoggingConfiguration, LoggingDefaultGroups]}
+ extra: {get_attr: [LoggingConfiguration, LoggingExtraGroups]}
+ role_data: {get_attr: [ServiceChain, role_data]}
config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
global_config_settings:
map_merge:
diff --git a/puppet/services/sshd.yaml b/puppet/services/sshd.yaml
index 41e144a0..12998c33 100644
--- a/puppet/services/sshd.yaml
+++ b/puppet/services/sshd.yaml
@@ -29,6 +29,6 @@ outputs:
value:
service_name: sshd
config_settings:
- BannerText: {get_param: BannerText}
+ tripleo::profile::base::sshd::bannertext: {get_param: BannerText}
step_config: |
include ::tripleo::profile::base::sshd
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index 9b0d2de1..0ecc942c 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -31,9 +31,9 @@ parameters:
description: Timeout for requests going from swift-proxy to swift a/c/o services.
type: number
SwiftWorkers:
- default: 0
+ default: auto
description: Number of workers for Swift service.
- type: number
+ type: string
KeystoneRegion:
type: string
default: 'regionOne'
@@ -63,10 +63,14 @@ parameters:
Rabbit client subscriber parameter to specify
an SSL connection to the RabbitMQ host.
type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
conditions:
ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, True]}
+ use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
resources:
SwiftBase:
@@ -76,6 +80,14 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ TLSProxyBase:
+ type: OS::TripleO::Services::TLSProxyBase
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
outputs:
role_data:
description: Role data for the Swift proxy service.
@@ -85,7 +97,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [SwiftBase, role_data, config_settings]
-
+ - get_attr: [TLSProxyBase, role_data, config_settings]
- swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
swift::proxy::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
swift::proxy::authtoken::password: {get_param: SwiftPassword}
@@ -146,7 +158,22 @@ outputs:
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- swift::proxy::proxy_local_net_ip: {get_param: [ServiceNetMap, SwiftProxyNetwork]}
+ tripleo::profile::base::swift::proxy::tls_proxy_bind_ip:
+ get_param: [ServiceNetMap, SwiftProxyNetwork]
+ tripleo::profile::base::swift::proxy::tls_proxy_fqdn:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, SwiftProxyNetwork]}
+ tripleo::profile::base::swift::proxy::tls_proxy_port:
+ get_param: [EndpointMap, SwiftInternal, port]
+ swift::proxy::port: {get_param: [EndpointMap, SwiftInternal, port]}
+ swift::proxy::proxy_local_net_ip:
+ if:
+ - use_tls_proxy
+ - 'localhost'
+ - {get_param: [ServiceNetMap, SwiftProxyNetwork]}
step_config: |
include ::tripleo::profile::base::swift::proxy
service_config_settings:
@@ -169,3 +196,5 @@ outputs:
- name: Stop swift_proxy service
tags: step1
service: name=openstack-swift-proxy state=stopped
+ metadata_settings:
+ get_attr: [TLSProxyBase, role_data, metadata_settings]
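
With SwiftWorkers now a string defaulting to 'auto', an explicit worker count has to be quoted; e.g.:

parameter_defaults:
  # 'auto' (the new default) lets puppet pick the count; a fixed
  # number must now be passed as a string.
  SwiftWorkers: '3'
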
diff --git a/puppet/services/swift-ringbuilder.yaml b/puppet/services/swift-ringbuilder.yaml
index 2e3c818f..f62d5e18 100644
--- a/puppet/services/swift-ringbuilder.yaml
+++ b/puppet/services/swift-ringbuilder.yaml
@@ -42,6 +42,14 @@ parameters:
default: true
description: 'Use a local directory for Swift storage services when building rings'
type: boolean
+ SwiftRingGetTempurl:
+ default: ''
+ description: A temporary Swift URL to download rings from.
+ type: string
+ SwiftRingPutTempurl:
+ default: ''
+ description: A temporary Swift URL to upload rings to.
+ type: string
conditions:
swift_use_local_dir:
@@ -59,6 +67,8 @@ outputs:
value:
service_name: swift_ringbuilder
config_settings:
+ tripleo::profile::base::swift::ringbuilder::swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl}
+ tripleo::profile::base::swift::ringbuilder::swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl}
tripleo::profile::base::swift::ringbuilder::build_ring: {get_param: SwiftRingBuild}
tripleo::profile::base::swift::ringbuilder::replicas: {get_param: SwiftReplicas}
tripleo::profile::base::swift::ringbuilder::part_power: {get_param: SwiftPartPower}
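
The two new tempurl parameters are plain strings produced with Swift's temporary URL mechanism; supplied manually they would look roughly like this (host, account and signature are placeholders):

parameter_defaults:
  SwiftRingGetTempurl: 'http://192.0.2.1:8080/v1/AUTH_<account>/overcloud-rings/swift-rings.tar.gz?temp_url_sig=<sig>&temp_url_expires=<ts>'
  SwiftRingPutTempurl: 'http://192.0.2.1:8080/v1/AUTH_<account>/overcloud-rings/swift-rings.tar.gz?temp_url_sig=<sig>&temp_url_expires=<ts>'
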
diff --git a/puppet/services/tacker.yaml b/puppet/services/tacker.yaml
index 6f92066e..c14e061b 100644
--- a/puppet/services/tacker.yaml
+++ b/puppet/services/tacker.yaml
@@ -47,6 +47,12 @@ parameters:
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
+ TackerPolicies:
+ description: |
+ A hash of policies to configure for Tacker.
+ e.g. { tacker-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
outputs:
role_data:
@@ -75,8 +81,10 @@ outputs:
tacker::server::bind_host: {get_param: [ServiceNetMap, TackerApiNetwork]}
tacker::keystone::authtoken::project_name: 'service'
- tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ tacker::keystone::authtoken::user_domain_name: 'Default'
+ tacker::keystone::authtoken::project_domain_name: 'Default'
+ tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
tacker::db::mysql::password: {get_param: TackerPassword}
tacker::db::mysql::user: tacker
@@ -85,10 +93,12 @@ outputs:
tacker::db::mysql::allowed_hosts:
- '%'
- {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ tacker::policy::policies: {get_param: TackerPolicies}
service_config_settings:
keystone:
tacker::keystone::auth::tenant: 'service'
+ tacker::keystone::auth::region: {get_param: KeystoneRegion}
tacker::keystone::auth::password: {get_param: TackerPassword}
tacker::keystone::auth::public_url: {get_param: [EndpointMap, TackerPublic, uri]}
tacker::keystone::auth::internal_url: {get_param: [EndpointMap, TackerInternal, uri]}
diff --git a/puppet/services/tripleo-firewall.yaml b/puppet/services/tripleo-firewall.yaml
index 67e14d9c..ff2b067f 100644
--- a/puppet/services/tripleo-firewall.yaml
+++ b/puppet/services/tripleo-firewall.yaml
@@ -37,3 +37,9 @@ outputs:
tripleo::firewall::purge_firewall_rules: {get_param: PurgeFirewallRules}
step_config: |
include ::tripleo::firewall
+ upgrade_tasks:
+ - name: blank ipv6 rule before activating ipv6 firewall.
+ tags: step3
+ shell: cat /etc/sysconfig/ip6tables > /etc/sysconfig/ip6tables.n-o-upgrade; cat</dev/null>/etc/sysconfig/ip6tables
+ args:
+ creates: /etc/sysconfig/ip6tables.n-o-upgrade
diff --git a/puppet/services/vpp.yaml b/puppet/services/vpp.yaml
index 59866d39..7c8f8a28 100644
--- a/puppet/services/vpp.yaml
+++ b/puppet/services/vpp.yaml
@@ -42,6 +42,16 @@ outputs:
step_config: |
include ::tripleo::profile::base::vpp
upgrade_tasks:
+ - name: Check if vpp is deployed
+ command: systemctl is-enabled vpp
+ tags: common
+ ignore_errors: True
+ register: vpp_enabled
+ - name: "PreUpgrade step0,validation: Check service vpp is running"
+ shell: /usr/bin/systemctl show 'vpp' --property ActiveState | grep '\bactive\b'
+ when: vpp_enabled.rc == 0
+ tags: step0,validation
- name: Stop vpp service
- tags: step2
+ tags: step1
+ when: vpp_enabled.rc == 0
service: name=vpp state=stopped
diff --git a/puppet/services/zaqar.yaml b/puppet/services/zaqar.yaml
index a320f694..33769d02 100644
--- a/puppet/services/zaqar.yaml
+++ b/puppet/services/zaqar.yaml
@@ -30,6 +30,12 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ ZaqarPolicies:
+ description: |
+ A hash of policies to configure for Zaqar.
+ e.g. { zaqar-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
outputs:
@@ -38,6 +44,7 @@ outputs:
value:
service_name: zaqar
config_settings:
+ zaqar::policy::policies: {get_param: ZaqarPolicies}
zaqar::keystone::authtoken::password: {get_param: ZaqarPassword}
zaqar::keystone::authtoken::project_name: 'service'
zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
diff --git a/releasenotes/notes/Enable-TLS-for-libvirt-0aab48cd8339da0f.yaml b/releasenotes/notes/Enable-TLS-for-libvirt-0aab48cd8339da0f.yaml
new file mode 100644
index 00000000..e8941b7c
--- /dev/null
+++ b/releasenotes/notes/Enable-TLS-for-libvirt-0aab48cd8339da0f.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ If TLS in the internal network is enabled, libvirt's transport defaults to
+ using TLS. This can be changed by setting the ``UseTLSTransportForLiveMigration``
+ parameter, which is ``true`` by default.
diff --git a/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml b/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml
new file mode 100644
index 00000000..50b8167e
--- /dev/null
+++ b/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - Keystone's default token provider is now fernet instead of UUID
+upgrade:
+ - When upgrading, old tokens will not work anymore due to the provider
+ changing from UUID to fernet.
diff --git a/releasenotes/notes/add-all-hosts-to-hostsentry-20a8ee8a1a210ce2.yaml b/releasenotes/notes/add-all-hosts-to-hostsentry-20a8ee8a1a210ce2.yaml
new file mode 100644
index 00000000..b0ad9d93
--- /dev/null
+++ b/releasenotes/notes/add-all-hosts-to-hostsentry-20a8ee8a1a210ce2.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - Previously only the VIPs and their associated hostnames were present
+    in the HostsEntry output, because the hosts_entries output on the
+    hosts-config.yaml nested stack was empty due to referencing an
+    invalid attribute. Entries for all hosts are now included. See
+ https://bugs.launchpad.net/tripleo/+bug/1683517
+
+
diff --git a/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml b/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml
new file mode 100644
index 00000000..2af6aa72
--- /dev/null
+++ b/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Add support for BGPVPN Neutron service plugin
diff --git a/releasenotes/notes/add-ceilometer-agent-ipmi-2c86726d0373d354.yaml b/releasenotes/notes/add-ceilometer-agent-ipmi-2c86726d0373d354.yaml
new file mode 100644
index 00000000..d1f73407
--- /dev/null
+++ b/releasenotes/notes/add-ceilometer-agent-ipmi-2c86726d0373d354.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Add support to configure Ceilometer Agent Ipmi profiles.
diff --git a/releasenotes/notes/add-ipv6-diable-options-9aaee219bb87ac6a.yaml b/releasenotes/notes/add-ipv6-diable-options-9aaee219bb87ac6a.yaml
new file mode 100644
index 00000000..8b57f587
--- /dev/null
+++ b/releasenotes/notes/add-ipv6-diable-options-9aaee219bb87ac6a.yaml
@@ -0,0 +1,7 @@
+---
+security:
+ - |
+    Add an option to disable IPv6 when it is not in use, which decreases the
+    risk of IPv6-based attacks.
+    Both net.ipv6.conf.default.disable_ipv6 & net.ipv6.conf.all.disable_ipv6
+    will be explicitly set to the default value (0), which leaves IPv6 enabled.
diff --git a/releasenotes/notes/add-l2gw-api-support-2206d3d14f409088.yaml b/releasenotes/notes/add-l2gw-api-support-2206d3d14f409088.yaml
new file mode 100644
index 00000000..81835323
--- /dev/null
+++ b/releasenotes/notes/add-l2gw-api-support-2206d3d14f409088.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Add support for L2 Gateway Neutron service plugin
diff --git a/releasenotes/notes/add-ldap-backend-0bda702fb0aa24bf.yaml b/releasenotes/notes/add-ldap-backend-0bda702fb0aa24bf.yaml
new file mode 100644
index 00000000..19452f27
--- /dev/null
+++ b/releasenotes/notes/add-ldap-backend-0bda702fb0aa24bf.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - Add the capability to configure LDAP backends for keystone domains.
+ This can be done by using the KeystoneLDAPDomainEnable and
+ KeystoneLDAPBackendConfigs parameters.
diff --git a/releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml b/releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml
new file mode 100644
index 00000000..882ee4e5
--- /dev/null
+++ b/releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - Adds support for OpenDaylight HA clustering. Now when specifying
+ three or more ODL roles, ODL will be deployed in a cluster, and
+ use port 2550 for cluster communication.
diff --git a/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml b/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml
new file mode 100644
index 00000000..b3a62ced
--- /dev/null
+++ b/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - The relevant parameters have been added to deploy the heat APIs over httpd.
+    This means that HeatWorkers now affects httpd instead of the heat APIs
+    themselves, and that the apache hieradata will also be deployed on the
+    nodes where the heat APIs run.
diff --git a/releasenotes/notes/add-qdr-99a27dffef42c13e.yaml b/releasenotes/notes/add-qdr-99a27dffef42c13e.yaml
new file mode 100644
index 00000000..163536dd
--- /dev/null
+++ b/releasenotes/notes/add-qdr-99a27dffef42c13e.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - Introduce the ability to deploy the qpid-dispatch-router (Qdr) for
+ the oslo.messaging AMQP 1.0 driver backend. The Qdr provides
+    direct messaging (i.e. brokerless) communications for
+ oslo.messaging services. To facilitate simple use for evaluation
+ in an overcloud deployment, the Qdr aliases the RabbitMQ service
+ to provide the messaging backend.
diff --git a/releasenotes/notes/add_db_sync_timeout-c9b2f401cca0b37d.yaml b/releasenotes/notes/add_db_sync_timeout-c9b2f401cca0b37d.yaml
new file mode 100644
index 00000000..ecf35933
--- /dev/null
+++ b/releasenotes/notes/add_db_sync_timeout-c9b2f401cca0b37d.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Adds DatabaseSyncTimeout parameter to Nova and Neutron templates.
diff --git a/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml b/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml
new file mode 100644
index 00000000..ec22942a
--- /dev/null
+++ b/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+    NeutronDhcpAgents had a default value of 3 that, even though unused in
+    practice, was a bad default. Changing the default to a sentinel value and
+    making the hiera setting conditional allows deploy-time logic in puppet to
+    provide a default based on the number of DHCP agents being deployed.
diff --git a/releasenotes/notes/api-policy-4ca739519537f6f4.yaml b/releasenotes/notes/api-policy-4ca739519537f6f4.yaml
new file mode 100644
index 00000000..54beb305
--- /dev/null
+++ b/releasenotes/notes/api-policy-4ca739519537f6f4.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ TripleO is now able to configure role-based access API policies with new
+ parameters for each API service.
+    For example, the Nova API service now has a NovaApiPolicies parameter whose
+    value could be { nova-context_is_admin: { key: context_is_admin, value: 'role:admin' } }.
+    This writes the context_is_admin rule to /etc/nova/policy.json. Puppet takes
+    care of this configuration, and API services are restarted when the file is
+    changed.
+    We also add the augeas resource type to the list of Puppet providers that
+    containerized deployments collect in the catalog to generate configuration,
+    so this feature can also be used when deploying TripleO in containers.
diff --git a/releasenotes/notes/big-switch-agent-4c743a2112251234.yaml b/releasenotes/notes/big-switch-agent-4c743a2112251234.yaml
new file mode 100644
index 00000000..49ede200
--- /dev/null
+++ b/releasenotes/notes/big-switch-agent-4c743a2112251234.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Updated bigswitch environment file to include the bigswitch agent
+ installation and correct support for the restproxy configuration.
diff --git a/releasenotes/notes/deployed-server-firewall-purge-9d9fe73faf925056.yaml b/releasenotes/notes/deployed-server-firewall-purge-9d9fe73faf925056.yaml
new file mode 100644
index 00000000..298a8ece
--- /dev/null
+++ b/releasenotes/notes/deployed-server-firewall-purge-9d9fe73faf925056.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - The initial firewall will now be purged by the deployed-server bootstrap
+ scripts. This is needed to prevent possible issues with bootstrapping the
+ initial Pacemaker cluster. See
+ https://bugs.launchpad.net/tripleo/+bug/1679234
diff --git a/releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml b/releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml
new file mode 100644
index 00000000..09067296
--- /dev/null
+++ b/releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml
@@ -0,0 +1,10 @@
+---
+upgrade:
+ - The ``NeutronExternalNetworkBridge`` parameter changed its default value
+ from ``br-ex`` to an empty string value. It means that by default Neutron
+ L3 agent will be able to serve multiple external networks. (It was always
+ the case for those who were using templates with the value of the parameter
+ overridden by an empty string value.)
+deprecations:
+ - The ``NeutronExternalNetworkBridge`` parameter is deprecated and will be
+    removed in a future release.
diff --git a/releasenotes/notes/disable-ceilo-api-dfe5d0947563bbe0.yaml b/releasenotes/notes/disable-ceilo-api-dfe5d0947563bbe0.yaml
new file mode 100644
index 00000000..2661f7c9
--- /dev/null
+++ b/releasenotes/notes/disable-ceilo-api-dfe5d0947563bbe0.yaml
@@ -0,0 +1,4 @@
+---
+deprecations:
+  - Deprecate and disable the Ceilometer API by default. It can be re-enabled
+    by passing an environment file to the deploy command.
diff --git a/releasenotes/notes/disable-core-dump-for-setuid-programs-e83a2a5da908b9c3.yaml b/releasenotes/notes/disable-core-dump-for-setuid-programs-e83a2a5da908b9c3.yaml
new file mode 100644
index 00000000..3168a549
--- /dev/null
+++ b/releasenotes/notes/disable-core-dump-for-setuid-programs-e83a2a5da908b9c3.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+ - |
+ The fs.suid_dumpable kernel parameter is now explicitly set to 0 to prevent
+ exposing sensitive data through core dumps of processes with elevated
+ permissions. Deployments that set or depend on non-zero values for
+ fs.suid_dumpable may be affected by upgrading.
+security:
+ - |
+    Explicitly disable core dumps for setuid programs by setting
+    fs.suid_dumpable = 0; this decreases the risk of unauthorized access to
+    core dump files generated by setuid programs.
diff --git a/releasenotes/notes/disable-kernel-parameter-for-icmp-redirects-f325f91d71b58b5f.yaml b/releasenotes/notes/disable-kernel-parameter-for-icmp-redirects-f325f91d71b58b5f.yaml
new file mode 100644
index 00000000..0f226a84
--- /dev/null
+++ b/releasenotes/notes/disable-kernel-parameter-for-icmp-redirects-f325f91d71b58b5f.yaml
@@ -0,0 +1,19 @@
+---
+upgrade:
+ - The net.ipv4.conf.default.send_redirects & net.ipv4.conf.all.send_redirects
+ are now set to 0 to prevent a compromised host from sending invalid ICMP
+ redirects to other router devices.
+ - The net.ipv4.conf.default.accept_redirects,
+ net.ipv6.conf.default.accept_redirects & net.ipv6.conf.all.accept_redirects
+    are now set to 0 to prevent forged ICMP packets from altering the host's
+    routing tables.
+ - The net.ipv4.conf.default.secure_redirects &
+ net.ipv4.conf.all.secure_redirects are now set to 0 to disable acceptance
+ of secure ICMP redirected packets.
+security:
+  - Invalid ICMP redirects may corrupt routing and cause users to access a system
+ set up by the attacker as opposed to a valid system.
+ - Routing tables may be altered by bogus ICMP redirect messages and send
+ packets to incorrect networks.
+ - Secure ICMP redirects are the same as ICMP redirects, except they come from
+ gateways listed on the default gateway list.
diff --git a/releasenotes/notes/docker-service-all-roles-5c22a018caeafcf0.yaml b/releasenotes/notes/docker-service-all-roles-5c22a018caeafcf0.yaml
new file mode 100644
index 00000000..734db08a
--- /dev/null
+++ b/releasenotes/notes/docker-service-all-roles-5c22a018caeafcf0.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ When deploying with environments/docker.yaml, the docker service
+ is now deployed on all predefined roles.
diff --git a/releasenotes/notes/enable-logging-suspicious-packets-d5545586f917d2ca.yaml b/releasenotes/notes/enable-logging-suspicious-packets-d5545586f917d2ca.yaml
new file mode 100644
index 00000000..bb2543f2
--- /dev/null
+++ b/releasenotes/notes/enable-logging-suspicious-packets-d5545586f917d2ca.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+ - |
+ The net.ipv4.conf.default.log_martians & net.ipv4.conf.all.log_martians are
+ now set to 1 to enable logging of suspicious packets.
+security:
+ - |
+ Logging of suspicious packets allows an administrator to investigate the
+ spoofed packets sent to their system.
diff --git a/releasenotes/notes/enable-support-for-external-swift-proxy-941917f8bcc63a5d.yaml b/releasenotes/notes/enable-support-for-external-swift-proxy-941917f8bcc63a5d.yaml
new file mode 100644
index 00000000..83b05bbb
--- /dev/null
+++ b/releasenotes/notes/enable-support-for-external-swift-proxy-941917f8bcc63a5d.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - Added support for an external swift proxy. Users may need to
+    configure endpoints pointing to a swift proxy service that is
+    already available.
diff --git a/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml b/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml
new file mode 100644
index 00000000..da995949
--- /dev/null
+++ b/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml
@@ -0,0 +1,6 @@
+---
+security:
+ - |
+ Secure EtcdInitialClusterToken by removing the default value
+    and making the parameter hidden.
+ Fixes `bug 1673266 <https://bugs.launchpad.net/tripleo/+bug/1673266>`__.
diff --git a/releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml b/releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml
new file mode 100644
index 00000000..682171c1
--- /dev/null
+++ b/releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - Fixes an issue when using the CinderNfsServers
+ parameter_defaults setting. It now works with a
+ single share as well as with a comma-separated list
+ of shares; see the example below.
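
A minimal environment file using this setting might look like the following sketch; the share paths are placeholders, not values taken from this patch:

    parameter_defaults:
      # a single NFS share
      CinderNfsServers: '192.168.122.1:/export/cinder'
      # or, equivalently, a comma-separated list of shares:
      # CinderNfsServers: '192.168.122.1:/export/cinder,192.168.122.2:/export/cinder'
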
diff --git a/releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml b/releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml
new file mode 100644
index 00000000..bb18aed8
--- /dev/null
+++ b/releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - Fixes firewall rules from the neutron OVS agent not being
+ inherited correctly and applied in the neutron OVS DPDK
+ template.
diff --git a/releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml b/releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml
new file mode 100644
index 00000000..79cea05e
--- /dev/null
+++ b/releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - Fixes parsing of OpenDaylightProviderMappings from a
+ comma-delimited list.
diff --git a/releasenotes/notes/get-occ-config-local-connector-5bbec3f591a9f311.yaml b/releasenotes/notes/get-occ-config-local-connector-5bbec3f591a9f311.yaml
new file mode 100644
index 00000000..ef8877ae
--- /dev/null
+++ b/releasenotes/notes/get-occ-config-local-connector-5bbec3f591a9f311.yaml
@@ -0,0 +1,10 @@
+---
+fixes:
+ - The deployed-server Heat agent configuration script,
+ get-occ-config.sh, is now updated to configure the
+ local data source for os-collect-config instead of
+ configuring /etc/os-collect-config.conf directly. Doing
+ so means that the configuration template for os-apply-config
+ no longer has to be deleted as the file will be rendered
+ correctly with the right data. See
+ https://bugs.launchpad.net/tripleo/+bug/1679705
diff --git a/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml b/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml
new file mode 100644
index 00000000..2f2513c9
--- /dev/null
+++ b/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - Deploy Gnocchi with Keystone v3 endpoints and ensure it no
+ longer relies on Keystone v2.
diff --git a/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml b/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml
deleted file mode 100644
index edcc1250..00000000
--- a/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-deprecations:
- - The environments/puppet-pacemaker.yaml file is now deprecated and the HA
- deployment is now the default. In order to get the non-HA deployment use
- environments/nonha-arch.yaml explicitly.
diff --git a/releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml b/releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml
new file mode 100644
index 00000000..d2b2eb94
--- /dev/null
+++ b/releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - openstack-selinux is now installed by the deployed-server
+ bootstrap scripts. Previously, it was not installed, so
+ if SELinux was set to enforcing, all OpenStack policy
+ was missing.
diff --git a/releasenotes/notes/ironic-neutron-integration-76c4f9e0d10785e4.yaml b/releasenotes/notes/ironic-neutron-integration-76c4f9e0d10785e4.yaml
new file mode 100644
index 00000000..dd99acc7
--- /dev/null
+++ b/releasenotes/notes/ironic-neutron-integration-76c4f9e0d10785e4.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+ Allow setting the Ironic provisioning network UUID or name via the new
+ ``IronicProvisioningNetwork`` configuration option (see the example below).
+ - |
+ Enable support for the "neutron" Ironic networking plugin, enabling advanced
+ integration with Neutron, such as VLAN/VXLAN network support, bonding and
+ security groups.
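
As an illustrative example only (the network name here is hypothetical), the new option would be set in an environment file such as:

    parameter_defaults:
      # UUID or name of the Neutron network used for bare metal provisioning
      IronicProvisioningNetwork: provisioning
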
diff --git a/releasenotes/notes/leave-satellite-repo-enabled-8b60528bd5450c7b.yaml b/releasenotes/notes/leave-satellite-repo-enabled-8b60528bd5450c7b.yaml
new file mode 100644
index 00000000..c327265a
--- /dev/null
+++ b/releasenotes/notes/leave-satellite-repo-enabled-8b60528bd5450c7b.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Previously the RHEL registration script disabled the satellite repo after
+ installing the necessary packages from it. This made it awkward to
+ update those packages later, so the repo is no longer disabled.
diff --git a/releasenotes/notes/make-panko-default-8d0e824fc91cef56.yaml b/releasenotes/notes/make-panko-default-8d0e824fc91cef56.yaml
new file mode 100644
index 00000000..d0624265
--- /dev/null
+++ b/releasenotes/notes/make-panko-default-8d0e824fc91cef56.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - Since panko is enabled by default, include it in the default dispatcher
+ for ceilometer events.
diff --git a/releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml b/releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml
new file mode 100644
index 00000000..45ca9fe5
--- /dev/null
+++ b/releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml
@@ -0,0 +1,14 @@
+---
+features:
+ - |
+ Add support for cold migration over ssh.
+
+ This enables nova cold migration.
+
+ This also switches to SSH as the default transport for live-migration.
+ The tripleo-common mistral action that generates passwords supplies the
+ MigrationSshKey parameter that enables this.
+deprecations:
+ - |
+ The TCP transport is no longer used for live-migration and the firewall
+ port has been closed.
diff --git a/releasenotes/notes/ovn-fcd4b0168e6745a8.yaml b/releasenotes/notes/ovn-fcd4b0168e6745a8.yaml
new file mode 100644
index 00000000..f5ccec06
--- /dev/null
+++ b/releasenotes/notes/ovn-fcd4b0168e6745a8.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - Support configuring NeutronBridgeMappings.
+ - Set force_config_drive to true, as OVN doesn't support the metadata service.
+ - Add the necessary iptables rules to allow Geneve traffic and ovsdb-server
+ traffic for the Northbound and Southbound databases.
diff --git a/releasenotes/notes/ovs-2.5-2.6-composable-upgrades-workaround-73f4e56127c910b4.yaml b/releasenotes/notes/ovs-2.5-2.6-composable-upgrades-workaround-73f4e56127c910b4.yaml
new file mode 100644
index 00000000..8c210823
--- /dev/null
+++ b/releasenotes/notes/ovs-2.5-2.6-composable-upgrades-workaround-73f4e56127c910b4.yaml
@@ -0,0 +1,12 @@
+---
+issues:
+ - During the ovs upgrade from 2.5 to 2.6 we need to work around the classic
+ yum update command by handling the upgrade of the package separately so as
+ not to lose the IPs and the connectivity on the nodes. The workaround is
+ discussed here https://bugs.launchpad.net/tripleo/+bug/1669714
+upgrade:
+ - The upgrade from openvswitch 2.5 to 2.6 is handled gracefully and there should
+ be no user impact; in particular, no restart of the openvswitch service. For more
+ information please see the related bug above, which also links the relevant code
+ reviews. The workaround (transparent to the user) is to download the OVS package
+ and install it with the --nopostun and --notriggerun options of the rpm binary.
diff --git a/releasenotes/notes/pluggable-server-type-per-role-314f38f8e5d4c84e.yaml b/releasenotes/notes/pluggable-server-type-per-role-314f38f8e5d4c84e.yaml
new file mode 100644
index 00000000..5b58d3d4
--- /dev/null
+++ b/releasenotes/notes/pluggable-server-type-per-role-314f38f8e5d4c84e.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - The server resource type, OS::TripleO::Server, can now be
+ mapped per role instead of globally. This allows users to
+ mix baremetal (OS::Nova::Server) and
+ deployed-server (OS::Heat::DeployedServer) server resources
+ in the same deployment; see the sketch below and
+ https://blueprints.launchpad.net/tripleo/+spec/pluggable-server-type-per-role
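
A minimal sketch of such a per-role mapping, assuming the per-role resource names follow the OS::TripleO::<RoleName>Server pattern used by the deployed-server environment files (the role split shown is only an example):

    resource_registry:
      # Compute nodes are pre-provisioned servers
      OS::TripleO::ComputeServer: OS::Heat::DeployedServer
      # Controller nodes keep the default baremetal mapping
      OS::TripleO::ControllerServer: OS::Nova::Server
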
diff --git a/releasenotes/notes/replace-references-to-old-ctlplane-0df7f2ae8910559c.yaml b/releasenotes/notes/replace-references-to-old-ctlplane-0df7f2ae8910559c.yaml
new file mode 100644
index 00000000..09d3be03
--- /dev/null
+++ b/releasenotes/notes/replace-references-to-old-ctlplane-0df7f2ae8910559c.yaml
@@ -0,0 +1,20 @@
+---
+upgrade:
+ - |
+ The default network for the ctlplane changed from 192.0.2.0/24 to
+ 192.168.24.0/24. All references to the ctlplane network in the templates
+ have been updated to reflect this change. When upgrading from a previous
+ release, if the default network was used for the ctlplane (192.0.2.0/24),
+ then it is necessary to provide as input, via environment file, the correct
+ setting for all the parameters that previously defaulted to 192.0.2.x and
+ now default to 192.168.24.x; the environment file
+ `environments/updates/update-from-192_0_2-subnet.yaml` can be used on
+ upgrade to cover a simple scenario, but it won't be enough for scenarios
+ using an external load balancer, Contrail or Cisco N1KV. A list of
+ parameters to be provided on upgrade follows; an example is sketched below.
+ From contrail-net.yaml: EC2MetadataIp, ControlPlaneDefaultRoute
+ From external-loadbalancer-vip-v6.yaml: ControlFixedIPs
+ From external-loadbalancer-vip.yaml: ControlFixedIPs
+ From network-environment.yaml: EC2MetadataIp, ControlPlaneDefaultRoute
+ From neutron-ml2-cisco-n1kv.yaml: N1000vVSMIP, N1000vMgmtGatewayIP
+ From contrail-vrouter.yaml: ContrailVrouterGateway
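
For a simple scenario, the upgrade environment file would carry the old addressing forward, roughly as sketched here; the values shown are only the historical 192.0.2.x defaults and must match the actual deployment:

    parameter_defaults:
      # keep the pre-upgrade ctlplane addressing
      EC2MetadataIp: 192.0.2.1
      ControlPlaneDefaultRoute: 192.0.2.1
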
diff --git a/releasenotes/notes/restrict-access-to-kernel-message-buffer-809160674b92a073.yaml b/releasenotes/notes/restrict-access-to-kernel-message-buffer-809160674b92a073.yaml
new file mode 100644
index 00000000..c24e8921
--- /dev/null
+++ b/releasenotes/notes/restrict-access-to-kernel-message-buffer-809160674b92a073.yaml
@@ -0,0 +1,11 @@
+---
+upgrade:
+ - |
+ The kernel.dmesg_restrict is now set to 1 to prevent exposure of sensitive
+ kernel address information with unprivileged access. Deployments that set
+ or depend on values other than 1 for kernel.dmesg_restrict may be affected
+ by upgrading.
+security:
+ - |
+ The kernel syslog contains sensitive kernel address information; setting
+ kernel.dmesg_restrict prevents unprivileged access to this information.
diff --git a/releasenotes/notes/restrict-mongodb-memory-de7bf6754d7234d9.yaml b/releasenotes/notes/restrict-mongodb-memory-de7bf6754d7234d9.yaml
new file mode 100644
index 00000000..86622bc1
--- /dev/null
+++ b/releasenotes/notes/restrict-mongodb-memory-de7bf6754d7234d9.yaml
@@ -0,0 +1,3 @@
+---
+fixes:
+ - Add knobs to limit the memory consumed by mongodb with systemd.
diff --git a/releasenotes/notes/role-tags-16ac2e9e8fcab218.yaml b/releasenotes/notes/role-tags-16ac2e9e8fcab218.yaml
new file mode 100644
index 00000000..dadbfa4b
--- /dev/null
+++ b/releasenotes/notes/role-tags-16ac2e9e8fcab218.yaml
@@ -0,0 +1,18 @@
+---
+features:
+ - |
+ Adds tags to roles, allowing an operator to specify custom tags that are
+ used to find functionality available from a role. Currently a role
+ with both the 'primary' and 'controller' tags is considered to be the primary
+ role. Historically the role named 'Controller' was the 'primary' role, and
+ this primary designation is used to determine items like memcache IP
+ addresses. If no role has both the 'primary' and 'controller' tags,
+ the first role specified in roles_data.yaml is used as the primary
+ role.
+upgrade:
+ - |
+ If using custom roles data, the logic has changed so that the first
+ role listed in the roles_data.yaml file is treated as the primary role. This
+ can be overridden by adding the 'primary' and 'controller' tags to the
+ custom controller role in your roles_data.yaml to ensure that the defined
+ custom controller role is still considered the primary role, as sketched below.
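
A minimal sketch of that workaround in a custom roles_data.yaml (the role name is hypothetical); it mirrors the change made to the default Controller role later in this patch:

    - name: MyController
      CountDefault: 1
      tags:
        - primary
        - controller
      # the existing ServicesDefault list for the role stays unchanged
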
diff --git a/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml b/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml
new file mode 100644
index 00000000..c744e0f7
--- /dev/null
+++ b/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - Sahara is now deployed with keystone_authtoken parameters and moves
+ forward to Keystone v3.
diff --git a/releasenotes/notes/set-ceilometer-auth-flag-382f68ddb2cbcb6b.yaml b/releasenotes/notes/set-ceilometer-auth-flag-382f68ddb2cbcb6b.yaml
new file mode 100644
index 00000000..07407f20
--- /dev/null
+++ b/releasenotes/notes/set-ceilometer-auth-flag-382f68ddb2cbcb6b.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - The ceilometer user is still needed in cases where the ceilometer API is
+ disabled. This ensures that other ceilometer services can still
+ authenticate with keystone.
diff --git a/releasenotes/notes/sriov-pci-passthrough-8f28719b889bdaf7.yaml b/releasenotes/notes/sriov-pci-passthrough-8f28719b889bdaf7.yaml
new file mode 100644
index 00000000..20146b0a
--- /dev/null
+++ b/releasenotes/notes/sriov-pci-passthrough-8f28719b889bdaf7.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - The ``pci_passthrough`` hiera value should be passed as a string
+ (`bug 1675036 <https://bugs.launchpad.net/tripleo/+bug/1675036>`__).
diff --git a/releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml b/releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml
new file mode 100644
index 00000000..8b533b1a
--- /dev/null
+++ b/releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - SSH host key exchange. The ssh host keys are collected from each host,
+ combined, and written to /etc/ssh/ssh_known_hosts.
diff --git a/releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml b/releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml
new file mode 100644
index 00000000..70051f65
--- /dev/null
+++ b/releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - The token flush cron job has been modified to run hourly instead of once
+ a day. The previous schedule caused issues with larger deployments, as
+ the operation would take too long and sometimes even fail because the
+ transaction was so large. Note that this only affects people using the
+ UUID token provider.
diff --git a/releasenotes/notes/update-on-rhel-registration-afbef3ead983b08f.yaml b/releasenotes/notes/update-on-rhel-registration-afbef3ead983b08f.yaml
new file mode 100644
index 00000000..ad1f39c4
--- /dev/null
+++ b/releasenotes/notes/update-on-rhel-registration-afbef3ead983b08f.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Adds a new boolean parameter for RHEL registration called
+ 'UpdateOnRHELRegistration' that, when enabled, triggers a yum update
+ on the node after the registration process completes; see the example below.
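
For example, the flag could be enabled in the registration environment file as follows (only this parameter is shown):

    parameter_defaults:
      UpdateOnRHELRegistration: True
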
diff --git a/releasenotes/notes/update-plan-environment-4e164b57a801e2cb.yaml b/releasenotes/notes/update-plan-environment-4e164b57a801e2cb.yaml
new file mode 100644
index 00000000..29d32cb7
--- /dev/null
+++ b/releasenotes/notes/update-plan-environment-4e164b57a801e2cb.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Add name and description fields to plan-environment.yaml
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 8da995b0..ec158ceb 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -52,9 +52,9 @@ copyright = u'2017, TripleO Developers'
# built documents.
#
# The full version, including alpha/beta/rc tags.
-release = '6.0.0.0b3'
+release = '7.0.0.0b1'
# The short X.Y version.
-version = '6.0.0'
+version = '7.0.0'
# The full version, including alpha/beta/rc tags.
diff --git a/requirements.txt b/requirements.txt
index 057aa287..df8a71f5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr>=1.8 # Apache-2.0
+pbr>=2.0.0 # Apache-2.0
Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
six>=1.9.0 # MIT
diff --git a/roles_data.yaml b/roles_data.yaml
index 95b25d98..8d3b5078 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -28,11 +28,21 @@
# ServicesDefault: (list) optional default list of services to be deployed
# on the role, defaults to an empty list. Sets the default for the
# {{role.name}}Services parameter in overcloud.yaml
-
-- name: Controller # the 'primary' role goes first
+#
+# tags: (list) list of tags used by other parts of the deployment process to
+# find the role for a specific type of functionality. Currently a role
+# with both 'primary' and 'controller' is used as the primary role for the
+# deployment process. If no role has both 'primary' and 'controller', the
+# first role in this file is used as the primary role.
+#
+- name: Controller
CountDefault: 1
+ tags:
+ - primary
+ - controller
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephMds
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
@@ -42,6 +52,10 @@
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::CinderBackendDellPs
+ - OS::TripleO::Services::CinderBackendDellSc
+ - OS::TripleO::Services::CinderBackendNetApp
+ - OS::TripleO::Services::CinderBackendScaleIO
- OS::TripleO::Services::Congress
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
@@ -52,7 +66,9 @@
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronBgpVpnApi
- OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL2gwApi
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
- OS::TripleO::Services::NeutronApi
@@ -75,12 +91,13 @@
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::ExternalSwiftProxy
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::Timezone
- - OS::TripleO::Services::CeilometerApi
- OS::TripleO::Services::CeilometerCollector
- OS::TripleO::Services::CeilometerExpirer
- OS::TripleO::Services::CeilometerAgentCentral
@@ -126,6 +143,7 @@
- OS::TripleO::Services::OctaviaHousekeeping
- OS::TripleO::Services::OctaviaWorker
- OS::TripleO::Services::Vpp
+ - OS::TripleO::Services::Docker
- name: Compute
CountDefault: 1
@@ -133,12 +151,14 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::Kernel
@@ -157,16 +177,19 @@
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Vpp
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Docker
- name: BlockStorage
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::SensuClient
@@ -174,17 +197,20 @@
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Docker
- name: ObjectStorage
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
@@ -193,15 +219,18 @@
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Docker
- name: CephStorage
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
@@ -210,3 +239,4 @@
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Docker
diff --git a/roles_data_undercloud.yaml b/roles_data_undercloud.yaml
index 5070ef38..df2e196b 100644
--- a/roles_data_undercloud.yaml
+++ b/roles_data_undercloud.yaml
@@ -1,6 +1,9 @@
-- name: Undercloud # the 'primary' role goes first
+- name: Undercloud
CountDefault: 1
disable_constraints: True
+ tags:
+ - primary
+ - controller
ServicesDefault:
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::MySQL
@@ -34,3 +37,11 @@
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::NeutronOvsAgent
- OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::AodhApi
+ - OS::TripleO::Services::AodhEvaluator
+ - OS::TripleO::Services::AodhNotifier
+ - OS::TripleO::Services::AodhListener
+ - OS::TripleO::Services::GnocchiApi
+ - OS::TripleO::Services::GnocchiMetricd
+ - OS::TripleO::Services::GnocchiStatsd
+ - OS::TripleO::Services::PankoApi
diff --git a/scripts/hosts-config.sh b/scripts/hosts-config.sh
index f456b316..b3109a0c 100755
--- a/scripts/hosts-config.sh
+++ b/scripts/hosts-config.sh
@@ -13,14 +13,16 @@ write_entries() {
if grep -q "^# HEAT_HOSTS_START" "$file"; then
temp=$(mktemp)
- awk -v v="$entries" '/^# HEAT_HOSTS_START/ {
- print $0
- print v
- f=1
- }f &&!/^# HEAT_HOSTS_END$/{next}/^# HEAT_HOSTS_END$/{f=0}!f' "$file" > "$temp"
- echo "INFO: Updating hosts file $file, check below for changes"
- diff "$file" "$temp" || true
- cat "$temp" > "$file"
+ (
+ sed '/^# HEAT_HOSTS_START/,$d' "$file"
+ echo -ne "\n# HEAT_HOSTS_START - Do not edit manually within this section!\n"
+ echo "$entries"
+ echo -ne "# HEAT_HOSTS_END\n\n"
+ sed '1,/^# HEAT_HOSTS_END/d' "$file"
+ ) > "$temp"
+ echo "INFO: Updating hosts file $file, check below for changes"
+ diff "$file" "$temp" || true
+ cat "$temp" > "$file"
else
echo -ne "\n# HEAT_HOSTS_START - Do not edit manually within this section!\n" >> "$file"
echo "$entries" >> "$file"
diff --git a/setup.py b/setup.py
index 782bb21f..566d8443 100644
--- a/setup.py
+++ b/setup.py
@@ -25,5 +25,5 @@ except ImportError:
pass
setuptools.setup(
- setup_requires=['pbr>=1.8'],
+ setup_requires=['pbr>=2.0.0'],
pbr=True)
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index 32987cb2..f9dffef0 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -23,6 +23,15 @@ envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
'tls-endpoints-public-ip.yaml',
'tls-everywhere-endpoints-dns.yaml']
ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
+REQUIRED_DOCKER_SECTIONS = ['service_name', 'docker_config', 'puppet_config',
+ 'config_settings', 'step_config']
+OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
+ 'service_config_settings', 'host_prep_tasks',
+ 'metadata_settings', 'kolla_config']
+REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
+ 'config_image']
+OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = [ 'puppet_tags' ]
+
def exit_usage():
print('Usage %s <yaml file or directory>' % sys.argv[0])
@@ -69,6 +78,7 @@ def validate_hci_compute_services_default(env_filename, env_tpl):
return 1
return 0
+
def validate_mysql_connection(settings):
no_op = lambda *args: False
error_status = [0]
@@ -109,6 +119,58 @@ def validate_mysql_connection(settings):
return error_status[0]
+def validate_docker_service(filename, tpl):
+ if 'outputs' in tpl and 'role_data' in tpl['outputs']:
+ if 'value' not in tpl['outputs']['role_data']:
+ print('ERROR: invalid role_data for filename: %s'
+ % filename)
+ return 1
+ role_data = tpl['outputs']['role_data']['value']
+
+ for section_name in REQUIRED_DOCKER_SECTIONS:
+ if section_name not in role_data:
+ print('ERROR: %s is required in role_data for %s.'
+ % (section_name, filename))
+ return 1
+
+ for section_name in role_data.keys():
+ if section_name in REQUIRED_DOCKER_SECTIONS:
+ continue
+ else:
+ if section_name in OPTIONAL_DOCKER_SECTIONS:
+ continue
+ else:
+ print('ERROR: %s is extra in role_data for %s.'
+ % (section_name, filename))
+ return 1
+
+ if 'puppet_config' in role_data:
+ puppet_config = role_data['puppet_config']
+ for key in puppet_config:
+ if key in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS:
+ continue
+ else:
+ if key in OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS:
+ continue
+ else:
+ print('ERROR: %s should not be in puppet_config section.'
+ % key)
+ return 1
+ for key in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS:
+ if key not in puppet_config:
+ print('ERROR: %s is required in puppet_config for %s.'
+ % (key, filename))
+ return 1
+
+ if 'parameters' in tpl:
+ for param in required_params:
+ if param not in tpl['parameters']:
+ print('ERROR: parameter %s is required for %s.'
+ % (param, filename))
+ return 1
+ return 0
+
+
def validate_service(filename, tpl):
if 'outputs' in tpl and 'role_data' in tpl['outputs']:
if 'value' not in tpl['outputs']['role_data']:
@@ -154,10 +216,16 @@ def validate(filename):
% filename)
return 1
+ # qdr aliases rabbitmq service to provide alternative messaging backend
if (filename.startswith('./puppet/services/') and
- filename != './puppet/services/services.yaml'):
+ filename not in ['./puppet/services/services.yaml',
+ './puppet/services/qdr.yaml']):
retval = validate_service(filename, tpl)
+ if (filename.startswith('./docker/services/') and
+ filename != './docker/services/services.yaml'):
+ retval = validate_docker_service(filename, tpl)
+
if filename.endswith('hyperconverged-ceph.yaml'):
retval = validate_hci_compute_services_default(filename, tpl)
diff --git a/validation-scripts/all-nodes.sh b/validation-scripts/all-nodes.sh
index 0b8b3523..f1f4cc11 100644
--- a/validation-scripts/all-nodes.sh
+++ b/validation-scripts/all-nodes.sh
@@ -67,5 +67,23 @@ function ping_default_gateways() {
echo "SUCCESS"
}
+# Verify the FQDN from the nova/ironic deployment matches
+# FQDN in the heat templates.
+function fqdn_check() {
+ HOSTNAME=$(hostname)
+ SHORT_NAME=$(hostname -s)
+ FQDN_FROM_HOSTS=$(awk '$3 == "'${SHORT_NAME}'"{print $2}' /etc/hosts)
+ echo -n "Checking hostname vs /etc/hosts entry..."
+ if [[ $HOSTNAME != $FQDN_FROM_HOSTS ]]; then
+ echo "FAILURE"
+ echo -e "System hostname: ${HOSTNAME}\nEntry from /etc/hosts: ${FQDN_FROM_HOSTS}\n"
+ exit 1
+ fi
+ echo "SUCCESS"
+}
+
ping_controller_ips "$ping_test_ips"
ping_default_gateways
+if [[ $validate_fqdn == "True" ]];then
+ fqdn_check
+fi