-rw-r--r--  capabilities-map.yaml | 324
-rw-r--r--  docker/firstboot/start_docker_agents.sh | 29
-rw-r--r--  environments/ceph-radosgw.yaml | 5
-rw-r--r--  environments/cinder-netapp-config.yaml | 2
-rw-r--r--  environments/debug.yaml | 5
-rw-r--r--  environments/enable-internal-tls.yaml | 6
-rw-r--r--  environments/hyperconverged-ceph.yaml | 11
-rw-r--r--  environments/logging-environment.yaml | 29
-rw-r--r--  environments/low-memory-usage.yaml | 15
-rw-r--r--  environments/major-upgrade-aodh-migration.yaml | 6
-rw-r--r--  environments/major-upgrade-ceilometer-wsgi-mitaka-newton.yaml | 7
-rw-r--r--  environments/major-upgrade-pacemaker-converge.yaml | 4
-rw-r--r--  environments/major-upgrade-pacemaker-init.yaml | 6
-rw-r--r--  environments/major-upgrade-pacemaker.yaml | 6
-rw-r--r--  environments/major-upgrade-remove-sahara.yaml | 6
-rw-r--r--  environments/manage-firewall.yaml | 2
-rw-r--r--  environments/manila-cephfsnative-config.yaml | 17
-rw-r--r--  environments/manila-generic-config.yaml | 14
-rw-r--r--  environments/manila-netapp-config.yaml | 29
-rw-r--r--  environments/monitoring-environment.yaml | 30
-rw-r--r--  environments/neutron-ml2-ovn.yaml | 18
-rw-r--r--  environments/neutron-opencontrail.yaml | 2
-rw-r--r--  environments/neutron-opendaylight-l3.yaml | 6
-rw-r--r--  environments/neutron-opendaylight.yaml | 6
-rw-r--r--  environments/neutron-ovs-dpdk.yaml | 18
-rw-r--r--  environments/neutron-ovs-dvr.yaml | 15
-rwxr-xr-x  environments/neutron-sriov.yaml | 22
-rw-r--r--  environments/services/haproxy-internal-tls-certmonger.yaml | 4
-rw-r--r--  environments/services/haproxy-public-tls-certmonger.yaml | 4
-rw-r--r--  environments/tls-endpoints-public-dns.yaml | 3
-rw-r--r--  environments/tls-endpoints-public-ip.yaml | 3
-rw-r--r--  environments/tls-everywhere-endpoints-dns.yaml | 55
-rw-r--r--  environments/use-dns-for-vips.yaml | 5
-rw-r--r--  extraconfig/all_nodes/mac_hostname.j2.yaml (renamed from extraconfig/all_nodes/mac_hostname.yaml) | 52
-rw-r--r--  extraconfig/all_nodes/random_string.j2.yaml (renamed from extraconfig/all_nodes/random_string.yaml) | 14
-rw-r--r--  extraconfig/all_nodes/swap-partition.j2.yaml | 44
-rw-r--r--  extraconfig/all_nodes/swap-partition.yaml | 86
-rw-r--r--  extraconfig/all_nodes/swap.j2.yaml | 58
-rw-r--r--  extraconfig/all_nodes/swap.yaml | 104
-rw-r--r--  extraconfig/tasks/major_upgrade_block_storage.sh | 14
-rw-r--r--  extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml | 62
-rwxr-xr-x  extraconfig/tasks/major_upgrade_ceph_mon.sh | 82
-rw-r--r--  extraconfig/tasks/major_upgrade_ceph_storage.sh | 101
-rwxr-xr-x  extraconfig/tasks/major_upgrade_check.sh | 104
-rw-r--r--  extraconfig/tasks/major_upgrade_compute.sh | 17
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh | 162
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh | 40
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh | 26
-rw-r--r--  extraconfig/tasks/major_upgrade_object_storage.sh | 14
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml | 81
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml (renamed from extraconfig/tasks/major_upgrade_pacemaker_init.yaml) | 78
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh | 123
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml | 12
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp | 97
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh | 281
-rwxr-xr-x  extraconfig/tasks/pacemaker_resource_restart.sh | 26
-rwxr-xr-x  extraconfig/tasks/yum_update.sh | 15
-rw-r--r--  hosts-config.yaml | 30
-rw-r--r--  j2_excludes.yaml | 10
-rw-r--r--  net-config-bond.yaml | 12
-rw-r--r--  network/config/bond-with-vlans/ceph-storage.yaml | 5
-rw-r--r--  network/config/bond-with-vlans/cinder-storage.yaml | 5
-rw-r--r--  network/config/bond-with-vlans/compute-dpdk.yaml | 192
-rw-r--r--  network/config/bond-with-vlans/compute.yaml | 5
-rw-r--r--  network/config/bond-with-vlans/controller-no-external.yaml | 5
-rw-r--r--  network/config/bond-with-vlans/controller-v6.yaml | 5
-rw-r--r--  network/config/bond-with-vlans/controller.yaml | 5
-rw-r--r--  network/config/bond-with-vlans/swift-storage.yaml | 5
-rw-r--r--  network/endpoints/endpoint_data.yaml | 15
-rw-r--r--  network/endpoints/endpoint_map.yaml | 249
-rw-r--r--  network/external.yaml | 5
-rw-r--r--  network/external_v6.yaml | 5
-rw-r--r--  network/internal_api.yaml | 1
-rw-r--r--  network/internal_api_v6.yaml | 1
-rw-r--r--  network/management.yaml | 7
-rw-r--r--  network/ports/external_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/from_service.yaml | 6
-rw-r--r--  network/ports/from_service_v6.yaml | 6
-rw-r--r--  network/ports/internal_api_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/management_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/net_ip_list_map.yaml | 69
-rw-r--r--  network/ports/storage_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/storage_mgmt_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/tenant_from_pool_v6.yaml | 2
-rw-r--r--  network/service_net_map.j2.yaml (renamed from network/service_net_map.yaml) | 34
-rw-r--r--  network/storage.yaml | 1
-rw-r--r--  network/storage_mgmt.yaml | 1
-rw-r--r--  network/storage_mgmt_v6.yaml | 1
-rw-r--r--  network/storage_v6.yaml | 1
-rw-r--r--  network/tenant.yaml | 1
-rw-r--r--  network/tenant_v6.yaml | 1
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml (renamed from overcloud-resource-registry-puppet.yaml) | 121
l---------  overcloud-without-mergepy.yaml | 1
-rw-r--r--  overcloud.j2.yaml | 653
-rw-r--r--  overcloud.yaml | 1029
-rw-r--r--  puppet/all-nodes-config.yaml | 202
-rw-r--r--  puppet/blockstorage-role.yaml (renamed from puppet/cinder-storage.yaml) | 75
-rw-r--r--  puppet/ceph-storage-post.yaml | 80
-rw-r--r--  puppet/cephstorage-role.yaml (renamed from puppet/ceph-storage.yaml) | 63
-rw-r--r--  puppet/cinder-storage-post.yaml | 90
-rw-r--r--  puppet/compute-post.yaml | 92
-rw-r--r--  puppet/compute-role.yaml (renamed from puppet/compute.yaml) | 58
-rw-r--r--  puppet/config.role.j2.yaml (renamed from puppet/controller-config.yaml) | 16
-rw-r--r--  puppet/controller-config-pacemaker.yaml | 3
-rw-r--r--  puppet/controller-post.yaml | 116
-rw-r--r--  puppet/controller-role.yaml (renamed from puppet/controller.yaml) | 270
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml | 14
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml | 26
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml | 12
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml | 18
-rw-r--r--  puppet/extraconfig/tls/tls-cert-inject.yaml | 6
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp | 21
-rw-r--r--  puppet/manifests/overcloud_compute.pp | 21
-rw-r--r--  puppet/manifests/overcloud_object.pp | 21
-rw-r--r--  puppet/manifests/overcloud_role.pp (renamed from puppet/manifests/overcloud_controller.pp) | 9
-rw-r--r--  puppet/manifests/overcloud_volume.pp | 21
-rw-r--r--  puppet/objectstorage-role.yaml (renamed from puppet/swift-storage.yaml) | 89
-rw-r--r--  puppet/post.j2.yaml | 139
-rw-r--r--  puppet/role.role.j2.yaml | 452
-rw-r--r--  puppet/services/README.rst | 2
-rw-r--r--  puppet/services/aodh-api.yaml | 27
-rw-r--r--  puppet/services/aodh-base.yaml | 40
-rw-r--r--  puppet/services/aodh-evaluator.yaml | 4
-rw-r--r--  puppet/services/aodh-listener.yaml | 4
-rw-r--r--  puppet/services/aodh-notifier.yaml | 4
-rw-r--r--  puppet/services/apache-internal-tls-certmonger.yaml | 50
-rw-r--r--  puppet/services/apache.yaml | 66
-rw-r--r--  puppet/services/ceilometer-agent-central.yaml | 21
-rw-r--r--  puppet/services/ceilometer-agent-compute.yaml | 4
-rw-r--r--  puppet/services/ceilometer-agent-notification.yaml | 12
-rw-r--r--  puppet/services/ceilometer-api.yaml | 29
-rw-r--r--  puppet/services/ceilometer-base.yaml | 29
-rw-r--r--  puppet/services/ceilometer-collector.yaml | 25
-rw-r--r--  puppet/services/ceilometer-expirer.yaml | 5
-rw-r--r--  puppet/services/ceph-base.yaml | 40
-rw-r--r--  puppet/services/ceph-client.yaml | 4
-rw-r--r--  puppet/services/ceph-external.yaml | 6
-rw-r--r--  puppet/services/ceph-mon.yaml | 15
-rw-r--r--  puppet/services/ceph-osd.yaml | 4
-rw-r--r--  puppet/services/ceph-rgw.yaml | 79
-rw-r--r--  puppet/services/cinder-api.yaml | 77
-rw-r--r--  puppet/services/cinder-backup.yaml | 4
-rw-r--r--  puppet/services/cinder-base.yaml | 8
-rw-r--r--  puppet/services/cinder-scheduler.yaml | 12
-rw-r--r--  puppet/services/cinder-volume.yaml | 18
-rw-r--r--  puppet/services/database/mongodb.yaml | 12
-rw-r--r--  puppet/services/database/mysql.yaml | 6
-rw-r--r--  puppet/services/glance-api.yaml | 171
-rw-r--r--  puppet/services/glance-base.yaml | 110
-rw-r--r--  puppet/services/glance-registry.yaml | 99
-rw-r--r--  puppet/services/gnocchi-api.yaml | 50
-rw-r--r--  puppet/services/gnocchi-base.yaml | 16
-rw-r--r--  puppet/services/gnocchi-metricd.yaml | 9
-rw-r--r--  puppet/services/gnocchi-statsd.yaml | 4
-rw-r--r--  puppet/services/haproxy-internal-tls-certmonger.yaml | 51
-rw-r--r--  puppet/services/haproxy-public-tls-certmonger.yaml | 37
-rw-r--r--  puppet/services/haproxy.yaml | 69
-rw-r--r--  puppet/services/heat-api-cfn.yaml | 26
-rw-r--r--  puppet/services/heat-api-cloudwatch.yaml | 12
-rw-r--r--  puppet/services/heat-api.yaml | 26
-rw-r--r--  puppet/services/heat-base.yaml | 10
-rw-r--r--  puppet/services/heat-engine.yaml | 34
-rw-r--r--  puppet/services/horizon.yaml | 30
-rw-r--r--  puppet/services/ironic-api.yaml | 42
-rw-r--r--  puppet/services/ironic-base.yaml | 7
-rw-r--r--  puppet/services/ironic-conductor.yaml | 55
-rw-r--r--  puppet/services/keepalived.yaml | 7
-rw-r--r--  puppet/services/kernel.yaml | 4
-rw-r--r--  puppet/services/keystone.yaml | 206
-rw-r--r--  puppet/services/logging/fluentd-base.yaml | 37
-rw-r--r--  puppet/services/logging/fluentd-client.yaml | 64
-rw-r--r--  puppet/services/logging/fluentd-config.yaml | 154
-rw-r--r--  puppet/services/manila-api.yaml | 33
-rw-r--r--  puppet/services/manila-backend-cephfs.yaml | 57
-rw-r--r--  puppet/services/manila-backend-generic.yaml | 89
-rw-r--r--  puppet/services/manila-backend-netapp.yaml | 108
-rw-r--r--  puppet/services/manila-base.yaml | 72
-rw-r--r--  puppet/services/manila-scheduler.yaml | 6
-rw-r--r--  puppet/services/manila-share.yaml | 4
-rw-r--r--  puppet/services/memcached.yaml | 4
-rw-r--r--  puppet/services/monitoring/sensu-base.yaml | 68
-rw-r--r--  puppet/services/monitoring/sensu-client.yaml | 49
-rw-r--r--  puppet/services/network/contrail-analytics.yaml | 90
-rw-r--r--  puppet/services/network/contrail-base.yaml | 100
-rw-r--r--  puppet/services/network/contrail-config.yaml | 72
-rw-r--r--  puppet/services/network/contrail-control.yaml | 54
-rw-r--r--  puppet/services/network/contrail-database.yaml | 51
-rw-r--r--  puppet/services/network/contrail-webui.yaml | 69
-rw-r--r--  puppet/services/neutron-api.yaml | 131
-rw-r--r--  puppet/services/neutron-base.yaml | 15
-rw-r--r--  puppet/services/neutron-compute-plugin-ovn.yaml | 45
-rw-r--r--  puppet/services/neutron-dhcp.yaml | 12
-rw-r--r--  puppet/services/neutron-l3-compute-dvr.yaml | 12
-rw-r--r--  puppet/services/neutron-l3.yaml | 15
-rw-r--r--  puppet/services/neutron-metadata.yaml | 26
-rw-r--r--  puppet/services/neutron-midonet.yaml | 4
-rw-r--r--  puppet/services/neutron-ovs-agent.yaml | 29
-rw-r--r--  puppet/services/neutron-ovs-dpdk-agent.yaml | 8
-rw-r--r--  puppet/services/neutron-plugin-ml2-ovn.yaml | 79
-rw-r--r--  puppet/services/neutron-plugin-opencontrail.yaml | 2
-rw-r--r--  puppet/services/neutron-sriov-agent.yaml | 27
-rw-r--r--  puppet/services/nova-api.yaml | 134
-rw-r--r--  puppet/services/nova-base.yaml | 26
-rw-r--r--  puppet/services/nova-compute.yaml | 30
-rw-r--r--  puppet/services/nova-conductor.yaml | 23
-rw-r--r--  puppet/services/nova-consoleauth.yaml | 12
-rw-r--r--  puppet/services/nova-libvirt.yaml | 8
-rw-r--r--  puppet/services/nova-metadata.yaml | 42
-rw-r--r--  puppet/services/nova-scheduler.yaml | 12
-rw-r--r--  puppet/services/nova-vnc-proxy.yaml (renamed from puppet/services/nova-vncproxy.yaml) | 29
-rw-r--r--  puppet/services/opendaylight-api.yaml | 17
-rw-r--r--  puppet/services/opendaylight-ovs.yaml | 11
-rw-r--r--  puppet/services/pacemaker.yaml | 74
-rw-r--r--  puppet/services/pacemaker/ceilometer-agent-central.yaml | 4
-rw-r--r--  puppet/services/pacemaker/ceilometer-agent-notification.yaml | 4
-rw-r--r--  puppet/services/pacemaker/ceilometer-api.yaml | 4
-rw-r--r--  puppet/services/pacemaker/ceilometer-collector.yaml | 4
-rw-r--r--  puppet/services/pacemaker/cinder-api.yaml | 3
-rw-r--r--  puppet/services/pacemaker/cinder-backup.yaml | 1
-rw-r--r--  puppet/services/pacemaker/cinder-scheduler.yaml | 3
-rw-r--r--  puppet/services/pacemaker/cinder-volume.yaml | 4
-rw-r--r--  puppet/services/pacemaker/database/mongodb.yaml | 2
-rw-r--r--  puppet/services/pacemaker/database/mysql.yaml | 17
-rw-r--r--  puppet/services/pacemaker/database/redis.yaml | 2
-rw-r--r--  puppet/services/pacemaker/glance-api.yaml | 3
-rw-r--r--  puppet/services/pacemaker/glance-registry.yaml | 3
-rw-r--r--  puppet/services/pacemaker/gnocchi-api.yaml | 4
-rw-r--r--  puppet/services/pacemaker/gnocchi-metricd.yaml | 4
-rw-r--r--  puppet/services/pacemaker/gnocchi-statsd.yaml | 4
-rw-r--r--  puppet/services/pacemaker/haproxy.yaml | 1
-rw-r--r--  puppet/services/pacemaker/heat-api-cfn.yaml | 3
-rw-r--r--  puppet/services/pacemaker/heat-api-cloudwatch.yaml | 3
-rw-r--r--  puppet/services/pacemaker/heat-api.yaml | 3
-rw-r--r--  puppet/services/pacemaker/heat-engine.yaml | 3
-rw-r--r--  puppet/services/pacemaker/horizon.yaml | 1
-rw-r--r--  puppet/services/pacemaker/keystone.yaml | 3
-rw-r--r--  puppet/services/pacemaker/manila-share.yaml | 1
-rw-r--r--  puppet/services/pacemaker/memcached.yaml | 1
-rw-r--r--  puppet/services/pacemaker/neutron-dhcp.yaml | 3
-rw-r--r--  puppet/services/pacemaker/neutron-l3.yaml | 3
-rw-r--r--  puppet/services/pacemaker/neutron-metadata.yaml | 3
-rw-r--r--  puppet/services/pacemaker/neutron-midonet.yaml | 1
-rw-r--r--  puppet/services/pacemaker/neutron-ovs-agent.yaml | 3
-rw-r--r--  puppet/services/pacemaker/neutron-server.yaml | 1
-rw-r--r--  puppet/services/pacemaker/nova-api.yaml | 3
-rw-r--r--  puppet/services/pacemaker/nova-conductor.yaml | 3
-rw-r--r--  puppet/services/pacemaker/nova-consoleauth.yaml | 3
-rw-r--r--  puppet/services/pacemaker/nova-scheduler.yaml | 3
-rw-r--r--  puppet/services/pacemaker/nova-vnc-proxy.yaml (renamed from puppet/services/pacemaker/nova-vncproxy.yaml) | 7
-rw-r--r--  puppet/services/pacemaker/rabbitmq.yaml | 1
-rw-r--r--  puppet/services/pacemaker/sahara-api.yaml | 3
-rw-r--r--  puppet/services/pacemaker/sahara-engine.yaml | 3
-rw-r--r--  puppet/services/rabbitmq.yaml | 22
-rw-r--r--  puppet/services/sahara-api.yaml | 33
-rw-r--r--  puppet/services/sahara-base.yaml | 8
-rw-r--r--  puppet/services/sahara-engine.yaml | 12
-rw-r--r--  puppet/services/services.yaml | 56
-rw-r--r--  puppet/services/snmp.yaml | 4
-rw-r--r--  puppet/services/swift-proxy.yaml | 54
-rw-r--r--  puppet/services/swift-ringbuilder.yaml | 22
-rw-r--r--  puppet/services/swift-storage.yaml | 6
-rw-r--r--  puppet/services/time/ntp.yaml | 2
-rw-r--r--  puppet/services/tripleo-firewall.yaml | 11
-rw-r--r--  puppet/services/vip-hosts.yaml | 56
-rw-r--r--  puppet/swift-devices-and-proxy-config.yaml | 45
-rw-r--r--  puppet/swift-storage-post.yaml | 90
-rw-r--r--  puppet/vip-config.yaml | 58
-rw-r--r--  roles_data.yaml | 165
-rwxr-xr-x  tools/yaml-validate.py | 22
269 files changed, 7946 insertions, 3351 deletions
diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index c7816b7e..ae747621 100644
--- a/capabilities-map.yaml
+++ b/capabilities-map.yaml
@@ -41,17 +41,17 @@
root_template: overcloud.yaml
root_environment: overcloud-resource-registry-puppet.yaml
topics:
- - title: Basic Configuration
+ - title: Base Resources Configuration
description:
environment_groups:
- title:
- description: Enable basic configuration required for OpenStack Deployment
+ description: Enable base configuration for all resources required for OpenStack Deployment
environments:
- file: overcloud-resource-registry-puppet.yaml
- title: Default Configuration
+ title: Base resources configuration
description:
- - title: Deployment options
+ - title: Deployment Options
description:
environment_groups:
- title: High Availability
@@ -62,6 +62,15 @@ topics:
description: Enable configuration of an Overcloud controller with Pacemaker
requires:
- overcloud-resource-registry-puppet.yaml
+ - title: Pacemaker options
+ description:
+ environments:
+ - file: environments/puppet-pacemaker-no-restart.yaml
+ title: Pacemaker No Restart
+ description:
+ requires:
+ - environments/puppet-pacemaker.yaml
+ - overcloud-resource-registry-puppet.yaml
- title: Docker RDO
description: >
Docker container with heat agents for containerized compute node
@@ -71,26 +80,114 @@ topics:
description:
requires:
- overcloud-resource-registry-puppet.yaml
+ - title: Enable TLS
+ description: >
+ environments:
+ - file: environments/enable-tls.yaml
+ title: TLS
+ description: >
+ Use this option to pass in certificates for SSL deployments.
+ For these values to take effect, one of the TLS endpoints
+ environments must also be used.
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: TLS Endpoints
+ description: >
+ environments:
+ - file: environments/tls-endpoints-public-dns.yaml
+ title: SSL-enabled deployment with DNS name as public endpoint
+ description: >
+ Use this environment when deploying an SSL-enabled overcloud where the public
+ endpoint is a DNS name.
+ requires:
+ - environments/enable-tls.yaml
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/tls-endpoints-public-ip.yaml
+ title: SSL-enabled deployment with IP address as public endpoint
+ description: >
+ Use this environment when deploying an SSL-enabled overcloud where the public
+ endpoint is an IP address.
+ requires:
+ - environments/enable-tls.yaml
+ - overcloud-resource-registry-puppet.yaml
+ - title: External load balancer
+ description: >
+ Enable external load balancer
+ environments:
+ - file: environments/external-loadbalancer-vip-v6.yaml
+ title: External load balancer IPv6
+ description: >
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/external-loadbalancer-vip.yaml
+ title: External load balancer IPv4
+ description: >
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+
+ - title: Additional Services
+ description: Deploy additional Overcloud services
+ environment_groups:
+ - title: Manila
+ description:
+ environments:
+ - file: environments/manila-generic-config.yaml
+ title: Manila
+ description: Enable Manila generic driver backend
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Sahara
+ description:
+ environments:
+ - file: environments/services/sahara.yaml
+ title: Sahara
+ description: Deploy Sahara service
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Ironic
+ description:
+ environments:
+ - file: environments/services/ironic.yaml
+ title: Ironic
+ description: Deploy Ironic service
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Mistral
+ description:
+ environments:
+ - file: environments/services/mistral.yaml
+ title: Mistral
+ description: Deploy Mistral service
+ requires:
+ - overcloud-resource-registry-puppet.yaml
# - title: Network Interface Configuration
# description:
# environment_groups:
- - title: Overlay network Configuration
+ - title: Overlay Network Configuration
description:
environment_groups:
- title: Network Isolation
- description: >
- Enable the creation of Neutron networks for
- isolated Overcloud traffic and configure each role to assign ports
- (related to that role) on these networks.
+ description:
environments:
- file: environments/network-isolation.yaml
title: Network Isolation
- description: Enable Network Isolation
+ description: >
+ Enable the creation of Neutron networks for
+ isolated Overcloud traffic and configure each role to assign ports
+ (related to that role) on these networks.
requires:
- overcloud-resource-registry-puppet.yaml
- - title: Single nic or Bonding
+ - file: environments/network-isolation-v6.yaml
+ title: Network Isolation IPv6
+ description: >
+ Enable the creation of IPv6 Neutron networks for isolated Overcloud
+ traffic and configure each role to assign ports (related
+ to that role) on these networks.
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Single NIC or Bonding
description: >
Configure roles to use pair of bonded nics or to use Vlans on a
single nic. This option assumes use of Network Isolation.
@@ -104,23 +201,105 @@ topics:
requires:
- environments/network-isolation.yaml
- overcloud-resource-registry-puppet.yaml
+ - file: environments/net-bond-with-vlans-no-external.yaml
+ title: Bond with Vlans No External Ports
+ description: >
+ Configure each role to use a pair of bonded nics (nic2 and
+ nic3) and configures an IP address on each relevant isolated network
+ for each role. This option assumes use of Network Isolation.
+ Sets external ports to noop.
+ requires:
+ - environments/network-isolation.yaml
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/net-bond-with-vlans-v6.yaml
+ title: Bond with Vlans IPv6
+ description: >
+ Configure each role to use a pair of bonded nics (nic2 and
+ nic3) and configures an IP address on each relevant isolated network
+ for each role, with IPv6 on the External network.
+ This option assumes use of Network Isolation IPv6.
+ requires:
+ - environments/network-isolation-v6.yaml
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/net-multiple-nics.yaml
+ title: Multiple NICs
+ description: >
+ Configures each role to use a separate NIC for
+ each isolated network.
+ This option assumes use of Network Isolation.
+ requires:
+ - environments/network-isolation.yaml
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/net-multiple-nics-v6.yaml
+ title: Multiple NICs IPv6
+ description: >
+ Configure each role to use a separate NIC for
+ each isolated network with IPv6 on the External network.
+ This option assumes use of Network Isolation IPv6.
+ requires:
+ - environments/network-isolation-v6.yaml
+ - overcloud-resource-registry-puppet.yaml
- file: environments/net-single-nic-with-vlans.yaml
- title: Single nic with Vlans
+ title: Single NIC with Vlans
+ description: >
+ Configure each role to use Vlans on a single NIC for
+ each isolated network. This option assumes use of Network Isolation.
+ requires:
+ - environments/network-isolation.yaml
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/net-single-nic-with-vlans-no-external.yaml
+ title: Single NIC with Vlans No External Ports
+ description: >
+ Configure each role to use Vlans on a single NIC for
+ each isolated network. This option assumes use of Network Isolation.
+ Sets external ports to noop.
+ requires:
+ - environments/network-isolation.yaml
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/net-single-nic-linux-bridge-with-vlans.yaml
+ title: Single NIC with Linux Bridge Vlans
description: >
- Configure each role to use Vlans on a single nic for
+ Configure each role to use Vlans on a single NIC for
each isolated network. This option assumes use of Network Isolation.
requires:
- environments/network-isolation.yaml
- overcloud-resource-registry-puppet.yaml
+ - file: environments/net-single-nic-with-vlans-v6.yaml
+ title: Single NIC with Vlans IPv6
+ description: >
+ Configures each role to use Vlans on a single NIC for
+ each isolated network with IPv6 on the External network.
+ This option assumes use of Network Isolation IPv6
+ requires:
+ - environments/network-isolation-v6.yaml
+ - overcloud-resource-registry-puppet.yaml
+ - title: Management Network
+ description: >
+ Enable the creation of a system management network. This
+ creates a Neutron network for isolated Overcloud
+ system management traffic and configures each role to
+ assign a port (related to that role) on that network.
+ environments:
+ - file: environments/network-management.yaml
+ title: Management Network
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/network-management-v6.yaml
+ title: Management Network IPv6
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- title: Neutron Plugin Configuration
description:
environment_groups:
- - title: BigSwitch extensions or Cisco N1KV backend
- description:
+ - title: Neutron Plugins
+ description: >
+ Enable various Neutron plugins and backends
environments:
- file: environments/neutron-ml2-bigswitch.yaml
- title: BigSwitch extensions
+ title: BigSwitch Extensions
description: >
Enable Big Switch extensions, configured via puppet
requires:
@@ -131,28 +310,101 @@ topics:
Enable a Cisco N1KV backend, configured via puppet
requires:
- overcloud-resource-registry-puppet.yaml
- - title: Cisco Neutron plugin
- description: >
- Enable a Cisco Neutron plugin
- environments:
- file: environments/neutron-ml2-cisco-nexus-ucsm.yaml
title: Cisco Neutron plugin
description:
requires:
- overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-midonet.yaml
+ title: Deploy MidoNet Services
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-nuage-config.yaml
+ title: Neutron Nuage backend
+ description: Enables Neutron Nuage backend on the controller
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-opencontrail.yaml
+ title: OpenContrail Extensions
+ description: Enables OpenContrail extensions
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-opendaylight.yaml
+ title: OpenDaylight
+ description: Enables OpenDaylight
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-opendaylight-l3.yaml
+ title: OpenDaylight with L3 DVR
+ description: Enables OpenDaylight with L3 DVR
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-ovs-dpdk.yaml
+ title: DPDK with OVS
+ description: Deploy DPDK with OVS
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-ovs-dvr.yaml
+ title: DVR
+ description: Enables DVR in the Overcloud
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-plumgrid.yaml
+ title: PLUMgrid extensions
+ description: Enables PLUMgrid extensions
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+
+ - title: Nova Extensions
+ description:
+ environment_groups:
+ - title: Nova Extensions
+ description:
+ environments:
+ - file: environments/nova-nuage-config.yaml
+ title: Nuage backend
+ description: >
+ Enables Nuage backend on the Compute
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- title: Storage
description:
environment_groups:
- - title: Cinder NetApp backend
+ - title: Cinder backup service
+ description:
+ environments:
+ - file: environments/cinder-backup.yaml
+ title: Cinder backup service
+ description: >
+ OpenStack Cinder Backup service with Pacemaker configured
+ with Puppet
+ requires:
+ - environments/puppet-pacemaker.yaml
+ - overcloud-resource-registry-puppet.yaml
+ - title: Cinder backend
description: >
- Enable a Cinder NetApp backend, configured via puppet
+ Enable various Cinder backends
environments:
- file: environments/cinder-netapp-config.yaml
title: Cinder NetApp backend
description:
requires:
- overcloud-resource-registry-puppet.yaml
+ - file: environments/cinder-dellsc-config.yaml
+ title: Cinder Dell Storage Center ISCSI backend
+ description: >
+ Enables a Cinder Dell Storage Center ISCSI backend, configured
+ via puppet
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/cinder-eqlx-config.yaml
+ title: Cinder EQLX backend
+ description: >
+ Enables a Cinder EQLX backend, configured via puppet
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- title: Externally managed Ceph
description: >
Enable the use of an externally managed Ceph cluster
@@ -224,3 +476,31 @@ topics:
description:
requires:
- overcloud-resource-registry-puppet.yaml
+ - title: Manage Firewall
+ description:
+ environments:
+ - file: environments/manage-firewall.yaml
+ title: Manage Firewall
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+
+ - title: Operational Tools
+ description:
+ environment_groups:
+ - title: Monitoring agents
+ description: Enable monitoring agents
+ environments:
+ - file: environments/monitoring-environment.yaml
+ title: enable monitoring agents
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Centralized logging support
+ description: Enable centralized logging clients (fluentd)
+ environments:
+ - file: environments/logging-environment.yaml
+ title: Enable fluentd client
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh
index 65c4e6dc..acb44ce5 100644
--- a/docker/firstboot/start_docker_agents.sh
+++ b/docker/firstboot/start_docker_agents.sh
@@ -18,12 +18,14 @@ echo "127.0.0.1 $HOSTNAME.localdomain $HOSTNAME" >> /etc/hosts
#echo "ADD_REGISTRY='--registry-mirror $docker_registry'" >> /etc/sysconfig/docker
# Local docker registry 1.8
-if [ $docker_namespace_is_registry ]; then
+# NOTE(mandre) $docker_namespace_is_registry is not a bash variable but is
+# a place holder for text replacement done via heat
+if [ "$docker_namespace_is_registry" = True ]; then
/usr/bin/systemctl stop docker.service
# if namespace is used with local registry, trim all namespacing
trim_var=$docker_registry
registry_host="${trim_var%%/*}"
- /bin/sed -i "s/# INSECURE_REGISTRY='--insecure-registry[ ]'/INSECURE_REGISTRY='--insecure-registry $registry_host'/g" /etc/sysconfig/docker
+ /bin/sed -i -r "s/^[# ]*INSECURE_REGISTRY *=.+$/INSECURE_REGISTRY='--insecure-registry $registry_host'/" /etc/sysconfig/docker
/usr/bin/systemctl start --no-block docker.service
fi
@@ -32,6 +34,25 @@ DOCKER_PULL_PID=$!
mkdir -p /var/lib/etc-data/json-config #FIXME: this should be a docker data container
+# NOTE(flaper87): Heat Agent required mounts
+AGENT_COMMAND_MOUNTS="-v /var/lib/etc-data:/var/lib/etc-data \
+ -v /run:/run \
+ -v /etc:/host/etc \
+ -v /usr/bin/atomic:/usr/bin/atomic \
+ -v /var/lib/dhclient:/var/lib/dhclient \
+ -v /var/lib/cloud:/var/lib/cloud \
+ -v /var/lib/heat-cfntools:/var/lib/heat-cfntools \
+ -v /etc/sysconfig/docker:/etc/sysconfig/docker \
+ -v /usr/lib64/libseccomp.so.2:/usr/lib64/libseccomp.so.2"
+
+
+# NOTE(flaper87): Some of these commands may not be present depending on the
+# atomic version.
+for docker_cmd in docker docker-current docker-latest; do
+ if [ -f "/usr/bin/$docker_cmd" ]; then
+ AGENT_COMMAND_MOUNTS+=" -v /usr/bin/$docker_cmd:/usr/bin/$docker_cmd"
+ fi
+done
# heat-docker-agents service
cat <<EOF > /etc/systemd/system/heat-docker-agents.service
@@ -46,7 +67,9 @@ User=root
Restart=on-failure
ExecStartPre=-/usr/bin/docker kill heat-agents
ExecStartPre=-/usr/bin/docker rm heat-agents
-ExecStart=/usr/bin/docker run --name heat-agents --privileged --net=host -v /var/lib/etc-data:/var/lib/etc-data -v /run:/run -v /etc:/host/etc -v /usr/bin/atomic:/usr/bin/atomic -v /var/lib/dhclient:/var/lib/dhclient -v /var/lib/cloud:/var/lib/cloud -v /var/lib/heat-cfntools:/var/lib/heat-cfntools -v /usr/bin/docker:/usr/bin/docker --entrypoint=/usr/bin/os-collect-config $agent_image
+ExecStart=/usr/bin/docker run --name heat-agents --privileged --net=host \
+ $AGENT_COMMAND_MOUNTS \
+ --entrypoint=/usr/bin/os-collect-config $agent_image
ExecStop=/usr/bin/docker stop heat-agents
[Install]
diff --git a/environments/ceph-radosgw.yaml b/environments/ceph-radosgw.yaml
new file mode 100644
index 00000000..a9221a2a
--- /dev/null
+++ b/environments/ceph-radosgw.yaml
@@ -0,0 +1,5 @@
+resource_registry:
+ OS::TripleO::Services::CephRgw: ../puppet/services/ceph-rgw.yaml
+ OS::TripleO::Services::SwiftProxy: OS::Heat::None
+ OS::TripleO::Services::SwiftStorage: OS::Heat::None
+ OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
diff --git a/environments/cinder-netapp-config.yaml b/environments/cinder-netapp-config.yaml
index 0437cc67..b9a84342 100644
--- a/environments/cinder-netapp-config.yaml
+++ b/environments/cinder-netapp-config.yaml
@@ -25,5 +25,5 @@ parameter_defaults:
CinderNetappControllerIps: ''
CinderNetappSaPassword: ''
CinderNetappStoragePools: ''
- CinderNetappEseriesHostType: 'linux_dm_mp'
+ CinderNetappHostType: ''
CinderNetappWebservicePath: '/devmgr/v2'
diff --git a/environments/debug.yaml b/environments/debug.yaml
new file mode 100644
index 00000000..b938555c
--- /dev/null
+++ b/environments/debug.yaml
@@ -0,0 +1,5 @@
+# A Heat environment file which can be used to enable the debug
+# setting in the overcloud openstack services configuration.
+
+parameter_defaults:
+ Debug: true
diff --git a/environments/enable-internal-tls.yaml b/environments/enable-internal-tls.yaml
new file mode 100644
index 00000000..5116c6da
--- /dev/null
+++ b/environments/enable-internal-tls.yaml
@@ -0,0 +1,6 @@
+# A Heat environment file which can be used to enable
+# TLS for the internal network via certmonger
+parameter_defaults:
+ EnableInternalTLS: true
+resource_registry:
+ OS::TripleO::Services::ApacheTLS: ../../puppet/services/apache-internal-tls-certmonger.yaml
diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml
new file mode 100644
index 00000000..cee4ae4a
--- /dev/null
+++ b/environments/hyperconverged-ceph.yaml
@@ -0,0 +1,11 @@
+# If not using an isolated StorageMgmt network, the following registry mapping
+# should be commented.
+resource_registry:
+ OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+
+parameter_defaults:
+ ComputeServices:
+ - OS::TripleO::Services::CephOSD
+
+parameter_merge_strategies:
+ ComputeServices:  merge
\ No newline at end of file
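For context: with parameter_merge_strategies set to "merge", the ComputeServices entries in this environment are appended to the Compute role's existing service list rather than replacing it. A minimal sketch of a custom environment reusing the same pattern (the extra service is purely illustrative, borrowed from the monitoring environment later in this diff):

# Hypothetical operator environment: append one more service to the Compute role
parameter_defaults:
  ComputeServices:
    - OS::TripleO::Services::SensuClient   # illustrative extra service
parameter_merge_strategies:
  ComputeServices: merge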
diff --git a/environments/logging-environment.yaml b/environments/logging-environment.yaml
new file mode 100644
index 00000000..c583ca79
--- /dev/null
+++ b/environments/logging-environment.yaml
@@ -0,0 +1,29 @@
+## A Heat environment file which can be used to set up
+## logging agents
+
+resource_registry:
+ OS::TripleO::Services::FluentdClient: ../puppet/services/logging/fluentd-client.yaml
+
+#parameter_defaults:
+
+## Simple configuration
+#
+# LoggingServers:
+# - host: log0.example.com
+# port: 24224
+# - host: log1.example.com
+# port: 24224
+#
+## Example SSL configuration
+## (note the use of port 24284 for ssl connections)
+#
+# LoggingServers:
+# - host: 192.0.2.11
+# port: 24284
+# LoggingUsesSSL: true
+# LoggingSharedKey: secret
+# LoggingSSLCertificate: |
+# -----BEGIN CERTIFICATE-----
+# ...certificate data here...
+# -----END CERTIFICATE-----
+
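As a worked example of the commented settings above, a minimal operator-supplied environment that points the fluentd clients at a single collector could look like the sketch below (host and port are the placeholder values from the comments):

parameter_defaults:
  LoggingServers:
    - host: log0.example.com   # placeholder collector from the example above
      port: 24224              # plain (non-SSL) fluentd forward port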
diff --git a/environments/low-memory-usage.yaml b/environments/low-memory-usage.yaml
new file mode 100644
index 00000000..ad428686
--- /dev/null
+++ b/environments/low-memory-usage.yaml
@@ -0,0 +1,15 @@
+# Lower the memory usage of overcloud.
+parameter_defaults:
+ CeilometerWorkers: 1
+ CinderWorkers: 1
+ GlanceWorkers: 1
+ HeatWorkers: 1
+ KeystoneWorkers: 1
+ NeutronWorkers: 1
+ NovaWorkers: 1
+ SaharaWorkers: 1
+ SwiftWorkers: 1
+ GnocchiMetricdWorkers: 1
+
+ ApacheMaxRequestWorkers: 32
+ ApacheServerLimit: 32
diff --git a/environments/major-upgrade-aodh-migration.yaml b/environments/major-upgrade-aodh-migration.yaml
index c1dbde42..9d6ce73e 100644
--- a/environments/major-upgrade-aodh-migration.yaml
+++ b/environments/major-upgrade-aodh-migration.yaml
@@ -3,8 +3,4 @@ resource_registry:
OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
# no-op the rest
- OS::TripleO::ControllerPostDeployment: OS::Heat::None
- OS::TripleO::ComputePostDeployment: OS::Heat::None
- OS::TripleO::ObjectStoragePostDeployment: OS::Heat::None
- OS::TripleO::BlockStoragePostDeployment: OS::Heat::None
- OS::TripleO::CephStoragePostDeployment: OS::Heat::None
+ OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-ceilometer-wsgi-mitaka-newton.yaml b/environments/major-upgrade-ceilometer-wsgi-mitaka-newton.yaml
new file mode 100644
index 00000000..6798c255
--- /dev/null
+++ b/environments/major-upgrade-ceilometer-wsgi-mitaka-newton.yaml
@@ -0,0 +1,7 @@
+resource_registry:
+
+ # This initiates the upgrades for ceilometer api to run under apache wsgi
+ OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
+
+ # no-op the rest
+ OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-pacemaker-converge.yaml b/environments/major-upgrade-pacemaker-converge.yaml
index f023cb32..e9a5f9be 100644
--- a/environments/major-upgrade-pacemaker-converge.yaml
+++ b/environments/major-upgrade-pacemaker-converge.yaml
@@ -1,2 +1,6 @@
parameter_defaults:
UpgradeLevelNovaCompute: ''
+
+resource_registry:
+ OS::TripleO::Services::SaharaApi: ../puppet/services/sahara-api.yaml
+ OS::TripleO::Services::SaharaEngine: ../puppet/services/sahara-engine.yaml
diff --git a/environments/major-upgrade-pacemaker-init.yaml b/environments/major-upgrade-pacemaker-init.yaml
index d97f8fc1..f4f361df 100644
--- a/environments/major-upgrade-pacemaker-init.yaml
+++ b/environments/major-upgrade-pacemaker-init.yaml
@@ -3,8 +3,4 @@ parameter_defaults:
resource_registry:
OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker_init.yaml
- OS::TripleO::ControllerPostDeployment: OS::Heat::None
- OS::TripleO::ComputePostDeployment: OS::Heat::None
- OS::TripleO::ObjectStoragePostDeployment: OS::Heat::None
- OS::TripleO::BlockStoragePostDeployment: OS::Heat::None
- OS::TripleO::CephStoragePostDeployment: OS::Heat::None
+ OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-pacemaker.yaml b/environments/major-upgrade-pacemaker.yaml
index 95f09666..9fb51a4d 100644
--- a/environments/major-upgrade-pacemaker.yaml
+++ b/environments/major-upgrade-pacemaker.yaml
@@ -3,8 +3,4 @@ parameter_defaults:
resource_registry:
OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker.yaml
- OS::TripleO::ControllerPostDeployment: OS::Heat::None
- OS::TripleO::ComputePostDeployment: OS::Heat::None
- OS::TripleO::ObjectStoragePostDeployment: OS::Heat::None
- OS::TripleO::BlockStoragePostDeployment: OS::Heat::None
- OS::TripleO::CephStoragePostDeployment: OS::Heat::None
+ OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-remove-sahara.yaml b/environments/major-upgrade-remove-sahara.yaml
new file mode 100644
index 00000000..e0aaf130
--- /dev/null
+++ b/environments/major-upgrade-remove-sahara.yaml
@@ -0,0 +1,6 @@
+parameter_defaults:
+ KeepSaharaServicesOnUpgrade: false
+resource_registry:
+ OS::TripleO::Services::SaharaApi: OS::Heat::None
+ OS::TripleO::Services::SaharaEngine: OS::Heat::None
+
diff --git a/environments/manage-firewall.yaml b/environments/manage-firewall.yaml
deleted file mode 100644
index 5d48698e..00000000
--- a/environments/manage-firewall.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-parameter_defaults:
- ManageFirewall: true
diff --git a/environments/manila-cephfsnative-config.yaml b/environments/manila-cephfsnative-config.yaml
new file mode 100644
index 00000000..825a5066
--- /dev/null
+++ b/environments/manila-cephfsnative-config.yaml
@@ -0,0 +1,17 @@
+# A Heat environment file which can be used to enable
+# a Manila CephFS Native driver backend.
+resource_registry:
+ OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ # Only manila-share is pacemaker managed:
+ OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::Tripleo::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml
+
+
+parameter_defaults:
+ ManilaCephFSNativeBackendName: cephfsnative
+ ManilaCephFSNativeDriverHandlesShareServers: false
+ ManilaCephFSNativeCephFSConfPath: '/etc/ceph/ceph.conf'
+ ManilaCephFSNativeCephFSAuthId: 'manila'
+ ManilaCephFSNativeCephFSClusterName: 'ceph'
+ ManilaCephFSNativeCephFSEnableSnapshots: true
diff --git a/environments/manila-generic-config.yaml b/environments/manila-generic-config.yaml
index 74011c66..9344bc6e 100644
--- a/environments/manila-generic-config.yaml
+++ b/environments/manila-generic-config.yaml
@@ -1,14 +1,16 @@
-# A Heat environment file which can be used to enable a
-# a Manila generic driver backend.
+# This environment file enables Manila with the Generic backend.
resource_registry:
OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
# Only manila-share is pacemaker managed:
OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
-
+ OS::Tripleo::Services::ManilaBackendGeneric: ../puppet/services/manila-backend-generic.yaml
parameter_defaults:
- ManilaGenericEnableBackend: true
+ ManilaServiceInstanceUser: ''
+ ManilaServiceInstancePassword: ''
+ ManilaServiceInstanceFlavorId: 2
+ ManilaServiceNetworkCidr: '172.16.0.0/16'
ManilaGenericBackendName: tripleo_generic
ManilaGenericDriverHandlesShareServers: true
ManilaGenericSmbTemplateConfigPath: '$state_path/smb.conf'
@@ -20,7 +22,3 @@ parameter_defaults:
ManilaGenericServiceInstanceSmbConfigPath: '$share_mount_path/smb.conf'
ManilaGenericShareVolumeFsType: 'ext4'
ManilaGenericCinderVolumeType: ''
- ManilaGenericServiceInstanceUser: ''
- ManilaGenericServiceInstancePassword: ''
- ManilaGenericServiceInstanceFlavorId: 2
- ManilaGenericServiceNetworkCidr: '172.16.0.0/16'
diff --git a/environments/manila-netapp-config.yaml b/environments/manila-netapp-config.yaml
new file mode 100644
index 00000000..3dadfe5d
--- /dev/null
+++ b/environments/manila-netapp-config.yaml
@@ -0,0 +1,29 @@
+# This environment file enables Manila with the Netapp backend.
+resource_registry:
+ OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ # Only manila-share is pacemaker managed:
+ OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::Tripleo::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml
+
+parameter_defaults:
+ ManilaNetappBackendName: tripleo_netapp
+ ManilaNetappDriverHandlesShareServers: true
+ ManilaNetappLogin: ''
+ ManilaNetappPassword: ''
+ ManilaNetappServerHostname: ''
+ ManilaNetappTransportType: 'http'
+ ManilaNetappStorageFamily: 'ontap_cluster'
+ ManilaNetappServerPort: 80
+ ManilaNetappVolumeNameTemplate: 'share_%(share_id)s'
+ ManilaNetappVserver: ''
+ ManilaNetappVserverNameTemplate: 'os_%s'
+ ManilaNetappLifNameTemplate: 'os_%(net_allocation_id)s'
+ ManilaNetappAggrNameSearchPattern: '(.*)'
+ ManilaNetappRootVolumeAggr: ''
+ ManilaNetappRootVolume: 'root'
+ ManilaNetappPortNameSearchPattern: '(.*)'
+ ManilaNetappTraceFlags: ''
+ ManilaNetappEnabledShareProtocols: 'nfs3, nfs4.0'
+ ManilaNetappVolumeSnapshotReservePercent: 5
+ ManilaNetappSnapmirrorQuiesceTimeout: 3600
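The empty defaults above are placeholders; a deployment would normally override at least the NetApp connection settings in an additional environment. A minimal sketch with hypothetical values:

parameter_defaults:
  ManilaNetappLogin: 'manila_admin'          # hypothetical cluster credential
  ManilaNetappPassword: 'secret'             # hypothetical cluster credential
  ManilaNetappServerHostname: '192.0.2.50'   # hypothetical ONTAP management address
  ManilaNetappVserver: 'manila_vserver'      # hypothetical vserver name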
diff --git a/environments/monitoring-environment.yaml b/environments/monitoring-environment.yaml
new file mode 100644
index 00000000..62ab06dc
--- /dev/null
+++ b/environments/monitoring-environment.yaml
@@ -0,0 +1,30 @@
+## A Heat environment file which can be used to set up monitoring
+## and logging agents
+
+resource_registry:
+ OS::TripleO::Services::SensuClient: ../puppet/services/monitoring/sensu-client.yaml
+
+#parameter_defaults:
+ #### Sensu settings ####
+ ##MonitoringRabbitHost: 10.10.10.10
+ ##MonitoringRabbitPort: 5672
+ ##MonitoringRabbitUserName: sensu
+ ##MonitoringRabbitPassword: sensu
+ ##MonitoringRabbitUseSSL: false
+ ##MonitoringRabbitVhost: "/sensu"
+ ##SensuClientCustomConfig:
+ ## - api:
+ ## - warning: 10
+ ## critical: 20
+ ## openstack:
+ ## - username: admin
+ ## password: changeme
+ ## project_name: admin
+ ## auth_url: http://controller:5000/v2.0
+ ## region_name: RegionOne
+
+ #### EFK settings ####
+ ## TBD
+
+ #### Grafana/Graphite settings ####
+ ## TBD
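To illustrate the commented Sensu settings, a minimal parameter_defaults block an operator might supply alongside this environment (all values are the placeholders from the comments above) would be:

parameter_defaults:
  MonitoringRabbitHost: 10.10.10.10    # placeholder RabbitMQ host used by Sensu
  MonitoringRabbitPort: 5672
  MonitoringRabbitUserName: sensu
  MonitoringRabbitPassword: sensu      # placeholder credential
  MonitoringRabbitUseSSL: false
  MonitoringRabbitVhost: "/sensu"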
diff --git a/environments/neutron-ml2-ovn.yaml b/environments/neutron-ml2-ovn.yaml
new file mode 100644
index 00000000..821ad0c2
--- /dev/null
+++ b/environments/neutron-ml2-ovn.yaml
@@ -0,0 +1,18 @@
+# A Heat environment file which can be used to enable OVN
+# extensions, configured via puppet
+resource_registry:
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginML2OVN
+ OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-ovn.yaml
+
+parameter_defaults:
+ NeutronMechanismDrivers: ovn
+ OVNDbHost: '0.0.0.0'
+ OVNSouthboundServerPort: 6642
+ OVNNorthboundServerPort: 6641
+ OVNDbConnectionTimeout: 60
+ OVNVifType: ovs
+ OVNNeutronSyncMode: log
+ OVNQosDriver: ovn-qos
+ OVNTunnelEncapType: geneve
diff --git a/environments/neutron-opencontrail.yaml b/environments/neutron-opencontrail.yaml
index f2209ce2..51575b86 100644
--- a/environments/neutron-opencontrail.yaml
+++ b/environments/neutron-opencontrail.yaml
@@ -13,7 +13,7 @@ resource_registry:
parameter_defaults:
NeutronCorePlugin: neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2
- NeutronServicePlugins: neutron_plugin_contrail.plugins.opencontrail.loadbalancer.plugin.LoadBalancerPlugin
+ NeutronServicePlugins: neutron_plugin_contrail.plugins.opencontrail.loadbalancer.v2.plugin.LoadBalancerPluginV2
NeutronTunnelTypes: ''
# required params:
diff --git a/environments/neutron-opendaylight-l3.yaml b/environments/neutron-opendaylight-l3.yaml
index d61270b2..00be3048 100644
--- a/environments/neutron-opendaylight-l3.yaml
+++ b/environments/neutron-opendaylight-l3.yaml
@@ -2,12 +2,12 @@
resource_registry:
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
- OS::TripleO::Services::OpenDaylight: puppet/services/opendaylight-api.yaml
- OS::TripleO::Services::OpenDaylightOvs: puppet/services/opendaylight-ovs.yaml
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+ OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
+ OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
parameter_defaults:
- EnableOpenDaylightOnController: true
NeutronEnableForceMetadata: true
NeutronMechanismDrivers: 'opendaylight'
NeutronServicePlugins: "networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin"
diff --git a/environments/neutron-opendaylight.yaml b/environments/neutron-opendaylight.yaml
index 8fa2e542..35c90aab 100644
--- a/environments/neutron-opendaylight.yaml
+++ b/environments/neutron-opendaylight.yaml
@@ -2,10 +2,10 @@
resource_registry:
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
- OS::TripleO::Services::OpenDaylight: puppet/services/opendaylight-api.yaml
- OS::TripleO::Services::OpenDaylightOvs: puppet/services/opendaylight-ovs.yaml
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+ OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
+ OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
parameter_defaults:
- EnableOpenDaylightOnController: true
NeutronEnableForceMetadata: true
NeutronMechanismDrivers: 'opendaylight'
diff --git a/environments/neutron-ovs-dpdk.yaml b/environments/neutron-ovs-dpdk.yaml
new file mode 100644
index 00000000..004b8ac0
--- /dev/null
+++ b/environments/neutron-ovs-dpdk.yaml
@@ -0,0 +1,18 @@
+## A Heat environment that can be used to deploy DPDK with OVS
+resource_registry:
+ OS::TripleO::Services::ComputeNeutronOvsAgent: ../puppet/services/neutron-ovs-dpdk-agent.yaml
+
+parameter_defaults:
+ ## NeutronDpdkCoreList and NeutronDpdkMemoryChannels are REQUIRED settings.
+ ## Attempting to deploy DPDK without appropriate values will cause deployment to fail or lead to unstable deployments.
+ #NeutronDpdkCoreList: ""
+ #NeutronDpdkMemoryChannels: ""
+
+ NeutronDatapathType: "netdev"
+ NeutronVhostuserSocketDir: "/var/run/openvswitch"
+
+ #NeutronDpdkSocketMemory: ""
+ #NeutronDpdkDriverType: "vfio-pci"
+ #NovaReservedHostMemory: 4096
+ #NovaVcpuPinSet: ""
+
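Because NeutronDpdkCoreList and NeutronDpdkMemoryChannels are flagged as required above, a deployment normally layers an extra environment that fills them in. A sketch with purely hypothetical values for a small host:

parameter_defaults:
  NeutronDpdkCoreList: "2,3"         # hypothetical cores reserved for the DPDK PMD threads
  NeutronDpdkMemoryChannels: "4"     # hypothetical memory channel count
  NeutronDpdkSocketMemory: "1024"    # hypothetical hugepage memory (MB)
  NovaReservedHostMemory: 4096       # value suggested in the comments above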
diff --git a/environments/neutron-ovs-dvr.yaml b/environments/neutron-ovs-dvr.yaml
index 223c2531..b658d3a5 100644
--- a/environments/neutron-ovs-dvr.yaml
+++ b/environments/neutron-ovs-dvr.yaml
@@ -9,6 +9,13 @@ resource_registry:
# connected to a physical network.
OS::TripleO::Compute::Net::SoftwareConfig: ../net-config-bridge.yaml
+ # DVR requires a port on the external network for each compute node.
+ # This will usually match the one currently in use for
+ # OS::TripleO::Controller::Ports::ExternalPort.
+ # Please review your network configuration before deploying to ensure that
+ # this is appropriate.
+ OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
+
parameter_defaults:
# DVR requires that the L2 population feature is enabled
@@ -22,3 +29,11 @@ parameter_defaults:
# We also need to set the proper agent mode for the L3 agent. This will only
# affect the agent on the controller node.
NeutronL3AgentMode: 'dvr_snat'
+
+ # L3 HA isn't supported for DVR enabled routers. If upgrading from a system
+ # where L3 HA is enabled and has neutron routers configured, it is
+ # recommended to set this value to true until all routers can be
+ # migrated to DVR routers. Once migration of the routers is complete,
+ # NeutronL3HA can be returned to false. All new systems should be deployed
+ # with NeutronL3HA set to false.
+ NeutronL3HA: false
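Following the comment above, an operator upgrading from an L3 HA deployment could temporarily override this default in a small extra environment until router migration is finished, for example:

parameter_defaults:
  # Keep existing HA routers working until they are migrated to DVR,
  # then drop this override so NeutronL3HA returns to false.
  NeutronL3HA: true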
diff --git a/environments/neutron-sriov.yaml b/environments/neutron-sriov.yaml
new file mode 100755
index 00000000..9b7e51f9
--- /dev/null
+++ b/environments/neutron-sriov.yaml
@@ -0,0 +1,22 @@
+## A Heat environment that can be used to deploy SR-IOV
+resource_registry:
+ OS::TripleO::Services::NeutronSriovAgent: ../puppet/services/neutron-sriov-agent.yaml
+
+parameter_defaults:
+ NeutronMechanismDrivers: ['openvswitch','sriovnicswitch']
+
+ # Add PciPassthroughFilter to the scheduler default filters
+ #NovaSchedulerDefaultFilters: ['RetryFilter','AvailabilityZoneFilter','RamFilter','ComputeFilter','ComputeCapabilitiesFilter','ImagePropertiesFilter','ServerGroupAntiAffinityFilter','ServerGroupAffinityFilter','PciPassthroughFilter']
+ #NovaSchedulerAvailableFilters: ["nova.scheduler.filters.all_filters","nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter"]
+
+ # Provide the vendorid:productid of the VFs
+ #NeutronSupportedPCIVendorDevs: ['8086:154c','8086:10ca','8086:1520']
+
+ #NeutronPhysicalDevMappings: "datacentre:ens20f2"
+
+ # Number of VFs that needs to be configured for a physical interface
+ #NeutronSriovNumVFs: "ens20f2:5"
+
+ #NovaPCIPassthrough:
+ # - devname: "ens20f2"
+ # physical_network: "datacentre"
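A sketch of the commented SR-IOV settings filled in for a single interface, reusing the hypothetical ens20f2/datacentre names from the examples above:

parameter_defaults:
  NeutronPhysicalDevMappings: "datacentre:ens20f2"   # physnet:interface mapping
  NeutronSriovNumVFs: "ens20f2:5"                    # create 5 VFs on ens20f2 (hypothetical count)
  NovaPCIPassthrough:
    - devname: "ens20f2"
      physical_network: "datacentre"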
diff --git a/environments/services/haproxy-internal-tls-certmonger.yaml b/environments/services/haproxy-internal-tls-certmonger.yaml
new file mode 100644
index 00000000..074fec4d
--- /dev/null
+++ b/environments/services/haproxy-internal-tls-certmonger.yaml
@@ -0,0 +1,4 @@
+# A Heat environment file which can be used to enable
+# TLS for HAProxy via certmonger
+resource_registry:
+ OS::TripleO::Services::HAProxyInternalTLS: ../../puppet/services/haproxy-internal-tls-certmonger.yaml
diff --git a/environments/services/haproxy-public-tls-certmonger.yaml b/environments/services/haproxy-public-tls-certmonger.yaml
new file mode 100644
index 00000000..d3ad3ad4
--- /dev/null
+++ b/environments/services/haproxy-public-tls-certmonger.yaml
@@ -0,0 +1,4 @@
+# A Heat environment file which can be used to enable
+# TLS for HAProxy via certmonger
+resource_registry:
+ OS::TripleO::Services::HAProxyPublicTLS: ../../puppet/services/haproxy-public-tls-certmonger.yaml
diff --git a/environments/tls-endpoints-public-dns.yaml b/environments/tls-endpoints-public-dns.yaml
index 7c8e850c..0a0996d3 100644
--- a/environments/tls-endpoints-public-dns.yaml
+++ b/environments/tls-endpoints-public-dns.yaml
@@ -8,6 +8,9 @@ parameter_defaults:
CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+ CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
diff --git a/environments/tls-endpoints-public-ip.yaml b/environments/tls-endpoints-public-ip.yaml
index 80595c6c..5a2b8839 100644
--- a/environments/tls-endpoints-public-ip.yaml
+++ b/environments/tls-endpoints-public-ip.yaml
@@ -8,6 +8,9 @@ parameter_defaults:
CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'IP_ADDRESS'}
+ CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderPublic: {protocol: 'https', port: '13776', host: 'IP_ADDRESS'}
diff --git a/environments/tls-everywhere-endpoints-dns.yaml b/environments/tls-everywhere-endpoints-dns.yaml
new file mode 100644
index 00000000..4c466fae
--- /dev/null
+++ b/environments/tls-everywhere-endpoints-dns.yaml
@@ -0,0 +1,55 @@
+# Use this environment when deploying an overcloud where all the endpoints are
+# DNS names and there's TLS in all endpoint types.
+parameter_defaults:
+ EndpointMap:
+ AodhAdmin: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+ AodhInternal: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ CeilometerAdmin: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+ CeilometerInternal: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+ CephRgwAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ CephRgwInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ CinderAdmin: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+ CinderInternal: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ GlanceAdmin: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+ GlanceInternal: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+ GlanceRegistryInternal: {protocol: 'https', port: '9191', host: 'CLOUDNAME'}
+ GnocchiAdmin: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+ GnocchiInternal: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+ HeatAdmin: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+ HeatInternal: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+ HeatCfnAdmin: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+ HeatCfnInternal: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+ IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+ IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+ KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
+ KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+ ManilaAdmin: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+ ManilaInternal: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'CLOUDNAME'}
+ NeutronAdmin: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+ NeutronInternal: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+ NovaAdmin: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+ NovaInternal: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaVNCProxyAdmin: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+ NovaVNCProxyInternal: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+ SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+ SwiftAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ SwiftInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
diff --git a/environments/use-dns-for-vips.yaml b/environments/use-dns-for-vips.yaml
new file mode 100644
index 00000000..daf07bc7
--- /dev/null
+++ b/environments/use-dns-for-vips.yaml
@@ -0,0 +1,5 @@
+# A Heat environment file which can be used to disable the writing of the VIPs
+# to the /etc/hosts file in the overcloud. Use this if you have a working
+# DNS server that you will provide for the overcloud.
+resource_registry:
+ OS::TripleO::Services::VipHosts: OS::Heat::None
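For reference, an environment file like this one is simply passed to the deploy command along with the rest of the environments; a minimal sketch (other deploy options omitted):

    openstack overcloud deploy --templates \
      -e environments/use-dns-for-vips.yaml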
diff --git a/extraconfig/all_nodes/mac_hostname.yaml b/extraconfig/all_nodes/mac_hostname.j2.yaml
index 7d8704e3..75ffc9e6 100644
--- a/extraconfig/all_nodes/mac_hostname.yaml
+++ b/extraconfig/all_nodes/mac_hostname.j2.yaml
@@ -9,15 +9,7 @@ description: >
# out-of-tree templates they may require additional parameters if the
# in-tree templates add a new role.
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
# Note extra parameters can be defined, then passed data via the
# environment parameter_defaults, without modifying the parent template
@@ -37,47 +29,17 @@ resources:
# FIXME(shardy): Long term it'd be better if Heat SoftwareDeployments accepted
# list instead of a map, then we could join the lists of servers into one
# deployment instead of requiring one deployment per-role.
- CollectMacDeploymentsController:
+{% for role in roles %}
+ CollectMacDeployments{{role.name}}:
type: OS::Heat::SoftwareDeployments
properties:
name: CollectMacDeploymentsController
- servers: {get_param: controller_servers}
- config: {get_resource: CollectMacConfig}
- actions: ['CREATE'] # Only do this on CREATE
-
- CollectMacDeploymentsCompute:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: CollectMacDeploymentsCompute
- servers: {get_param: compute_servers}
- config: {get_resource: CollectMacConfig}
- actions: ['CREATE'] # Only do this on CREATE
-
- CollectMacDeploymentsBlockStorage:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: CollectMacDeploymentsBlockStorage
- servers: {get_param: blockstorage_servers}
- config: {get_resource: CollectMacConfig}
- actions: ['CREATE'] # Only do this on CREATE
-
- CollectMacDeploymentsObjectStorage:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: CollectMacDeploymentsObjectStorage
- servers: {get_param: objectstorage_servers}
- config: {get_resource: CollectMacConfig}
- actions: ['CREATE'] # Only do this on CREATE
-
- CollectMacDeploymentsCephStorage:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: CollectMacDeploymentsCephStorage
- servers: {get_param: cephstorage_servers}
+ servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
+{% endfor %}
- # Now we distribute all-the-macs to all nodes
+ # Now we distribute all-the-macs to all Controller nodes
DistributeMacConfig:
type: OS::Heat::SoftwareConfig
properties:
@@ -101,7 +63,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: DistributeMacDeploymentsController
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: DistributeMacConfig}
input_values:
# FIXME(shardy): It'd be more convenient if we could join these
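To see what the jinja2 loop above expands to, one option is to render the template locally with a made-up roles list (the real rendering is done by the undercloud's template processing; this sketch assumes the jinja2 Python module is installed and is run from the repository root):

    python -c "import jinja2; roles = [{'name': 'Controller'}, {'name': 'Compute'}]; print(jinja2.Template(open('extraconfig/all_nodes/mac_hostname.j2.yaml').read()).render(roles=roles))"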
diff --git a/extraconfig/all_nodes/random_string.yaml b/extraconfig/all_nodes/random_string.j2.yaml
index d38701e2..9ce2ca8a 100644
--- a/extraconfig/all_nodes/random_string.yaml
+++ b/extraconfig/all_nodes/random_string.j2.yaml
@@ -10,15 +10,7 @@ description: >
# out-of-tree templates they may require additional parameters if the
# in-tree templates add a new role.
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
# Note extra parameters can be defined, then passed data via the
# environment parameter_defaults, without modifying the parent template
@@ -42,7 +34,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: RandomDeploymentsController
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: RandomConfig}
actions: ['CREATE'] # Only do this on CREATE
input_values:
@@ -52,7 +44,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: RandomDeploymentsCompute
- servers: {get_param: compute_servers}
+ servers: {get_param: [servers, Compute]}
config: {get_resource: RandomConfig}
actions: ['CREATE'] # Only do this on CREATE
input_values:
diff --git a/extraconfig/all_nodes/swap-partition.j2.yaml b/extraconfig/all_nodes/swap-partition.j2.yaml
new file mode 100644
index 00000000..36076b0c
--- /dev/null
+++ b/extraconfig/all_nodes/swap-partition.j2.yaml
@@ -0,0 +1,44 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Extra config to add swap space to nodes.
+
+# Parameters passed from the parent template - note if you maintain
+# out-of-tree templates they may require additional parameters if the
+# in-tree templates add a new role.
+parameters:
+ servers:
+ type: json
+ swap_partition_label:
+ type: string
+ description: Swap partition label
+ default: 'swap1'
+
+
+resources:
+
+ SwapConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ set -eux
+ swap_partition=$(realpath /dev/disk/by-label/$swap_partition_label)
+ swapon $swap_partition
+ echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab
+ inputs:
+ - name: swap_partition_label
+ description: Swap partition label
+ default: 'swap1'
+
+{% for role in roles %}
+ {{role.name}}SwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ swap_partition_label: {get_param: swap_partition_label}
+ actions: ["CREATE"]
+{% endfor %}
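The script resolves /dev/disk/by-label/$swap_partition_label, so it assumes a partition already carries that label; preparing one might look roughly like the following (the device name /dev/vdb2 is only an example):

    mkswap -L swap1 /dev/vdb2
    ls -l /dev/disk/by-label/swap1   # the path the deployed script will resolve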
diff --git a/extraconfig/all_nodes/swap-partition.yaml b/extraconfig/all_nodes/swap-partition.yaml
deleted file mode 100644
index e6fa9eca..00000000
--- a/extraconfig/all_nodes/swap-partition.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
- Extra config to add swap space to nodes.
-
-# Parameters passed from the parent template - note if you maintain
-# out-of-tree templates they may require additional parameters if the
-# in-tree templates add a new role.
-parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
- type: json
- swap_partition_label:
- type: string
- description: Swap partition label
- default: 'swap1'
-
-
-resources:
-
- SwapConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: |
- #!/bin/bash
- set -eux
- swap_partition=$(realpath /dev/disk/by-label/$swap_partition_label)
- swapon $swap_partition
- echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab
- inputs:
- - name: swap_partition_label
- description: Swap partition label
- default: 'swap1'
-
- ControllerSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: controller_servers}
- input_values:
- swap_partition_label: {get_param: swap_partition_label}
- actions: ["CREATE"]
-
- ComputeSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: compute_servers}
- input_values:
- swap_partition_label: {get_param: swap_partition_label}
- actions: ["CREATE"]
-
- BlockStorageSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: blockstorage_servers}
- input_values:
- swap_partition_label: {get_param: swap_partition_label}
- actions: ["CREATE"]
-
- ObjectStorageSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: objectstorage_servers}
- input_values:
- swap_partition_label: {get_param: swap_partition_label}
- actions: ["CREATE"]
-
- CephStorageSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: cephstorage_servers}
- input_values:
- swap_partition_label: {get_param: swap_partition_label}
- actions: ["CREATE"]
diff --git a/extraconfig/all_nodes/swap.j2.yaml b/extraconfig/all_nodes/swap.j2.yaml
new file mode 100644
index 00000000..ce65dacb
--- /dev/null
+++ b/extraconfig/all_nodes/swap.j2.yaml
@@ -0,0 +1,58 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Extra config to add swap space to nodes.
+
+# Parameters passed from the parent template - note if you maintain
+# out-of-tree templates they may require additional parameters if the
+# in-tree templates add a new role.
+parameters:
+ servers:
+ type: json
+ swap_size_megabytes:
+ type: string
+ description: Amount of swap space to allocate in megabytes
+ default: '4096'
+ swap_path:
+ type: string
+ description: Full path to location of swap file
+ default: '/swap'
+
+
+resources:
+
+ SwapConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ set -eux
+ if [ ! -f $swap_path ]; then
+ dd if=/dev/zero of=$swap_path count=$swap_size_megabytes bs=1M
+ chmod 0600 $swap_path
+ mkswap $swap_path
+ swapon $swap_path
+ else
+ echo "$swap_path already exists"
+ fi
+ echo "$swap_path swap swap defaults 0 0" >> /etc/fstab
+ inputs:
+ - name: swap_size_megabytes
+ description: Amount of swap space to allocate in megabytes
+ default: '4096'
+ - name: swap_path
+ description: Full path to location of swap file
+ default: '/swap'
+
+{% for role in roles %}
+ {{role.name}}SwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ swap_size_megabytes: {get_param: swap_size_megabytes}
+ swap_path: {get_param: swap_path}
+ actions: ["CREATE"]
+{% endfor %}
diff --git a/extraconfig/all_nodes/swap.yaml b/extraconfig/all_nodes/swap.yaml
deleted file mode 100644
index 5383ffc9..00000000
--- a/extraconfig/all_nodes/swap.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
- Extra config to add swap space to nodes.
-
-# Parameters passed from the parent template - note if you maintain
-# out-of-tree templates they may require additional parameters if the
-# in-tree templates add a new role.
-parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
- type: json
- swap_size_megabytes:
- type: string
- description: Amount of swap space to allocate in megabytes
- default: '4096'
- swap_path:
- type: string
- description: Full path to location of swap file
- default: '/swap'
-
-
-resources:
-
- SwapConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: |
- #!/bin/bash
- set -eux
- if [ ! -f $swap_path ]; then
- dd if=/dev/zero of=$swap_path count=$swap_size_megabytes bs=1M
- chmod 0600 $swap_path
- mkswap $swap_path
- swapon $swap_path
- else
- echo "$swap_path already exists"
- fi
- echo "$swap_path swap swap defaults 0 0" >> /etc/fstab
- inputs:
- - name: swap_size_megabytes
- description: Amount of swap space to allocate in megabytes
- default: '4096'
- - name: swap_path
- description: Full path to location of swap file
- default: '/swap'
-
- ControllerSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: controller_servers}
- input_values:
- swap_size_megabytes: {get_param: swap_size_megabytes}
- swap_path: {get_param: swap_path}
- actions: ["CREATE"]
-
- ComputeSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: compute_servers}
- input_values:
- swap_size_megabytes: {get_param: swap_size_megabytes}
- swap_path: {get_param: swap_path}
- actions: ["CREATE"]
-
- BlockStorageSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: blockstorage_servers}
- input_values:
- swap_size_megabytes: {get_param: swap_size_megabytes}
- swap_path: {get_param: swap_path}
- actions: ["CREATE"]
-
- ObjectStorageSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: objectstorage_servers}
- input_values:
- swap_size_megabytes: {get_param: swap_size_megabytes}
- swap_path: {get_param: swap_path}
- actions: ["CREATE"]
-
- CephStorageSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: cephstorage_servers}
- input_values:
- swap_size_megabytes: {get_param: swap_size_megabytes}
- swap_path: {get_param: swap_path}
- actions: ["CREATE"]
diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh
index 07666245..f161c049 100644
--- a/extraconfig/tasks/major_upgrade_block_storage.sh
+++ b/extraconfig/tasks/major_upgrade_block_storage.sh
@@ -4,5 +4,19 @@
#
set -eu
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+ echo "Manual upgrade of openvswitch - restart in postun detected"
+ mkdir OVS_UPGRADE || true
+ pushd OVS_UPGRADE
+ echo "Attempting to downloading latest openvswitch with yumdownloader"
+ yumdownloader --resolve openvswitch
+ echo "Updating openvswitch with nopostun"
+ rpm -U --nopostun ./*.rpm
+ popd
+else
+ echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
+fi
+
yum -y install python-zaqarclient # needed for os-collect-config
yum -y -q update
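For context, the grep in the block above looks for a try-restart call in the openvswitch package's %postun scriptlet; a rough illustration of a matching scriptlet (the exact text varies between openvswitch builds):

    rpm -q --scripts openvswitch | awk '/postuninstall/,/*/'
    # postuninstall scriptlet (using /bin/sh):
    #     ...
    #     systemctl try-restart openvswitch.service >/dev/null 2>&1 || :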
diff --git a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
new file mode 100644
index 00000000..c87e6824
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
@@ -0,0 +1,62 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Software-config for ceilometer configuration under httpd during upgrades
+
+parameters:
+ servers:
+ type: json
+ input_values:
+ type: json
+ description: input values for the software deployments
+resources:
+ CeilometerWsgiMitakaNewtonPreUpgradeConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: puppet
+ config:
+ get_file: mitaka_to_newton_ceilometer_wsgi_upgrade.pp
+
+ CeilometerWsgiMitakaNewtonUpgradeConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\nset -e\n\n"
+ - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_pacemaker_migrations.sh
+ - "disable_standalone_ceilometer_api\n\n"
+
+ CeilometerWsgiMitakaNewtonPostUpgradeConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ set -e
+ /usr/bin/systemctl reload httpd
+
+ CeilometerWsgiMitakaNewtonPreUpgradeDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ name: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
+ servers: {get_param: [servers, Controller]}
+ config: {get_resource: CeilometerWsgiMitakaNewtonPreUpgradeConfig}
+
+ CeilometerWsgiMitakaNewtonUpgradeConfigDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
+ properties:
+ name: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
+ servers: {get_param: [servers, Controller]}
+ config: {get_resource: CeilometerWsgiMitakaNewtonUpgradeConfig}
+
+ CeilometerWsgiMitakaNewtonPostUpgradeDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
+ properties:
+ name: CeilometerWsgiMitakaNewtonPostUpgradeDeployment
+ servers: {get_param: [servers, Controller]}
+ config: {get_resource: CeilometerWsgiMitakaNewtonPostUpgradeConfig}
diff --git a/extraconfig/tasks/major_upgrade_ceph_mon.sh b/extraconfig/tasks/major_upgrade_ceph_mon.sh
new file mode 100755
index 00000000..e0d160f1
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_ceph_mon.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+set -eu
+set -o pipefail
+
+echo INFO: starting $(basename "$0")
+
+# Exit if not running
+if ! pidof ceph-mon &> /dev/null; then
+ echo INFO: ceph-mon is not running, skipping
+ exit 0
+fi
+
+# Exit if not Hammer
+INSTALLED_VERSION=$(ceph --version | awk '{print $3}')
+if ! [[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then
+ echo INFO: version of Ceph installed is not 0.94, skipping
+ exit 0
+fi
+
+CEPH_STATUS=$(ceph health | awk '{print $1}')
+if [ ${CEPH_STATUS} = HEALTH_ERR ]; then
+ echo ERROR: Ceph cluster status is HEALTH_ERR, cannot be upgraded
+ exit 1
+fi
+
+# Useful when upgrading with fewer OSDs than the replica size
+if [[ ${ignore_ceph_upgrade_warnings:-False} != [Tt]rue ]]; then
+ timeout 300 bash -c "CEPH_STATUS=${CEPH_STATUS}; while [ \${CEPH_STATUS} != HEALTH_OK ]; do
+ echo WARNING: Waiting for Ceph cluster status to go HEALTH_OK;
+ sleep 30;
+ CEPH_STATUS=\$(ceph health | awk '{print \$1}')
+ done"
+fi
+
+MON_PID=$(pidof ceph-mon)
+MON_ID=$(hostname -s)
+
+# Stop daemon using Hammer sysvinit script
+service ceph stop mon.${MON_ID}
+
+# Ensure it's stopped
+timeout 60 bash -c "while kill -0 ${MON_PID} 2> /dev/null; do
+ sleep 2;
+done"
+
+# Update to Jewel
+yum -y -q update ceph-mon ceph
+
+# Restart or exit if not on Jewel; only in that case do we need the changes
+UPDATED_VERSION=$(ceph --version | awk '{print $3}')
+if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then
+ echo WARNING: Ceph was not upgraded, restarting daemons
+ service ceph start mon.${MON_ID}
+elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
+ # RPM could own some of these but we can't take risks on the pre-existing files
+ for d in /var/lib/ceph/mon /var/log/ceph /var/run/ceph /etc/ceph; do
+ chown -L -R ceph:ceph $d || echo WARNING: chown of $d failed
+ done
+
+ # Replay udev events with newer rules
+ udevadm trigger
+
+ # Enable systemd unit
+ systemctl enable ceph-mon.target
+ systemctl enable ceph-mon@${MON_ID}
+ systemctl start ceph-mon@${MON_ID}
+
+ # Wait for daemon to be back in the quorum
+ timeout 300 bash -c "until (ceph quorum_status | jq .quorum_names | grep -sq ${MON_ID}); do
+ echo WARNING: Waiting for mon.${MON_ID} to re-join quorum;
+ sleep 10;
+ done"
+
+ # If tunables become legacy, cluster status will be HEALTH_WARN, causing
+ # the upgrade to fail on the following node
+ ceph osd crush tunables default
+
+ echo INFO: Ceph was upgraded to Jewel
+else
+ echo ERROR: Ceph was upgraded to an unknown release, daemon is stopped, need manual intervention
+ exit 1
+fi
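A quick sanity check of the version parsing used above, against a sample string rather than real ceph output:

    v=$(echo "ceph version 0.94.9 (abc1234)" | awk '{print $3}')
    [[ "$v" =~ ^0\.94.* ]] && echo "Hammer detected: $v"   # a Jewel build would match ^10\.2.* instead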
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
index de42b16d..e690a383 100644
--- a/extraconfig/tasks/major_upgrade_ceph_storage.sh
+++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh
@@ -4,32 +4,113 @@
# major upgrade workflow.
#
set -eu
+set -o pipefail
UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
-cat > $UPGRADE_SCRIPT << ENDOFCAT
+cat > $UPGRADE_SCRIPT << 'ENDOFCAT'
+#!/bin/bash
### DO NOT MODIFY THIS FILE
### This file is automatically delivered to the ceph-storage nodes as part of the
### tripleo upgrades workflow
+set -eu
+
+echo INFO: starting $(basename "$0")
+
+# Exit if not running
+if ! pidof ceph-osd &> /dev/null; then
+ echo INFO: ceph-osd is not running, skipping
+ exit 0
+fi
+# Exit if not Hammer
+INSTALLED_VERSION=$(ceph --version | awk '{print $3}')
+if ! [[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then
+ echo INFO: version of Ceph installed is not 0.94, skipping
+ exit 0
+fi
-function systemctl_ceph {
- action=\$1
- systemctl \$action ceph
-}
+OSD_PIDS=$(pidof ceph-osd)
+OSD_IDS=$(ls /var/lib/ceph/osd | awk 'BEGIN { FS = "-" } ; { print $2 }')
-# "so that mirrors aren't rebalanced as if the OSD died" - gfidente
+# "so that mirrors aren't rebalanced as if the OSD died" - gfidente / leseb
ceph osd set noout
+ceph osd set norebalance
+ceph osd set nodeep-scrub
+ceph osd set noscrub
+
+# Stop daemon using Hammer sysvinit script
+for OSD_ID in $OSD_IDS; do
+ service ceph stop osd.${OSD_ID}
+done
+
+# kill -0 returns non-zero only once all of the OSD processes have exited
+timeout 60 bash -c "while kill -0 ${OSD_PIDS} 2> /dev/null; do
+ sleep 2;
+done"
+
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+ echo "Manual upgrade of openvswitch - restart in postun detected"
+ mkdir OVS_UPGRADE || true
+ pushd OVS_UPGRADE
+ echo "Attempting to downloading latest openvswitch with yumdownloader"
+ yumdownloader --resolve openvswitch
+ echo "Updating openvswitch with nopostun"
+ rpm -U --nopostun ./*.rpm
+ popd
+else
+ echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
+fi
-systemctl_ceph stop
+# Update (Ceph to Jewel)
yum -y install python-zaqarclient # needed for os-collect-config
yum -y update
-systemctl_ceph start
-ceph osd unset noout
+# Restart or exit if not on Jewel; only in that case do we need the changes
+UPDATED_VERSION=$(ceph --version | awk '{print $3}')
+if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then
+ echo WARNING: Ceph was not upgraded, restarting daemon
+ for OSD_ID in $OSD_IDS; do
+ service ceph start osd.${OSD_ID}
+ done
+elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
+ # RPM could own some of these but we can't take risks on the pre-existing files
+ for d in /var/lib/ceph/osd /var/log/ceph /var/run/ceph /etc/ceph; do
+ chown -L -R ceph:ceph $d || echo WARNING: chown of $d failed
+ done
+
+ # Replay udev events with newer rules
+ udevadm trigger && udevadm settle
+
+ # If on ext4, we need to enforce lower values for name and namespace len
+ # or ceph-osd will refuse to start, see: http://tracker.ceph.com/issues/16187
+ for OSD_ID in $OSD_IDS; do
+ OSD_FS=$(findmnt -n -o FSTYPE -T /var/lib/ceph/osd/ceph-${OSD_ID})
+ if [ ${OSD_FS} = ext4 ]; then
+ crudini --set /etc/ceph/ceph.conf global osd_max_object_name_len 256
+ crudini --set /etc/ceph/ceph.conf global osd_max_object_namespace_len 64
+ fi
+ done
+
+ # Enable systemd unit
+ systemctl enable ceph-osd.target
+ for OSD_ID in $OSD_IDS; do
+ systemctl enable ceph-osd@${OSD_ID}
+ systemctl start ceph-osd@${OSD_ID}
+ done
+ echo INFO: Ceph was upgraded to Jewel
+else
+ echo ERROR: Ceph was upgraded to an unknown release, daemon is stopped, need manual intervention
+ exit 1
+fi
+
+ceph osd unset noout
+ceph osd unset norebalance
+ceph osd unset nodeep-scrub
+ceph osd unset noscrub
ENDOFCAT
# ensure the permissions are OK
chmod 0755 $UPGRADE_SCRIPT
-
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
new file mode 100755
index 00000000..b65f6915
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_check.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+set -eu
+
+check_cluster()
+{
+ if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
+ echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
+ exit 1
+ fi
+}
+
+check_pcsd()
+{
+ if pcs status 2>&1 | grep -E 'Offline'; then
+ echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
+ exit 1
+ fi
+}
+
+check_disk_for_mysql_dump()
+{
+ # Where to back up the current database if mysql needs to be upgraded
+ MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
+ MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
+ # Spare disk ratio for extra safety
+ MYSQL_BACKUP_SIZE_RATIO=1.2
+
+ # Shall we upgrade mysql data directory during the stack upgrade?
+ if [ "$mariadb_do_major_upgrade" = "auto" ]; then
+ ret=$(is_mysql_upgrade_needed)
+ if [ $ret = "1" ]; then
+ DO_MYSQL_UPGRADE=1
+ else
+ DO_MYSQL_UPGRADE=0
+ fi
+ echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
+ elif [ "$mariadb_do_major_upgrade" = "no" ]; then
+ DO_MYSQL_UPGRADE=0
+ else
+ DO_MYSQL_UPGRADE=1
+ fi
+
+ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+
+ if [ -d "$MYSQL_BACKUP_DIR" ]; then
+ echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
+ exit 1
+ fi
+ mkdir "$MYSQL_BACKUP_DIR"
+ if [ $? -ne 0 ]; then
+ echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
+ exit 1
+ fi
+
+ # the /root/.my.cnf is needed because we set the mysql root
+ # password from liberty onwards
+ backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
+ # While not ideal, this step allows us to calculate exactly how much space the dump
+ # will need. Our main goal here is avoiding any chance of corruption due to disk space
+ # exhaustion
+ backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
+ database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
+ free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
+
+ # we need at least space for a new mysql database + dump of the existing one,
+ # times a small factor for additional safety room
+ # note: bash doesn't do floating point math or floats in if statements,
+ # so use python to apply the ratio and cast it back to integer
+ required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
+ if [ $required_space -ge $free_space ]; then
+ echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
+ exit 1
+ fi
+ fi
+ fi
+}
+
+check_python_rpm()
+{
+ # If for some reason rpm-python is missing we want to error out early enough
+ if ! rpm -q rpm-python &> /dev/null; then
+ echo_error "ERROR: upgrade cannot start without rpm-python installed"
+ exit 1
+ fi
+}
+
+check_clean_cluster()
+{
+ if pcs status | grep -q Stopped:; then
+ echo_error "ERROR: upgrade cannot start with stopped resources on the cluster. Make sure that all the resources are up and running."
+ exit 1
+ fi
+}
+
+check_galera_root_password()
+{
+ # BZ: 1357112
+ if [ ! -e /root/.my.cnf ]; then
+ echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
+ exit 1
+ fi
+}
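A worked example of the free-space estimate in check_disk_for_mysql_dump, using made-up sizes (2 GB of data files plus a 1.5 GB dump and the 1.2 safety ratio):

    python -c "from __future__ import print_function; print('%d' % int((2000000000 + 1500000000) * 1.2))"
    # prints roughly 4.2 billion, i.e. the number of bytes that must be free in MYSQL_BACKUP_DIR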
diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh
index 78628c8c..950fe8d5 100644
--- a/extraconfig/tasks/major_upgrade_compute.sh
+++ b/extraconfig/tasks/major_upgrade_compute.sh
@@ -12,10 +12,27 @@ cat > $UPGRADE_SCRIPT << ENDOFCAT
### This file is automatically delivered to the compute nodes as part of the
### tripleo upgrades workflow
+set -eu
+
# pin nova to kilo (messaging +-1) for the nova-compute service
crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute
+
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+ echo "Manual upgrade of openvswitch - restart in postun detected"
+ mkdir OVS_UPGRADE || true
+ pushd OVS_UPGRADE
+ echo "Attempting to downloading latest openvswitch with yumdownloader"
+ yumdownloader --resolve openvswitch
+ echo "Updating openvswitch with nopostun"
+ rpm -U --nopostun ./*.rpm
+ popd
+else
+ echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
+fi
+
yum -y install python-zaqarclient # needed for os-collect-config
yum -y update
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
index 2ee473ce..2690ee64 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
@@ -4,11 +4,14 @@ set -eu
cluster_sync_timeout=1800
-if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
- echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
- exit 1
+check_cluster
+check_pcsd
+if [[ -n $(is_bootstrap_node) ]]; then
+ check_clean_cluster
fi
-
+check_python_rpm
+check_galera_root_password
+check_disk_for_mysql_dump
# We want to disable fencing during the cluster --stop as it might fence
# nodes where a service fails to stop, which could be fatal during an upgrade
@@ -17,12 +20,43 @@ fi
STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
pcs property set stonith-enabled=false
-# If for some reason rpm-python are missing we want to error out early enough
-if [ ! rpm -q rpm-python &> /dev/null ]; then
- echo_error "ERROR: upgrade cannot start without rpm-python installed"
- exit 1
+# Migrate to HA NG and fix up rabbitmq queues
+# We fix up the rabbitmq ha queues after the migration because it will
+# restart the rabbitmq resource. Doing it after the migration means no other
+# services will be restarted as there are no other constraints
+if [[ -n $(is_bootstrap_node) ]]; then
+ migrate_full_to_ng_ha
+ rabbitmq_mitaka_newton_upgrade
fi
+# After migrating the cluster to HA-NG the services not under pacemaker's control
+# are still up and running. We need to stop them explicitly, otherwise during the yum
+# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
+# is going to take a long time because rabbit is down. By having the service stopped
+# systemctl try-restart is a noop
+
+for service in $(services_to_migrate); do
+ manage_systemd_service stop "${service%%-clone}"
+ # We do not reuse check_resource_systemd here because systemctl is-active
+ # has been observed returning "unknown" for at least one service that
+ # was stopped (see LP 1627254)
+ timeout=600
+ tstart=$(date +%s)
+ tend=$(( $tstart + $timeout ))
+ check_interval=3
+ while (( $(date +%s) < $tend )); do
+ if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
+ echo "$service still active, sleeping $check_interval seconds."
+ sleep $check_interval
+ else
+ # we do not care if it is inactive, unknown or failed as long as it is
+ # not running
+ break
+ fi
+
+ done
+done
+
# In case the mysql package is updated, the database on disk must be
# upgraded as well. This typically needs to happen during major
# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
@@ -35,77 +69,20 @@ fi
# on mysql package versionning, but this can be overriden manually
# to support specific upgrade scenario
-# Where to backup current database if mysql need to be upgraded
-MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
-MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
-# Spare disk ratio for extra safety
-MYSQL_BACKUP_SIZE_RATIO=1.2
-
-# Shall we upgrade mysql data directory during the stack upgrade?
-if [ "$mariadb_do_major_upgrade" = "auto" ]; then
- ret=$(is_mysql_upgrade_needed)
- if [ $ret = "1" ]; then
- DO_MYSQL_UPGRADE=1
- else
- DO_MYSQL_UPGRADE=0
- fi
- echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
-elif [ "$mariadb_do_major_upgrade" = "no" ]; then
- DO_MYSQL_UPGRADE=0
-else
- DO_MYSQL_UPGRADE=1
-fi
-
-if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+if [[ -n $(is_bootstrap_node) ]]; then
if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d "$MYSQL_BACKUP_DIR" ]; then
- echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
- exit 1
- fi
- mkdir "$MYSQL_BACKUP_DIR"
- if [ $? -ne 0 ]; then
- echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
- exit 1
- fi
-
- # the /root/.my.cnf is needed because we set the mysql root
- # password from liberty onwards
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- # While not ideal, this step allows us to calculate exactly how much space the dump
- # will need. Our main goal here is avoiding any chance of corruption due to disk space
- # exhaustion
- backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
- database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
- free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
- # we need at least space for a new mysql database + dump of the existing one,
- # times a small factor for additional safety room
- # note: bash doesn't do floating point math or floats in if statements,
- # so use python to apply the ratio and cast it back to integer
- required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
- if [ $required_space -ge $free_space ]; then
- echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
- exit 1
- fi
-
mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
fi
- pcs resource disable httpd
- check_resource httpd stopped 1800
- pcs resource disable openstack-core
- check_resource openstack-core stopped 1800
pcs resource disable redis
check_resource redis stopped 600
- pcs resource disable mongod
- check_resource mongod stopped 600
pcs resource disable rabbitmq
check_resource rabbitmq stopped 600
- pcs resource disable memcached
- check_resource memcached stopped 600
pcs resource disable galera
check_resource galera stopped 600
+ pcs resource disable openstack-cinder-volume
+ check_resource openstack-cinder-volume stopped 600
# Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
# https://bugzilla.redhat.com/show_bug.cgi?id=1330688
for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
@@ -115,7 +92,8 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
pcs cluster stop --all
fi
-# Swift isn't controled by pacemaker
+
+# Swift isn't controlled by pacemaker
systemctl_swift stop
tstart=$(date +%s)
@@ -142,6 +120,21 @@ if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
fi
+
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+ echo "Manual upgrade of openvswitch - restart in postun detected"
+ mkdir OVS_UPGRADE || true
+ pushd OVS_UPGRADE
+ echo "Attempting to downloading latest openvswitch with yumdownloader"
+ yumdownloader --resolve openvswitch
+ echo "Updating openvswitch with nopostun"
+ rpm -U --nopostun ./*.rpm
+ popd
+else
+ echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
+fi
+
yum -y install python-zaqarclient # needed for os-collect-config
yum -y -q update
@@ -155,17 +148,19 @@ wsrep_on = ON
wsrep_cluster_address = gcomm://localhost
EOF
-if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- # Scripts run via heat have no HOME variable set and this confuses
- # mysqladmin
- export HOME=/root
- mkdir /var/lib/mysql || /bin/true
- chown mysql:mysql /var/lib/mysql
- chmod 0755 /var/lib/mysql
- restorecon -R /var/lib/mysql/
- mysql_install_db --datadir=/var/lib/mysql --user=mysql
- chown -R mysql:mysql /var/lib/mysql/
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ # Scripts run via heat have no HOME variable set and this confuses
+ # mysqladmin
+ export HOME=/root
+
+ mkdir /var/lib/mysql || /bin/true
+ chown mysql:mysql /var/lib/mysql
+ chmod 0755 /var/lib/mysql
+ restorecon -R /var/lib/mysql/
+ mysql_install_db --datadir=/var/lib/mysql --user=mysql
+ chown -R mysql:mysql /var/lib/mysql/
+
+ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
mysqld_safe --wsrep-new-cluster &
# We have a populated /root/.my.cnf with root/password here so
# we need to temporarily rename it because the newly created
@@ -182,6 +177,9 @@ fi
# If we reached here without error we can safely blow away the origin
# mysql dir from every controller
+
+# TODO: What if the upgrade fails on the bootstrap node, but not on
+# this controller? Data may be lost.
if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
fi
@@ -201,3 +199,7 @@ crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
# LP: 1615035, required only for M/N upgrade.
crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
+# LP: 1627450, required only for M/N upgrade
+crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
+
+crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
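For clarity, the ${service%%-clone} expansion used in the stop loop above only strips the pacemaker clone suffix, for example:

    service=openstack-nova-api-clone
    echo "${service%%-clone}"   # -> openstack-nova-api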
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
index bc708cce..b3a0098c 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
@@ -3,10 +3,10 @@
set -eu
cluster_form_timeout=600
-cluster_settle_timeout=600
+cluster_settle_timeout=1800
galera_sync_timeout=600
-if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+if [[ -n $(is_bootstrap_node) ]]; then
pcs cluster start --all
tstart=$(date +%s)
@@ -26,14 +26,23 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
pcs resource enable $vip
- check_resource $vip started 60
+ check_resource_pacemaker $vip started 60
done
+fi
- pcs resource enable galera
- check_resource galera started 600
- pcs resource enable mongod
- check_resource mongod started 600
+start_or_enable_service galera
+check_resource galera started 600
+start_or_enable_service redis
+check_resource redis started 600
+# We need mongod, which is now a systemd service, up and running before calling
+# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
+# yet, so ceilometer-dbsync may fail a couple of times before it is. As it retries
+# indefinitely we should be good.
+# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 we use systemctl directly for now
+systemctl start mongod
+check_resource mongod started 600
+if [[ -n $(is_bootstrap_node) ]]; then
tstart=$(date +%s)
while ! clustercheck; do
sleep 5
@@ -54,18 +63,7 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
nova-manage db sync
nova-manage api_db sync
-
- pcs resource enable memcached
- check_resource memcached started 600
- pcs resource enable rabbitmq
- check_resource rabbitmq started 600
- pcs resource enable redis
- check_resource redis started 600
- pcs resource enable openstack-core
- check_resource openstack-core started 1800
- pcs resource enable httpd
- check_resource httpd started 1800
+ nova-manage db online_data_migrations
+ gnocchi-upgrade
+ sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
fi
-
-# Swift isn't controled by heat
-systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
new file mode 100755
index 00000000..b653c7c7
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+set -eu
+
+start_or_enable_service rabbitmq
+check_resource rabbitmq started 600
+start_or_enable_service redis
+check_resource redis started 600
+start_or_enable_service openstack-cinder-volume
+check_resource openstack-cinder-volume started 600
+
+
+# Swift isn't controlled by pacemaker
+systemctl_swift start
+
+# We need to start the systemd services we explicitly stopped at step _1.sh
+# FIXME: Should we let puppet during the convergence step do the service enabling or
+# should we add it here?
+services=$(services_to_migrate)
+if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
+ services=${services%%openstack-sahara*}
+fi
+for service in $services; do
+ manage_systemd_service start "${service%%-clone}"
+ check_resource_systemd "${service%%-clone}" started 600
+done
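The ${services%%openstack-sahara*} expansion above drops everything from the first sahara entry to the end of the list, which works because services_to_migrate lists the sahara resources last; a small sketch:

    services="openstack-nova-api-clone openstack-sahara-api-clone openstack-sahara-engine-clone"
    echo "${services%%openstack-sahara*}"   # -> "openstack-nova-api-clone "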
diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh
index 931f4f42..750ad82c 100644
--- a/extraconfig/tasks/major_upgrade_object_storage.sh
+++ b/extraconfig/tasks/major_upgrade_object_storage.sh
@@ -12,6 +12,7 @@ cat > $UPGRADE_SCRIPT << ENDOFCAT
### This file is automatically delivered to the swift-storage nodes as part of the
### tripleo upgrades workflow
+set -eu
function systemctl_swift {
action=\$1
@@ -22,6 +23,19 @@ function systemctl_swift {
done
}
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+ echo "Manual upgrade of openvswitch - restart in postun detected"
+ mkdir OVS_UPGRADE || true
+ pushd OVS_UPGRADE
+ echo "Attempting to downloading latest openvswitch with yumdownloader"
+ yumdownloader --resolve openvswitch
+ echo "Updating openvswitch with nopostun"
+ rpm -U --nopostun ./*.rpm
+ popd
+else
+ echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
+fi
systemctl_swift stop
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index c70a954f..7c78d5ad 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -1,16 +1,8 @@
-heat_template_version: 2014-10-16
+heat_template_version: 2016-10-14
description: 'Upgrade for Pacemaker deployments'
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
input_values:
type: json
@@ -26,12 +18,48 @@ parameters:
constraints:
- allowed_values: ['auto', 'yes', 'no']
default: 'auto'
+ IgnoreCephUpgradeWarnings:
+ type: boolean
+ default: false
+ description: If enabled, the Ceph upgrade will be forced even if the cluster or PG status is not clean
+ KeepSaharaServicesOnUpgrade:
+ type: boolean
+ default: true
+ description: Whether to keep Sahara services when upgrading controller nodes from mitaka to newton
+
resources:
# TODO(jistr): for Mitaka->Newton upgrades and further we can use
# map_merge with input_values instead of feeding params into scripts
# via str_replace on bash snippets
+ CephMonUpgradeConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - str_replace:
+ template: |
+ #!/bin/bash
+ ignore_ceph_upgrade_warnings='IGNORE_CEPH_UPGRADE_WARNINGS'
+ params:
+ IGNORE_CEPH_UPGRADE_WARNINGS: {get_param: IgnoreCephUpgradeWarnings}
+ - get_file: major_upgrade_ceph_mon.sh
+
+ CephMonUpgradeDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, Controller]}
+ config: {get_resource: CephMonUpgradeConfig}
+ input_values: {get_param: input_values}
+ update_policy:
+ batch_create:
+ max_batch_size: 1
+ rolling_update:
+ max_batch_size: 1
+
ControllerPacemakerUpgradeConfig_Step1:
type: OS::Heat::SoftwareConfig
properties:
@@ -52,13 +80,15 @@ resources:
params:
MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_check.sh
- get_file: major_upgrade_pacemaker_migrations.sh
- get_file: major_upgrade_controller_pacemaker_1.sh
ControllerPacemakerUpgradeDeployment_Step1:
type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: CephMonUpgradeDeployment
properties:
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
input_values: {get_param: input_values}
@@ -72,7 +102,7 @@ resources:
BlockStorageUpgradeDeployment:
type: OS::Heat::SoftwareDeploymentGroup
properties:
- servers: {get_param: blockstorage_servers}
+ servers: {get_param: [servers, BlockStorage]}
config: {get_resource: BlockStorageUpgradeConfig}
input_values: {get_param: input_values}
@@ -91,7 +121,32 @@ resources:
type: OS::Heat::SoftwareDeploymentGroup
depends_on: BlockStorageUpgradeDeployment
properties:
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
input_values: {get_param: input_values}
+ ControllerPacemakerUpgradeConfig_Step3:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - str_replace:
+ template: |
+ #!/bin/bash
+ keep_sahara_services_on_upgrade='KEEP_SAHARA_SERVICES_ON_UPGRADE'
+ params:
+ KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
+ - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_pacemaker_migrations.sh
+ - get_file: major_upgrade_controller_pacemaker_3.sh
+
+ ControllerPacemakerUpgradeDeployment_Step3:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: ControllerPacemakerUpgradeDeployment_Step2
+ properties:
+ servers: {get_param: [servers, Controller]}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
+ input_values: {get_param: input_values}
+
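As an illustration, with IgnoreCephUpgradeWarnings set to true the str_replace above should yield a script beginning roughly as follows, ahead of the contents of major_upgrade_ceph_mon.sh (Heat stringifies the boolean, which is why the script checks against [Tt]rue):

    #!/bin/bash
    ignore_ceph_upgrade_warnings='True'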
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
index 623549a0..f6aa3066 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
@@ -3,15 +3,7 @@ description: 'Upgrade for Pacemaker deployments'
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
input_values:
type: json
@@ -43,45 +35,12 @@ resources:
- "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- get_param: UpgradeInitCommand
- UpgradeInitControllerDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: controller_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- UpgradeInitComputeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: compute_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- UpgradeInitBlockStorageDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: blockstorage_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- UpgradeInitObjectStorageDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: objectstorage_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- UpgradeInitCephStorageDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: cephstorage_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
# TODO(jistr): for Mitaka->Newton upgrades and further we can use
# map_merge with input_values instead of feeding params into scripts
# via str_replace on bash snippets
+ # FIXME(shardy) we have hard-coded per-role *ScriptConfig's here
+ # Would be better to have a common config for all roles
ComputeDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
@@ -97,35 +56,32 @@ resources:
UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- get_file: major_upgrade_compute.sh
- ComputeDeliverUpgradeScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: compute_servers}
- config: {get_resource: ComputeDeliverUpgradeScriptConfig}
- input_values: {get_param: input_values}
-
ObjectStorageDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: {get_file: major_upgrade_object_storage.sh}
- ObjectStorageDeliverUpgradeScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: objectstorage_servers}
- config: {get_resource: ObjectStorageDeliverUpgradeScriptConfig}
- input_values: {get_param: input_values}
-
CephStorageDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: {get_file: major_upgrade_ceph_storage.sh}
- CephStorageDeliverUpgradeScriptDeployment:
+{% for role in roles %}
+ UpgradeInit{{role.name}}Deployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ {% if not role.name in ['Controller', 'BlockStorage'] %}
+ {{role.name}}DeliverUpgradeScriptDeployment:
type: OS::Heat::SoftwareDeploymentGroup
properties:
- servers: {get_param: cephstorage_servers}
- config: {get_resource: CephStorageDeliverUpgradeScriptConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}DeliverUpgradeScriptConfig}
input_values: {get_param: input_values}
+ {% endif %}
+{% endfor %}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
index 7ed7012d..7c9083a4 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
@@ -56,3 +56,126 @@ function is_mysql_upgrade_needed {
fi
echo "1"
}
+
+# This function returns the list of services to be migrated away from pacemaker
+# and to systemd. These services live in a separate function because the list
+# is needed in three different places: major_upgrade_controller_pacemaker_{1,2}
+# and in the function to migrate the cluster from full HA to HA NG
+function services_to_migrate {
+ # The following PCMK resources are the ones we are going to delete
+ PCMK_RESOURCE_TODELETE="
+ httpd-clone
+ memcached-clone
+ mongod-clone
+ neutron-dhcp-agent-clone
+ neutron-l3-agent-clone
+ neutron-metadata-agent-clone
+ neutron-netns-cleanup-clone
+ neutron-openvswitch-agent-clone
+ neutron-ovs-cleanup-clone
+ neutron-server-clone
+ openstack-aodh-evaluator-clone
+ openstack-aodh-listener-clone
+ openstack-aodh-notifier-clone
+ openstack-ceilometer-central-clone
+ openstack-ceilometer-collector-clone
+ openstack-ceilometer-notification-clone
+ openstack-cinder-api-clone
+ openstack-cinder-scheduler-clone
+ openstack-glance-api-clone
+ openstack-glance-registry-clone
+ openstack-gnocchi-metricd-clone
+ openstack-gnocchi-statsd-clone
+ openstack-heat-api-cfn-clone
+ openstack-heat-api-clone
+ openstack-heat-api-cloudwatch-clone
+ openstack-heat-engine-clone
+ openstack-nova-api-clone
+ openstack-nova-conductor-clone
+ openstack-nova-consoleauth-clone
+ openstack-nova-novncproxy-clone
+ openstack-nova-scheduler-clone
+ openstack-sahara-api-clone
+ openstack-sahara-engine-clone
+ "
+ echo $PCMK_RESOURCE_TODELETE
+}
+
+# This function will migrate a mitaka system where all the resources are managed
+# via pacemaker to a newton setup where only a few services will be managed by pacemaker
+# On a high-level it will operate as follows:
+# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
+# during the conversion
+# 2. Remove all the colocation constraints and then the ordering constraints, except the
+# ones related to haproxy/VIPs which exist in Newton as well
+# 3. Take the cluster out of maintenance-mode
+# 4. Remove all the resources that won't be managed by pacemaker in newton. The
+# outcome will be that they are stopped and removed from pacemaker's control
+# 5. Do a resource cleanup to make sure the cluster is in a clean state
+function migrate_full_to_ng_ha {
+ if [[ -n $(pcmk_running) ]]; then
+ pcs property set maintenance-mode=true
+
+ # First we go through all the colocation constraints (except the ones
+ # we want to keep, i.e. the haproxy/ip ones) and we remove those
+ COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
+ for constraint in $COL_CONSTRAINTS; do
+ log_debug "Deleting colocation constraint $constraint from CIB"
+ pcs constraint remove "$constraint"
+ done
+
+ # Now we kill all the ordering constraints (except the haproxy/ip ones)
+ ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
+ for constraint in $ORD_CONSTRAINTS; do
+ log_debug "Deleting ordering constraint $constraint from CIB"
+ pcs constraint remove "$constraint"
+ done
+ # At this stage all the pacemaker resources are removed from the CIB.
+ # Once we remove the maintenance-mode those systemd resources will keep
+ # on running. They will be enabled in systemd via the puppet converge
+ # step later on
+ pcs property set maintenance-mode=false
+
+ # At this stage there are no constraints whatsoever except the haproxy/ip ones
+ # which we want to keep. We now disable and then delete each resource
+ # that will move to systemd.
+ # We want the systemd resources to be stopped before doing "yum update",
+ # that way "systemctl try-restart <service>" is a no-op because the
+ # service was down already
+ PCS_STATUS_OUTPUT="$(pcs status)"
+ for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
+ if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
+ log_debug "Deleting $resource from the CIB"
+ if ! pcs resource disable "$resource" --wait=600; then
+ echo_error "ERROR: resource $resource failed to be disabled"
+ exit 1
+ fi
+ pcs resource delete --force "$resource"
+ else
+ log_debug "Service $resource not found as a pacemaker resource, not trying to delete."
+ fi
+ done
+
+ # We need to do a pcs resource cleanup here + crm_resource --wait to
+ # make sure the cluster is in a clean state before we stop everything,
+ # upgrade and restart everything
+ pcs resource cleanup
+ # We are making sure here that the cluster is stable before proceeding
+ if ! timeout -k 10 600 crm_resource --wait; then
+ echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
+ exit 1
+ fi
+ fi
+}
+
+function disable_standalone_ceilometer_api {
+ if [[ -n $(is_bootstrap_node) ]]; then
+ if [[ -n $(is_pacemaker_managed openstack-ceilometer-api) ]]; then
+ # Disable pacemaker resources for ceilometer-api
+ manage_pacemaker_service disable openstack-ceilometer-api
+ check_resource_pacemaker openstack-ceilometer-api stopped 600
+ pcs resource delete openstack-ceilometer-api --wait=600
+ fi
+ fi
+}
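A read-only way to preview what migrate_full_to_ng_ha would remove, after sourcing this file on a controller (illustrative only, it makes no changes to the cluster):

    pcs_out="$(pcs status)"
    for resource in $(services_to_migrate) delay-clone openstack-core-clone; do
        echo "$pcs_out" | grep -q "$resource" && echo "would disable and delete: $resource"
    done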
diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
index 9414ac19..b9a87d33 100644
--- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
+++ b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
@@ -4,15 +4,7 @@ description: >
Software-config for performing aodh data migration
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
input_values:
type: json
@@ -28,6 +20,6 @@ resources:
AodhMysqlMigrationScriptDeployment:
type: OS::Heat::SoftwareDeploymentGroup
properties:
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: AodhMysqlMigrationScriptConfig}
input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
new file mode 100644
index 00000000..1c376285
--- /dev/null
+++ b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
@@ -0,0 +1,97 @@
+# Copyright 2015 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This puppet manifest is to be used only during a Mitaka->Newton upgrade
+# It configures ceilometer to run under httpd but makes sure not to
+# restart any services. This snippet needs to be called before init as a
+# pre-upgrade migration.
+
+Service <|
+ tag == 'ceilometer-service'
+|> {
+ hasrestart => true,
+ restart => '/bin/true',
+ start => '/bin/true',
+ stop => '/bin/true',
+}
+
+if $::hostname == downcase(hiera('bootstrap_nodeid')) {
+ $pacemaker_master = true
+ $sync_db = true
+} else {
+ $pacemaker_master = false
+ $sync_db = false
+}
+
+include ::tripleo::packages
+
+
+if str2bool(hiera('mongodb::server::ipv6', false)) {
+ $mongo_node_ips_with_port_prefixed = prefix(hiera('mongodb_node_ips'), '[')
+ $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
+} else {
+ $mongo_node_ips_with_port = suffix(hiera('mongodb_node_ips'), ':27017')
+}
+$mongodb_replset = hiera('mongodb::server::replset')
+$mongo_node_string = join($mongo_node_ips_with_port, ',')
+$database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
+
+include ::ceilometer
+
+class {'::ceilometer::db':
+ database_connection => $database_connection,
+}
+
+if $sync_db {
+ include ::ceilometer::db::sync
+}
+
+include ::ceilometer::config
+
+class { '::ceilometer::api':
+ enabled => true,
+ service_name => 'httpd',
+ keystone_password => hiera('ceilometer::keystone::auth::password'),
+ identity_uri => hiera('ceilometer::keystone::authtoken::auth_url'),
+ auth_uri => hiera('ceilometer::keystone::authtoken::auth_uri'),
+ keystone_tenant => hiera('ceilometer::keystone::authtoken::project_name'),
+}
+
+class { '::apache' :
+ service_enable => false,
+ service_manage => true,
+ service_restart => '/bin/true',
+ purge_configs => false,
+ purge_vhost_dir => false,
+}
+
+# To ensure existing ports are not overridden
+class { '::aodh::wsgi::apache':
+ servername => $::hostname,
+ ssl => false,
+}
+class { '::gnocchi::wsgi::apache':
+ servername => $::hostname,
+ ssl => false,
+}
+
+class { '::keystone::wsgi::apache':
+ servername => $::hostname,
+ ssl => false,
+}
+class { '::ceilometer::wsgi::apache':
+ servername => $::hostname,
+ ssl => false,
+}
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index 7d794c97..4f17b69a 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -2,51 +2,286 @@
set -eu
-function check_resource {
+DEBUG="true" # set false if the verbosity is a problem
+SCRIPT_NAME=$(basename $0)
+function log_debug {
+ if [[ $DEBUG = "true" ]]; then
+        echo "$(date) $SCRIPT_NAME tripleo-upgrade $(facter hostname) $1"
+ fi
+}
+
+function is_bootstrap_node {
+ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ log_debug "Node is bootstrap"
+ echo "true"
+ fi
+}
+function check_resource_pacemaker {
if [ "$#" -ne 3 ]; then
- echo_error "ERROR: check_resource function expects 3 parameters, $# given"
- exit 1
+ echo_error "ERROR: check_resource function expects 3 parameters, $# given"
+ exit 1
fi
- service=$1
- state=$2
- timeout=$3
+ local service=$1
+ local state=$2
+ local timeout=$3
+
+ if [[ -z $(is_bootstrap_node) ]] ; then
+ log_debug "Node isn't bootstrap, skipping check for $service to be $state here "
+ return
+ else
+ log_debug "Node is bootstrap checking $service to be $state here"
+ fi
if [ "$state" = "stopped" ]; then
- match_for_incomplete='Started'
+ match_for_incomplete='Started'
else # started
- match_for_incomplete='Stopped'
+ match_for_incomplete='Stopped'
fi
nodes_local=$(pcs status | grep ^Online | sed 's/.*\[ \(.*\) \]/\1/g' | sed 's/ /\|/g')
if timeout -k 10 $timeout crm_resource --wait; then
- node_states=$(pcs status --full | grep "$service" | grep -v Clone | { egrep "$nodes_local" || true; } )
- if echo "$node_states" | grep -q "$match_for_incomplete"; then
- echo_error "ERROR: cluster finished transition but $service was not in $state state, exiting."
- exit 1
- else
- echo "$service has $state"
- fi
- else
- echo_error "ERROR: cluster remained unstable for more than $timeout seconds, exiting."
+ node_states=$(pcs status --full | grep "$service" | grep -v Clone | { egrep "$nodes_local" || true; } )
+ if echo "$node_states" | grep -q "$match_for_incomplete"; then
+ echo_error "ERROR: cluster finished transition but $service was not in $state state, exiting."
exit 1
+ else
+ echo "$service has $state"
+ fi
+ else
+ echo_error "ERROR: cluster remained unstable for more than $timeout seconds, exiting."
+ exit 1
+ fi
+
+}
+
+function pcmk_running {
+ if [[ $(systemctl is-active pacemaker) = "active" ]] ; then
+ echo "true"
+ fi
+}
+
+function is_systemd_unknown {
+ local service=$1
+ if [[ $(systemctl is-active "$service") = "unknown" ]]; then
+        log_debug "$service found to be unknown to systemd"
+ echo "true"
+ fi
+}
+
+function grep_is_cluster_controlled {
+ local service=$1
+ if [[ -n $(systemctl status $service -l | grep Drop-In -A 5 | grep pacemaker) ||
+ -n $(systemctl status $service -l | grep "Cluster Controlled $service") ]] ; then
+ log_debug "$service is pcmk managed from systemctl grep"
+ echo "true"
+ fi
+}
+
+
+function is_systemd_managed {
+ local service=$1
+ #if we have pcmk check to see if it is managed there
+ if [[ -n $(pcmk_running) ]]; then
+ if [[ -z $(pcs status --full | grep $service) && -z $(is_systemd_unknown $service) ]] ; then
+ log_debug "$service found to be systemd managed from pcs status"
+ echo "true"
+ fi
+ else
+ # if it is "unknown" to systemd, then it is pacemaker managed
+ if [[ -n $(is_systemd_unknown $service) ]] ; then
+ return
+ elif [[ -z $(grep_is_cluster_controlled $service) ]] ; then
+ echo "true"
+ fi
+ fi
+}
+
+function is_pacemaker_managed {
+ local service=$1
+ #if we have pcmk check to see if it is managed there
+ if [[ -n $(pcmk_running) ]]; then
+ if [[ -n $(pcs status --full | grep $service) ]]; then
+ log_debug "$service found to be pcmk managed from pcs status"
+ echo "true"
+ fi
+ else
+ # if it is unknown to systemd, then it is pcmk managed
+ if [[ -n $(is_systemd_unknown $service) ]]; then
+ echo "true"
+ elif [[ -n $(grep_is_cluster_controlled $service) ]] ; then
+ echo "true"
+ fi
+ fi
+}
+
+function is_managed {
+ local service=$1
+ if [[ -n $(is_pacemaker_managed $service) || -n $(is_systemd_managed $service) ]]; then
+ echo "true"
+ fi
+}
+
+function check_resource_systemd {
+
+ if [ "$#" -ne 3 ]; then
+ echo_error "ERROR: check_resource function expects 3 parameters, $# given"
+ exit 1
fi
+ local service=$1
+ local state=$2
+ local timeout=$3
+ local check_interval=3
+
+ if [ "$state" = "stopped" ]; then
+ match_for_incomplete='active'
+ else # started
+ match_for_incomplete='inactive'
+ fi
+
+ log_debug "Going to check_resource_systemd for $service to be $state"
+
+ #sanity check is systemd managed:
+ if [[ -z $(is_systemd_managed $service) ]]; then
+ echo "ERROR - $service not found to be systemd managed."
+ exit 1
+ fi
+
+ tstart=$(date +%s)
+ tend=$(( $tstart + $timeout ))
+ while (( $(date +%s) < $tend )); do
+ if [[ "$(systemctl is-active $service)" = $match_for_incomplete ]]; then
+ echo "$service not yet $state, sleeping $check_interval seconds."
+ sleep $check_interval
+ else
+ echo "$service is $state"
+ return
+ fi
+ done
+
+ echo "Timed out waiting for $service to go to $state after $timeout seconds"
+ exit 1
+}
+
+
+function check_resource {
+ local service=$1
+ local pcmk_managed=$(is_pacemaker_managed $service)
+ local systemd_managed=$(is_systemd_managed $service)
+
+ if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
+ log_debug "ERROR $service managed by both systemd and pcmk - SKIPPING"
+ return
+ fi
+
+ if [[ -n $pcmk_managed ]]; then
+ check_resource_pacemaker $@
+ return
+ elif [[ -n $systemd_managed ]]; then
+ check_resource_systemd $@
+ return
+ fi
+ log_debug "ERROR cannot check_resource for $service, not managed here?"
+}
+
+function manage_systemd_service {
+ local action=$1
+ local service=$2
+ log_debug "Going to systemctl $action $service"
+ systemctl $action $service
+}
+
+function manage_pacemaker_service {
+ local action=$1
+ local service=$2
+ # not if pacemaker isn't running!
+ if [[ -z $(pcmk_running) ]]; then
+ echo "$(facter hostname) pacemaker not active, skipping $action $service here"
+ elif [[ -n $(is_bootstrap_node) ]]; then
+ log_debug "Going to pcs resource $action $service"
+ pcs resource $action $service
+ fi
+}
+
+function stop_or_disable_service {
+ local service=$1
+ local pcmk_managed=$(is_pacemaker_managed $service)
+ local systemd_managed=$(is_systemd_managed $service)
+
+ if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
+ log_debug "Skipping stop_or_disable $service due to management conflict"
+ return
+ fi
+
+ log_debug "Stopping or disabling $service"
+ if [[ -n $pcmk_managed ]]; then
+ manage_pacemaker_service disable $service
+ return
+ elif [[ -n $systemd_managed ]]; then
+ manage_systemd_service stop $service
+ return
+ fi
+ log_debug "ERROR: $service not managed here?"
+}
+
+function start_or_enable_service {
+ local service=$1
+ local pcmk_managed=$(is_pacemaker_managed $service)
+ local systemd_managed=$(is_systemd_managed $service)
+
+ if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
+ log_debug "Skipping start_or_enable $service due to management conflict"
+ return
+ fi
+
+ log_debug "Starting or enabling $service"
+ if [[ -n $pcmk_managed ]]; then
+ manage_pacemaker_service enable $service
+ return
+ elif [[ -n $systemd_managed ]]; then
+ manage_systemd_service start $service
+ return
+ fi
+ log_debug "ERROR $service not managed here?"
+}
+
+function restart_service {
+ local service=$1
+ local pcmk_managed=$(is_pacemaker_managed $service)
+ local systemd_managed=$(is_systemd_managed $service)
+
+ if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
+ log_debug "ERROR $service managed by both systemd and pcmk - SKIPPING"
+ return
+ fi
+
+ log_debug "Restarting $service"
+ if [[ -n $pcmk_managed ]]; then
+ manage_pacemaker_service restart $service
+ return
+ elif [[ -n $systemd_managed ]]; then
+ manage_systemd_service restart $service
+ return
+ fi
+ log_debug "ERROR $service not managed here?"
}
function echo_error {
echo "$@" | tee /dev/fd2
}
+# swift is a special case because it is/was never handled by pacemaker
+# when stand-alone swift is used, only swift-proxy is running on controllers
function systemctl_swift {
services=( openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \
openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \
openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy )
- action=$1
+ local action=$1
case $action in
stop)
- services=$(systemctl | grep swift | grep running | awk '{print $1}')
+ services=$(systemctl | grep openstack-swift- | grep running | awk '{print $1}')
;;
start)
enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml 'enable_swift_storage')
@@ -54,9 +289,11 @@ function systemctl_swift {
services=( openstack-swift-proxy )
fi
;;
- *) services=() ;; # for safetly, should never happen
+ *) echo "Unknown action $action passed to systemctl_swift"
+ exit 1
+ ;; # shouldn't ever happen...
esac
- for S in ${services[@]}; do
- systemctl $action $S
+ for service in ${services[@]}; do
+ manage_systemd_service $action $service
done
}
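Usage note: the helpers added above are meant to be inlined or sourced into the upgrade scripts, with check_resource dispatching to the pacemaker or systemd code path depending on how the service is actually managed on the node. A short sketch under that assumption (the path and service name are only examples):

    source /usr/share/tripleo-upgrade/pacemaker_common_functions.sh   # hypothetical path
    stop_or_disable_service openstack-cinder-api
    check_resource openstack-cinder-api stopped 600
    start_or_enable_service openstack-cinder-api
    check_resource openstack-cinder-api started 600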
diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh
index 1637cee2..3da7efec 100755
--- a/extraconfig/tasks/pacemaker_resource_restart.sh
+++ b/extraconfig/tasks/pacemaker_resource_restart.sh
@@ -2,20 +2,24 @@
set -eux
-pacemaker_status=$(systemctl is-active pacemaker)
-
# Run if pacemaker is running, we're the bootstrap node,
# and we're updating the deployment (not creating).
-if [ "$pacemaker_status" = "active" -a \
- "$(hiera bootstrap_nodeid)" = "$(facter hostname)" -a \
- "$(hiera stack_action)" = "UPDATE" ]; then
+if [[ -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
- PCMK_RESOURCES="haproxy-clone redis-master rabbitmq-clone galera-master openstack-cinder-volume openstack-cinder-backup"
- # Ten minutes of timeout to restart each resource, given there are no constraints should be enough
TIMEOUT=600
- for resource in $PCMK_RESOURCES; do
- if pcs status | grep $resource; then
- pcs resource restart --wait=$TIMEOUT $resource
- fi
+ SERVICES_TO_RESTART="$(ls /var/lib/tripleo/pacemaker-restarts)"
+ PCS_STATUS_OUTPUT="$(pcs status)"
+
+ for service in $SERVICES_TO_RESTART; do
+ if ! echo "$PCS_STATUS_OUTPUT" | grep $service; then
+ echo "Service $service not found as a pacemaker resource, cannot restart it."
+ exit 1
+ fi
+ done
+
+ for service in $SERVICES_TO_RESTART; do
+ echo "Restarting $service..."
+ pcs resource restart --wait=$TIMEOUT $service
+ rm -f /var/lib/tripleo/pacemaker-restarts/$service
done
fi
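pacemaker_resource_restart.sh now takes its restart list from flag files instead of a hard-coded set of resources. A sketch of the assumed contract (the resource name is only an example):

    # A deployment step requests a restart by dropping a flag file ...
    mkdir -p /var/lib/tripleo/pacemaker-restarts
    touch /var/lib/tripleo/pacemaker-restarts/haproxy-clone
    # ... and the next run of this script on the bootstrap node restarts the
    # matching pcs resource and removes the flag file.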
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index b045e5ea..44c5e6e2 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -61,6 +61,21 @@ else
exit 0
fi
+
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+ echo "Manual upgrade of openvswitch - restart in postun detected"
+ mkdir OVS_UPGRADE || true
+ pushd OVS_UPGRADE
+ echo "Attempting to downloading latest openvswitch with yumdownloader"
+ yumdownloader --resolve openvswitch
+ echo "Updating openvswitch with nopostun"
+ rpm -U --nopostun ./*.rpm
+ popd
+else
+ echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
+fi
+
command=${command:-update}
full_command="yum -q -y $command $command_arguments"
echo "Running: $full_command"
diff --git a/hosts-config.yaml b/hosts-config.yaml
new file mode 100644
index 00000000..df0addfd
--- /dev/null
+++ b/hosts-config.yaml
@@ -0,0 +1,30 @@
+heat_template_version: 2016-10-14
+description: 'All Hosts Config'
+
+parameters:
+ hosts:
+ type: comma_delimited_list
+
+resources:
+
+ hostsConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ hosts:
+ list_join:
+ - "\n"
+ - {get_param: hosts}
+
+outputs:
+ config_id:
+ description: The ID of the hostsConfigImpl resource.
+ value:
+ {get_resource: hostsConfigImpl}
+ hosts_entries:
+ description: |
+ The content that should be appended to your /etc/hosts if you want to get
+ hostname-based access to the deployed nodes (useful for testing without
+ setting up a DNS).
+ value: {get_attr: [hostsConfigImpl, config, hosts]}
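The hosts_entries output is intended to be appended to /etc/hosts on machines that need name-based access to the overcloud nodes. A hedged one-liner, assuming the output is exposed on a top-level stack named overcloud and python-openstackclient with the Heat plugin is available:

    openstack stack output show overcloud hosts_entries -f value -c output_value | sudo tee -a /etc/hosts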
diff --git a/j2_excludes.yaml b/j2_excludes.yaml
new file mode 100644
index 00000000..063e63d4
--- /dev/null
+++ b/j2_excludes.yaml
@@ -0,0 +1,10 @@
+# This template specifies which j2 rendered templates
+# should be excluded in the render process from
+# tripleo-common/tripleo_common/actions/templates.py
+
+name:
+ - puppet/controller-role.yaml
+ - puppet/compute-role.yaml
+ - puppet/blockstorage-role.yaml
+ - puppet/objectstorage-role.yaml
+ - puppet/cephstorage-role.yaml
diff --git a/net-config-bond.yaml b/net-config-bond.yaml
index 0a162e77..ec881bdc 100644
--- a/net-config-bond.yaml
+++ b/net-config-bond.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2016-10-14
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge.
@@ -6,9 +6,15 @@ description: >
parameters:
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like
- lacp=active and/or bond_mode=balance-slb using this option.
+ description: |
+ The ovs_options string for the bond interface. Set things like
+ lacp=active and/or bond_mode=balance-slb using this option.
type: string
+ constraints:
+ - allowed_pattern: "^((?!balance.tcp).)*$"
+ description: |
+ The balance-tcp bond mode is known to cause packet loss and
+ should not be used in BondInterfaceOvsOptions.
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
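The new allowed_pattern is a negative-lookahead regex that rejects any ovs_options value containing balance-tcp. It can be sanity-checked locally with PCRE-capable grep (GNU grep -P):

    pattern='^((?!balance.tcp).)*$'
    echo 'bond_mode=balance-slb lacp=active' | grep -qP "$pattern" && echo accepted
    echo 'bond_mode=balance-tcp lacp=active' | grep -qP "$pattern" || echo rejected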
diff --git a/network/config/bond-with-vlans/ceph-storage.yaml b/network/config/bond-with-vlans/ceph-storage.yaml
index b414747f..9f537c02 100644
--- a/network/config/bond-with-vlans/ceph-storage.yaml
+++ b/network/config/bond-with-vlans/ceph-storage.yaml
@@ -38,6 +38,11 @@ parameters:
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
+ constraints:
+ - allowed_pattern: "^((?!balance.tcp).)*$"
+ description: |
+ The balance-tcp bond mode is known to cause packet loss and
+ should not be used in BondInterfaceOvsOptions.
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/cinder-storage.yaml b/network/config/bond-with-vlans/cinder-storage.yaml
index 5ee9ff09..b4d71fa3 100644
--- a/network/config/bond-with-vlans/cinder-storage.yaml
+++ b/network/config/bond-with-vlans/cinder-storage.yaml
@@ -38,6 +38,11 @@ parameters:
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
+ constraints:
+ - allowed_pattern: "^((?!balance.tcp).)*$"
+ description: |
+ The balance-tcp bond mode is known to cause packet loss and
+ should not be used in BondInterfaceOvsOptions.
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/compute-dpdk.yaml b/network/config/bond-with-vlans/compute-dpdk.yaml
new file mode 100644
index 00000000..3fc764be
--- /dev/null
+++ b/network/config/bond-with-vlans/compute-dpdk.yaml
@@ -0,0 +1,192 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to drive os-net-config with 2 bonded nics on a bridge
+ with VLANs attached for the compute role.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ BondInterfaceOvsOptions:
+ default: ''
+ description: The ovs_options string for the bond interface. Set things like
+ lacp=active and/or bond_mode=balance-slb using this option.
+ type: string
+ ExternalNetworkVlanID:
+ default: 10
+ description: Vlan ID for the external network traffic.
+ type: number
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ TenantNetworkVlanID:
+ default: 50
+ description: Vlan ID for the tenant network traffic.
+ type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ ExternalInterfaceDefaultRoute: # Not used by default in this template
+ default: '10.0.0.1'
+ description: The default route of the external network.
+ type: string
+ ManagementInterfaceDefaultRoute: # Commented out by default in this template
+ default: unset
+ description: The default route of the management network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers: {get_param: DnsServers}
+ addresses:
+ -
+ ip_netmask:
+ list_join:
+ - '/'
+ - - {get_param: ControlPlaneIp}
+ - {get_param: ControlPlaneSubnetCidr}
+ routes:
+ -
+ ip_netmask: 169.254.169.254/32
+ next_hop: {get_param: EC2MetadataIp}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
+ -
+ type: ovs_bridge
+ name: {get_input: bridge_name}
+ members:
+ -
+ type: ovs_bond
+ name: bond1
+ ovs_options: {get_param: BondInterfaceOvsOptions}
+ members:
+ -
+ type: interface
+ name: nic2
+ primary: true
+ -
+ type: interface
+ name: nic3
+ -
+ type: vlan
+ device: bond1
+ vlan_id: {get_param: InternalApiNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: InternalApiIpSubnet}
+ -
+ type: vlan
+ device: bond1
+ vlan_id: {get_param: StorageNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ -
+ type: vlan
+ device: bond1
+ vlan_id: {get_param: TenantNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: TenantIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ # If setting default route on the Management interface, comment
+ # out the default route on the Control Plane.
+ #-
+ # type: vlan
+ # device: bond1
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
+ # routes:
+ # -
+ # default: true
+ # next_hop: {get_param: ManagementInterfaceDefaultRoute}
+ -
+ type: ovs_user_bridge
+ name: br-link
+ members:
+ -
+ type: ovs_dpdk_bond
+ name: dpdkbond0
+ members:
+ -
+ type: ovs_dpdk_port
+ name: dpdk0
+ members:
+ -
+ type: interface
+ name: nic4
+ -
+ type: ovs_dpdk_port
+ name: dpdk1
+ members:
+ -
+ type: interface
+ name: nic5
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
diff --git a/network/config/bond-with-vlans/compute.yaml b/network/config/bond-with-vlans/compute.yaml
index 19c011eb..b2cfb0a2 100644
--- a/network/config/bond-with-vlans/compute.yaml
+++ b/network/config/bond-with-vlans/compute.yaml
@@ -38,6 +38,11 @@ parameters:
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
+ constraints:
+ - allowed_pattern: "^((?!balance.tcp).)*$"
+ description: |
+ The balance-tcp bond mode is known to cause packet loss and
+ should not be used in BondInterfaceOvsOptions.
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/controller-no-external.yaml b/network/config/bond-with-vlans/controller-no-external.yaml
index 6242e2f8..4c3e59fa 100644
--- a/network/config/bond-with-vlans/controller-no-external.yaml
+++ b/network/config/bond-with-vlans/controller-no-external.yaml
@@ -38,6 +38,11 @@ parameters:
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
+ constraints:
+ - allowed_pattern: "^((?!balance.tcp).)*$"
+ description: |
+ The balance-tcp bond mode is known to cause packet loss and
+ should not be used in BondInterfaceOvsOptions.
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/controller-v6.yaml b/network/config/bond-with-vlans/controller-v6.yaml
index 71b7e1b1..1361d969 100644
--- a/network/config/bond-with-vlans/controller-v6.yaml
+++ b/network/config/bond-with-vlans/controller-v6.yaml
@@ -40,6 +40,11 @@ parameters:
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
+ constraints:
+ - allowed_pattern: "^((?!balance.tcp).)*$"
+ description: |
+ The balance-tcp bond mode is known to cause packet loss and
+ should not be used in BondInterfaceOvsOptions.
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/controller.yaml b/network/config/bond-with-vlans/controller.yaml
index 9917f073..677c90c5 100644
--- a/network/config/bond-with-vlans/controller.yaml
+++ b/network/config/bond-with-vlans/controller.yaml
@@ -38,6 +38,11 @@ parameters:
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
+ constraints:
+ - allowed_pattern: "^((?!balance.tcp).)*$"
+ description: |
+ The balance-tcp bond mode is known to cause packet loss and
+ should not be used in BondInterfaceOvsOptions.
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/config/bond-with-vlans/swift-storage.yaml b/network/config/bond-with-vlans/swift-storage.yaml
index 4dd6628f..e16d6b6e 100644
--- a/network/config/bond-with-vlans/swift-storage.yaml
+++ b/network/config/bond-with-vlans/swift-storage.yaml
@@ -38,6 +38,11 @@ parameters:
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
+ constraints:
+ - allowed_pattern: "^((?!balance.tcp).)*$"
+ description: |
+ The balance-tcp bond mode is known to cause packet loss and
+ should not be used in BondInterfaceOvsOptions.
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
index 84b03c7e..fb01925b 100644
--- a/network/endpoints/endpoint_data.yaml
+++ b/network/endpoints/endpoint_data.yaml
@@ -199,6 +199,21 @@ Swift:
S3:
port: 8080
+CephRgw:
+ Internal:
+ net_param: CephRgw
+ uri_suffixes:
+ '': /swift/v1
+ Public:
+ net_param: Public
+ uri_suffixes:
+ '': /swift/v1
+ Admin:
+ net_param: CephRgw
+ uri_suffixes:
+ '': /swift/v1
+ port: 8080
+
Sahara:
Internal:
net_param: SaharaApi
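The CephRgw entries above make the radosgw endpoints look like Swift: each variant is built from port 8080 plus the /swift/v1 URI suffix. A hedged smoke test against a deployed endpoint (the host is a placeholder; an unauthenticated request is normally answered with 401):

    curl -i http://overcloud-rgw.example.test:8080/swift/v1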
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index dd29bcde..734b6431 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -25,6 +25,9 @@ parameters:
CeilometerAdmin: {protocol: http, port: '8777', host: IP_ADDRESS}
CeilometerInternal: {protocol: http, port: '8777', host: IP_ADDRESS}
CeilometerPublic: {protocol: http, port: '8777', host: IP_ADDRESS}
+ CephRgwAdmin: {protocol: http, port: '8080', host: IP_ADDRESS}
+ CephRgwInternal: {protocol: http, port: '8080', host: IP_ADDRESS}
+ CephRgwPublic: {protocol: http, port: '8080', host: IP_ADDRESS}
CinderAdmin: {protocol: http, port: '8776', host: IP_ADDRESS}
CinderInternal: {protocol: http, port: '8776', host: IP_ADDRESS}
CinderPublic: {protocol: http, port: '8776', host: IP_ADDRESS}
@@ -563,6 +566,252 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, CeilometerPublic, port]
+ CephRgwAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, CephRgwNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ port:
+ get_param: [EndpointMap, CephRgwAdmin, port]
+ protocol:
+ get_param: [EndpointMap, CephRgwAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CephRgwAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, CephRgwNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, CephRgwAdmin, port]
+ - /swift/v1
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CephRgwAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, CephRgwNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, CephRgwAdmin, port]
+ CephRgwInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, CephRgwNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ port:
+ get_param: [EndpointMap, CephRgwInternal, port]
+ protocol:
+ get_param: [EndpointMap, CephRgwInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CephRgwInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, CephRgwNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, CephRgwInternal, port]
+ - /swift/v1
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CephRgwInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, CephRgwNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, CephRgwInternal, port]
+ CephRgwPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, CephRgwPublic, port]
+ protocol:
+ get_param: [EndpointMap, CephRgwPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CephRgwPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, CephRgwPublic, port]
+ - /swift/v1
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CephRgwPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, CephRgwPublic, port]
CinderAdmin:
host:
str_replace:
diff --git a/network/external.yaml b/network/external.yaml
index 3b24da7e..4dfbc77e 100644
--- a/network/external.yaml
+++ b/network/external.yaml
@@ -37,6 +37,10 @@ parameters:
default: [{'start': '10.0.0.4', 'end': '10.0.0.250'}]
description: Ip allocation pool range for the external network.
type: json
+ ExternalInterfaceDefaultRoute:
+ default: '10.0.0.1'
+ description: default route for the external network
+ type: string
resources:
ExternalNetwork:
@@ -55,6 +59,7 @@ resources:
name: {get_param: ExternalSubnetName}
network: {get_resource: ExternalNetwork}
allocation_pools: {get_param: ExternalAllocationPools}
+ gateway_ip: {get_param: ExternalInterfaceDefaultRoute}
outputs:
OS::stack_id:
diff --git a/network/external_v6.yaml b/network/external_v6.yaml
index 3e120f24..e0736ab7 100644
--- a/network/external_v6.yaml
+++ b/network/external_v6.yaml
@@ -42,6 +42,10 @@ parameters:
default: dhcpv6-stateful
description: Neutron subnet IPv6 router advertisement mode
type: string
+ ExternalInterfaceDefaultRoute:
+ default: '2001:db8:fd00:1000::1'
+ description: default route for the external network
+ type: string
resources:
ExternalNetwork:
@@ -62,6 +66,7 @@ resources:
name: {get_param: ExternalSubnetName}
network: {get_resource: ExternalNetwork}
allocation_pools: {get_param: ExternalAllocationPools}
+ gateway_ip: {get_param: ExternalInterfaceDefaultRoute}
outputs:
OS::stack_id:
diff --git a/network/internal_api.yaml b/network/internal_api.yaml
index 6f8aa3a8..090e38f7 100644
--- a/network/internal_api.yaml
+++ b/network/internal_api.yaml
@@ -55,6 +55,7 @@ resources:
name: {get_param: InternalApiSubnetName}
network: {get_resource: InternalApiNetwork}
allocation_pools: {get_param: InternalApiAllocationPools}
+ gateway_ip: null
outputs:
OS::stack_id:
diff --git a/network/internal_api_v6.yaml b/network/internal_api_v6.yaml
index 68c14fbe..19d64b0a 100644
--- a/network/internal_api_v6.yaml
+++ b/network/internal_api_v6.yaml
@@ -62,6 +62,7 @@ resources:
name: {get_param: InternalApiSubnetName}
network: {get_resource: InternalApiNetwork}
allocation_pools: {get_param: InternalApiAllocationPools}
+ gateway_ip: null
outputs:
OS::stack_id:
diff --git a/network/management.yaml b/network/management.yaml
index 6878bac4..6798e11e 100644
--- a/network/management.yaml
+++ b/network/management.yaml
@@ -13,7 +13,7 @@ parameters:
ManagementNetValueSpecs:
default: {'provider:physical_network': 'management', 'provider:network_type': 'flat'}
description: Value specs for the management network.
- type: json
+ type: json
ManagementNetAdminStateUp:
default: false
description: The admin state of the network.
@@ -38,6 +38,10 @@ parameters:
default: [{'start': '10.0.1.4', 'end': '10.0.1.250'}]
description: Ip allocation pool range for the management network.
type: json
+ ManagementInterfaceDefaultRoute:
+ default: null
+ description: The default route of the management network.
+ type: string
resources:
ManagementNetwork:
@@ -56,6 +60,7 @@ resources:
name: {get_param: ManagementSubnetName}
network: {get_resource: ManagementNetwork}
allocation_pools: {get_param: ManagementAllocationPools}
+ gateway_ip: {get_param: ManagementInterfaceDefaultRoute}
outputs:
OS::stack_id:
diff --git a/network/ports/external_from_pool_v6.yaml b/network/ports/external_from_pool_v6.yaml
index baa544e7..e541049d 100644
--- a/network/ports/external_from_pool_v6.yaml
+++ b/network/ports/external_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [ExternalPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: ExternalNetCidr}, 1]}
diff --git a/network/ports/from_service.yaml b/network/ports/from_service.yaml
index 3d61910e..782b6b07 100644
--- a/network/ports/from_service.yaml
+++ b/network/ports/from_service.yaml
@@ -24,6 +24,12 @@ parameters:
description: The name of the undercloud Neutron control plane
default: ctlplane
type: string
+ FixedIPs: # Here for compatibility with ctlplane_vip.yaml
+ description: >
+ Control the IP allocation for the VIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ default: []
+ type: json
ServiceVips:
default: {}
type: json
diff --git a/network/ports/from_service_v6.yaml b/network/ports/from_service_v6.yaml
index 2dd0a0ee..80060b57 100644
--- a/network/ports/from_service_v6.yaml
+++ b/network/ports/from_service_v6.yaml
@@ -24,6 +24,12 @@ parameters:
description: The name of the undercloud Neutron control plane
default: ctlplane
type: string
+ FixedIPs: # Here for compatibility with ctlplane_vip.yaml
+ description: >
+ Control the IP allocation for the VIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ default: []
+ type: json
ServiceVips:
default: {}
type: json
diff --git a/network/ports/internal_api_from_pool_v6.yaml b/network/ports/internal_api_from_pool_v6.yaml
index 8d0a91b6..afb144ba 100644
--- a/network/ports/internal_api_from_pool_v6.yaml
+++ b/network/ports/internal_api_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [InternalApiPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: InternalApiNetCidr}, 1]}
diff --git a/network/ports/management_from_pool_v6.yaml b/network/ports/management_from_pool_v6.yaml
index d9ac6046..4c1cc216 100644
--- a/network/ports/management_from_pool_v6.yaml
+++ b/network/ports/management_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [ManagementPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: ManagementNetCidr}, 1]}
diff --git a/network/ports/net_ip_list_map.yaml b/network/ports/net_ip_list_map.yaml
index 36f3358e..d7863e02 100644
--- a/network/ports/net_ip_list_map.yaml
+++ b/network/ports/net_ip_list_map.yaml
@@ -28,6 +28,35 @@ parameters:
ServiceNetMap:
default: {}
type: json
+ ServiceHostnameList:
+ default: []
+ type: comma_delimited_list
+ NetworkHostnameMap:
+ default: []
+ type: json
+
+resources:
+ # This adds the extra "services" on for keystone
+ # so that keystone_admin_api_network and
+ # keystone_public_api_network point to the correct
+ # network on the nodes running the "keystone" service
+ EnabledServicesValue:
+ type: OS::Heat::Value
+ properties:
+ type: comma_delimited_list
+ value:
+ yaql:
+ expression: let(root => $) -> $.data.extra_services.items().where($[0] in $root.data.enabled_services).select($[1]).flatten() + $root.data.enabled_services
+ data:
+ enabled_services: {get_param: EnabledServices}
+ extra_services:
+ # If anything other than keystone needs this
+ # then we should add an extra_networks interface
+ # to the service templates role_data but for
+ # now we hard-code the keystone special case
+ keystone:
+ - keystone_admin_api
+ - keystone_public_api
outputs:
net_ip_map:
@@ -61,7 +90,7 @@ outputs:
template:
SERVICE_node_ips: SERVICE_network
for_each:
- SERVICE: {get_param: EnabledServices}
+ SERVICE: {get_attr: [EnabledServicesValue, value]}
- values: {get_param: ServiceNetMap}
- values:
ctlplane: {get_param: ControlPlaneIpList}
@@ -71,3 +100,41 @@ outputs:
storage_mgmt: {get_param: StorageMgmtIpList}
tenant: {get_param: TenantIpList}
management: {get_param: ManagementIpList}
+ service_hostnames:
+ description: >
+ Map of enabled services to a list of hostnames where they're running
+ value:
+ map_replace:
+ - yaql:
+ # This filters any entries where the value hasn't been substituted for
+ # a list, e.g it's still $service_network. This happens when there is
+ # no network defined for the service in the ServiceNetMap, which is OK
+ # as not all services have to be bound to a network, so we filter them
+ expression: dict($.data.map.items().where(not $[1].endsWith("_network")))
+ data:
+ map:
+ map_replace:
+ - map_merge:
+ repeat:
+ template:
+ SERVICE_node_names: SERVICE_network
+ for_each:
+ SERVICE: {get_attr: [EnabledServicesValue, value]}
+ - values: {get_param: ServiceNetMap}
+ - values: {get_param: NetworkHostnameMap}
+ short_service_hostnames:
+ description: >
+ Map of enabled services to a list of hostnames where they're running regardless of the network
+ value:
+ yaql:
+ # If ServiceHostnameList is empty the role is deployed with zero nodes
+ # therefore we don't want to add any *_node_names to the map
+ expression: dict($.data.map.items().where(len($[1]) > 0))
+ data:
+ map:
+ map_merge:
+ repeat:
+ template:
+ SERVICE_short_node_names: {get_param: ServiceHostnameList}
+ for_each:
+ SERVICE: {get_attr: [EnabledServicesValue, value]}
diff --git a/network/ports/storage_from_pool_v6.yaml b/network/ports/storage_from_pool_v6.yaml
index 328f8385..18faf1bd 100644
--- a/network/ports/storage_from_pool_v6.yaml
+++ b/network/ports/storage_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [StoragePort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: StorageNetCidr}, 1]}
diff --git a/network/ports/storage_mgmt_from_pool_v6.yaml b/network/ports/storage_mgmt_from_pool_v6.yaml
index 50470c92..e1145a31 100644
--- a/network/ports/storage_mgmt_from_pool_v6.yaml
+++ b/network/ports/storage_mgmt_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [StorageMgmtPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: StorageMgmtNetCidr}, 1]}
diff --git a/network/ports/tenant_from_pool_v6.yaml b/network/ports/tenant_from_pool_v6.yaml
index bbe6f736..d4f0d29c 100644
--- a/network/ports/tenant_from_pool_v6.yaml
+++ b/network/ports/tenant_from_pool_v6.yaml
@@ -48,4 +48,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [TenantPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: TenantNetCidr}, 1]}
diff --git a/network/service_net_map.yaml b/network/service_net_map.j2.yaml
index 50f5c55a..c4d86fb9 100644
--- a/network/service_net_map.yaml
+++ b/network/service_net_map.j2.yaml
@@ -8,11 +8,20 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
+                 Note that the key in this map must match the service_name
+                 in the service template, e.g. if the service_name is heat_api
+                 the key must be either heat_api_network, or optionally
+                 HeatApiNetwork (which will be internally converted from
+                 CamelCase to snake_case).
default: {}
type: json
+ # Note that the key in this map must match the service_name
+ # see the description above about conversion from CamelCase to
+ # snake_case - the names must still match when converted
ServiceNetMapDefaults:
default:
+ ApacheNetwork: internal_api
NeutronTenantNetwork: tenant
CeilometerApiNetwork: internal_api
AodhApiNetwork: internal_api
@@ -22,16 +31,19 @@ parameters:
CinderIscsiNetwork: storage
GlanceApiNetwork: storage
GlanceRegistryNetwork: internal_api
- IronicApiNetwork: internal_api
+ IronicApiNetwork: ctlplane
+ IronicNetwork: ctlplane
KeystoneAdminApiNetwork: ctlplane # allows undercloud to config endpoints
KeystonePublicApiNetwork: internal_api
ManilaApiNetwork: internal_api
NeutronApiNetwork: internal_api
HeatApiNetwork: internal_api
+ HeatApiCfnNetwork: internal_api
+ HeatApiCloudwatchNetwork: internal_api
NovaApiNetwork: internal_api
NovaMetadataNetwork: internal_api
NovaVncProxyNetwork: internal_api
- SwiftMgmtNetwork: storage_mgmt
+ SwiftStorageNetwork: storage_mgmt
SwiftProxyNetwork: storage
SaharaApiNetwork: internal_api
HorizonNetwork: internal_api
@@ -40,14 +52,16 @@ parameters:
RedisNetwork: internal_api
MysqlNetwork: internal_api
CephClusterNetwork: storage_mgmt
- CephPublicNetwork: storage
- ControllerHostnameResolveNetwork: internal_api
- ComputeHostnameResolveNetwork: internal_api
- BlockStorageHostnameResolveNetwork: internal_api
- ObjectStorageHostnameResolveNetwork: internal_api
- CephStorageHostnameResolveNetwork: storage
+ CephMonNetwork: storage
+ CephRgwNetwork: storage
PublicNetwork: external
- OpenDaylightApiNetwork: internal_api
+ OpendaylightApiNetwork: internal_api
+ # We special-case the default ResolveNetwork for the CephStorage role
+ # for backwards compatibility, all other roles default to internal_api
+ CephStorageHostnameResolveNetwork: storage
+{% for role in roles if role.name != 'CephStorage' %}
+ {{role.name}}HostnameResolveNetwork: internal_api
+{% endfor %}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
@@ -58,6 +72,8 @@ parameters:
default:
MongoDbNetwork: MongodbNetwork
RabbitMqNetwork: RabbitmqNetwork
+ CephPublicNetwork: CephMonNetwork
+ SwiftMgmtNetwork: SwiftStorageNetwork
description: Mapping older deprecated service names, intended for
internal use only, this will be removed in future.
type: json
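The updated ServiceNetMap description says CamelCase keys such as HeatApiNetwork are accepted and converted internally to the snake_case form (heat_api_network) before matching service_name. A rough shell approximation of that conversion, for illustration only (the real conversion happens inside the templates):

    camel_to_snake() {
        echo "$1" | sed -r 's/([a-z0-9])([A-Z])/\1_\2/g' | tr '[:upper:]' '[:lower:]'
    }
    camel_to_snake HeatApiNetwork    # prints heat_api_network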
diff --git a/network/storage.yaml b/network/storage.yaml
index dc9f35ea..35dae17a 100644
--- a/network/storage.yaml
+++ b/network/storage.yaml
@@ -55,6 +55,7 @@ resources:
name: {get_param: StorageSubnetName}
network: {get_resource: StorageNetwork}
allocation_pools: {get_param: StorageAllocationPools}
+ gateway_ip: null
outputs:
OS::stack_id:
diff --git a/network/storage_mgmt.yaml b/network/storage_mgmt.yaml
index 59933c8c..03cfd139 100644
--- a/network/storage_mgmt.yaml
+++ b/network/storage_mgmt.yaml
@@ -55,6 +55,7 @@ resources:
name: {get_param: StorageMgmtSubnetName}
network: {get_resource: StorageMgmtNetwork}
allocation_pools: {get_param: StorageMgmtAllocationPools}
+ gateway_ip: null
outputs:
OS::stack_id:
diff --git a/network/storage_mgmt_v6.yaml b/network/storage_mgmt_v6.yaml
index f05644ef..39c456db 100644
--- a/network/storage_mgmt_v6.yaml
+++ b/network/storage_mgmt_v6.yaml
@@ -62,6 +62,7 @@ resources:
name: {get_param: StorageMgmtSubnetName}
network: {get_resource: StorageMgmtNetwork}
allocation_pools: {get_param: StorageMgmtAllocationPools}
+ gateway_ip: null
outputs:
OS::stack_id:
diff --git a/network/storage_v6.yaml b/network/storage_v6.yaml
index 36a6fae8..5c8af9e5 100644
--- a/network/storage_v6.yaml
+++ b/network/storage_v6.yaml
@@ -62,6 +62,7 @@ resources:
name: {get_param: StorageSubnetName}
network: {get_resource: StorageNetwork}
allocation_pools: {get_param: StorageAllocationPools}
+ gateway_ip: null
outputs:
OS::stack_id:
diff --git a/network/tenant.yaml b/network/tenant.yaml
index 6fe96121..1045b81b 100644
--- a/network/tenant.yaml
+++ b/network/tenant.yaml
@@ -55,6 +55,7 @@ resources:
name: {get_param: TenantSubnetName}
network: {get_resource: TenantNetwork}
allocation_pools: {get_param: TenantAllocationPools}
+ gateway_ip: null
outputs:
OS::stack_id:
diff --git a/network/tenant_v6.yaml b/network/tenant_v6.yaml
index b653eaf7..bf758a50 100644
--- a/network/tenant_v6.yaml
+++ b/network/tenant_v6.yaml
@@ -62,6 +62,7 @@ resources:
name: {get_param: TenantSubnetName}
network: {get_resource: TenantNetwork}
allocation_pools: {get_param: TenantAllocationPools}
+ gateway_ip: null
outputs:
OS::stack_id:
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 57399210..9b9cd581 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -1,32 +1,44 @@
resource_registry:
- OS::TripleO::BlockStorage: puppet/cinder-storage.yaml
- OS::TripleO::BlockStorage::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::Compute: puppet/compute.yaml
- OS::TripleO::Compute::Net::SoftwareConfig: net-config-noop.yaml
+
OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment
- OS::TripleO::Controller: puppet/controller.yaml
- OS::TripleO::Controller::Net::SoftwareConfig: net-config-bridge.yaml
- OS::TripleO::ObjectStorage: puppet/swift-storage.yaml
- OS::TripleO::ObjectStorage::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::CephStorage: puppet/ceph-storage.yaml
- OS::TripleO::CephStorage::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::ControllerPostDeployment: puppet/controller-post.yaml
- # set to controller-config-pacemaker.yaml to enable pacemaker
- OS::TripleO::ControllerConfig: puppet/controller-config.yaml
- OS::TripleO::ComputePostDeployment: puppet/compute-post.yaml
- OS::TripleO::ObjectStoragePostDeployment: puppet/swift-storage-post.yaml
- OS::TripleO::BlockStoragePostDeployment: puppet/cinder-storage-post.yaml
- OS::TripleO::CephStoragePostDeployment: puppet/ceph-storage-post.yaml
- OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig: puppet/swift-devices-and-proxy-config.yaml
+ OS::TripleO::PostDeploySteps: puppet/post.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
+ OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
OS::TripleO::DefaultPasswords: default_passwords.yaml
# Tasks (for internal TripleO usage)
OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
- OS::TripleO::Tasks::ControllerPrePuppet: OS::Heat::None
- OS::TripleO::Tasks::ControllerPostPuppet: OS::Heat::None
+{% for role in roles %}
+ OS::TripleO::{{role.name}}: puppet/{{role.name.lower()}}-role.yaml
+ OS::TripleO::{{role.name}}Config: puppet/{{role.name.lower()}}-config.yaml
+ OS::TripleO::Tasks::{{role.name}}PreConfig: OS::Heat::None
+ OS::TripleO::Tasks::{{role.name}}PostConfig: OS::Heat::None
+ OS::TripleO::{{role.name}}ExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
+ # Port assignments for the {{role.name}} role
+ # Note we have to special-case ObjectStorage for backwards compatibility
+ {% if role.name != 'ObjectStorage' %}
+ OS::TripleO::{{role.name}}::Ports::ExternalPort: network/ports/noop.yaml
+ OS::TripleO::{{role.name}}::Ports::InternalApiPort: network/ports/noop.yaml
+ OS::TripleO::{{role.name}}::Ports::StoragePort: network/ports/noop.yaml
+ OS::TripleO::{{role.name}}::Ports::StorageMgmtPort: network/ports/noop.yaml
+ OS::TripleO::{{role.name}}::Ports::TenantPort: network/ports/noop.yaml
+ OS::TripleO::{{role.name}}::Ports::ManagementPort: network/ports/noop.yaml
+ {% else %}
+ OS::TripleO::SwiftStorage::Ports::ExternalPort: network/ports/noop.yaml
+ OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml
+ OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml
+ OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
+ OS::TripleO::SwiftStorage::Ports::TenantPort: network/ports/noop.yaml
+ OS::TripleO::SwiftStorage::Ports::ManagementPort: network/ports/noop.yaml
+ {% endif %}
+ OS::TripleO::{{role.name}}::Net::SoftwareConfig: net-config-noop.yaml
+{% endfor %}
+
+ # This resource registry entry will override the one generated by default
+ # in the jinja loop
+ OS::TripleO::Controller::Net::SoftwareConfig: net-config-bridge.yaml
OS::TripleO::Server: OS::Nova::Server
@@ -42,9 +54,6 @@ resource_registry:
OS::TripleO::NodeUserData: firstboot/userdata_default.yaml
OS::TripleO::NodeTLSCAData: OS::Heat::None
OS::TripleO::NodeTLSData: OS::Heat::None
- OS::TripleO::ControllerExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
- OS::TripleO::ComputeExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
- OS::TripleO::CephStorageExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml
OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
@@ -57,7 +66,6 @@ resource_registry:
# TripleO overcloud networks
OS::TripleO::Network: network/networks.yaml
- OS::TripleO::VipConfig: puppet/vip-config.yaml
OS::TripleO::Network::External: OS::Heat::None
OS::TripleO::Network::InternalApi: OS::Heat::None
@@ -79,46 +87,6 @@ resource_registry:
OS::TripleO::Network::Ports::StorageMgmtVipPort: network/ports/noop.yaml
OS::TripleO::Network::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
- # Port assignments for the controller role
- OS::TripleO::Controller::Ports::ExternalPort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::StorageMgmtPort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::TenantPort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::ManagementPort: network/ports/noop.yaml
-
- # Port assignments for the compute role
- OS::TripleO::Compute::Ports::ExternalPort: network/ports/noop.yaml
- OS::TripleO::Compute::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::Compute::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::Compute::Ports::StorageMgmtPort: network/ports/noop.yaml
- OS::TripleO::Compute::Ports::TenantPort: network/ports/noop.yaml
- OS::TripleO::Compute::Ports::ManagementPort: network/ports/noop.yaml
-
- # Port assignments for the ceph storage role
- OS::TripleO::CephStorage::Ports::ExternalPort: network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::TenantPort: network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::ManagementPort: network/ports/noop.yaml
-
- # Port assignments for the swift storage role
- OS::TripleO::SwiftStorage::Ports::ExternalPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::TenantPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::ManagementPort: network/ports/noop.yaml
-
- # Port assignments for the block storage role
- OS::TripleO::BlockStorage::Ports::ExternalPort: network/ports/noop.yaml
- OS::TripleO::BlockStorage::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::BlockStorage::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::BlockStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
- OS::TripleO::BlockStorage::Ports::TenantPort: network/ports/noop.yaml
- OS::TripleO::BlockStorage::Ports::ManagementPort: network/ports/noop.yaml
-
# Service to network Mappings
OS::TripleO::ServiceNetMap: network/service_net_map.yaml
@@ -130,8 +98,11 @@ resource_registry:
# services
OS::TripleO::Services: puppet/services/services.yaml
+ OS::TripleO::Services::Apache: puppet/services/apache.yaml
+ OS::TripleO::Services::ApacheTLS: OS::Heat::None
OS::TripleO::Services::CACerts: puppet/services/ca-certs.yaml
OS::TripleO::Services::CephMon: OS::Heat::None
+ OS::TripleO::Services::CephRgw: OS::Heat::None
OS::TripleO::Services::CephOSD: OS::Heat::None
OS::TripleO::Services::CephClient: OS::Heat::None
OS::TripleO::Services::CephExternal: OS::Heat::None
@@ -139,6 +110,7 @@ resource_registry:
OS::TripleO::Services::CinderBackup: OS::Heat::None
OS::TripleO::Services::CinderScheduler: puppet/services/cinder-scheduler.yaml
OS::TripleO::Services::CinderVolume: puppet/services/cinder-volume.yaml
+ OS::TripleO::Services::BlockStorageCinderVolume: puppet/services/cinder-volume.yaml
OS::TripleO::Services::Core: OS::Heat::None
OS::TripleO::Services::Keystone: puppet/services/keystone.yaml
OS::TripleO::Services::GlanceApi: puppet/services/glance-api.yaml
@@ -161,17 +133,19 @@ resource_registry:
# things where VMs run
OS::TripleO::Services::ComputeNeutronCorePlugin: puppet/services/neutron-plugin-ml2.yaml
# Neutron Core Plugin Vendors (these typically override NeutronCorePlugin)
+ OS::TripleO::Services::NeutronCorePluginML2OVN: puppet/services/neutron-plugin-ml2-ovn.yaml
OS::TripleO::Services::NeutronCorePluginPlumgrid: puppet/services/neutron-plugin-plumgrid.yaml
OS::TripleO::Services::NeutronCorePluginNuage: puppet/services/neutron-plugin-nuage.yaml
OS::TripleO::Services::NeutronCorePluginOpencontrail: puppet/services/neutron-plugin-opencontrail.yaml
OS::TripleO::Services::NeutronCorePluginMidonet: puppet/services/neutron-midonet.yaml
OS::TripleO::Services::NeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml
- # ComputeNeutronOvsAgent can be overriden to puppet/services/neutron-ovs-dpdk-agent.yaml also to enable DPDK
OS::TripleO::Services::ComputeNeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml
OS::TripleO::Services::Pacemaker: OS::Heat::None
OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None
OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml
OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml
+ OS::TripleO::Services::HAProxyPublicTLS: OS::Heat::None
+ OS::TripleO::Services::HAProxyInternalTLS: OS::Heat::None
OS::TripleO::Services::Keepalived: puppet/services/keepalived.yaml
OS::TripleO::Services::Memcached: puppet/services/memcached.yaml
OS::TripleO::Services::SaharaApi: OS::Heat::None
@@ -180,9 +154,10 @@ resource_registry:
OS::TripleO::Services::NovaConductor: puppet/services/nova-conductor.yaml
OS::TripleO::Services::MongoDb: puppet/services/database/mongodb.yaml
OS::TripleO::Services::NovaApi: puppet/services/nova-api.yaml
+ OS::TripleO::Services::NovaMetadata: puppet/services/nova-metadata.yaml
OS::TripleO::Services::NovaScheduler: puppet/services/nova-scheduler.yaml
OS::TripleO::Services::NovaConsoleauth: puppet/services/nova-consoleauth.yaml
- OS::TripleO::Services::NovaVncproxy: puppet/services/nova-vncproxy.yaml
+ OS::TripleO::Services::NovaVncProxy: puppet/services/nova-vnc-proxy.yaml
OS::TripleO::Services::NovaCompute: puppet/services/nova-compute.yaml
OS::TripleO::Services::NovaLibvirt: puppet/services/nova-libvirt.yaml
OS::TripleO::Services::Ntp: puppet/services/time/ntp.yaml
@@ -202,10 +177,16 @@ resource_registry:
OS::TripleO::Services::GnocchiApi: puppet/services/gnocchi-api.yaml
OS::TripleO::Services::GnocchiMetricd: puppet/services/gnocchi-metricd.yaml
OS::TripleO::Services::GnocchiStatsd: puppet/services/gnocchi-statsd.yaml
+ OS::TripleO::Services::VipHosts: puppet/services/vip-hosts.yaml
# Services that are disabled by default (use relevant environment files):
+ OS::TripleO::Services::FluentdClient: OS::Heat::None
+ OS::TripleO::LoggingConfiguration: puppet/services/logging/fluentd-config.yaml
OS::Tripleo::Services::ManilaApi: OS::Heat::None
OS::Tripleo::Services::ManilaScheduler: OS::Heat::None
OS::Tripleo::Services::ManilaShare: OS::Heat::None
+ OS::Tripleo::Services::ManilaBackendGeneric: OS::Heat::None
+ OS::Tripleo::Services::ManilaBackendNetapp: OS::Heat::None
+ OS::Tripleo::Services::ManilaBackendCephFs: OS::Heat::None
OS::TripleO::Services::ComputeNeutronL3Agent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronMetadataAgent: OS::Heat::None
OS::TripleO::Services::AodhApi: puppet/services/aodh-api.yaml
@@ -220,8 +201,14 @@ resource_registry:
OS::TripleO::Services::NovaIronic: OS::Heat::None
OS::TripleO::Services::TripleoPackages: puppet/services/tripleo-packages.yaml
OS::TripleO::Services::TripleoFirewall: puppet/services/tripleo-firewall.yaml
- OS::TripleO::Services::OpenDaylight: OS::Heat::None
+ OS::TripleO::Services::OpenDaylightApi: OS::Heat::None
OS::TripleO::Services::OpenDaylightOvs: OS::Heat::None
+ OS::TripleO::Services::SensuClient: OS::Heat::None
+ OS::TripleO::Services::ContrailAnalytics: puppet/services/network/contrail-analytics.yaml
+ OS::TripleO::Services::ContrailConfig: puppet/services/network/contrail-config.yaml
+ OS::TripleO::Services::ContrailControl: puppet/services/network/contrail-control.yaml
+ OS::TripleO::Services::ContrailDatabase: puppet/services/network/contrail-database.yaml
+ OS::TripleO::Services::ContrailWebui: puppet/services/network/contrail-webui.yaml
parameter_defaults:
EnablePackageInstall: false
diff --git a/overcloud-without-mergepy.yaml b/overcloud-without-mergepy.yaml
deleted file mode 120000
index 23dc6464..00000000
--- a/overcloud-without-mergepy.yaml
+++ /dev/null
@@ -1 +0,0 @@
-overcloud.yaml
\ No newline at end of file
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
new file mode 100644
index 00000000..2e6412db
--- /dev/null
+++ b/overcloud.j2.yaml
@@ -0,0 +1,653 @@
+heat_template_version: 2016-10-14
+
+description: >
+ Deploy an OpenStack environment, consisting of several node types (roles),
+ Controller, Compute, BlockStorage, SwiftStorage and CephStorage. The Storage
+ roles enable independent scaling of the storage components, but the minimal
+ deployment is one Controller and one Compute node.
+
+
+# TODO(shadower): we should probably use the parameter groups to put
+# some order in here.
+parameters:
+
+ # Common parameters (not specific to a role)
+ CloudName:
+ default: overcloud.localdomain
+ description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
+ type: string
+ CloudNameInternal:
+ default: overcloud.internalapi.localdomain
+ description: >
+ The DNS name of this cloud's internal API endpoint. E.g.
+ 'ci-overcloud.internalapi.tripleo.org'.
+ type: string
+ CloudNameStorage:
+ default: overcloud.storage.localdomain
+ description: >
+ The DNS name of this cloud's storage endpoint. E.g.
+ 'ci-overcloud.storage.tripleo.org'.
+ type: string
+ CloudNameStorageManagement:
+ default: overcloud.storagemgmt.localdomain
+ description: >
+ The DNS name of this cloud's storage management endpoint. E.g.
+ 'ci-overcloud.storagemgmt.tripleo.org'.
+ type: string
+ CloudNameCtlplane:
+ default: overcloud.ctlplane.localdomain
+ description: >
+      The DNS name of this cloud's provisioning (ctlplane) endpoint. E.g.
+      'ci-overcloud.ctlplane.tripleo.org'.
+ type: string
+ ControlFixedIPs:
+ default: []
+    description: >
+      Control the IP allocation for the ControlVirtualIP port. E.g.
+      [{'ip_address':'1.2.3.4'}]
+ type: json
+ InternalApiVirtualFixedIPs:
+ default: []
+ description: >
+ Control the IP allocation for the InternalApiVirtualInterface port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ type: json
+ NeutronControlPlaneID:
+ default: 'ctlplane'
+ type: string
+ description: Neutron ID or name for ctlplane network.
+ NeutronPublicInterface:
+ default: nic1
+ description: What interface to bridge onto br-ex for network nodes.
+ type: string
+ PublicVirtualFixedIPs:
+ default: []
+ description: >
+ Control the IP allocation for the PublicVirtualInterface port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ type: json
+ RabbitCookieSalt:
+ type: string
+ default: unset
+ description: Salt for the rabbit cookie, change this to force the randomly generated rabbit cookie to change.
+ StorageVirtualFixedIPs:
+ default: []
+ description: >
+ Control the IP allocation for the StorageVirtualInterface port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ type: json
+ StorageMgmtVirtualFixedIPs:
+ default: []
+ description: >
+      Control the IP allocation for the StorageMgmtVirtualInterface port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ type: json
+ RedisVirtualFixedIPs:
+ default: []
+ description: >
+ Control the IP allocation for the virtual IP used by Redis. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ type: json
+ CloudDomain:
+ default: 'localdomain'
+ type: string
+ description: >
+ The DNS domain used for the hosts. This should match the dhcp_domain
+ configured in the Undercloud neutron. Defaults to localdomain.
+ ServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API.
+ type: json
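+  # Illustrative value only (hypothetical keys): ServerMetadata takes arbitrary
+  # key/value pairs, e.g. {'owner': 'ci', 'purpose': 'testing'}, which Nova
+  # then exposes to the nodes via its metadata API.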
+
+# Compute-specific params
+# FIXME(shardy) handle these deprecated names as they don't match compute.yaml
+ HypervisorNeutronPhysicalBridge:
+ default: 'br-ex'
+ description: >
+ An OVS bridge to create on each hypervisor. This defaults to br-ex the
+ same as the control plane nodes, as we have a uniform configuration of
+ the openvswitch agent. Typically should not need to be changed.
+ type: string
+ HypervisorNeutronPublicInterface:
+ default: nic1
+ description: What interface to add to the HypervisorNeutronPhysicalBridge.
+ type: string
+
+ # Jinja loop for Role in role_data.yaml
+{% for role in roles %}
+ # Parameters generated for {{role.name}} Role
+ {{role.name}}Services:
+ description: A list of service resources (configured in the Heat
+ resource_registry) which represent nested stacks
+ for each service that should get installed on the {{role.name}} role.
+ type: comma_delimited_list
+ default: {{role.ServicesDefault|default([])}}
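+  # Illustrative sample entries only: a Services list is made up of
+  # resource_registry aliases, e.g.
+  #   - OS::TripleO::Services::Keystone
+  #   - OS::TripleO::Services::Ntp
+  # each of which resolves to a nested service template (or to OS::Heat::None
+  # when the service is disabled).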
+
+ {{role.name}}Count:
+ description: Number of {{role.name}} nodes to deploy
+ type: number
+ default: {{role.CountDefault|default(0)}}
+
+ {{role.name}}HostnameFormat:
+ type: string
+ description: >
+      Format for {{role.name}} node hostnames.
+      Note %index% is translated into the index of the node (e.g. 0, 1, 2)
+      and %stackname% is replaced with the stack name (e.g. overcloud).
+ {% if role.HostnameFormatDefault %}
+ default: "{{role.HostnameFormatDefault}}"
+ {% else %}
+ default: "%stackname%-{{role.name.lower()}}-%index%"
+ {% endif %}
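+  # For illustration only (assumed stack name "overcloud"): with the generic
+  # default above, the Controller role renders hostnames such as
+  # overcloud-controller-0, overcloud-controller-1, and so on.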
+
+ {{role.name}}RemovalPolicies:
+ default: []
+ type: json
+ description: >
+ List of resources to be removed from {{role.name}} ResourceGroup when
+ doing an update which requires removal of specific resources.
+ Example format ComputeRemovalPolicies: [{'resource_list': ['0']}]
+
+{% if role.name != 'Compute' %}
+ {{role.name}}SchedulerHints:
+{% else %}
+ NovaComputeSchedulerHints:
+{% endif %}
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
+{% endfor %}
+
+ # Identifiers to trigger tasks on nodes
+ UpdateIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting to a previously unused value during stack-update will trigger
+ package update on all nodes
+ DeployIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting this to a unique value will re-run any deployment tasks which
+ perform configuration on a Heat stack-update.
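+  # Illustrative usage only (assumed values): these are normally supplied via
+  # an environment file at stack-update time, e.g.
+  #   parameter_defaults:
+  #     UpdateIdentifier: '1474032941'
+  #     DeployIdentifier: '1474032941'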
+
+resources:
+
+ HeatAuthEncryptionKey:
+ type: OS::Heat::RandomString
+
+ PcsdPassword:
+ type: OS::Heat::RandomString
+ properties:
+ length: 16
+
+ HorizonSecret:
+ type: OS::Heat::RandomString
+ properties:
+ length: 10
+
+ ServiceNetMap:
+ type: OS::TripleO::ServiceNetMap
+
+ EndpointMap:
+ type: OS::TripleO::EndpointMap
+ properties:
+ CloudEndpoints:
+ external: {get_param: CloudName}
+ internal_api: {get_param: CloudNameInternal}
+ storage: {get_param: CloudNameStorage}
+ storage_mgmt: {get_param: CloudNameStorageManagement}
+ ctlplane: {get_param: CloudNameCtlplane}
+ NetIpMap: {get_attr: [VipMap, net_ip_map]}
+ ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
+
+ # Jinja loop for Role in roles_data.yaml
+{% for role in roles %}
+ # Resources generated for {{role.name}} Role
+ {{role.name}}ServiceChain:
+ type: OS::TripleO::Services
+ properties:
+ Services:
+ get_param: {{role.name}}Services
+ ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
+ EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+ DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
+
+ {{role.name}}HostsDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ name: {{role.name}}HostsDeployment
+ config: {get_attr: [hostsConfig, config_id]}
+ servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+
+ {{role.name}}AllNodesDeployment:
+ type: OS::Heat::StructuredDeployments
+ depends_on:
+{% for role_inner in roles %}
+ - {{role_inner.name}}HostsDeployment
+{% endfor %}
+ properties:
+ name: {{role.name}}AllNodesDeployment
+ config: {get_attr: [allNodesConfig, config_id]}
+ servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ input_values:
+ bootstrap_nodeid: {get_attr: [{{role.name}}, resource.0.hostname]}
+ bootstrap_nodeid_ip: {get_attr: [{{role.name}}, resource.0.ip_address]}
+
+ {{role.name}}AllNodesValidationDeployment:
+ type: OS::Heat::StructuredDeployments
+ depends_on: {{role.name}}AllNodesDeployment
+ properties:
+ name: {{role.name}}AllNodesValidationDeployment
+ config: {get_resource: AllNodesValidationConfig}
+ servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+
+ {{role.name}}IpListMap:
+ type: OS::TripleO::Network::Ports::NetIpListMap
+ properties:
+ ControlPlaneIpList: {get_attr: [{{role.name}}, ip_address]}
+ ExternalIpList: {get_attr: [{{role.name}}, external_ip_address]}
+ InternalApiIpList: {get_attr: [{{role.name}}, internal_api_ip_address]}
+ StorageIpList: {get_attr: [{{role.name}}, storage_ip_address]}
+ StorageMgmtIpList: {get_attr: [{{role.name}}, storage_mgmt_ip_address]}
+ TenantIpList: {get_attr: [{{role.name}}, tenant_ip_address]}
+ ManagementIpList: {get_attr: [{{role.name}}, management_ip_address]}
+ EnabledServices: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
+ ServiceHostnameList: {get_attr: [{{role.name}}, hostname]}
+ NetworkHostnameMap:
+ # Note (shardy) this somewhat complex yaql may be replaced
+ # with a map_deep_merge function in ocata. It merges the
+ # list of maps, but appends to colliding lists so we can
+ # create a map of lists for all nodes for each network
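+        # Illustrative example (assumed sample data): merging
+        #   [{internal_api: [ctrl-0.internalapi]},
+        #    {internal_api: [ctrl-1.internalapi]}]
+        # with the expression below yields
+        #   {internal_api: [ctrl-0.internalapi, ctrl-1.internalapi]}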
+ yaql:
+ expression: dict($.data.where($ != null).flatten().selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
+ data:
+ - {get_attr: [{{role.name}}, hostname_map]}
+
+ {{role.name}}:
+ type: OS::Heat::ResourceGroup
+ depends_on: Networks
+ properties:
+ count: {get_param: {{role.name}}Count}
+ removal_policies: {get_param: {{role.name}}RemovalPolicies}
+ resource_def:
+ type: OS::TripleO::{{role.name}}
+ properties:
+ CloudDomain: {get_param: CloudDomain}
+ ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
+ EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+ Hostname:
+ str_replace:
+ template: {get_param: {{role.name}}HostnameFormat}
+ params:
+ '%stackname%': {get_param: 'OS::stack_name'}
+ NodeIndex: '%index%'
+ {% if role.name != 'Compute' %}
+ {{role.name}}SchedulerHints: {get_param: {{role.name}}SchedulerHints}
+ {% else %}
+ NovaComputeSchedulerHints: {get_param: NovaComputeSchedulerHints}
+ {% endif %}
+ ServiceConfigSettings:
+ map_merge:
+ - get_attr: [{{role.name}}ServiceChain, role_data, config_settings]
+ {% for r in roles %}
+ - get_attr: [{{r.name}}ServiceChain, role_data, global_config_settings]
+ {% endfor %}
+ # This next step combines two yaql passes:
+ # - The inner one does a deep merge on the service_config_settings for all roles
+ # - The outer one filters the map based on the services enabled for the role
+ # then merges the result into one map.
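+              # Illustrative example (assumed sample data): if the inner merge
+              # produces {keystone: {foo: 1}, glance_api: {bar: 2}} and this
+              # role only enables [keystone], the outer pass reduces that map
+              # to {foo: 1} for the role.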
+ - yaql:
+ expression: let(root => $) -> $.data.map.items().where($[0] in $root.data.services).select($[1]).reduce($1.mergeWith($2), {})
+ data:
+ map:
+ yaql:
+ expression: $.data.where($ != null).reduce($1.mergeWith($2), {})
+ data:
+ {% for r in roles %}
+ - get_attr: [{{r.name}}ServiceChain, role_data, service_config_settings]
+ {% endfor %}
+ services: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ ServiceNames: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChain, role_data, monitoring_subscriptions]}
+{% endfor %}
+
+ hostsConfig:
+ type: OS::TripleO::Hosts::SoftwareConfig
+ properties:
+ hosts:
+{% for role in roles %}
+ - list_join:
+ - '\n'
+ - {get_attr: [{{role.name}}, hosts_entry]}
+{% endfor %}
+
+ allNodesConfig:
+ type: OS::TripleO::AllNodes::SoftwareConfig
+ properties:
+ cloud_name_external: {get_param: CloudName}
+ cloud_name_internal_api: {get_param: CloudNameInternal}
+ cloud_name_storage: {get_param: CloudNameStorage}
+ cloud_name_storage_mgmt: {get_param: CloudNameStorageManagement}
+ cloud_name_ctlplane: {get_param: CloudNameCtlplane}
+ enabled_services:
+ list_join:
+ - ','
+{% for role in roles %}
+ - {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+{% endfor %}
+ logging_groups:
+ yaql:
+ expression: >
+ $.data.groups.flatten()
+ data:
+ groups:
+{% for role in roles %}
+ - {get_attr: [{{role.name}}ServiceChain, role_data, logging_groups]}
+{% endfor %}
+ logging_sources:
+ yaql:
+ expression: >
+ $.data.sources.flatten()
+ data:
+ sources:
+{% for role in roles %}
+ - {get_attr: [{{role.name}}ServiceChain, role_data, logging_sources]}
+{% endfor %}
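+      # The two flatten() expressions above simply concatenate the per-role
+      # lists; for illustration (assumed sample data), [['a'], ['b', 'c']]
+      # flattens to ['a', 'b', 'c'].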
+ controller_ips: {get_attr: [Controller, ip_address]}
+ controller_names: {get_attr: [Controller, hostname]}
+ service_ips:
+ # Note (shardy) this somewhat complex yaql may be replaced
+ # with a map_deep_merge function in ocata. It merges the
+ # list of maps, but appends to colliding lists when a service
+ # is deployed on more than one role
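+      # Illustrative example (hypothetical key and addresses): two per-role
+      # maps such as {some_service_node_ips: [192.0.2.10]} and
+      # {some_service_node_ips: [192.0.2.11]} merge into
+      # {some_service_node_ips: [192.0.2.10, 192.0.2.11]}.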
+ yaql:
+ expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
+ data:
+ l:
+{% for role in roles %}
+ - {get_attr: [{{role.name}}IpListMap, service_ips]}
+{% endfor %}
+ service_node_names:
+ yaql:
+ expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
+ data:
+ l:
+{% for role in roles %}
+ - {get_attr: [{{role.name}}IpListMap, service_hostnames]}
+{% endfor %}
+ short_service_node_names:
+ yaql:
+ expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
+ data:
+ l:
+{% for role in roles %}
+ - {get_attr: [{{role.name}}IpListMap, short_service_hostnames]}
+{% endfor %}
+ # FIXME(shardy): These require further work to move into service_ips
+ memcache_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]}
+ NetVipMap: {get_attr: [VipMap, net_ip_map]}
+ RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]}
+ ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
+ DeployIdentifier: {get_param: DeployIdentifier}
+ UpdateIdentifier: {get_param: UpdateIdentifier}
+
+ MysqlRootPassword:
+ type: OS::Heat::RandomString
+ properties:
+ length: 10
+
+ RabbitCookie:
+ type: OS::Heat::RandomString
+ properties:
+ length: 20
+ salt: {get_param: RabbitCookieSalt}
+
+ DefaultPasswords:
+ type: OS::TripleO::DefaultPasswords
+ properties:
+ DefaultMysqlRootPassword: {get_attr: [MysqlRootPassword, value]}
+ DefaultRabbitCookie: {get_attr: [RabbitCookie, value]}
+ DefaultHeatAuthEncryptionKey: {get_attr: [HeatAuthEncryptionKey, value]}
+ DefaultPcsdPassword: {get_attr: [PcsdPassword, value]}
+ DefaultHorizonSecret: {get_attr: [HorizonSecret, value]}
+
+ # creates the network architecture
+ Networks:
+ type: OS::TripleO::Network
+
+ ControlVirtualIP:
+ type: OS::Neutron::Port
+ depends_on: Networks
+ properties:
+ name: control_virtual_ip
+ network: {get_param: NeutronControlPlaneID}
+ fixed_ips: {get_param: ControlFixedIPs}
+ replacement_policy: AUTO
+
+ RedisVirtualIP:
+ depends_on: Networks
+ type: OS::TripleO::Network::Ports::RedisVipPort
+ properties:
+ ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
+ PortName: redis_virtual_ip
+ NetworkName: {get_attr: [ServiceNetMap, service_net_map, RedisNetwork]}
+ ServiceName: redis
+ FixedIPs: {get_param: RedisVirtualFixedIPs}
+
+ # The public VIP is on the External net, falls back to ctlplane
+ PublicVirtualIP:
+ depends_on: Networks
+ type: OS::TripleO::Network::Ports::ExternalVipPort
+ properties:
+ ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
+ PortName: public_virtual_ip
+ FixedIPs: {get_param: PublicVirtualFixedIPs}
+
+ InternalApiVirtualIP:
+ depends_on: Networks
+ type: OS::TripleO::Network::Ports::InternalApiVipPort
+ properties:
+ ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ PortName: internal_api_virtual_ip
+ FixedIPs: {get_param: InternalApiVirtualFixedIPs}
+
+ StorageVirtualIP:
+ depends_on: Networks
+ type: OS::TripleO::Network::Ports::StorageVipPort
+ properties:
+ ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ PortName: storage_virtual_ip
+ FixedIPs: {get_param: StorageVirtualFixedIPs}
+
+ StorageMgmtVirtualIP:
+ depends_on: Networks
+ type: OS::TripleO::Network::Ports::StorageMgmtVipPort
+ properties:
+ ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ PortName: storage_management_virtual_ip
+ FixedIPs: {get_param: StorageMgmtVirtualFixedIPs}
+
+ VipMap:
+ type: OS::TripleO::Network::Ports::NetVipMap
+ properties:
+ ControlPlaneIp: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ ExternalIp: {get_attr: [PublicVirtualIP, ip_address]}
+ ExternalIpUri: {get_attr: [PublicVirtualIP, ip_address_uri]}
+ InternalApiIp: {get_attr: [InternalApiVirtualIP, ip_address]}
+ InternalApiIpUri: {get_attr: [InternalApiVirtualIP, ip_address_uri]}
+ StorageIp: {get_attr: [StorageVirtualIP, ip_address]}
+ StorageIpUri: {get_attr: [StorageVirtualIP, ip_address_uri]}
+ StorageMgmtIp: {get_attr: [StorageMgmtVirtualIP, ip_address]}
+ StorageMgmtIpUri: {get_attr: [StorageMgmtVirtualIP, ip_address_uri]}
+ # No tenant or management VIP required
+
+ # All Nodes Validations
+ AllNodesValidationConfig:
+ type: OS::TripleO::AllNodes::Validation
+ properties:
+ PingTestIps:
+ list_join:
+ - ' '
+ - - {get_attr: [Controller, resource.0.external_ip_address]}
+ - {get_attr: [Controller, resource.0.internal_api_ip_address]}
+ - {get_attr: [Controller, resource.0.storage_ip_address]}
+ - {get_attr: [Controller, resource.0.storage_mgmt_ip_address]}
+ - {get_attr: [Controller, resource.0.tenant_ip_address]}
+ - {get_attr: [Controller, resource.0.management_ip_address]}
+
+ UpdateWorkflow:
+ type: OS::TripleO::Tasks::UpdateWorkflow
+ depends_on:
+{% for role in roles %}
+ - {{role.name}}AllNodesDeployment
+{% endfor %}
+ properties:
+ servers:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+{% endfor %}
+ input_values:
+ deploy_identifier: {get_param: DeployIdentifier}
+ update_identifier: {get_param: UpdateIdentifier}
+
+ # Optional ExtraConfig for all nodes - all roles are passed in here, but
+ # the nested template may configure each role differently (or not at all)
+ AllNodesExtraConfig:
+ type: OS::TripleO::AllNodesExtraConfig
+ depends_on:
+ - UpdateWorkflow
+{% for role in roles %}
+ - {{role.name}}AllNodesValidationDeployment
+{% endfor %}
+ properties:
+{% for role in roles %}
+ servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+{% endfor %}
+
+ # Post deployment steps for all roles
+ AllNodesDeploySteps:
+ type: OS::TripleO::PostDeploySteps
+    depends_on:
+{% for role in roles %}
+ - {{role.name}}AllNodesDeployment
+{% endfor %}
+ properties:
+ servers:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+{% endfor %}
+ role_data:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
+{% endfor %}
+
+outputs:
+ ManagedEndpoints:
+ description: Asserts that the keystone endpoints have been provisioned.
+ value: true
+ KeystoneURL:
+ description: URL for the Overcloud Keystone service
+ value: {get_attr: [EndpointMap, endpoint_map, KeystonePublic, uri]}
+ KeystoneAdminVip:
+ description: Keystone Admin VIP endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
+ PublicVip:
+ description: Controller VIP for public API endpoints
+ value: {get_attr: [VipMap, net_ip_map, external]}
+ AodhInternalVip:
+ description: VIP for Aodh API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, AodhApiNetwork]}]}
+ CeilometerInternalVip:
+ description: VIP for Ceilometer API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CeilometerApiNetwork]}]}
+ CephRgwInternalVip:
+ description: VIP for Ceph RGW internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CephRgwNetwork]}]}
+ CinderInternalVip:
+ description: VIP for Cinder API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CinderApiNetwork]}]}
+ GlanceInternalVip:
+ description: VIP for Glance API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GlanceApiNetwork]}]}
+ GnocchiInternalVip:
+ description: VIP for Gnocchi API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GnocchiApiNetwork]}]}
+ HeatInternalVip:
+ description: VIP for Heat API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, HeatApiNetwork]}]}
+ IronicInternalVip:
+ description: VIP for Ironic API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, IronicApiNetwork]}]}
+ KeystoneInternalVip:
+ description: VIP for Keystone API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystonePublicApiNetwork]}]}
+ ManilaInternalVip:
+ description: VIP for Manila API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, ManilaApiNetwork]}]}
+ NeutronInternalVip:
+ description: VIP for Neutron API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NeutronApiNetwork]}]}
+ NovaInternalVip:
+ description: VIP for Nova API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NovaApiNetwork]}]}
+ OpenDaylightInternalVip:
+ description: VIP for OpenDaylight API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, OpenDaylightApiNetwork]}]}
+ SaharaInternalVip:
+ description: VIP for Sahara API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SaharaApiNetwork]}]}
+ SwiftInternalVip:
+ description: VIP for Swift Proxy internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SwiftProxyNetwork]}]}
+ EndpointMap:
+ description: |
+ Mapping of the resources with the needed info for their endpoints.
+ This includes the protocol used, the IP, port and also a full
+ representation of the URI.
+ value: {get_attr: [EndpointMap, endpoint_map]}
+ HostsEntry:
+ description: |
+ The content that should be appended to your /etc/hosts if you want to get
+ hostname-based access to the deployed nodes (useful for testing without
+ setting up a DNS).
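+    # For illustration only (addresses are hypothetical): each appended entry
+    # is of the form "192.0.2.20 overcloud.localdomain", one line per node and
+    # per VIP/hostname pair listed below.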
+ value:
+ list_join:
+ - "\n"
+ - - {get_attr: [hostsConfig, hosts_entries]}
+ -
+ - str_replace:
+ template: IP HOST
+ params:
+ IP: {get_attr: [VipMap, net_ip_map, external]}
+ HOST: {get_param: CloudName}
+ - str_replace:
+ template: IP HOST
+ params:
+ IP: {get_attr: [VipMap, net_ip_map, ctlplane]}
+ HOST: {get_param: CloudNameCtlplane}
+ - str_replace:
+ template: IP HOST
+ params:
+ IP: {get_attr: [VipMap, net_ip_map, internal_api]}
+ HOST: {get_param: CloudNameInternal}
+ - str_replace:
+ template: IP HOST
+ params:
+ IP: {get_attr: [VipMap, net_ip_map, storage]}
+ HOST: {get_param: CloudNameStorage}
+ - str_replace:
+ template: IP HOST
+ params:
+ IP: {get_attr: [VipMap, net_ip_map, storage_mgmt]}
+ HOST: {get_param: CloudNameStorageManagement}
+ EnabledServices:
+ description: The services enabled on each role
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+{% endfor %}
diff --git a/overcloud.yaml b/overcloud.yaml
deleted file mode 100644
index 38a36800..00000000
--- a/overcloud.yaml
+++ /dev/null
@@ -1,1029 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Deploy an OpenStack environment, consisting of several node types (roles),
- Controller, Compute, BlockStorage, SwiftStorage and CephStorage. The Storage
- roles enable independent scaling of the storage components, but the minimal
- deployment is one Controller and one Compute node.
-
-
-# TODO(shadower): we should probably use the parameter groups to put
-# some order in here.
-parameters:
-
- # Common parameters (not specific to a role)
- CloudName:
- default: overcloud
- description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
- type: string
- CloudNameInternal:
- default: overcloud.internalapi.localdomain
- description: >
- The DNS name of this cloud's internal API endpoint. E.g.
- 'ci-overcloud.internalapi.tripleo.org'.
- type: string
- CloudNameStorage:
- default: overcloud.storage.localdomain
- description: >
- The DNS name of this cloud's storage endpoint. E.g.
- 'ci-overcloud.storage.tripleo.org'.
- type: string
- CloudNameStorageManagement:
- default: overcloud.storagemgmt.localdomain
- description: >
- The DNS name of this cloud's storage management endpoint. E.g.
- 'ci-overcloud.storagemgmt.tripleo.org'.
- type: string
- CloudNameManagement:
- default: overcloud.management.localdomain
- description: >
- The DNS name of this cloud's storage management endpoint. E.g.
- 'ci-overcloud.management.tripleo.org'.
- type: string
- ControlFixedIPs:
- default: []
- description: Should be used for arbitrary ips.
- type: json
- InternalApiVirtualFixedIPs:
- default: []
- description: >
- Control the IP allocation for the InternalApiVirtualInterface port. E.g.
- [{'ip_address':'1.2.3.4'}]
- type: json
- NeutronControlPlaneID:
- default: 'ctlplane'
- type: string
- description: Neutron ID or name for ctlplane network.
- NeutronPublicInterface:
- default: nic1
- description: What interface to bridge onto br-ex for network nodes.
- type: string
- PublicVirtualFixedIPs:
- default: []
- description: >
- Control the IP allocation for the PublicVirtualInterface port. E.g.
- [{'ip_address':'1.2.3.4'}]
- type: json
- RabbitCookieSalt:
- type: string
- default: unset
- description: Salt for the rabbit cookie, change this to force the randomly generated rabbit cookie to change.
- StorageVirtualFixedIPs:
- default: []
- description: >
- Control the IP allocation for the StorageVirtualInterface port. E.g.
- [{'ip_address':'1.2.3.4'}]
- type: json
- StorageMgmtVirtualFixedIPs:
- default: []
- description: >
- Control the IP allocation for the StorageMgmgVirtualInterface port. E.g.
- [{'ip_address':'1.2.3.4'}]
- type: json
- RedisVirtualFixedIPs:
- default: []
- description: >
- Control the IP allocation for the virtual IP used by Redis. E.g.
- [{'ip_address':'1.2.3.4'}]
- type: json
- CloudDomain:
- default: 'localdomain'
- type: string
- description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
- ServerMetadata:
- default: {}
- description: >
- Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
- type: json
-
- # Controller-specific params
- ControllerCount:
- type: number
- default: 1
- controllerExtraConfig:
- default: {}
- description: |
- Deprecated. Use ControllerExtraConfig via parameter_defaults instead.
- type: json
- ExtraConfig:
- default: {}
- description: |
- Additional configuration to inject into the cluster. The format required
- may be implementation specific, e.g puppet hieradata. Any role specific
- ExtraConfig, e.g controllerExtraConfig takes precedence over ExtraConfig.
- type: json
-
-# Compute-specific params
- ComputeCount:
- type: number
- default: 1
- HypervisorNeutronPhysicalBridge:
- default: 'br-ex'
- description: >
- An OVS bridge to create on each hypervisor. This defaults to br-ex the
- same as the control plane nodes, as we have a uniform configuration of
- the openvswitch agent. Typically should not need to be changed.
- type: string
- HypervisorNeutronPublicInterface:
- default: nic1
- description: What interface to add to the HypervisorNeutronPhysicalBridge.
- type: string
-
- ControllerServices:
- default:
- - OS::TripleO::Services::CACerts
- - OS::TripleO::Services::CephMon
- - OS::TripleO::Services::CephExternal
- - OS::TripleO::Services::CinderApi
- - OS::TripleO::Services::CinderBackup
- - OS::TripleO::Services::CinderScheduler
- - OS::TripleO::Services::CinderVolume
- - OS::TripleO::Services::Core
- - OS::TripleO::Services::Kernel
- - OS::TripleO::Services::Keystone
- - OS::TripleO::Services::GlanceApi
- - OS::TripleO::Services::GlanceRegistry
- - OS::TripleO::Services::HeatApi
- - OS::TripleO::Services::HeatApiCfn
- - OS::TripleO::Services::HeatApiCloudwatch
- - OS::TripleO::Services::HeatEngine
- - OS::TripleO::Services::MySQL
- - OS::TripleO::Services::NeutronDhcpAgent
- - OS::TripleO::Services::NeutronL3Agent
- - OS::TripleO::Services::NeutronMetadataAgent
- - OS::TripleO::Services::NeutronApi
- - OS::TripleO::Services::NeutronCorePlugin
- - OS::TripleO::Services::NeutronOvsAgent
- - OS::TripleO::Services::RabbitMQ
- - OS::TripleO::Services::HAproxy
- - OS::TripleO::Services::Keepalived
- - OS::TripleO::Services::Memcached
- - OS::TripleO::Services::Pacemaker
- - OS::TripleO::Services::Redis
- - OS::TripleO::Services::NovaConductor
- - OS::TripleO::Services::MongoDb
- - OS::TripleO::Services::NovaApi
- - OS::TripleO::Services::NovaScheduler
- - OS::TripleO::Services::NovaConsoleauth
- - OS::TripleO::Services::NovaVncproxy
- - OS::TripleO::Services::Ntp
- - OS::TripleO::Services::SwiftProxy
- - OS::TripleO::Services::SwiftStorage
- - OS::TripleO::Services::SwiftRingBuilder
- - OS::TripleO::Services::Snmp
- - OS::TripleO::Services::Timezone
- - OS::TripleO::Services::CeilometerApi
- - OS::TripleO::Services::CeilometerCollector
- - OS::TripleO::Services::CeilometerExpirer
- - OS::TripleO::Services::CeilometerAgentCentral
- - OS::TripleO::Services::CeilometerAgentNotification
- - OS::TripleO::Services::Horizon
- - OS::TripleO::Services::GnocchiApi
- - OS::TripleO::Services::GnocchiMetricd
- - OS::TripleO::Services::GnocchiStatsd
- - OS::Tripleo::Services::ManilaApi
- - OS::Tripleo::Services::ManilaScheduler
- - OS::Tripleo::Services::ManilaShare
- - OS::TripleO::Services::AodhApi
- - OS::TripleO::Services::AodhEvaluator
- - OS::TripleO::Services::AodhNotifier
- - OS::TripleO::Services::AodhListener
- - OS::TripleO::Services::SaharaApi
- - OS::TripleO::Services::SaharaEngine
- - OS::TripleO::Services::IronicApi
- - OS::TripleO::Services::IronicConductor
- - OS::TripleO::Services::NovaIronic
- - OS::TripleO::Services::TripleoPackages
- - OS::TripleO::Services::TripleoFirewall
- - OS::TripleO::Services::OpenDaylight
- description: A list of service resources (configured in the Heat
- resource_registry) which represent nested stacks
- for each service that should get installed on the Controllers.
- type: comma_delimited_list
-
- ComputeServices:
- default:
- - OS::TripleO::Services::CACerts
- - OS::TripleO::Services::CephClient
- - OS::TripleO::Services::CephExternal
- - OS::TripleO::Services::Timezone
- - OS::TripleO::Services::Ntp
- - OS::TripleO::Services::Snmp
- - OS::TripleO::Services::NovaCompute
- - OS::TripleO::Services::NovaLibvirt
- - OS::TripleO::Services::Kernel
- - OS::TripleO::Services::ComputeNeutronCorePlugin
- - OS::TripleO::Services::ComputeNeutronOvsAgent
- - OS::TripleO::Services::ComputeCeilometerAgent
- - OS::TripleO::Services::ComputeNeutronL3Agent
- - OS::TripleO::Services::ComputeNeutronMetadataAgent
- - OS::TripleO::Services::TripleoPackages
- - OS::TripleO::Services::TripleoFirewall
- - OS::TripleO::Services::NeutronSriovAgent
- - OS::TripleO::Services::OpenDaylightOvs
- description: A list of service resources (configured in the Heat
- resource_registry) which represent nested stacks
- for each service that should get installed on the Compute Nodes.
- type: comma_delimited_list
-
-# Block storage specific parameters
- BlockStorageCount:
- type: number
- default: 0
- BlockStorageExtraConfig:
- default: {}
- description: |
- BlockStorage specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
- BlockStorageServices:
- default:
- - OS::TripleO::Services::CACerts
- - OS::TripleO::Services::CinderVolume
- - OS::TripleO::Services::Kernel
- - OS::TripleO::Services::Ntp
- - OS::TripleO::Services::Timezone
- - OS::TripleO::Services::Snmp
- - OS::TripleO::Services::TripleoPackages
- - OS::TripleO::Services::TripleoFirewall
- description: A list of service resources (configured in the Heat
- resource_registry) which represent nested stacks
- for each service that should get installed on the BlockStorage nodes.
- type: comma_delimited_list
-
-# Object storage specific parameters
- ObjectStorageCount:
- type: number
- default: 0
- ObjectStorageExtraConfig:
- default: {}
- description: |
- ObjectStorage specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
- ObjectStorageServices:
- default:
- - OS::TripleO::Services::CACerts
- - OS::TripleO::Services::Kernel
- - OS::TripleO::Services::Ntp
- - OS::TripleO::Services::SwiftStorage
- - OS::TripleO::Services::SwiftRingBuilder
- - OS::TripleO::Services::Snmp
- - OS::TripleO::Services::Timezone
- - OS::TripleO::Services::TripleoPackages
- - OS::TripleO::Services::TripleoFirewall
- description: A list of service resources (configured in the Heat
- resource_registry) which represent nested stacks
- for each service that should get installed on the ObjectStorage nodes.
- Note this role currently only supports steps 2, 3 and 4 configuration.
- type: comma_delimited_list
-
-
-# Ceph storage specific parameters
- CephStorageCount:
- type: number
- default: 0
- CephStorageExtraConfig:
- default: {}
- description: |
- CephStorage specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
- CephStorageServices:
- default:
- - OS::TripleO::Services::CACerts
- - OS::TripleO::Services::CephOSD
- - OS::TripleO::Services::Kernel
- - OS::TripleO::Services::Ntp
- - OS::TripleO::Services::Timezone
- - OS::TripleO::Services::TripleoPackages
- - OS::TripleO::Services::TripleoFirewall
- description: A list of service resources (configured in the Heat
- resource_registry) which represent nested stacks
- for each service that should get installed on the CephStorage nodes.
- type: comma_delimited_list
-
- # Hostname format for each role
- # Note %index% is translated into the index of the node, e.g 0/1/2 etc
- # and %stackname% is replaced with OS::stack_name in the template below.
- # If you want to use the heat generated names, pass '' (empty string).
- ControllerHostnameFormat:
- type: string
- description: Format for Controller node hostnames
- default: '%stackname%-controller-%index%'
- ComputeHostnameFormat:
- type: string
- description: Format for Compute node hostnames
- default: '%stackname%-novacompute-%index%'
- BlockStorageHostnameFormat:
- type: string
- description: Format for BlockStorage node hostnames
- default: '%stackname%-blockstorage-%index%'
- ObjectStorageHostnameFormat:
- type: string
- description: Format for SwiftStorage node hostnames
- default: '%stackname%-objectstorage-%index%'
- CephStorageHostnameFormat:
- type: string
- description: Format for CephStorage node hostnames
- default: '%stackname%-cephstorage-%index%'
-
- # Identifiers to trigger tasks on nodes
- UpdateIdentifier:
- default: ''
- type: string
- description: >
- Setting to a previously unused value during stack-update will trigger
- package update on all nodes
- DeployIdentifier:
- default: ''
- type: string
- description: >
- Setting this to a unique value will re-run any deployment tasks which
- perform configuration on a Heat stack-update.
-
- # If you want to remove a specific node from a resource group, you can pass
- # the node name or id as a <Group>RemovalPolicies parameter, for example:
- # ComputeRemovalPolicies: [{'resource_list': ['0']}]
- ControllerRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from ControllerResourceGroup when
- doing an update which requires removal of specific resources.
- ComputeRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from ComputeResourceGroup when
- doing an update which requires removal of specific resources.
- BlockStorageRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from BlockStorageResourceGroup when
- doing an update which requires removal of specific resources.
- ObjectStorageRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from ObjectStorageResourceGroup when
- doing an update which requires removal of specific resources.
- CephStorageRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from CephStorageResourceGroup when
- doing an update which requires removal of specific resources.
-
-parameter_groups:
-- label: deprecated
- description: Do not use deprecated params, they will be removed.
- parameters:
- - controllerExtraConfig
-
-
-resources:
-
- HeatAuthEncryptionKey:
- type: OS::Heat::RandomString
-
- PcsdPassword:
- type: OS::Heat::RandomString
- properties:
- length: 16
-
- HorizonSecret:
- type: OS::Heat::RandomString
- properties:
- length: 10
-
- ServiceNetMap:
- type: OS::TripleO::ServiceNetMap
-
- EndpointMap:
- type: OS::TripleO::EndpointMap
- properties:
- CloudEndpoints:
- external: {get_param: CloudName}
- internal_api: {get_param: CloudNameInternal}
- storage: {get_param: CloudNameStorage}
- storage_mgmt: {get_param: CloudNameStorageManagement}
- management: {get_param: CloudNameManagement}
- NetIpMap: {get_attr: [VipMap, net_ip_map]}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
-
- ControllerServiceChain:
- type: OS::TripleO::Services
- properties:
- Services: {get_param: ControllerServices}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
- DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
-
- Controller:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: ControllerCount}
- removal_policies: {get_param: ControllerRemovalPolicies}
- resource_def:
- type: OS::TripleO::Controller
- properties:
- CloudDomain: {get_param: CloudDomain}
- controllerExtraConfig: {get_param: controllerExtraConfig}
- HorizonSecret: {get_resource: HorizonSecret}
- PcsdPassword: {get_resource: PcsdPassword}
- RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]}
- RedisVirtualIPUri: {get_attr: [RedisVirtualIP, ip_address_uri]}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
- Hostname:
- str_replace:
- template: {get_param: ControllerHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
- NodeIndex: '%index%'
- ServiceConfigSettings: {get_attr: [ControllerServiceChain, role_data, config_settings]}
- ServiceNames: {get_attr: [ControllerServiceChain, role_data, service_names]}
-
- ComputeServiceChain:
- type: OS::TripleO::Services
- properties:
- Services: {get_param: ComputeServices}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
- DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
-
- Compute:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: ComputeCount}
- removal_policies: {get_param: ComputeRemovalPolicies}
- resource_def:
- type: OS::TripleO::Compute
- properties:
- CloudDomain: {get_param: CloudDomain}
- NeutronPhysicalBridge: {get_param: HypervisorNeutronPhysicalBridge}
- NeutronPublicInterface: {get_param: HypervisorNeutronPublicInterface}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
- Hostname:
- str_replace:
- template: {get_param: ComputeHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
- NodeIndex: '%index%'
- ServiceConfigSettings: {get_attr: [ComputeServiceChain, role_data, config_settings]}
- ServiceNames: {get_attr: [ComputeServiceChain, role_data, service_names]}
-
- BlockStorageServiceChain:
- type: OS::TripleO::Services
- properties:
- Services: {get_param: BlockStorageServices}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
- DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
-
- BlockStorage:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: BlockStorageCount}
- removal_policies: {get_param: BlockStorageRemovalPolicies}
- resource_def:
- type: OS::TripleO::BlockStorage
- properties:
- UpdateIdentifier: {get_param: UpdateIdentifier}
- Hostname:
- str_replace:
- template: {get_param: BlockStorageHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- ExtraConfig: {get_param: ExtraConfig}
- BlockStorageExtraConfig: {get_param: BlockStorageExtraConfig}
- CloudDomain: {get_param: CloudDomain}
- ServerMetadata: {get_param: ServerMetadata}
- NodeIndex: '%index%'
- ServiceConfigSettings: {get_attr: [BlockStorageServiceChain, role_data, config_settings]}
- ServiceNames: {get_attr: [BlockStorageServiceChain, role_data, service_names]}
-
- ObjectStorageServiceChain:
- type: OS::TripleO::Services
- properties:
- Services: {get_param: ObjectStorageServices}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
- DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
-
- ObjectStorage:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: ObjectStorageCount}
- removal_policies: {get_param: ObjectStorageRemovalPolicies}
- resource_def:
- type: OS::TripleO::ObjectStorage
- properties:
- UpdateIdentifier: {get_param: UpdateIdentifier}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- Hostname:
- str_replace:
- template: {get_param: ObjectStorageHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
- ExtraConfig: {get_param: ExtraConfig}
- ObjectStorageExtraConfig: {get_param: ObjectStorageExtraConfig}
- CloudDomain: {get_param: CloudDomain}
- ServerMetadata: {get_param: ServerMetadata}
- NodeIndex: '%index%'
- ServiceConfigSettings: {get_attr: [ObjectStorageServiceChain, role_data, config_settings]}
- ServiceNames: {get_attr: [ObjectStorageServiceChain, role_data, service_names]}
-
- CephStorageServiceChain:
- type: OS::TripleO::Services
- properties:
- Services: {get_param: CephStorageServices}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
- DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
-
- CephStorage:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: CephStorageCount}
- removal_policies: {get_param: CephStorageRemovalPolicies}
- resource_def:
- type: OS::TripleO::CephStorage
- properties:
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- UpdateIdentifier: {get_param: UpdateIdentifier}
- Hostname:
- str_replace:
- template: {get_param: CephStorageHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
- ExtraConfig: {get_param: ExtraConfig}
- CephStorageExtraConfig: {get_param: CephStorageExtraConfig}
- CloudDomain: {get_param: CloudDomain}
- ServerMetadata: {get_param: ServerMetadata}
- NodeIndex: '%index%'
- ServiceConfigSettings: {get_attr: [CephStorageServiceChain, role_data, config_settings]}
- ServiceNames: {get_attr: [CephStorageServiceChain, role_data, service_names]}
-
- ControllerIpListMap:
- type: OS::TripleO::Network::Ports::NetIpListMap
- properties:
- ControlPlaneIpList: {get_attr: [Controller, ip_address]}
- ExternalIpList: {get_attr: [Controller, external_ip_address]}
- InternalApiIpList: {get_attr: [Controller, internal_api_ip_address]}
- StorageIpList: {get_attr: [Controller, storage_ip_address]}
- StorageMgmtIpList: {get_attr: [Controller, storage_mgmt_ip_address]}
- TenantIpList: {get_attr: [Controller, tenant_ip_address]}
- ManagementIpList: {get_attr: [Controller, management_ip_address]}
- EnabledServices: {get_attr: [ControllerServiceChain, role_data, service_names]}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
-
- allNodesConfig:
- type: OS::TripleO::AllNodes::SoftwareConfig
- properties:
- hosts:
- - list_join:
- - '\n'
- - {get_attr: [Compute, hosts_entry]}
- - list_join:
- - '\n'
- - {get_attr: [Controller, hosts_entry]}
- - list_join:
- - '\n'
- - {get_attr: [BlockStorage, hosts_entry]}
- - list_join:
- - '\n'
- - {get_attr: [ObjectStorage, hosts_entry]}
- - list_join:
- - '\n'
- - {get_attr: [CephStorage, hosts_entry]}
- enabled_services:
- list_join:
- - ','
- - {get_attr: [ControllerServiceChain, role_data, service_names]}
- - {get_attr: [ComputeServiceChain, role_data, service_names]}
- - {get_attr: [BlockStorageServiceChain, role_data, service_names]}
- - {get_attr: [ObjectStorageServiceChain, role_data, service_names]}
- - {get_attr: [CephStorageServiceChain, role_data, service_names]}
- controller_ips: {get_attr: [Controller, ip_address]}
- controller_names: {get_attr: [Controller, hostname]}
- service_ips: {get_attr: [ControllerIpListMap, service_ips]}
- # FIXME(shardy): These require further work to move into service_ips
- rabbit_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, RabbitmqNetwork]}]}
- memcache_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]}
- keystone_public_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystonePublicApiNetwork]}]}
- keystone_admin_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
- ceph_mon_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CephPublicNetwork]}]}
- ceph_mon_node_names: {get_attr: [Controller, hostname]}
- DeployIdentifier: {get_param: DeployIdentifier}
- UpdateIdentifier: {get_param: UpdateIdentifier}
-
- MysqlRootPassword:
- type: OS::Heat::RandomString
- properties:
- length: 10
-
- RabbitCookie:
- type: OS::Heat::RandomString
- properties:
- length: 20
- salt: {get_param: RabbitCookieSalt}
-
- DefaultPasswords:
- type: OS::TripleO::DefaultPasswords
- properties:
- DefaultMysqlRootPassword: {get_attr: [MysqlRootPassword, value]}
- DefaultRabbitCookie: {get_attr: [RabbitCookie, value]}
- DefaultHeatAuthEncryptionKey: {get_attr: [HeatAuthEncryptionKey, value]}
- DefaultPcsdPassword: {get_attr: [PcsdPassword, value]}
- DefaultHorizonSecret: {get_attr: [HorizonSecret, value]}
-
- # creates the network architecture
- Networks:
- type: OS::TripleO::Network
-
- ControlVirtualIP:
- type: OS::Neutron::Port
- depends_on: Networks
- properties:
- name: control_virtual_ip
- network: {get_param: NeutronControlPlaneID}
- fixed_ips: {get_param: ControlFixedIPs}
- replacement_policy: AUTO
-
- RedisVirtualIP:
- depends_on: Networks
- type: OS::TripleO::Network::Ports::RedisVipPort
- properties:
- ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
- PortName: redis_virtual_ip
- NetworkName: {get_attr: [ServiceNetMap, service_net_map, RedisNetwork]}
- ServiceName: redis
- FixedIPs: {get_param: RedisVirtualFixedIPs}
-
- # The public VIP is on the External net, falls back to ctlplane
- PublicVirtualIP:
- depends_on: Networks
- type: OS::TripleO::Network::Ports::ExternalVipPort
- properties:
- ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
- PortName: public_virtual_ip
- FixedIPs: {get_param: PublicVirtualFixedIPs}
-
- InternalApiVirtualIP:
- depends_on: Networks
- type: OS::TripleO::Network::Ports::InternalApiVipPort
- properties:
- ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- PortName: internal_api_virtual_ip
- FixedIPs: {get_param: InternalApiVirtualFixedIPs}
-
- StorageVirtualIP:
- depends_on: Networks
- type: OS::TripleO::Network::Ports::StorageVipPort
- properties:
- ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- PortName: storage_virtual_ip
- FixedIPs: {get_param: StorageVirtualFixedIPs}
-
- StorageMgmtVirtualIP:
- depends_on: Networks
- type: OS::TripleO::Network::Ports::StorageMgmtVipPort
- properties:
- ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- PortName: storage_management_virtual_ip
- FixedIPs: {get_param: StorageMgmtVirtualFixedIPs}
-
- VipMap:
- type: OS::TripleO::Network::Ports::NetVipMap
- properties:
- ControlPlaneIp: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- ExternalIp: {get_attr: [PublicVirtualIP, ip_address]}
- ExternalIpUri: {get_attr: [PublicVirtualIP, ip_address_uri]}
- InternalApiIp: {get_attr: [InternalApiVirtualIP, ip_address]}
- InternalApiIpUri: {get_attr: [InternalApiVirtualIP, ip_address_uri]}
- StorageIp: {get_attr: [StorageVirtualIP, ip_address]}
- StorageIpUri: {get_attr: [StorageVirtualIP, ip_address_uri]}
- StorageMgmtIp: {get_attr: [StorageMgmtVirtualIP, ip_address]}
- StorageMgmtIpUri: {get_attr: [StorageMgmtVirtualIP, ip_address_uri]}
- # No tenant or management VIP required
-
- VipConfig:
- type: OS::TripleO::VipConfig
-
- VipDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- name: VipDeployment
- config: {get_resource: VipConfig}
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
- input_values:
- # service VIP mappings
- keystone_admin_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
- keystone_public_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystonePublicApiNetwork]}]}
- neutron_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NeutronApiNetwork]}]}
- cinder_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CinderApiNetwork]}]}
- glance_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GlanceApiNetwork]}]}
- glance_registry_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GlanceRegistryNetwork]}]}
- swift_proxy_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SwiftProxyNetwork]}]}
- nova_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NovaApiNetwork]}]}
- nova_metadata_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NovaMetadataNetwork]}]}
- ceilometer_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CeilometerApiNetwork]}]}
- aodh_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, AodhApiNetwork]}]}
- gnocchi_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GnocchiApiNetwork]}]}
- heat_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, HeatApiNetwork]}]}
- horizon_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, HorizonNetwork]}]}
- redis_vip: {get_attr: [RedisVirtualIP, ip_address]}
- manila_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, ManilaApiNetwork]}]}
- mysql_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MysqlNetwork]}]}
- rabbit_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, RabbitMqNetwork]}]}
- # direct configuration of Virtual IPs for each network
- control_virtual_ip: {get_attr: [VipMap, net_ip_map, ctlplane]}
- public_virtual_ip: {get_attr: [VipMap, net_ip_map, external]}
- internal_api_virtual_ip: {get_attr: [VipMap, net_ip_map, internal_api]}
- sahara_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SaharaApiNetwork]}]}
- ironic_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, IronicApiNetwork]}]}
- opendaylight_api_vip: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, OpenDaylightApiNetwork]}]}
- storage_virtual_ip: {get_attr: [VipMap, net_ip_map, storage]}
- storage_mgmt_virtual_ip: {get_attr: [VipMap, net_ip_map, storage_mgmt]}
-
- ControllerSwiftDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- name: ControllerSwiftDeployment
- config: {get_attr: [SwiftDevicesAndProxyConfig, config_id]}
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
-
- ObjectStorageSwiftDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- name: ObjectStorageSwiftDeployment
- config: {get_attr: [SwiftDevicesAndProxyConfig, config_id]}
- servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
-
- SwiftDevicesAndProxyConfig:
- type: OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig
- properties:
- controller_swift_devices: {get_attr: [Controller, swift_device]}
- object_store_swift_devices: {get_attr: [ObjectStorage, swift_device]}
- controller_swift_proxy_memcaches: {get_attr: [Controller, swift_proxy_memcache]}
-
- ControllerAllNodesDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- name: ControllerAllNodesDeployment
- config: {get_attr: [allNodesConfig, config_id]}
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
- input_values:
- bootstrap_nodeid: {get_attr: [Controller, resource.0.hostname]}
- bootstrap_nodeid_ip: {get_attr: [Controller, resource.0.ip_address]}
-
- ComputeAllNodesDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- name: ComputeAllNodesDeployment
- config: {get_attr: [allNodesConfig, config_id]}
- servers: {get_attr: [Compute, attributes, nova_server_resource]}
- input_values:
- bootstrap_nodeid: {get_attr: [Compute, resource.0.hostname]}
- bootstrap_nodeid_ip: {get_attr: [Compute, resource.0.ip_address]}
-
- BlockStorageAllNodesDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- name: BlockStorageAllNodesDeployment
- config: {get_attr: [allNodesConfig, config_id]}
- servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
- input_values:
- bootstrap_nodeid: {get_attr: [BlockStorage, resource.0.hostname]}
- bootstrap_nodeid_ip: {get_attr: [BlockStorage, resource.0.ip_address]}
-
- ObjectStorageAllNodesDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- name: ObjectStorageAllNodesDeployment
- config: {get_attr: [allNodesConfig, config_id]}
- servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
- input_values:
- bootstrap_nodeid: {get_attr: [ObjectStorage, resource.0.hostname]}
- bootstrap_nodeid_ip: {get_attr: [ObjectStorage, resource.0.ip_address]}
-
- CephStorageAllNodesDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- name: CephStorageAllNodesDeployment
- config: {get_attr: [allNodesConfig, config_id]}
- servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
- input_values:
- bootstrap_nodeid: {get_attr: [CephStorage, resource.0.hostname]}
- bootstrap_nodeid_ip: {get_attr: [CephStorage, resource.0.ip_address]}
-
- # All Nodes Validations
- AllNodesValidationConfig:
- type: OS::TripleO::AllNodes::Validation
- properties:
- PingTestIps:
- list_join:
- - ' '
- - - {get_attr: [Controller, resource.0.external_ip_address]}
- - {get_attr: [Controller, resource.0.internal_api_ip_address]}
- - {get_attr: [Controller, resource.0.storage_ip_address]}
- - {get_attr: [Controller, resource.0.storage_mgmt_ip_address]}
- - {get_attr: [Controller, resource.0.tenant_ip_address]}
- - {get_attr: [Controller, resource.0.management_ip_address]}
-
- ControllerAllNodesValidationDeployment:
- type: OS::Heat::StructuredDeployments
- depends_on: ControllerAllNodesDeployment
- properties:
- name: ControllerAllNodesValidationDeployment
- config: {get_resource: AllNodesValidationConfig}
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
-
- ComputeAllNodesValidationDeployment:
- type: OS::Heat::StructuredDeployments
- depends_on: ComputeAllNodesDeployment
- properties:
- name: ComputeAllNodesValidationDeployment
- config: {get_resource: AllNodesValidationConfig}
- servers: {get_attr: [Compute, attributes, nova_server_resource]}
-
- BlockStorageAllNodesValidationDeployment:
- type: OS::Heat::StructuredDeployments
- depends_on: BlockStorageAllNodesDeployment
- properties:
- name: BlockStorageAllNodesValidationDeployment
- config: {get_resource: AllNodesValidationConfig}
- servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
-
- ObjectStorageAllNodesValidationDeployment:
- type: OS::Heat::StructuredDeployments
- depends_on: ObjectStorageAllNodesDeployment
- properties:
- name: ObjectStorageAllNodesValidationDeployment
- config: {get_resource: AllNodesValidationConfig}
- servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
-
- CephStorageAllNodesValidationDeployment:
- type: OS::Heat::StructuredDeployments
- depends_on: CephStorageAllNodesDeployment
- properties:
- name: CephStorageAllNodesValidationDeployment
- config: {get_resource: AllNodesValidationConfig}
- servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
-
- UpdateWorkflow:
- type: OS::TripleO::Tasks::UpdateWorkflow
- properties:
- controller_servers: {get_attr: [Controller, attributes, nova_server_resource]}
- compute_servers: {get_attr: [Compute, attributes, nova_server_resource]}
- blockstorage_servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
- objectstorage_servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
- cephstorage_servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
- input_values:
- deploy_identifier: {get_param: DeployIdentifier}
- update_identifier: {get_param: UpdateIdentifier}
-
- # Optional ExtraConfig for all nodes - all roles are passed in here, but
- # the nested template may configure each role differently (or not at all)
- AllNodesExtraConfig:
- type: OS::TripleO::AllNodesExtraConfig
- depends_on:
- - UpdateWorkflow
- - ComputeAllNodesValidationDeployment
- - BlockStorageAllNodesValidationDeployment
- - ObjectStorageAllNodesValidationDeployment
- - CephStorageAllNodesValidationDeployment
- - ControllerAllNodesValidationDeployment
- properties:
- controller_servers: {get_attr: [Controller, attributes, nova_server_resource]}
- compute_servers: {get_attr: [Compute, attributes, nova_server_resource]}
- blockstorage_servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
- objectstorage_servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
- cephstorage_servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
-
- # Nested stack deployment runs after all other controller deployments
- ControllerNodesPostDeployment:
- type: OS::TripleO::ControllerPostDeployment
- depends_on: [ControllerAllNodesDeployment, ControllerSwiftDeployment]
- properties:
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
- RoleData: {get_attr: [ControllerServiceChain, role_data]}
-
- ComputeNodesPostDeployment:
- type: OS::TripleO::ComputePostDeployment
- depends_on: [ComputeAllNodesDeployment]
- properties:
- servers: {get_attr: [Compute, attributes, nova_server_resource]}
- RoleData: {get_attr: [ComputeServiceChain, role_data]}
-
- ObjectStorageNodesPostDeployment:
- type: OS::TripleO::ObjectStoragePostDeployment
- depends_on: [ObjectStorageSwiftDeployment, ObjectStorageAllNodesDeployment]
- properties:
- servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
- RoleData: {get_attr: [ObjectStorageServiceChain, role_data]}
-
- BlockStorageNodesPostDeployment:
- type: OS::TripleO::BlockStoragePostDeployment
- depends_on: [ControllerNodesPostDeployment, BlockStorageAllNodesDeployment]
- properties:
- servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
- RoleData: {get_attr: [BlockStorageServiceChain, role_data]}
-
- CephStorageNodesPostDeployment:
- type: OS::TripleO::CephStoragePostDeployment
- depends_on: [ControllerNodesPostDeployment, CephStorageAllNodesDeployment]
- properties:
- servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
- RoleData: {get_attr: [CephStorageServiceChain, role_data]}
-
-outputs:
- ManagedEndpoints:
- description: Asserts that the keystone endpoints have been provisioned.
- value: true
- KeystoneURL:
- description: URL for the Overcloud Keystone service
- value: {get_attr: [EndpointMap, endpoint_map, KeystonePublic, uri]}
- KeystoneAdminVip:
- description: Keystone Admin VIP endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
- PublicVip:
- description: Controller VIP for public API endpoints
- value: {get_attr: [VipMap, net_ip_map, external]}
- AodhInternalVip:
- description: VIP for Aodh API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, AodhApiNetwork]}]}
- CeilometerInternalVip:
- description: VIP for Ceilometer API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CeilometerApiNetwork]}]}
- CinderInternalVip:
- description: VIP for Cinder API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CinderApiNetwork]}]}
- GlanceInternalVip:
- description: VIP for Glance API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GlanceApiNetwork]}]}
- GnocchiInternalVip:
- description: VIP for Gnocchi API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GnocchiApiNetwork]}]}
- HeatInternalVip:
- description: VIP for Heat API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, HeatApiNetwork]}]}
- IronicInternalVip:
- description: VIP for Ironic API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, IronicApiNetwork]}]}
- KeystoneInternalVip:
- description: VIP for Keystone API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystonePublicApiNetwork]}]}
- ManilaInternalVip:
- description: VIP for Manila API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, ManilaApiNetwork]}]}
- NeutronInternalVip:
- description: VIP for Neutron API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NeutronApiNetwork]}]}
- NovaInternalVip:
- description: VIP for Nova API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NovaApiNetwork]}]}
- OpenDaylightInternalVip:
- description: VIP for OpenDaylight API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, OpenDaylightApiNetwork]}]}
- SaharaInternalVip:
- description: VIP for Sahara API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SaharaApiNetwork]}]}
- SwiftInternalVip:
- description: VIP for Swift Proxy internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SwiftProxyNetwork]}]}
- EndpointMap:
- description: |
- Mapping of the resources with the needed info for their endpoints.
- This includes the protocol used, the IP, port and also a full
- representation of the URI.
- value: {get_attr: [EndpointMap, endpoint_map]}
- HostsEntry:
- description: |
- The content that should be appended to your /etc/hosts if you want to get
- hostname-based access to the deployed nodes (useful for testing without
- setting up a DNS).
- value: {get_attr: [allNodesConfig, hosts_entries]}
- EnabledServices:
- description: The services enabled on each role
- value:
- Controller: {get_attr: [ControllerServiceChain, role_data, service_names]}
- Compute: {get_attr: [ComputeServiceChain, role_data, service_names]}
- BlockStorage: {get_attr: [BlockStorageServiceChain, role_data, service_names]}
- ObjectStorage: {get_attr: [ObjectStorageServiceChain, role_data, service_names]}
- CephStorage: {get_attr: [CephStorageServiceChain, role_data, service_names]}
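The VIP outputs above all use the same two-level lookup: ServiceNetMap first resolves a service to its network name, and that name then indexes the per-network VIP map. A minimal standalone sketch of the pattern, using plain parameters with illustrative defaults in place of the real ServiceNetMap/VipMap resources:

heat_template_version: 2016-10-14
description: >
  Minimal sketch of the two-level VIP lookup (illustrative defaults only).
parameters:
  ServiceNetMap:
    type: json
    default:
      keystone_admin_api_network: internal_api
  NetVipMap:
    type: json
    default:
      internal_api: 172.16.2.10
      external: 10.0.0.4
outputs:
  keystone_admin_api_vip:
    # Inner get_param resolves the service to "internal_api";
    # the outer get_param then returns that network's VIP.
    value: {get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_admin_api_network]}]}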
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 644c1938..cc5e4eac 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -2,32 +2,46 @@ heat_template_version: 2016-10-14
description: 'All Nodes Config for Puppet'
parameters:
- hosts:
- type: comma_delimited_list
+ cloud_name_external:
+ type: string
+ cloud_name_internal_api:
+ type: string
+ cloud_name_storage:
+ type: string
+ cloud_name_storage_mgmt:
+ type: string
+ cloud_name_ctlplane:
+ type: string
# FIXME(shardy) this can be comma_delimited_list when
# https://bugs.launchpad.net/heat/+bug/1617019 is fixed
enabled_services:
type: string
controller_ips:
type: comma_delimited_list
+ logging_groups:
+ type: json
+ logging_sources:
+ type: json
service_ips:
type: json
+ service_node_names:
+ type: json
+ short_service_node_names:
+ type: json
controller_names:
type: comma_delimited_list
- rabbit_node_ips:
- type: comma_delimited_list
memcache_node_ips:
type: comma_delimited_list
- keystone_public_api_node_ips:
- type: comma_delimited_list
- keystone_admin_api_node_ips:
- type: comma_delimited_list
- ceph_mon_node_ips:
- type: comma_delimited_list
- ceph_mon_node_names:
- type: comma_delimited_list
+ NetVipMap:
+ type: json
+ RedisVirtualIP:
+ type: string
+ default: ''
+ ServiceNetMap:
+ type: json
DeployIdentifier:
type: string
+ default: ''
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
@@ -42,6 +56,15 @@ parameters:
       Heat action performed on the top-level stack.
constraints:
- allowed_values: ['CREATE', 'UPDATE']
+ # NOTE(jaosorior): This is being set as IPA as it's the first
+ # CA we'll actually be testing out. But we can change this if
+ # people request it.
+ CertmongerCA:
+ type: string
+ default: 'IPA'
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
@@ -50,10 +73,6 @@ resources:
properties:
group: os-apply-config
config:
- hosts:
- list_join:
- - "\n"
- - {get_param: hosts}
hiera:
datafiles:
bootstrap_node:
@@ -63,6 +82,8 @@ resources:
all_nodes:
mapped_data:
map_merge:
+ - tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: logging_sources}
+ - tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: logging_groups}
- enabled_services: {get_param: enabled_services}
# This writes out a mapping of service_name_enabled: 'true'
# For any services not enabled, hiera foo_enabled will
@@ -76,8 +97,37 @@ resources:
for_each:
SERVICE:
str_split: [',', {get_param: enabled_services}]
+ # Dynamically generate per-service network data
+ # This works as follows (outer->inner functions)
+ # yaql - filters services where no mapping exists in ServiceNetMap
+      # map_replace: substitutes e.g. heat_api_network with the network name from ServiceNetMap
+ # map_merge/repeat: generate a per-service mapping
+ - yaql:
+          # This filters out any entry whose value was not substituted, i.e. it
+          # is still the literal SERVICE_network placeholder. That happens when the
+          # service has no network defined in the ServiceNetMap, which is OK as not
+          # all services have to be bound to a network, so we drop those entries
+ expression: dict($.data.map.items().where(isString($[1]) and not $[1].endsWith("_network")))
+ data:
+ map:
+ map_replace:
+ - map_merge:
+ repeat:
+ template:
+ SERVICE_network: SERVICE_network
+ for_each:
+ SERVICE:
+ str_split: [',', {get_param: enabled_services}]
+ - values: {get_param: ServiceNetMap}
+ # Keystone doesn't provide separate entries for the public
+ # and admin endpoints, so we need to add them here manually
+ # like we do in the vip-config below
+ - keystone_admin_api_network: {get_param: [ServiceNetMap, keystone_admin_api_network]}
+ keystone_public_api_network: {get_param: [ServiceNetMap, keystone_public_api_network]}
# provides a mapping of service_name_ips to a list of IPs
- {get_param: service_ips}
+ - {get_param: service_node_names}
+ - {get_param: short_service_node_names}
- controller_node_ips:
list_join:
- ','
@@ -86,18 +136,6 @@ resources:
list_join:
- ','
- {get_param: controller_names}
- galera_node_names:
- list_join:
- - ','
- - {get_param: controller_names}
- rabbitmq_node_ips: &rabbit_nodes_array
- str_replace:
- template: "['SERVERS_LIST']"
- params:
- SERVERS_LIST:
- list_join:
- - "','"
- - {get_param: rabbit_node_ips}
memcached_node_ips_v6:
str_replace:
template: "['inet6:[SERVERS_LIST]']"
@@ -106,64 +144,70 @@ resources:
list_join:
- "]','inet6:["
- {get_param: memcache_node_ips}
- keystone_public_api_node_ips:
- str_replace:
- template: "['SERVERS_LIST']"
- params:
- SERVERS_LIST:
- list_join:
- - "','"
- - {get_param: keystone_public_api_node_ips}
- keystone_admin_api_node_ips:
- str_replace:
- template: "['SERVERS_LIST']"
- params:
- SERVERS_LIST:
- list_join:
- - "','"
- - {get_param: keystone_admin_api_node_ips}
- tripleo::profile::base::ceph::ceph_mon_initial_members:
- list_join:
- - ','
- - {get_param: ceph_mon_node_names}
- tripleo::profile::base::ceph::ceph_mon_host:
- list_join:
- - ','
- - {get_param: ceph_mon_node_ips}
- tripleo::profile::base::ceph::ceph_mon_host_v6:
- str_replace:
- template: "'[IPS_LIST]'"
- params:
- IPS_LIST:
- list_join:
- - '],['
- - {get_param: ceph_mon_node_ips}
- # NOTE(gfidente): interpolation with %{} in the
- # hieradata file can't be used as it returns string
- ceilometer::rabbit_hosts: *rabbit_nodes_array
- aodh::rabbit_hosts: *rabbit_nodes_array
- cinder::rabbit_hosts: *rabbit_nodes_array
- glance::notify::rabbitmq::rabbit_hosts: *rabbit_nodes_array
- manila::rabbit_hosts: *rabbit_nodes_array
- heat::rabbit_hosts: *rabbit_nodes_array
- neutron::rabbit_hosts: *rabbit_nodes_array
- nova::rabbit_hosts: *rabbit_nodes_array
- keystone::rabbit_hosts: *rabbit_nodes_array
- sahara::rabbit_hosts: *rabbit_nodes_array
- ironic::rabbit_hosts: *rabbit_nodes_array
deploy_identifier: {get_param: DeployIdentifier}
update_identifier: {get_param: UpdateIdentifier}
stack_action: {get_param: StackAction}
+ vip_data:
+ mapped_data:
+ map_merge:
+ # Dynamically generate per-service VIP data based on enabled_services
+ # This works as follows (outer->inner functions)
+ # yaql - filters services where no mapping exists in ServiceNetMap
+            # map_replace: substitutes e.g. internal_api with the IP from NetVipMap
+            # map_replace: substitutes e.g. heat_api_network with the network name from ServiceNetMap
+ # map_merge/repeat: generate a per-service mapping
+ - yaql:
+                # This filters out any entry whose value was not substituted, i.e. it
+                # is still the literal SERVICE_network placeholder. That happens when the
+                # service has no network defined in the ServiceNetMap, which is OK as not
+                # all services have to be bound to a network, so we drop those entries
+ expression: dict($.data.map.items().where(isString($[1]) and not $[1].endsWith("_network")))
+ data:
+ map:
+ map_replace:
+ - map_replace:
+ - map_merge:
+ repeat:
+ template:
+ SERVICE_vip: SERVICE_network
+ for_each:
+ SERVICE:
+ str_split: [',', {get_param: enabled_services}]
+ - values: {get_param: ServiceNetMap}
+ - values: {get_param: NetVipMap}
+ - keystone_admin_api_vip:
+ get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_admin_api_network]}]
+ keystone_public_api_vip:
+ get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_public_api_network]}]
+ public_virtual_ip: {get_param: [NetVipMap, external]}
+ controller_virtual_ip: {get_param: [NetVipMap, ctlplane]}
+ internal_api_virtual_ip: {get_param: [NetVipMap, internal_api]}
+ storage_virtual_ip: {get_param: [NetVipMap, storage]}
+ storage_mgmt_virtual_ip: {get_param: [NetVipMap, storage_mgmt]}
+ redis_vip: {get_param: RedisVirtualIP}
+ # public_virtual_ip and controller_virtual_ip are needed in
+ # both HAproxy & keepalived.
+ tripleo::haproxy::public_virtual_ip: {get_param: [NetVipMap, external]}
+ tripleo::haproxy::controller_virtual_ip: {get_param: [NetVipMap, ctlplane]}
+ tripleo::keepalived::public_virtual_ip: {get_param: [NetVipMap, external]}
+ tripleo::keepalived::controller_virtual_ip: {get_param: [NetVipMap, ctlplane]}
+ tripleo::keepalived::internal_api_virtual_ip: {get_param: [NetVipMap, internal_api]}
+ tripleo::keepalived::storage_virtual_ip: {get_param: [NetVipMap, storage]}
+ tripleo::keepalived::storage_mgmt_virtual_ip: {get_param: [NetVipMap, storage_mgmt]}
+ tripleo::keepalived::redis_virtual_ip: {get_param: RedisVirtualIP}
+ tripleo::redis_notification::haproxy_monitor_ip: {get_param: [NetVipMap, ctlplane]}
+ cloud_name_external: {get_param: cloud_name_external}
+ cloud_name_internal_api: {get_param: cloud_name_internal_api}
+ cloud_name_storage: {get_param: cloud_name_storage}
+ cloud_name_storage_mgmt: {get_param: cloud_name_storage_mgmt}
+ cloud_name_ctlplane: {get_param: cloud_name_ctlplane}
+ # TLS parameters
+ certmonger_ca: {get_param: CertmongerCA}
+ enable_internal_tls: {get_param: EnableInternalTLS}
outputs:
config_id:
description: The ID of the allNodesConfigImpl resource.
value:
{get_resource: allNodesConfigImpl}
- hosts_entries:
- description: |
- The content that should be appended to your /etc/hosts if you want to get
- hostname-based access to the deployed nodes (useful for testing without
- setting up a DNS).
- value: {get_attr: [allNodesConfigImpl, config, hosts]}
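The per-service <service>_network and <service>_vip hiera entries above are generated rather than listed by hand. A standalone sketch of the same map_merge/repeat -> map_replace -> yaql pipeline, with illustrative parameter defaults (the vip_data case simply adds a second map_replace against NetVipMap):

heat_template_version: 2016-10-14
parameters:
  enabled_services:
    type: string
    default: 'heat_api,ceph_mon'
  ServiceNetMap:
    type: json
    default:
      heat_api_network: internal_api
outputs:
  per_service_networks:
    # Expected result: {"heat_api_network": "internal_api"}; ceph_mon has no
    # ServiceNetMap entry, so its value still ends in "_network" and is dropped.
    value:
      yaql:
        expression: dict($.data.map.items().where(isString($[1]) and not $[1].endsWith("_network")))
        data:
          map:
            map_replace:
              - map_merge:
                  repeat:
                    template:
                      SERVICE_network: SERVICE_network
                    for_each:
                      SERVICE:
                        str_split: [',', {get_param: enabled_services}]
              - values: {get_param: ServiceNetMap}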
diff --git a/puppet/cinder-storage.yaml b/puppet/blockstorage-role.yaml
index ef3f08ff..8b695fff 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/blockstorage-role.yaml
@@ -30,14 +30,6 @@ parameters:
default: default
description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
- SnmpdReadonlyUserName:
- default: ro_snmp_user
- description: The user name for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- SnmpdReadonlyUserPassword:
- description: The user password for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- hidden: true
UpdateIdentifier:
default: ''
type: string
@@ -56,6 +48,11 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
NetworkDeploymentActions:
type: comma_delimited_list
description: >
@@ -92,6 +89,9 @@ parameters:
ServiceNames:
type: comma_delimited_list
default: []
+ MonitoringSubscriptions:
+ type: comma_delimited_list
+ default: []
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
@@ -232,8 +232,6 @@ resources:
server: {get_resource: BlockStorage}
config: {get_resource: BlockStorageConfig}
input_values:
- snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
- snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
# Map heat metadata into hiera datafiles
@@ -251,13 +249,16 @@ resources:
- service_names
- service_configs
- volume
+ - bootstrap_node # provided by allNodesConfig
- all_nodes # provided by allNodesConfig
+ - vip_data # provided by allNodesConfig
- '"%{::osfamily}"'
merge_behavior: deeper
datafiles:
service_names:
mapped_data:
service_names: {get_param: ServiceNames}
+ sensu::subscriptions: {get_param: MonitoringSubscriptions}
service_configs:
mapped_data:
map_replace:
@@ -270,8 +271,6 @@ resources:
volume:
mapped_data:
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
- snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
- snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -308,6 +307,51 @@ outputs:
hostname:
description: Hostname of the server
value: {get_attr: [BlockStorage, name]}
+ hostname_map:
+ description: Mapping of network names to hostnames
+ value:
+ external:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - external
+ - {get_param: CloudDomain}
+ internal_api:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ storage:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - storage
+ - {get_param: CloudDomain}
+ storage_mgmt:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ tenant:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ management:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - management
+ - {get_param: CloudDomain}
+ ctlplane:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
hosts_entry:
value:
str_replace:
@@ -319,6 +363,7 @@ outputs:
STORAGE_MGMTIP STORAGE_MGMTHOST.DOMAIN STORAGE_MGMTHOST
TENANTIP TENANTHOST.DOMAIN TENANTHOST
MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
+ CTLPLANEIP CTLPLANEHOST.DOMAIN CTLPLANEHOST
params:
PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]}
DOMAIN: {get_param: CloudDomain}
@@ -359,6 +404,12 @@ outputs:
- '.'
- - {get_attr: [BlockStorage, name]}
- management
+ CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - ctlplane
nova_server_resource:
description: Heat resource handle for the block storage server
value:
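With the CTLPLANE additions above, each role's hosts_entry gains one more line in the usual IP / FQDN / short-name form. Rendered with made-up values (node name overcloud-blockstorage-0, CloudDomain localdomain, ctlplane address 192.0.2.13) the new line would read:

192.0.2.13 overcloud-blockstorage-0.ctlplane.localdomain overcloud-blockstorage-0.ctlplane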
diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml
deleted file mode 100644
index a83e0cfe..00000000
--- a/puppet/ceph-storage-post.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
- OpenStack ceph storage node post deployment for Puppet
-
-parameters:
- ConfigDebug:
- default: false
- description: Whether to run config management (e.g. Puppet) in debug mode.
- type: boolean
- servers:
- type: json
- RoleData:
- type: json
- default: {}
- DeployIdentifier:
- type: string
- description: Value which changes if the node configuration may need to be re-applied
-
-resources:
-
- CephStorageArtifactsConfig:
- type: deploy-artifacts.yaml
-
- CephStorageArtifactsDeploy:
- type: OS::Heat::StructuredDeployments
- properties:
- servers: {get_param: servers}
- config: {get_resource: CephStorageArtifactsConfig}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- CephStoragePuppetConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- options:
- enable_debug: {get_param: ConfigDebug}
- enable_hiera: True
- enable_facter: False
- modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
- inputs:
- - name: step
- outputs:
- - name: result
- config:
- list_join:
- - ''
- - - get_file: manifests/overcloud_cephstorage.pp
- - {get_param: [RoleData, step_config]}
-
- CephStorageDeployment_Step2:
- type: OS::Heat::StructuredDeployments
- depends_on: CephStorageArtifactsDeploy
- properties:
- name: CephStorageDeployment_Step2
- servers: {get_param: servers}
- config: {get_resource: CephStoragePuppetConfig}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
-
- CephStorageDeployment_Step3:
- type: OS::Heat::StructuredDeployments
- depends_on: CephStorageDeployment_Step2
- properties:
- name: CephStorageDeployment_Step3
- servers: {get_param: servers}
- config: {get_resource: CephStoragePuppetConfig}
- input_values:
- step: 3
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ExtraConfig:
- depends_on: CephStorageDeployment_Step3
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: servers}
diff --git a/puppet/ceph-storage.yaml b/puppet/cephstorage-role.yaml
index 829456b5..55b26336 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -27,6 +27,11 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
UpdateIdentifier:
default: ''
type: string
@@ -90,6 +95,9 @@ parameters:
ServiceNames:
type: comma_delimited_list
default: []
+ MonitoringSubscriptions:
+ type: comma_delimited_list
+ default: []
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
@@ -245,13 +253,16 @@ resources:
- extraconfig
- service_names
- service_configs
+ - bootstrap_node # provided by allNodesConfig
- all_nodes # provided by allNodesConfig
+ - vip_data # provided by allNodesConfig
- '"%{::osfamily}"'
merge_behavior: deeper
datafiles:
service_names:
mapped_data:
service_names: {get_param: ServiceNames}
+ sensu::subscriptions: {get_param: MonitoringSubscriptions}
service_configs:
mapped_data:
map_replace:
@@ -306,6 +317,51 @@ outputs:
hostname:
description: Hostname of the server
value: {get_attr: [CephStorage, name]}
+ hostname_map:
+ description: Mapping of network names to hostnames
+ value:
+ external:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - external
+ - {get_param: CloudDomain}
+ internal_api:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ storage:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - storage
+ - {get_param: CloudDomain}
+ storage_mgmt:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ tenant:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ management:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - management
+ - {get_param: CloudDomain}
+ ctlplane:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
hosts_entry:
value:
str_replace:
@@ -317,6 +373,7 @@ outputs:
STORAGE_MGMTIP STORAGE_MGMTHOST.DOMAIN STORAGE_MGMTHOST
TENANTIP TENANTHOST.DOMAIN TENANTHOST
MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
+ CTLPLANEIP CTLPLANEHOST.DOMAIN CTLPLANEHOST
params:
PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]}
DOMAIN: {get_param: CloudDomain}
@@ -357,6 +414,12 @@ outputs:
- '.'
- - {get_attr: [CephStorage, name]}
- management
+ CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - ctlplane
nova_server_resource:
description: Heat resource handle for the ceph storage server
value:
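The hostname_map output introduced for each role resolves to one FQDN per network, of the form <node name>.<network>.<CloudDomain>. For a node named overcloud-cephstorage-0 with CloudDomain set to localdomain (both values illustrative) it would contain:

external: overcloud-cephstorage-0.external.localdomain
internal_api: overcloud-cephstorage-0.internalapi.localdomain
storage: overcloud-cephstorage-0.storage.localdomain
storage_mgmt: overcloud-cephstorage-0.storagemgmt.localdomain
tenant: overcloud-cephstorage-0.tenant.localdomain
management: overcloud-cephstorage-0.management.localdomain
ctlplane: overcloud-cephstorage-0.ctlplane.localdomain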
diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml
deleted file mode 100644
index 6416c43e..00000000
--- a/puppet/cinder-storage-post.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'OpenStack cinder storage post deployment for Puppet'
-
-parameters:
- ConfigDebug:
- default: false
- description: Whether to run config management (e.g. Puppet) in debug mode.
- type: boolean
- servers:
- type: json
- DeployIdentifier:
- type: string
- description: Value which changes if the node configuration may need to be re-applied
- RoleData:
- type: json
- default: {}
-
-resources:
-
- VolumeArtifactsConfig:
- type: deploy-artifacts.yaml
-
- VolumeArtifactsDeploy:
- type: OS::Heat::StructuredDeployments
- properties:
- servers: {get_param: servers}
- config: {get_resource: VolumeArtifactsConfig}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- VolumePuppetConfig:
- type: OS::Heat::SoftwareConfig
- depends_on: VolumeArtifactsDeploy
- properties:
- group: puppet
- options:
- enable_debug: {get_param: ConfigDebug}
- enable_hiera: True
- enable_facter: False
- modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
- inputs:
- - name: step
- outputs:
- - name: result
- config:
- list_join:
- - ''
- - - get_file: manifests/overcloud_volume.pp
- - {get_param: [RoleData, step_config]}
-
- VolumeDeployment_Step2:
- type: OS::Heat::StructuredDeployments
- depends_on: VolumeArtifactsDeploy
- properties:
- name: VolumeDeployment_Step2
- servers: {get_param: servers}
- config: {get_resource: VolumePuppetConfig}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
-
- VolumeDeployment_Step3:
- type: OS::Heat::StructuredDeployments
- depends_on: VolumeDeployment_Step2
- properties:
- name: VolumeDeployment_Step3
- servers: {get_param: servers}
- config: {get_resource: VolumePuppetConfig}
- input_values:
- step: 3
- update_identifier: {get_param: DeployIdentifier}
-
- VolumeDeployment_Step4:
- type: OS::Heat::StructuredDeployments
- depends_on: VolumeDeployment_Step3
- properties:
- name: VolumeDeployment_Step4
- servers: {get_param: servers}
- config: {get_resource: VolumePuppetConfig}
- input_values:
- step: 4
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ExtraConfig:
- depends_on: VolumeDeployment_Step4
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: servers}
diff --git a/puppet/compute-post.yaml b/puppet/compute-post.yaml
deleted file mode 100644
index d0c6082c..00000000
--- a/puppet/compute-post.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
- OpenStack compute node post deployment for Puppet.
-
-parameters:
- ConfigDebug:
- default: false
- description: Whether to run config management (e.g. Puppet) in debug mode.
- type: boolean
- servers:
- type: json
- RoleData:
- type: json
- default: {}
- DeployIdentifier:
- type: string
- description: Value which changes if the node configuration may need to be re-applied
-
-resources:
-
- ComputeArtifactsConfig:
- type: deploy-artifacts.yaml
-
- ComputeArtifactsDeploy:
- type: OS::Heat::StructuredDeployments
- properties:
- servers: {get_param: servers}
- config: {get_resource: ComputeArtifactsConfig}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- ComputePuppetConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- options:
- enable_debug: {get_param: ConfigDebug}
- enable_hiera: True
- enable_facter: False
- modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
- inputs:
- - name: step
- outputs:
- - name: result
- config:
- list_join:
- - ''
- - - get_file: manifests/overcloud_compute.pp
- - {get_param: [RoleData, step_config]}
-
- ComputeServicesBaseDeployment_Step2:
- type: OS::Heat::StructuredDeployments
- depends_on: [ComputeArtifactsDeploy]
- properties:
- name: ComputeServicesBaseDeployment_Step2
- servers: {get_param: servers}
- config: {get_resource: ComputePuppetConfig}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
-
- ComputeOvercloudServicesDeployment_Step3:
- type: OS::Heat::StructuredDeployments
- depends_on: ComputeServicesBaseDeployment_Step2
- properties:
- name: ComputeOvercloudServicesDeployment_Step3
- servers: {get_param: servers}
- config: {get_resource: ComputePuppetConfig}
- input_values:
- step: 3
- update_identifier: {get_param: DeployIdentifier}
-
- ComputeOvercloudServicesDeployment_Step4:
- type: OS::Heat::StructuredDeployments
- depends_on: ComputeOvercloudServicesDeployment_Step3
- properties:
- name: ComputeOvercloudServicesDeployment_Step4
- servers: {get_param: servers}
- config: {get_resource: ComputePuppetConfig}
- input_values:
- step: 4
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ExtraConfig:
- depends_on: ComputeOvercloudServicesDeployment_Step4
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: servers}
-
diff --git a/puppet/compute.yaml b/puppet/compute-role.yaml
index 1790aa0d..4d77d6d3 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute-role.yaml
@@ -107,6 +107,9 @@ parameters:
ServiceNames:
type: comma_delimited_list
default: []
+ MonitoringSubscriptions:
+ type: comma_delimited_list
+ default: []
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
@@ -258,7 +261,9 @@ resources:
- service_names
- service_configs
- compute
+ - bootstrap_node # provided by allNodesConfig
- all_nodes # provided by allNodesConfig
+ - vip_data # provided by allNodesConfig
- '"%{::osfamily}"'
- neutron_bigswitch_data # Optionally provided by ComputeExtraConfigPre
- cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre
@@ -271,6 +276,7 @@ resources:
service_names:
mapped_data:
service_names: {get_param: ServiceNames}
+ sensu::subscriptions: {get_param: MonitoringSubscriptions}
service_configs:
mapped_data:
map_replace:
@@ -354,6 +360,51 @@ outputs:
hostname:
description: Hostname of the server
value: {get_attr: [NovaCompute, name]}
+ hostname_map:
+ description: Mapping of network names to hostnames
+ value:
+ external:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - external
+ - {get_param: CloudDomain}
+ internal_api:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ storage:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - storage
+ - {get_param: CloudDomain}
+ storage_mgmt:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ tenant:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ management:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - management
+ - {get_param: CloudDomain}
+ ctlplane:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
hosts_entry:
description: >
Server's IP address and hostname in the /etc/hosts format
@@ -367,6 +418,7 @@ outputs:
STORAGE_MGMTIP STORAGE_MGMTHOST.DOMAIN STORAGE_MGMTHOST
TENANTIP TENANTHOST.DOMAIN TENANTHOST
MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
+ CTLPLANEIP CTLPLANEHOST.DOMAIN CTLPLANEHOST
params:
PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]}
DOMAIN: {get_param: CloudDomain}
@@ -407,6 +459,12 @@ outputs:
- '.'
- - {get_attr: [NovaCompute, name]}
- management
+ CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ CTLPLANEHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - ctlplane
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
diff --git a/puppet/controller-config.yaml b/puppet/config.role.j2.yaml
index 811c544d..e59a0216 100644
--- a/puppet/controller-config.yaml
+++ b/puppet/config.role.j2.yaml
@@ -1,7 +1,7 @@
heat_template_version: 2015-04-30
description: >
- A software config which runs manifests/overcloud_controller.pp
+ A software config which runs puppet on the {{role}} role
parameters:
ConfigDebug:
@@ -15,7 +15,7 @@ parameters:
resources:
- ControllerPuppetConfigImpl:
+ {{role}}PuppetConfigImpl:
type: OS::Heat::SoftwareConfig
properties:
group: puppet
@@ -26,13 +26,19 @@ resources:
modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
outputs:
- name: result
+ inputs:
+ - name: step
+ type: Number
config:
list_join:
- ''
- - - get_file: manifests/overcloud_controller.pp
+ - - str_replace:
+ template: {get_file: manifests/overcloud_role.pp}
+ params:
+ __ROLE__: {{role.lower()}}
- {get_param: StepConfig}
outputs:
OS::stack_id:
- description: The software config which runs overcloud_controller.pp
- value: {get_resource: ControllerPuppetConfigImpl}
+ description: The software config which runs puppet on the {{role}} role
+ value: {get_resource: {{role}}PuppetConfigImpl}
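Because config.role.j2.yaml is rendered once per role by the jinja2 pass, the generated template for a given role — CephStorage is used here purely as an example, and the parameter and options sections are abbreviated — comes out roughly as:

heat_template_version: 2015-04-30
description: >
  A software config which runs puppet on the CephStorage role
parameters:
  StepConfig:
    type: string
    default: ''
resources:
  CephStoragePuppetConfigImpl:
    type: OS::Heat::SoftwareConfig
    properties:
      group: puppet
      inputs:
        - name: step
          type: Number
      outputs:
        - name: result
      config:
        list_join:
          - ''
          - - str_replace:
                template: {get_file: manifests/overcloud_role.pp}
                params:
                  __ROLE__: cephstorage
            - {get_param: StepConfig}
outputs:
  OS::stack_id:
    description: The software config which runs puppet on the CephStorage role
    value: {get_resource: CephStoragePuppetConfigImpl}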
diff --git a/puppet/controller-config-pacemaker.yaml b/puppet/controller-config-pacemaker.yaml
index b313f5de..24f31dc8 100644
--- a/puppet/controller-config-pacemaker.yaml
+++ b/puppet/controller-config-pacemaker.yaml
@@ -26,6 +26,9 @@ resources:
modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
outputs:
- name: result
+ inputs:
+ - name: step
+ type: Number
config:
list_join:
- ''
diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml
deleted file mode 100644
index 4af6cb46..00000000
--- a/puppet/controller-post.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
- OpenStack controller node post deployment for Puppet.
-
-parameters:
- ConfigDebug:
- default: false
- description: Whether to run config management (e.g. Puppet) in debug mode.
- type: boolean
- servers:
- type: json
- RoleData:
- type: json
- default: {}
- DeployIdentifier:
- type: string
- description: Value which changes if the node configuration may need to be re-applied
-
-resources:
-
- ControllerArtifactsConfig:
- type: deploy-artifacts.yaml
-
- ControllerArtifactsDeploy:
- type: OS::Heat::StructuredDeployments
- properties:
- servers: {get_param: servers}
- config: {get_resource: ControllerArtifactsConfig}
-
- ControllerPrePuppet:
- type: OS::TripleO::Tasks::ControllerPrePuppet
- properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- ControllerPuppetConfig:
- type: OS::TripleO::ControllerConfig
- properties:
- StepConfig: {get_param: [RoleData, step_config]}
-
- # Step through a series of Puppet runs using the same manifest.
- # NOTE: To enable stepping through the deployments via heat hooks,
- # you must observe the glob naming defined in overcloud-steps.yaml
- # e.g all Deployment resources should have a *Deployment_StepN suffix
- ControllerLoadBalancerDeployment_Step1:
- type: OS::Heat::StructuredDeployments
- depends_on: [ControllerPrePuppet, ControllerArtifactsDeploy]
- properties:
- name: ControllerLoadBalancerDeployment_Step1
- servers: {get_param: servers}
- config: {get_resource: ControllerPuppetConfig}
- input_values:
- step: 1
- update_identifier: {get_param: DeployIdentifier}
-
- ControllerServicesBaseDeployment_Step2:
- type: OS::Heat::StructuredDeployments
- depends_on: ControllerLoadBalancerDeployment_Step1
- properties:
- name: ControllerServicesBaseDeployment_Step2
- servers: {get_param: servers}
- config: {get_resource: ControllerPuppetConfig}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
-
- ControllerOvercloudServicesDeployment_Step3:
- type: OS::Heat::StructuredDeployments
- depends_on: ControllerServicesBaseDeployment_Step2
- properties:
- name: ControllerOvercloudServicesDeployment_Step3
- servers: {get_param: servers}
- config: {get_resource: ControllerPuppetConfig}
- input_values:
- step: 3
- update_identifier: {get_param: DeployIdentifier}
-
- ControllerOvercloudServicesDeployment_Step4:
- type: OS::Heat::StructuredDeployments
- depends_on: ControllerOvercloudServicesDeployment_Step3
- properties:
- name: ControllerOvercloudServicesDeployment_Step4
- servers: {get_param: servers}
- config: {get_resource: ControllerPuppetConfig}
- input_values:
- step: 4
- update_identifier: {get_param: DeployIdentifier}
-
- ControllerOvercloudServicesDeployment_Step5:
- type: OS::Heat::StructuredDeployments
- depends_on: ControllerOvercloudServicesDeployment_Step4
- properties:
- name: ControllerOvercloudServicesDeployment_Step5
- servers: {get_param: servers}
- config: {get_resource: ControllerPuppetConfig}
- input_values:
- step: 5
- update_identifier: {get_param: DeployIdentifier}
-
- ControllerPostPuppet:
- type: OS::TripleO::Tasks::ControllerPostPuppet
- depends_on: ControllerOvercloudServicesDeployment_Step5
- properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ExtraConfig:
- depends_on: ControllerPostPuppet
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: servers}
diff --git a/puppet/controller.yaml b/puppet/controller-role.yaml
index 28fd08da..b1433b04 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller-role.yaml
@@ -23,18 +23,10 @@ parameters:
...
}
type: json
- CorosyncIPv6:
- default: false
- description: Enable IPv6 in Corosync
- type: boolean
Debug:
default: ''
description: Set to True to enable debugging on all services.
type: string
- EnableFencing:
- default: false
- description: Whether to enable fencing in Pacemaker or not.
- type: boolean
EnableLoadBalancer:
default: true
description: Whether to deploy a LoadBalancer on the Controller
@@ -45,48 +37,12 @@ parameters:
Additional hieradata to inject into the cluster, note that
ControllerExtraConfig takes precedence over ExtraConfig.
type: json
- FencingConfig:
- default: {}
- description: |
- Pacemaker fencing configuration. The JSON should have
- the following structure:
- {
- "devices": [
- {
- "agent": "AGENT_NAME",
- "host_mac": "HOST_MAC_ADDRESS",
- "params": {"PARAM_NAME": "PARAM_VALUE"}
- }
- ]
- }
- For instance:
- {
- "devices": [
- {
- "agent": "fence_xvm",
- "host_mac": "52:54:00:aa:bb:cc",
- "params": {
- "multicast_address": "225.0.0.12",
- "port": "baremetal_0",
- "manage_fw": true,
- "manage_key_file": true,
- "key_file": "/etc/fence_xvm.key",
- "key_file_password": "abcdef"
- }
- }
- ]
- }
- type: json
OvercloudControlFlavor:
description: Flavor for control nodes to request when deploying.
default: baremetal
type: string
constraints:
- custom_constraint: nova.flavor
- HorizonSecret:
- description: Secret key for Django
- type: string
- hidden: true
controllerImage:
type: string
default: overcloud-full
@@ -96,76 +52,16 @@ parameters:
default: 'REBUILD_PRESERVE_EPHEMERAL'
description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
type: string
- InstanceNameTemplate:
- default: 'instance-%08x'
- description: Template string to be used to generate instance names
- type: string
KeyName:
default: default
description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
constraints:
- custom_constraint: nova.keypair
- ManageFirewall:
- default: false
- description: Whether to manage IPtables rules.
- type: boolean
- MemcachedIPv6:
- default: false
- description: Enable IPv6 features in Memcached.
- type: boolean
- PurgeFirewallRules:
- default: false
- description: Whether IPtables rules should be purged before setting up the new ones.
- type: boolean
- NeutronMetadataProxySharedSecret:
- description: Shared secret to prevent spoofing
- type: string
- hidden: true
- NeutronPassword:
- description: The password for the neutron service and db account, used by neutron agents.
- type: string
- hidden: true
NeutronPublicInterface:
default: nic1
description: What interface to bridge onto br-ex for network nodes.
type: string
- NovaEnableDBPurge:
- default: true
- description: |
- Whether to create cron job for purging soft deleted rows in Nova database.
- type: boolean
- NovaIPv6:
- default: false
- description: Enable IPv6 features in Nova
- type: boolean
- NovaPassword:
- description: The password for the nova service and db account, used by nova-api.
- type: string
- hidden: true
- PcsdPassword:
- type: string
- description: The password for the 'pcsd' user.
- hidden: true
- RedisPassword:
- description: The password for Redis
- type: string
- hidden: true
- RedisVirtualIP:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- RedisVirtualIPUri:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- description: An IP address which is wrapped in brackets in case of IPv6
- SwiftRawDisks:
- default: {}
- description: 'A hash of additional raw devices to use as Swift backend (eg. {sdb: {}})'
- type: json
- UpgradeLevelNovaCompute:
- type: string
- description: Nova Compute upgrade level
- default: ''
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -225,6 +121,9 @@ parameters:
ServiceNames:
type: comma_delimited_list
default: []
+ MonitoringSubscriptions:
+ type: comma_delimited_list
+ default: []
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
@@ -392,45 +291,8 @@ resources:
server: {get_resource: Controller}
input_values:
bootstack_nodeid: {get_attr: [Controller, name]}
- horizon_secret: {get_param: HorizonSecret}
- debug: {get_param: Debug}
- keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
- keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- keystone_ec2_uri: { get_param: [EndpointMap, KeystoneEC2, uri] }
- enable_fencing: {get_param: EnableFencing}
enable_load_balancer: {get_param: EnableLoadBalancer}
- manage_firewall: {get_param: ManageFirewall}
- purge_firewall_rules: {get_param: PurgeFirewallRules}
- neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
- nova_enable_db_purge: {get_param: NovaEnableDBPurge}
- nova_ipv6: {get_param: NovaIPv6}
- corosync_ipv6: {get_param: CorosyncIPv6}
- memcached_ipv6: {get_param: MemcachedIPv6}
- nova_password: {get_param: NovaPassword}
- upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute}
- instance_name_template: {get_param: InstanceNameTemplate}
- fencing_config: {get_param: FencingConfig}
- pcsd_password: {get_param: PcsdPassword}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
- glance_api_servers: { get_param: [EndpointMap, GlanceInternal, uri]}
- neutron_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
- nova_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
- nova_metadata_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
- horizon_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
- horizon_subnet:
- str_replace:
- template: "['SUBNET']"
- params:
- SUBNET:
- get_attr:
- - NetIpMap
- - net_ip_map
- - str_replace:
- template: "NETWORK_subnet"
- params:
- NETWORK: {get_param: [ServiceNetMap, HorizonNetwork]}
- redis_vip: {get_param: RedisVirtualIP}
- ironic_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, IronicApiNetwork]}]}
# Map heat metadata into hiera datafiles
ControllerConfig:
@@ -447,10 +309,9 @@ resources:
- service_configs
- service_names
- controller
- - swift_devices_and_proxy # provided by SwiftDevicesAndProxyConfig
- bootstrap_node # provided by BootstrapNodeConfig
- all_nodes # provided by allNodesConfig
- - vip_data # provided by vip-config
+ - vip_data # provided by allNodesConfig
- '"%{::osfamily}"'
- cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre
- cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
@@ -465,6 +326,7 @@ resources:
service_names:
mapped_data:
service_names: {get_param: ServiceNames}
+ sensu::subscriptions: {get_param: MonitoringSubscriptions}
service_configs:
mapped_data:
map_replace:
@@ -482,44 +344,9 @@ resources:
bootstack_nodeid: {get_input: bootstack_nodeid}
# Pacemaker
- enable_fencing: {get_input: enable_fencing}
enable_load_balancer: {get_input: enable_load_balancer}
- hacluster_pwd: {get_input: pcsd_password}
- corosync_ipv6: {get_input: corosync_ipv6}
- tripleo::fencing::config: {get_input: fencing_config}
-
- # Neutron
- neutron::bind_host: {get_input: neutron_api_network}
- neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network}
- snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
- snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
-
- # Nova
- nova::upgrade_level_compute: {get_input: upgrade_level_nova_compute}
- nova::use_ipv6: {get_input: nova_ipv6}
- nova::api::api_bind_address: {get_input: nova_api_network}
- nova::api::metadata_listen: {get_input: nova_metadata_network}
- nova::glance_api_servers: {get_input: glance_api_servers}
- nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
- nova::api::instance_name_template: {get_input: instance_name_template}
- nova::vncproxy::host: {get_input: nova_api_network}
- nova_enable_db_purge: {get_input: nova_enable_db_purge}
-
- # Horizon
- apache::mod::remoteip::proxy_ips: {get_input: horizon_subnet}
- apache::ip: {get_input: horizon_network}
- horizon::django_debug: {get_input: debug}
- horizon::secret_key: {get_input: horizon_secret}
- horizon::bind_address: {get_input: horizon_network}
- horizon::keystone_url: {get_input: keystone_auth_uri}
- # Redis
- redis_vip: {get_input: redis_vip}
- # Firewall
- tripleo::firewall::manage_firewall: {get_input: manage_firewall}
- tripleo::firewall::purge_firewall_rules: {get_input: purge_firewall_rules}
# Misc
- memcached_ipv6: {get_input: memcached_ipv6}
tripleo::haproxy::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]}
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
@@ -576,6 +403,51 @@ outputs:
hostname:
description: Hostname of the server
value: {get_attr: [Controller, name]}
+ hostname_map:
+ description: Mapping of network names to hostnames
+ value:
+ external:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - external
+ - {get_param: CloudDomain}
+ internal_api:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ storage:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - storage
+ - {get_param: CloudDomain}
+ storage_mgmt:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ tenant:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ management:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - management
+ - {get_param: CloudDomain}
+ ctlplane:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
hosts_entry:
description: >
Server's IP address and hostname in the /etc/hosts format
@@ -589,6 +461,7 @@ outputs:
STORAGE_MGMTIP STORAGE_MGMTHOST.DOMAIN STORAGE_MGMTHOST
TENANTIP TENANTHOST.DOMAIN TENANTHOST
MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
+ CTLPLANEIP CTLPLANEHOST.DOMAIN CTLPLANEHOST
params:
PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]}
DOMAIN: {get_param: CloudDomain}
@@ -629,45 +502,16 @@ outputs:
- '.'
- - {get_attr: [Controller, name]}
- management
+ CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
+ CTLPLANEHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - ctlplane
nova_server_resource:
     description: Heat resource handle for the Controller server
value:
{get_resource: Controller}
- swift_device:
- description: Swift device formatted for swift-ring-builder
- value:
- str_replace:
- template:
- list_join:
- - ','
- - ['r1z1-IP:%PORT%/d1']
- - repeat:
- template: 'r1z1-IP:%PORT%/DEVICE'
- for_each:
- DEVICE: {get_param: SwiftRawDisks}
- params:
- IP:
- get_attr:
- - NetIpMap
- - net_ip_map
- - str_replace:
- template: "NETWORK_uri"
- params:
- NETWORK: {get_param: [ServiceNetMap, SwiftMgmtNetwork]}
- swift_proxy_memcache:
- description: Swift proxy-memcache value
- value:
- str_replace:
- template: "IP:11211"
- params:
- IP:
- get_attr:
- - NetIpMap
- - net_ip_map
- - str_replace:
- template: "NETWORK_uri"
- params:
- NETWORK: {get_param: [ServiceNetMap, MemcachedNetwork]}
tls_key_modulus_md5:
description: MD5 checksum of the TLS Key Modulus
value: {get_attr: [NodeTLSData, key_modulus_md5]}
diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
index aa5c3c43..6a2ea4d5 100644
--- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
@@ -4,15 +4,7 @@ description: Configure hieradata for all MidoNet nodes
parameters:
# Parameters passed from the parent template
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
EnableZookeeperOnController:
@@ -102,10 +94,10 @@ resources:
type: OS::Heat::StructuredDeploymentGroup
properties:
config: {get_resource: NetworkMidoNetConfig}
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
NetworkMidonetDeploymentComputes:
type: OS::Heat::StructuredDeploymentGroup
properties:
config: {get_resource: NetworkMidoNetConfig}
- servers: {get_param: compute_servers}
+ servers: {get_param: [servers, Compute]}
diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
index e924fc87..7bda0cd5 100644
--- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
@@ -4,15 +4,7 @@ description: Configure hieradata for Network Cisco configuration
parameters:
# Parameters passed from the parent template
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
# extra parameters passed via parameter_defaults
@@ -140,7 +132,7 @@ resources:
properties:
name: NetworkCiscoDeployment
config: {get_resource: NetworkCiscoConfig}
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
input_values:
UCSM_ip: {get_param: NetworkUCSMIp}
UCSM_username: {get_param: NetworkUCSMUsername}
@@ -187,7 +179,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: CollectMacDeploymentsController
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -195,7 +187,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: CollectMacDeploymentsCompute
- servers: {get_param: compute_servers}
+ servers: {get_param: [servers, Compute]}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -203,7 +195,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: CollectMacDeploymentsBlockStorage
- servers: {get_param: blockstorage_servers}
+ servers: {get_param: [servers, BlockStorage]}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -211,7 +203,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: CollectMacDeploymentsObjectStorage
- servers: {get_param: objectstorage_servers}
+ servers: {get_param: [servers, ObjectStorage]}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -219,7 +211,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: CollectMacDeploymentsCephStorage
- servers: {get_param: cephstorage_servers}
+ servers: {get_param: [servers, CephStorage]}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -294,7 +286,7 @@ resources:
type: OS::Heat::SoftwareDeployment
properties:
name: MappingToNexusDeploymentsController
- server: {get_param: [controller_servers, '0']}
+ server: {get_param: [servers, Controller, '0']}
config: {get_resource: MappingToNexusConfig}
input_values:
# FIXME(shardy): It'd be more convenient if we could join these
@@ -338,7 +330,7 @@ resources:
depends_on: MappingToNexusDeploymentsController
properties:
name: MappingToUCSMDeploymentsController
- server: {get_param: [controller_servers, '0']}
+ server: {get_param: [servers, Controller, '0']}
config: {get_resource: MappingToUCSMConfig}
input_values:
ucsm_config: {get_param: NetworkUCSMHostList}
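[Editor's note] The hunks above replace the five per-role *_servers parameters with a single servers JSON map keyed by role name, so {get_param: [servers, Controller]} performs the lookup the old controller_servers parameter used to provide. A small Python sketch of the lookup change, assuming (as the '0' lookup in the hunk suggests) that each per-role value is a map keyed by node index; the IDs are invented.

    # Before: one parameter per role (controller_servers, compute_servers, ...).
    # After: a single 'servers' map keyed by role name.
    servers = {
        "Controller": {"0": "server-uuid-a", "1": "server-uuid-b"},  # illustrative IDs
        "Compute": {"0": "server-uuid-c"},
    }

    controller_servers = servers["Controller"]      # replaces the old controller_servers param
    first_controller = servers["Controller"]["0"]   # analogue of {get_param: [servers, Controller, '0']}
    print(controller_servers, first_controller)
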
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
index e496553a..f5b1f0e6 100644
--- a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
@@ -32,6 +32,18 @@ resources:
contrail::vrouter::provision_vrouter::keystone_admin_tenant_name: admin
contrail::vrouter::provision_vrouter::keystone_admin_password: '"%{::admin_password}"'
+ contrail::vnc_api::vnc_api_config:
+ 'auth/AUTHN_TYPE':
+ value: keystone
+ 'auth/AUTHN_PROTOCOL':
+ value: http
+ 'auth/AUTHN_SERVER':
+ value: "%{hiera('keystone_admin_api_vip')}"
+ 'auth/AUTHN_PORT':
+ value: 35357
+ 'auth/AUTHN_URL':
+ value: '/v2.0/tokens'
+
ComputeContrailDeployment:
type: OS::Heat::StructuredDeployment
properties:
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
index 6ff90881..48446e5a 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
@@ -70,12 +70,22 @@ parameters:
CinderNetappStoragePools:
type: string
default: ''
- CinderNetappEseriesHostType:
+ CinderNetappHostType:
type: string
- default: 'linux_dm_mp'
+ default: ''
CinderNetappWebservicePath:
type: string
default: '/devmgr/v2'
+ # DEPRECATED options for compatibility with older versions
+ CinderNetappEseriesHostType:
+ type: string
+ default: 'linux_dm_mp'
+
+parameter_groups:
+- label: deprecated
+ description: Do not use deprecated params; they will be removed.

+ parameters:
+ - CinderNetappEseriesHostType
resources:
CinderNetappConfig:
@@ -108,7 +118,7 @@ resources:
cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps}
cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword}
cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools}
- cinder::backend::netapp::netapp_eseries_host_type: {get_input: NetappEseriesHostType}
+ cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType}
cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath}
CinderNetappDeployment:
@@ -139,7 +149,7 @@ resources:
NetappControllerIps: {get_param: CinderNetappControllerIps}
NetappSaPassword: {get_param: CinderNetappSaPassword}
NetappStoragePools: {get_param: CinderNetappStoragePools}
- NetappEseriesHostType: {get_param: CinderNetappEseriesHostType}
+ NetappHostType: {get_param: CinderNetappHostType}
NetappWebservicePath: {get_param: CinderNetappWebservicePath}
outputs:
diff --git a/puppet/extraconfig/tls/tls-cert-inject.yaml b/puppet/extraconfig/tls/tls-cert-inject.yaml
index e281ef51..49d84574 100644
--- a/puppet/extraconfig/tls/tls-cert-inject.yaml
+++ b/puppet/extraconfig/tls/tls-cert-inject.yaml
@@ -64,11 +64,9 @@ resources:
| openssl md5 | cut -c 10- \
> ${heat_outputs_path}.key_modulus
# We need to reload haproxy in case the certificate changed because
- # puppet doesn't know the contents of the cert file. The pacemaker
- # case is handled separately in a pacemaker-specific resource.
- pacemaker_status=$(systemctl is-active pacemaker)
+ # puppet doesn't know the contents of the cert file.
haproxy_status=$(systemctl is-active haproxy)
- if [ "$pacemaker_status" != "active" -a "$haproxy_status" = "active"]; then
+ if [ "$haproxy_status" = "active" ]; then
systemctl reload haproxy
fi
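[Editor's note] The script change above drops the pacemaker check and reloads haproxy whenever the unit is active; the removed comment noted the pacemaker case was handled by a separate resource. A hedged Python rendering of the resulting logic, shelling out to systemctl (assumes a systemd host):

    # Sketch of the simplified reload logic from tls-cert-inject.yaml:
    # reload haproxy only if the unit is currently active.
    import subprocess

    def reload_haproxy_if_active() -> None:
        status = subprocess.run(
            ["systemctl", "is-active", "haproxy"],
            capture_output=True, text=True,
        ).stdout.strip()
        if status == "active":
            subprocess.run(["systemctl", "reload", "haproxy"], check=True)

    if __name__ == "__main__":
        reload_haproxy_if_active()
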
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
deleted file mode 100644
index 2653badf..00000000
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-if hiera('step') >= 4 {
- hiera_include('ceph_classes', [])
-}
-
-$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_ceph', hiera('step')])
-package_manifest{$package_manifest_name: ensure => present}
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
deleted file mode 100644
index f96c193c..00000000
--- a/puppet/manifests/overcloud_compute.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-if hiera('step') >= 4 {
- hiera_include('compute_classes', [])
-}
-
-$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_compute', hiera('step')])
-package_manifest{$package_manifest_name: ensure => present}
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
deleted file mode 100644
index 414a06ba..00000000
--- a/puppet/manifests/overcloud_object.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-if hiera('step') >= 4 {
- hiera_include('object_classes', [])
-}
-
-$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_object', hiera('step')])
-package_manifest{$package_manifest_name: ensure => present}
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_role.pp
index 25bdbfb2..1a59620c 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_role.pp
@@ -13,9 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+# The content of this file will be used to generate
+# the puppet manifests for all roles. The placeholder
+# __ROLE__ will be replaced by 'controller', 'blockstorage',
+# 'cephstorage' and all the other deployed roles.
+
if hiera('step') >= 4 {
- hiera_include('controller_classes', [])
+ hiera_include('__ROLE___classes', [])
}
-$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')])
+$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud___ROLE__', hiera('step')])
package_manifest{$package_manifest_name: ensure => present}
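[Editor's note] overcloud_controller.pp becomes a generic overcloud_role.pp whose __ROLE__ placeholder is substituted once per deployed role when the per-role manifests are generated. A minimal Python sketch of that substitution; the role list is an assumption for illustration (the real list comes from roles_data.yaml).

    # Sketch of generating per-role manifests from the generic overcloud_role.pp.
    TEMPLATE = """\
    if hiera('step') >= 4 {
      hiera_include('__ROLE___classes', [])
    }

    $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud___ROLE__', hiera('step')])
    package_manifest{$package_manifest_name: ensure => present}
    """

    for role in ["controller", "compute", "blockstorage", "cephstorage", "objectstorage"]:
        manifest = TEMPLATE.replace("__ROLE__", role)
        print(f"# overcloud_{role}.pp\n{manifest}")
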
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
deleted file mode 100644
index e1cdadd5..00000000
--- a/puppet/manifests/overcloud_volume.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-if hiera('step') >= 4 {
- hiera_include('volume_classes', [])
-}
-
-$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_volume', hiera('step')])
-package_manifest{$package_manifest_name: ensure => present}
diff --git a/puppet/swift-storage.yaml b/puppet/objectstorage-role.yaml
index b933c542..d7681d10 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -27,6 +27,11 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
Hostname:
type: string
default: '' # Defaults to Heat created hostname
@@ -84,14 +89,13 @@ parameters:
ServiceNames:
type: comma_delimited_list
default: []
+ MonitoringSubscriptions:
+ type: comma_delimited_list
+ default: []
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
- SwiftRawDisks:
- default: {}
- description: 'A hash of additional raw devices to use as Swift backend (eg. {sdb: {}})'
- type: json
resources:
@@ -234,14 +238,16 @@ resources:
- service_names
- service_configs
- object
- - swift_devices_and_proxy # provided by SwiftDevicesAndProxyConfig
+ - bootstrap_node # provided by allNodesConfig
- all_nodes # provided by allNodesConfig
+ - vip_data # provided by allNodesConfig
- '"%{::osfamily}"'
merge_behavior: deeper
datafiles:
service_names:
mapped_data:
service_names: {get_param: ServiceNames}
+ sensu::subscriptions: {get_param: MonitoringSubscriptions}
service_configs:
mapped_data:
map_replace:
@@ -300,6 +306,51 @@ outputs:
hostname:
description: Hostname of the server
value: {get_attr: [SwiftStorage, name]}
+ hostname_map:
+ description: Mapping of network names to hostnames
+ value:
+ external:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - external
+ - {get_param: CloudDomain}
+ internal_api:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ storage:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - storage
+ - {get_param: CloudDomain}
+ storage_mgmt:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ tenant:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ management:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - management
+ - {get_param: CloudDomain}
+ ctlplane:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
hosts_entry:
value:
str_replace:
@@ -311,6 +362,7 @@ outputs:
STORAGE_MGMTIP STORAGE_MGMTHOST.DOMAIN STORAGE_MGMTHOST
TENANTIP TENANTHOST.DOMAIN TENANTHOST
MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
+ CTLPLANEIP CTLPLANEHOST.DOMAIN CTLPLANEHOST
params:
PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]}
DOMAIN: {get_param: CloudDomain}
@@ -351,31 +403,16 @@ outputs:
- '.'
- - {get_attr: [SwiftStorage, name]}
- management
+ CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - ctlplane
nova_server_resource:
description: Heat resource handle for the swift storage server
value:
{get_resource: SwiftStorage}
- swift_device:
- description: Swift device formatted for swift-ring-builder
- value:
- str_replace:
- template:
- list_join:
- - ','
- - ['r1z1-IP:%PORT%/d1']
- - repeat:
- template: 'r1z1-IP:%PORT%/DEVICE'
- for_each:
- DEVICE: {get_param: SwiftRawDisks}
- params:
- IP:
- get_attr:
- - NetIpMap
- - net_ip_map
- - str_replace:
- template: "NETWORK_uri"
- params:
- NETWORK: {get_param: [ServiceNetMap, SwiftMgmtNetwork]}
external_ip_address:
description: IP address of the server in the external network
value: {get_attr: [ExternalPort, ip_address]}
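[Editor's note] In the objectstorage-role.yaml hunks above, the service_configs datafile runs ServiceConfigSettings through map_replace, swapping network-name values (e.g. internal_api) for the node's actual IPs from NetIpMap. A small Python sketch of the values-only replacement; both dictionaries are illustrative, not taken from the templates.

    # Sketch of Heat's map_replace with a 'values' mapping: any value in the
    # settings dict that matches a key in net_ip_map is replaced by that IP.
    def map_replace_values(settings: dict, values: dict) -> dict:
        return {k: values.get(v, v) if isinstance(v, str) else v
                for k, v in settings.items()}

    service_config_settings = {
        "swift::proxy::proxy_local_net_ip": "internal_api",
        "swift::storage::all::storage_local_net_ip": "storage",
    }
    net_ip_map = {"internal_api": "172.16.2.10", "storage": "172.16.1.10"}

    print(map_replace_values(service_config_settings, net_ip_map))
    # {'swift::proxy::proxy_local_net_ip': '172.16.2.10', ...}
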
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
new file mode 100644
index 00000000..65c96ac2
--- /dev/null
+++ b/puppet/post.j2.yaml
@@ -0,0 +1,139 @@
+heat_template_version: 2016-10-14
+
+description: >
+ Post-deploy configuration steps via puppet for all roles,
+ as defined in ../roles_data.yaml
+
+parameters:
+ servers:
+ type: json
+ description: Mapping of role name (e.g. Controller) to a list of servers
+
+ role_data:
+ type: json
+ description: Mapping of role name (e.g. Controller) to the per-role data
+
+ DeployIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting this to a unique value will re-run any deployment tasks which
+ perform configuration on a Heat stack-update.
+
+resources:
+
+{% for role in roles %}
+ # Post deployment steps for all roles
+ # A single config is re-applied with an incrementing step number
+ # {{role.name}} Role steps
+ {{role.name}}ArtifactsConfig:
+ type: deploy-artifacts.yaml
+
+ {{role.name}}ArtifactsDeploy:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ArtifactsConfig}
+
+ {{role.name}}PreConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PreConfig
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Config:
+ type: OS::TripleO::{{role.name}}Config
+ properties:
+ StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
+
+ # Step through a series of configuration steps
+ {{role.name}}Deployment_Step1:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ properties:
+ name: {{role.name}}Deployment_Step1
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 1
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Deployment_Step2:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step1
+ {% endfor %}
+ properties:
+ name: {{role.name}}Deployment_Step2
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 2
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Deployment_Step3:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step2
+ {% endfor %}
+ properties:
+ name: {{role.name}}Deployment_Step3
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 3
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Deployment_Step4:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step3
+ {% endfor %}
+ properties:
+ name: {{role.name}}Deployment_Step4
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 4
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Deployment_Step5:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step4
+ {% endfor %}
+ properties:
+ name: {{role.name}}Deployment_Step5
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 5
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}PostConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PostConfig
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ {% endfor %}
+ properties:
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ # Note: this should come last, so use depends_on to ensure
+ # this is created after all other resources.
+ {{role.name}}ExtraConfigPost:
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}PostConfig
+ {% endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+{% endfor %}
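[Editor's note] post.j2.yaml above is Jinja2-rendered once per role: every role gets ArtifactsDeploy, PreConfig, five Deployment_StepN groups, PostConfig and ExtraConfigPost, and each StepN depends on Step(N-1) of every role, so all roles advance through the steps in lockstep. A minimal Python sketch of the resulting ordering; the two-role list is an assumption for illustration.

    # Sketch of the cross-role step ordering post.j2.yaml produces: step N for
    # any role only starts after step N-1 has finished on every role.
    roles = ["Controller", "Compute"]

    def deployment_order(roles, steps=5):
        plan = []
        for step in range(1, steps + 1):
            # Every role's StepN depends on Step(N-1) of *all* roles,
            # so a whole step completes cluster-wide before the next begins.
            plan.append([f"{role}Deployment_Step{step}" for role in roles])
        plan.append([f"{role}PostConfig" for role in roles])
        plan.append([f"{role}ExtraConfigPost" for role in roles])
        return plan

    for stage in deployment_order(roles):
        print(" | ".join(stage))
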
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
new file mode 100644
index 00000000..e4307001
--- /dev/null
+++ b/puppet/role.role.j2.yaml
@@ -0,0 +1,452 @@
+heat_template_version: 2016-10-14
+description: 'OpenStack {{role}} node configured by Puppet'
+parameters:
+ Overcloud{{role}}Flavor:
+ description: Flavor for the {{role}} node.
+ default: baremetal
+ type: string
+ constraints:
+ - custom_constraint: nova.flavor
+ {{role}}Image:
+ type: string
+ default: overcloud-full
+ constraints:
+ - custom_constraint: glance.image
+ ImageUpdatePolicy:
+ default: 'REBUILD_PRESERVE_EPHEMERAL'
+ description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
+ type: string
+ KeyName:
+ description: Name of an existing Nova key pair to enable SSH access to the instances
+ type: string
+ default: default
+ constraints:
+ - custom_constraint: nova.keypair
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ UpdateIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting to a previously unused value during stack-update will trigger
+ package update on all nodes
+ Hostname:
+ type: string
+ default: '' # Defaults to Heat created hostname
+ HostnameMap:
+ type: json
+ default: {}
+ description: Optional mapping to override hostnames
+ ExtraConfig:
+ default: {}
+ description: |
+ Additional hiera configuration to inject into the cluster. Note
+ that {{role}}ExtraConfig takes precedence over ExtraConfig.
+ type: json
+ {{role}}ExtraConfig:
+ default: {}
+ description: |
+ Role specific additional hiera configuration to inject into the cluster.
+ type: json
+ {{role}}IPs:
+ default: {}
+ type: json
+ NetworkDeploymentActions:
+ type: comma_delimited_list
+ description: >
+ Heat action when to apply network configuration changes
+ default: ['CREATE']
+ SoftwareConfigTransport:
+ default: POLL_SERVER_CFN
+ description: |
+ How the server should receive the metadata required for software configuration.
+ type: string
+ constraints:
+ - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
+ CloudDomain:
+ type: string
+ description: >
+ The DNS domain used for the hosts. This should match the dhcp_domain
+ configured in the Undercloud neutron. Defaults to localdomain.
+ ServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API.
+ type: json
+ {{role}}SchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
+ NodeIndex:
+ type: number
+ default: 0
+ ServiceConfigSettings:
+ type: json
+ default: {}
+ ServiceNames:
+ type: comma_delimited_list
+ default: []
+ MonitoringSubscriptions:
+ type: comma_delimited_list
+ default: []
+ ConfigCommand:
+ type: string
+ description: Command which will be run whenever configuration data changes
+ default: os-refresh-config --timeout 14400
+ LoggingSources:
+ type: json
+ default: []
+ LoggingGroups:
+ type: comma_delimited_list
+ default: []
+
+resources:
+ {{role}}:
+ type: OS::TripleO::Server
+ metadata:
+ os-collect-config:
+ command: {get_param: ConfigCommand}
+ properties:
+ image: {get_param: {{role}}Image}
+ image_update_policy: {get_param: ImageUpdatePolicy}
+ flavor: {get_param: Overcloud{{role}}Flavor}
+ key_name: {get_param: KeyName}
+ networks:
+ - network: ctlplane
+ user_data_format: SOFTWARE_CONFIG
+ user_data: {get_resource: UserData}
+ name:
+ str_replace:
+ template: {get_param: Hostname}
+ params: {get_param: HostnameMap}
+ software_config_transport: {get_param: SoftwareConfigTransport}
+ metadata: {get_param: ServerMetadata}
+ scheduler_hints: {get_param: {{role}}SchedulerHints}
+
+ # Combine the NodeAdminUserData and NodeUserData mime archives
+ UserData:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: {get_resource: NodeAdminUserData}
+ type: multipart
+ - config: {get_resource: NodeUserData}
+ type: multipart
+
+ # Creates the "heat-admin" user if configured via the environment
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ NodeAdminUserData:
+ type: OS::TripleO::NodeAdminUserData
+
+ # For optional operator additional userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ NodeUserData:
+ type: OS::TripleO::NodeUserData
+
+ ExternalPort:
+ type: OS::TripleO::{{role}}::Ports::ExternalPort
+ properties:
+ ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ IPPool: {get_param: {{role}}IPs}
+ NodeIndex: {get_param: NodeIndex}
+
+ InternalApiPort:
+ type: OS::TripleO::{{role}}::Ports::InternalApiPort
+ properties:
+ ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ IPPool: {get_param: {{role}}IPs}
+ NodeIndex: {get_param: NodeIndex}
+
+ StoragePort:
+ type: OS::TripleO::{{role}}::Ports::StoragePort
+ properties:
+ ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ IPPool: {get_param: {{role}}IPs}
+ NodeIndex: {get_param: NodeIndex}
+
+ StorageMgmtPort:
+ type: OS::TripleO::{{role}}::Ports::StorageMgmtPort
+ properties:
+ ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ IPPool: {get_param: {{role}}IPs}
+ NodeIndex: {get_param: NodeIndex}
+
+ TenantPort:
+ type: OS::TripleO::{{role}}::Ports::TenantPort
+ properties:
+ ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ IPPool: {get_param: {{role}}IPs}
+ NodeIndex: {get_param: NodeIndex}
+
+ ManagementPort:
+ type: OS::TripleO::{{role}}::Ports::ManagementPort
+ properties:
+ ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ IPPool: {get_param: {{role}}IPs}
+ NodeIndex: {get_param: NodeIndex}
+
+ NetworkConfig:
+ type: OS::TripleO::{{role}}::Net::SoftwareConfig
+ properties:
+ ControlPlaneIp: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
+ StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
+ StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
+
+ NetIpMap:
+ type: OS::TripleO::Network::Ports::NetIpMap
+ properties:
+ ControlPlaneIp: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
+ ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
+ InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
+ InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
+ StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
+ StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
+ StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+ StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
+ TenantIp: {get_attr: [TenantPort, ip_address]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
+ ManagementIp: {get_attr: [ManagementPort, ip_address]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
+ ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+
+ NetworkDeployment:
+ type: OS::TripleO::SoftwareDeployment
+ properties:
+ name: NetworkDeployment
+ config: {get_resource: NetworkConfig}
+ server: {get_resource: {{role}}}
+ actions: {get_param: NetworkDeploymentActions}
+
+ {{role}}Deployment:
+ type: OS::Heat::StructuredDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: {{role}}Deployment
+ config: {get_resource: {{role}}Config}
+ server: {get_resource: {{role}}}
+ input_values:
+ enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+
+ {{role}}Config:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ hiera:
+ hierarchy:
+ - '"%{::uuid}"'
+ - heat_config_%{::deploy_config_name}
+ - {{role.lower()}}_extraconfig
+ - extraconfig
+ - service_names
+ - service_configs
+ - bootstrap_node # provided by allNodesConfig
+ - all_nodes # provided by allNodesConfig
+ - vip_data # provided by allNodesConfig
+ - '"%{::osfamily}"'
+ merge_behavior: deeper
+ datafiles:
+ service_names:
+ mapped_data:
+ service_names: {get_param: ServiceNames}
+ sensu::subscriptions: {get_param: MonitoringSubscriptions}
+ service_configs:
+ mapped_data:
+ map_replace:
+ - {get_param: ServiceConfigSettings}
+ - values: {get_attr: [NetIpMap, net_ip_map]}
+ {{role.lower()}}_extraconfig:
+ mapped_data: {get_param: {{role}}ExtraConfig}
+ extraconfig:
+ mapped_data: {get_param: ExtraConfig}
+ {{role.lower()}}:
+ mapped_data:
+ tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
+ tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: LoggingSources}
+ tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: LoggingGroups}
+
+ # Resource for site-specific injection of root certificate
+ NodeTLSCAData:
+ depends_on: {{role}}Deployment
+ type: OS::TripleO::NodeTLSCAData
+ properties:
+ server: {get_resource: {{role}}}
+
+ # Hook for site-specific additional pre-deployment config, e.g extra hieradata
+ {{role}}ExtraConfigPre:
+ depends_on: {{role}}Deployment
+ type: OS::TripleO::{{role}}ExtraConfigPre
+ properties:
+ server: {get_resource: {{role}}}
+
+ # Hook for site-specific additional pre-deployment config,
+ # applying to all nodes, e.g node registration/unregistration
+ NodeExtraConfig:
+ depends_on: [{{role}}ExtraConfigPre, NodeTLSCAData]
+ type: OS::TripleO::NodeExtraConfig
+ properties:
+ server: {get_resource: {{role}}}
+
+ UpdateConfig:
+ type: OS::TripleO::Tasks::PackageUpdate
+
+ UpdateDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ config: {get_resource: UpdateConfig}
+ server: {get_resource: {{role}}}
+ input_values:
+ update_identifier:
+ get_param: UpdateIdentifier
+
+outputs:
+ ip_address:
+ description: IP address of the server in the ctlplane network
+ value: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ hostname:
+ description: Hostname of the server
+ value: {get_attr: [{{role}}, name]}
+ hostname_map:
+ description: Mapping of network names to hostnames
+ value:
+ external:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - external
+ - {get_param: CloudDomain}
+ internal_api:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ storage:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - storage
+ - {get_param: CloudDomain}
+ storage_mgmt:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ tenant:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ management:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - management
+ - {get_param: CloudDomain}
+ ctlplane:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ hosts_entry:
+ value:
+ str_replace:
+ template: |
+ PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST
+ EXTERNALIP EXTERNALHOST.DOMAIN EXTERNALHOST
+ INTERNAL_APIIP INTERNAL_APIHOST.DOMAIN INTERNAL_APIHOST
+ STORAGEIP STORAGEHOST.DOMAIN STORAGEHOST
+ STORAGE_MGMTIP STORAGE_MGMTHOST.DOMAIN STORAGE_MGMTHOST
+ TENANTIP TENANTHOST.DOMAIN TENANTHOST
+ MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
+ CTLPLANEIP CTLPLANEHOST.DOMAIN CTLPLANEHOST
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role}}HostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [{{role}}, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - external
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - internalapi
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - storage
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - storagemgmt
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - tenant
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - management
+ CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ CTLPLANEHOST:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - ctlplane
+ nova_server_resource:
+ description: Heat resource handle for {{role}} server
+ value:
+ {get_resource: {{role}}}
+ external_ip_address:
+ description: IP address of the server in the external network
+ value: {get_attr: [ExternalPort, ip_address]}
+ internal_api_ip_address:
+ description: IP address of the server in the internal_api network
+ value: {get_attr: [InternalApiPort, ip_address]}
+ storage_ip_address:
+ description: IP address of the server in the storage network
+ value: {get_attr: [StoragePort, ip_address]}
+ storage_mgmt_ip_address:
+ description: IP address of the server in the storage_mgmt network
+ value: {get_attr: [StorageMgmtPort, ip_address]}
+ tenant_ip_address:
+ description: IP address of the server in the tenant network
+ value: {get_attr: [TenantPort, ip_address]}
+ management_ip_address:
+ description: IP address of the server in the management network
+ value: {get_attr: [ManagementPort, ip_address]}
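[Editor's note] The hosts_entry output in role.role.j2.yaml fills a multi-line /etc/hosts template via str_replace, pairing each per-network IP with the matching FQDN and short host alias. A small Python sketch of the substitution; the addresses and names are placeholders, not template values.

    # Sketch of the hosts_entry str_replace: token -> value substitution over a
    # multi-line template (only two of the networks are shown).
    TEMPLATE = (
        "PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST\n"
        "CTLPLANEIP CTLPLANEHOST.DOMAIN CTLPLANEHOST\n"
    )

    params = {
        "PRIMARYIP": "172.16.2.10",
        "CTLPLANEIP": "192.168.24.15",
        "PRIMARYHOST": "overcloud-controller-0",
        "CTLPLANEHOST": "overcloud-controller-0.ctlplane",
        "DOMAIN": "localdomain",
    }

    def str_replace(template: str, params: dict) -> str:
        # Longest tokens first so one token cannot clobber another that contains it.
        for key in sorted(params, key=len, reverse=True):
            template = template.replace(key, params[key])
        return template

    print(str_replace(TEMPLATE, params))
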
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index 15c8c1f1..8fe51fa3 100644
--- a/puppet/services/README.rst
+++ b/puppet/services/README.rst
@@ -31,6 +31,8 @@ are re-asserted when applying latter ones.
* config_settings: Custom hiera settings for this service.
+ * global_config_settings: Additional hiera settings distributed to all roles.
+
* step_config: A puppet manifest that is used to step through the deployment
sequence. Each sequence is given a "step" (via hiera('step') that provides
information for when puppet classes should activate themselves.
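[Editor's note] The README addition documents global_config_settings: hiera that a service contributes to every role, not only the roles hosting it. A toy Python sketch of how per-role config_settings and everyone's global_config_settings might combine; the service names and keys are invented for illustration and do not reflect the actual merge code.

    # Sketch of the documented distinction: config_settings apply only to roles
    # running the service, global_config_settings are merged into all roles.
    services = {
        "keystone": {
            "config_settings": {"keystone::bind_host": "10.0.0.5"},
            "global_config_settings": {"keystone_admin_api_vip": "10.0.0.100"},
        },
        "nova_compute": {
            "config_settings": {"nova::compute::reserved_host_memory": 2048},
            "global_config_settings": {},
        },
    }

    def hiera_for_role(role_services: list) -> dict:
        data: dict = {}
        for name, svc in services.items():
            data.update(svc["global_config_settings"])   # every role gets these
            if name in role_services:
                data.update(svc["config_settings"])      # only hosting roles
        return data

    print(hiera_for_role(["nova_compute"]))
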
diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml
index 4308052b..da043c80 100644
--- a/puppet/services/aodh-api.yaml
+++ b/puppet/services/aodh-api.yaml
@@ -18,6 +18,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionAodhApi:
+ default: 'overcloud-ceilometer-aodh-api'
+ type: string
+ EnableCombinationAlarms:
+ default: false
+ description: Combination alarms are deprecated in Newton, hence disabled
+ by default. To enable, set this parameter to true.
+ type: boolean
resources:
AodhBase:
@@ -27,16 +35,32 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
outputs:
role_data:
description: Role data for the Aodh API service.
value:
service_name: aodh_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionAodhApi}
config_settings:
map_merge:
- get_attr: [AodhBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
- aodh::wsgi::apache::ssl: false
+ aodh::wsgi::apache::servername:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, AodhApiNetwork]}
aodh::api::service_name: 'httpd'
+ aodh::api::enable_proxy_headers_parsing: true
tripleo.aodh_api.firewall_rules:
'128 aodh-api':
dport:
@@ -50,5 +74,8 @@ outputs:
# internal_api_subnet - > IP/CIDR
aodh::api::host: {get_param: [ServiceNetMap, AodhApiNetwork]}
aodh::wsgi::apache::bind_host: {get_param: [ServiceNetMap, AodhApiNetwork]}
+ tripleo::profile::base::aodh::api::enable_combination_alarms: {get_param: EnableCombinationAlarms}
+ service_config_settings:
+ get_attr: [AodhBase, role_data, service_config_settings]
step_config: |
include tripleo::profile::base::aodh::api
diff --git a/puppet/services/aodh-base.yaml b/puppet/services/aodh-base.yaml
index 15f81953..0e2410f7 100644
--- a/puppet/services/aodh-base.yaml
+++ b/puppet/services/aodh-base.yaml
@@ -59,14 +59,7 @@ outputs:
value:
service_name: aodh_base
config_settings:
- aodh::evaluator::coordination_url:
- list_join:
- - ''
- - - 'redis://:'
- - {get_param: RedisPassword}
- - '@'
- - "%{hiera('redis_vip')}"
- - ':6379/'
+ aodh_redis_password: {get_param: RedisPassword}
aodh::db::database_connection:
list_join:
- ''
@@ -77,7 +70,7 @@ outputs:
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/aodh'
aodh::debug: {get_param: Debug}
- aodh::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ aodh::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::rabbit_userid: {get_param: RabbitUserName}
aodh::rabbit_password: {get_param: RabbitPassword}
aodh::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
@@ -87,18 +80,21 @@ outputs:
aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
aodh::auth::auth_password: {get_param: AodhPassword}
- aodh::keystone::auth::public_url: {get_param: [EndpointMap, AodhPublic, uri]}
- aodh::keystone::auth::internal_url: {get_param: [EndpointMap, AodhInternal, uri]}
- aodh::keystone::auth::admin_url: {get_param: [EndpointMap, AodhAdmin, uri]}
- aodh::keystone::auth::password: {get_param: AodhPassword}
- aodh::keystone::auth::region: {get_param: KeystoneRegion}
- aodh::keystone::auth::tenant: 'service'
- aodh::db::mysql::user: aodh
- aodh::db::mysql::password: {get_param: AodhPassword}
- aodh::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- aodh::db::mysql::dbname: aodh
- aodh::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
aodh::auth::auth_region: 'regionOne'
aodh::auth::auth_tenant_name: 'service'
+ service_config_settings:
+ keystone:
+ aodh::keystone::auth::public_url: {get_param: [EndpointMap, AodhPublic, uri]}
+ aodh::keystone::auth::internal_url: {get_param: [EndpointMap, AodhInternal, uri]}
+ aodh::keystone::auth::admin_url: {get_param: [EndpointMap, AodhAdmin, uri]}
+ aodh::keystone::auth::password: {get_param: AodhPassword}
+ aodh::keystone::auth::region: {get_param: KeystoneRegion}
+ aodh::keystone::auth::tenant: 'service'
+ mysql:
+ aodh::db::mysql::user: aodh
+ aodh::db::mysql::password: {get_param: AodhPassword}
+ aodh::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ aodh::db::mysql::dbname: aodh
+ aodh::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/aodh-evaluator.yaml b/puppet/services/aodh-evaluator.yaml
index 3988c940..405c500e 100644
--- a/puppet/services/aodh-evaluator.yaml
+++ b/puppet/services/aodh-evaluator.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionAodhEvaluator:
+ default: 'overcloud-ceilometer-aodh-evaluator'
+ type: string
resources:
AodhBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Aodh Evaluator service.
value:
service_name: aodh_evaluator
+ monitoring_subscription: {get_param: MonitoringSubscriptionAodhEvaluator}
config_settings:
get_attr: [AodhBase, role_data, config_settings]
step_config: |
diff --git a/puppet/services/aodh-listener.yaml b/puppet/services/aodh-listener.yaml
index bc1ccde7..fc4e8b39 100644
--- a/puppet/services/aodh-listener.yaml
+++ b/puppet/services/aodh-listener.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionAodhListener:
+ default: 'overcloud-ceilometer-aodh-listener'
+ type: string
resources:
AodhBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Aodh Listener service.
value:
service_name: aodh_listener
+ monitoring_subscription: {get_param: MonitoringSubscriptionAodhListener}
config_settings:
get_attr: [AodhBase, role_data, config_settings]
step_config: |
diff --git a/puppet/services/aodh-notifier.yaml b/puppet/services/aodh-notifier.yaml
index 66e9f3e9..2e51c639 100644
--- a/puppet/services/aodh-notifier.yaml
+++ b/puppet/services/aodh-notifier.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionAodhNotifier:
+ default: 'overcloud-ceilometer-aodh-notifier'
+ type: string
resources:
AodhBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Aodh Notifier service.
value:
service_name: aodh_notifier
+ monitoring_subscription: {get_param: MonitoringSubscriptionAodhNotifier}
config_settings:
get_attr: [AodhBase, role_data, config_settings]
step_config: |
diff --git a/puppet/services/apache-internal-tls-certmonger.yaml b/puppet/services/apache-internal-tls-certmonger.yaml
new file mode 100644
index 00000000..87e53f13
--- /dev/null
+++ b/puppet/services/apache-internal-tls-certmonger.yaml
@@ -0,0 +1,50 @@
+heat_template_version: 2016-10-14
+
+description: >
+ Apache service TLS configurations.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ # The following parameters are not needed by the template but are
+ # required to pass the pep8 tests
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the Apache role.
+ value:
+ service_name: apache_internal_tls_certmonger
+ config_settings:
+ generate_service_certificates: true
+ apache_certificates_specs:
+ map_merge:
+ repeat:
+ template:
+ httpd-NETWORK:
+ service_certificate: '/etc/pki/tls/certs/httpd-NETWORK.crt'
+ service_key: '/etc/pki/tls/private/httpd-NETWORK.key'
+ hostname: "%{::fqdn_NETWORK}"
+ principal: "HTTP/%{::fqdn_NETWORK}"
+ for_each:
+ NETWORK:
+ # NOTE(jaosorior) Get unique network names to create
+ # certificates for those. We skip the tenant network since
+ # we don't need a certificate for that, and the external
+ # network will be handled in another template.
+ yaql:
+ expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
+ data:
+ map:
+ get_param: ServiceNetMap
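[Editor's note] apache-internal-tls-certmonger.yaml builds one certificate spec per distinct network in ServiceNetMap, filtering out external and tenant with yaql, then expands the httpd-NETWORK template via repeat/for_each. A Python sketch of the same expansion over an assumed ServiceNetMap (the map below is illustrative):

    # Sketch of the yaql distinct/filter plus repeat/for_each expansion that
    # produces per-network Apache certificate specs.
    import json

    service_net_map = {
        "KeystoneAdminApiNetwork": "ctlplane",
        "AodhApiNetwork": "internal_api",
        "SwiftProxyNetwork": "storage",
        "NeutronTenantNetwork": "tenant",
        "PublicNetwork": "external",
    }

    # list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
    networks = sorted({net for net in service_net_map.values()
                       if net not in ("external", "tenant")})

    certificates_specs = {
        f"httpd-{net}": {
            "service_certificate": f"/etc/pki/tls/certs/httpd-{net}.crt",
            "service_key": f"/etc/pki/tls/private/httpd-{net}.key",
            "hostname": f"%{{::fqdn_{net}}}",
            "principal": f"HTTP/%{{::fqdn_{net}}}",
        }
        for net in networks
    }

    print(json.dumps(certificates_specs, indent=2))
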
diff --git a/puppet/services/apache.yaml b/puppet/services/apache.yaml
new file mode 100644
index 00000000..382e0ff9
--- /dev/null
+++ b/puppet/services/apache.yaml
@@ -0,0 +1,66 @@
+heat_template_version: 2016-10-14
+
+description: >
+ Apache service configured with Puppet. Note this is typically included
+ automatically via other services which run via Apache.
+
+parameters:
+ ApacheMaxRequestWorkers:
+ default: 256
+ description: Maximum number of simultaneously processed requests.
+ type: number
+ ApacheServerLimit:
+ default: 256
+ description: Maximum number of Apache processes.
+ type: number
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+
+resources:
+
+ ApacheTLS:
+ type: OS::TripleO::Services::ApacheTLS
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+
+outputs:
+ role_data:
+ description: Role data for the Apache role.
+ value:
+ service_name: apache
+ config_settings:
+ map_merge:
+ - get_attr: [ApacheTLS, role_data, config_settings]
+ -
+ # for the given network; replacement examples (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet - > IP/CIDR
+ apache::ip: {get_param: [ServiceNetMap, ApacheNetwork]}
+ apache::server_signature: 'Off'
+ apache::server_tokens: 'Prod'
+ apache_remote_proxy_ips_network:
+ str_replace:
+ template: "NETWORK_subnet"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, ApacheNetwork]}
+ apache::mod::prefork::maxclients: { get_param: ApacheMaxRequestWorkers }
+ apache::mod::prefork::serverlimit: { get_param: ApacheServerLimit }
+ apache::mod::remoteip::proxy_ips:
+ - "%{hiera('apache_remote_proxy_ips_network')}"
diff --git a/puppet/services/ceilometer-agent-central.yaml b/puppet/services/ceilometer-agent-central.yaml
index 72bad632..c4abc307 100644
--- a/puppet/services/ceilometer-agent-central.yaml
+++ b/puppet/services/ceilometer-agent-central.yaml
@@ -22,6 +22,14 @@ parameters:
description: The password for the redis service account.
type: string
hidden: true
+ MonitoringSubscriptionCeilometerCentral:
+ default: 'overcloud-ceilometer-agent-central'
+ type: string
+ CeilometerAgentCentralLoggingSource:
+ type: json
+ default:
+ tag: openstack.ceilometer.agent.central
+ path: /var/log/ceilometer/central.log
resources:
CeilometerServiceBase:
@@ -36,16 +44,13 @@ outputs:
description: Role data for the Ceilometer Central Agent role.
value:
service_name: ceilometer_agent_central
+ monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCentral}
+ logging_source: {get_param: CeilometerAgentCentralLoggingSource}
+ logging_groups:
+ - ceilometer
config_settings:
map_merge:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
- - ceilometer::agent::central::coordination_url:
- list_join:
- - ''
- - - 'redis://:'
- - {get_param: RedisPassword}
- - '@'
- - "%{hiera('redis_vip')}"
- - ':6379/'
+ - ceilometer_redis_password: {get_param: RedisPassword}
step_config: |
include ::tripleo::profile::base::ceilometer::agent::central
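[Editor's note] Like the aodh-base change, ceilometer-agent-central stops assembling the redis coordination URL inline with list_join and instead publishes only the password (ceilometer_redis_password), leaving URL construction to the puppet profile. A tiny Python sketch of the URL the removed list_join used to build; the password and VIP are illustrative.

    # Sketch of the coordination URL formerly built in the template.
    def coordination_url(redis_password: str, redis_vip: str, port: int = 6379) -> str:
        return "".join(["redis://:", redis_password, "@", redis_vip, ":", str(port), "/"])

    print(coordination_url("s3cr3t", "172.16.2.100"))
    # redis://:s3cr3t@172.16.2.100:6379/
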
diff --git a/puppet/services/ceilometer-agent-compute.yaml b/puppet/services/ceilometer-agent-compute.yaml
index 5bfecfed..5457539c 100644
--- a/puppet/services/ceilometer-agent-compute.yaml
+++ b/puppet/services/ceilometer-agent-compute.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCeilometerCompute:
+ default: 'overcloud-ceilometer-agent-compute'
+ type: string
resources:
CeilometerServiceBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Ceilometer Compute Agent role.
value:
service_name: ceilometer_agent_compute
+ monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCompute}
config_settings:
get_attr: [CeilometerServiceBase, role_data, config_settings]
step_config: |
diff --git a/puppet/services/ceilometer-agent-notification.yaml b/puppet/services/ceilometer-agent-notification.yaml
index 7873706d..ea403aa1 100644
--- a/puppet/services/ceilometer-agent-notification.yaml
+++ b/puppet/services/ceilometer-agent-notification.yaml
@@ -18,6 +18,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCeilometerNotification:
+ default: 'overcloud-ceilometer-agent-notification'
+ type: string
+ CeilometerAgentNotificationLoggingSource:
+ type: json
+ default:
+ tag: openstack.ceilometer.agent.notification
+ path: /var/log/ceilometer/agent-notification.log
resources:
@@ -33,6 +41,10 @@ outputs:
description: Role data for the Ceilometer Notification Agent role.
value:
service_name: ceilometer_agent_notification
+ monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerNotification}
+ logging_source: {get_param: CeilometerAgentNotificationLoggingSource}
+ logging_groups:
+ - ceilometer
config_settings:
get_attr: [CeilometerServiceBase, role_data, config_settings]
step_config: |
diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml
index 201a2b7b..27c32bfd 100644
--- a/puppet/services/ceilometer-api.yaml
+++ b/puppet/services/ceilometer-api.yaml
@@ -18,6 +18,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCeilometerApi:
+ default: 'overcloud-ceilometer-api'
+ type: string
+ CeilometerApiLoggingSource:
+ type: json
+ default:
+ tag: openstack.ceilometer.api
+ path: /var/log/ceilometer/api.log
resources:
@@ -28,13 +36,25 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
outputs:
role_data:
description: Role data for the Ceilometer API role.
value:
service_name: ceilometer_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerApi}
+ logging_source: {get_param: CeilometerApiLoggingSource}
+ logging_groups:
+ - ceilometer
config_settings:
map_merge:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
- get_attr: [CeilometerServiceBase, role_data, config_settings]
- tripleo.ceilometer_api.firewall_rules:
'124 ceilometer':
@@ -48,8 +68,17 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- ceilometer::api::service_name: 'httpd'
+ ceilometer::api::enable_proxy_headers_parsing: true
ceilometer::api::host: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
ceilometer::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
ceilometer::wsgi::apache::ssl: false
+ ceilometer::wsgi::apache::servername:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
+ service_config_settings:
+ get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::api
diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml
index 62fdd5c1..4ace7526 100644
--- a/puppet/services/ceilometer-base.yaml
+++ b/puppet/services/ceilometer-base.yaml
@@ -101,29 +101,32 @@ outputs:
ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion}
ceilometer::agent::auth::auth_tenant_name: 'service'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
- ceilometer::db::mysql::password: {get_param: CeilometerPassword}
ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]}
ceilometer::dispatcher::gnocchi::filter_project: 'service'
ceilometer::dispatcher::gnocchi::archive_policy: 'low'
ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml'
- ceilometer::keystone::auth::public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
- ceilometer::keystone::auth::internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
- ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
- ceilometer::keystone::auth::password: {get_param: CeilometerPassword}
- ceilometer::keystone::auth::region: {get_param: KeystoneRegion}
- ceilometer::keystone::auth::tenant: 'service'
ceilometer::rabbit_userid: {get_param: RabbitUserName}
ceilometer::rabbit_password: {get_param: RabbitPassword}
ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
ceilometer::rabbit_port: {get_param: RabbitClientPort}
- ceilometer::db::mysql::user: ceilometer
- ceilometer::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- ceilometer::db::mysql::dbname: ceilometer
- ceilometer::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
ceilometer::rabbit_heartbeat_timeout_threshold: 60
ceilometer::db::database_db_max_retries: -1
ceilometer::db::database_max_retries: -1
ceilometer::telemetry_secret: {get_param: CeilometerMeteringSecret}
+ service_config_settings:
+ keystone:
+ ceilometer::keystone::auth::public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
+ ceilometer::keystone::auth::internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
+ ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
+ ceilometer::keystone::auth::password: {get_param: CeilometerPassword}
+ ceilometer::keystone::auth::region: {get_param: KeystoneRegion}
+ ceilometer::keystone::auth::tenant: 'service'
+ mysql:
+ ceilometer::db::mysql::password: {get_param: CeilometerPassword}
+ ceilometer::db::mysql::user: ceilometer
+ ceilometer::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ ceilometer::db::mysql::dbname: ceilometer
+ ceilometer::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/ceilometer-collector.yaml b/puppet/services/ceilometer-collector.yaml
index ef7ffbd6..e3f1ef4e 100644
--- a/puppet/services/ceilometer-collector.yaml
+++ b/puppet/services/ceilometer-collector.yaml
@@ -18,6 +18,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCeilometerCollector:
+ default: 'overcloud-ceilometer-collector'
+ type: string
+ CeilometerCollectorLoggingSource:
+ type: json
+ default:
+ tag: openstack.ceilometer.collector
+ path: /var/log/ceilometer/collector.log
resources:
CeilometerServiceBase:
@@ -27,12 +35,27 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ MongoDbBase:
+ type: ./database/mongodb-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
outputs:
role_data:
description: Role data for the Ceilometer Collector role.
value:
service_name: ceilometer_collector
+ monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCollector}
+ logging_source: {get_param: CeilometerCollectorLoggingSource}
+ logging_groups:
+ - ceilometer
config_settings:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
+ map_merge:
+ - get_attr: [MongoDbBase, role_data, config_settings]
+ - get_attr: [CeilometerServiceBase, role_data, config_settings]
+ service_config_settings:
+ get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::collector
diff --git a/puppet/services/ceilometer-expirer.yaml b/puppet/services/ceilometer-expirer.yaml
index 63a6d41d..3b811c4d 100644
--- a/puppet/services/ceilometer-expirer.yaml
+++ b/puppet/services/ceilometer-expirer.yaml
@@ -18,7 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
-
+ MonitoringSubscriptionCeilometerExpirer:
+ default: 'overcloud-ceilometer-expirer'
+ type: string
resources:
CeilometerServiceBase:
@@ -33,6 +35,7 @@ outputs:
description: Role data for the Ceilometer Expirer role.
value:
service_name: ceilometer_expirer
+ monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerExpirer}
config_settings:
get_attr: [CeilometerServiceBase, role_data, config_settings]
step_config: |
diff --git a/puppet/services/ceph-base.yaml b/puppet/services/ceph-base.yaml
index 4d98546d..786e9ddd 100644
--- a/puppet/services/ceph-base.yaml
+++ b/puppet/services/ceph-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
Ceph base service. Shared by all Ceph services.
@@ -20,9 +20,6 @@ parameters:
CephClusterFSID:
type: string
description: The Ceph cluster FSID. Must be a UUID.
- CephIPv6:
- default: False
- type: boolean
CinderRbdPoolName:
default: volumes
type: string
@@ -32,9 +29,20 @@ parameters:
GlanceRbdPoolName:
default: images
type: string
+ GlanceBackend:
+ default: swift
+ description: The short name of the Glance backend to use. Should be one
+ of swift, rbd, or file
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd']
GnocchiRbdPoolName:
default: metrics
type: string
+ NovaEnableRbdBackend:
+ default: false
+ description: Whether or not to enable the RBD backend for Nova.
+ type: boolean
NovaRbdPoolName:
default: vms
type: string
@@ -66,19 +74,34 @@ parameter_groups:
parameters:
- ControllerEnableCephStorage
+conditions:
+ glance_multiple_locations:
+ and:
+ - equals:
+ - get_param: GlanceBackend
+ - rbd
+ - equals:
+ - get_param: NovaEnableRbdBackend
+ - true
+
outputs:
role_data:
description: Role data for the Ceph base service.
value:
service_name: ceph_base
config_settings:
- tripleo::profile::base::ceph::ceph_ipv6: {get_param: CephIPv6}
tripleo::profile::base::ceph::enable_ceph_storage: {get_param: ControllerEnableCephStorage}
ceph::profile::params::osd_pool_default_min_size: 1
ceph::profile::params::osds: {/srv/data: {}}
ceph::profile::params::manage_repo: false
ceph::profile::params::authentication_type: cephx
ceph::profile::params::fsid: {get_param: CephClusterFSID}
+ # FIXME(gfidente): we should not have to list the packages explicitly in the templates,
+ # but this has to stay until https://bugs.launchpad.net/puppet-ceph/+bug/1629933 is fixed
+ ceph::params::packages:
+ - ceph-base
+ - ceph-mon
+ - ceph-osd
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
@@ -93,8 +116,8 @@ outputs:
str_replace:
template: "NETWORK_subnet"
params:
- NETWORK: {get_param: [ServiceNetMap, CephPublicNetwork]}
- ceph::profile::params::public_addr: {get_param: [ServiceNetMap, CephPublicNetwork]}
+ NETWORK: {get_param: [ServiceNetMap, CephMonNetwork]}
+ ceph::profile::params::public_addr: {get_param: [ServiceNetMap, CephMonNetwork]}
ceph::profile::params::client_keys:
str_replace:
template: "{
@@ -126,3 +149,6 @@ outputs:
CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
GLANCE_POOL: {get_param: GlanceRbdPoolName}
GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
+ service_config_settings:
+ glance_api:
+ glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
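The new glance_multiple_locations condition only flips glance::api::show_multiple_locations to true when Glance uses the rbd backend and Nova also uses RBD for ephemeral storage. A minimal environment sketch (hypothetical file, illustrative only) that satisfies it:

    parameter_defaults:
      GlanceBackend: rbd
      NovaEnableRbdBackend: true

With any other combination the condition evaluates to false and the flag is emitted as false.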
diff --git a/puppet/services/ceph-client.yaml b/puppet/services/ceph-client.yaml
index a9e4621a..b482dd2e 100644
--- a/puppet/services/ceph-client.yaml
+++ b/puppet/services/ceph-client.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCephClient:
+ default: 'overcloud-ceph-client'
+ type: string
resources:
CephBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Ceph Client service.
value:
service_name: ceph_client
+ monitoring_subscription: {get_param: MonitoringSubscriptionCephClient}
config_settings:
get_attr: [CephBase, role_data, config_settings]
step_config: |
diff --git a/puppet/services/ceph-external.yaml b/puppet/services/ceph-external.yaml
index 959cee26..7d75074c 100644
--- a/puppet/services/ceph-external.yaml
+++ b/puppet/services/ceph-external.yaml
@@ -47,12 +47,16 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCephExternal:
+ default: 'overcloud-ceph-external'
+ type: string
outputs:
role_data:
description: Role data for the Ceph External service.
value:
service_name: ceph_external
+ monitoring_subscription: {get_param: MonitoringSubscriptionCephExternal}
config_settings:
tripleo::profile::base::ceph::ceph_mon_host: {get_param: CephExternalMonHost}
ceph::profile::params::fsid: {get_param: CephClusterFSID}
@@ -74,5 +78,7 @@ outputs:
CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
GLANCE_POOL: {get_param: GlanceRbdPoolName}
GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
+ service_config_settings:
+ get_attr: [CephBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceph::client
diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml
index f634ce8a..3471f16c 100644
--- a/puppet/services/ceph-mon.yaml
+++ b/puppet/services/ceph-mon.yaml
@@ -53,6 +53,9 @@ parameters:
}
default: {}
type: json
+ MonitoringSubscriptionCephMon:
+ default: 'overcloud-ceph-mon'
+ type: string
resources:
CephBase:
@@ -67,11 +70,15 @@ outputs:
description: Role data for the Ceph Monitor service.
value:
service_name: ceph_mon
+ monitoring_subscription: {get_param: MonitoringSubscriptionCephMon}
config_settings:
map_merge:
- get_attr: [CephBase, role_data, config_settings]
- ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
ceph::profile::params::mon_key: {get_param: CephMonKey}
+ ceph::profile::params::osd_pool_default_pg_num: 32
+ ceph::profile::params::osd_pool_default_pgp_num: 32
+ ceph::profile::params::osd_pool_default_size: 3
# repeat returns items in a list, so we need to map_merge twice
tripleo::profile::base::ceph::mon::ceph_pools:
map_merge:
@@ -86,13 +93,15 @@ outputs:
- {get_param: GnocchiRbdPoolName}
template:
<%pool%>:
- pg_num: 32
- pgp_num: 32
- size: 3
+ pg_num: "%{hiera('ceph::profile::params::osd_pool_default_pg_num')}"
+ pgp_num: "%{hiera('ceph::profile::params::osd_pool_default_pgp_num')}"
+ size: "%{hiera('ceph::profile::params::osd_pool_default_size')}"
- {get_param: CephPools}
tripleo.ceph_mon.firewall_rules:
'110 ceph_mon':
dport:
- 6789
+ service_config_settings:
+ get_attr: [CephBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceph::mon
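Because the per-pool pg_num, pgp_num and size now resolve through the ceph::profile::params::osd_pool_default_* hiera keys, all default pools can be tuned in one place. A sketch, assuming the usual ExtraConfig hiera override mechanism and illustrative values:

    parameter_defaults:
      ExtraConfig:
        ceph::profile::params::osd_pool_default_pg_num: 128
        ceph::profile::params::osd_pool_default_pgp_num: 128
        ceph::profile::params::osd_pool_default_size: 3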
diff --git a/puppet/services/ceph-osd.yaml b/puppet/services/ceph-osd.yaml
index d18ccabf..f6378720 100644
--- a/puppet/services/ceph-osd.yaml
+++ b/puppet/services/ceph-osd.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCephOsd:
+ default: 'overcloud-ceph-osd'
+ type: string
resources:
CephBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Ceph OSD service.
value:
service_name: ceph_osd
+ monitoring_subscription: {get_param: MonitoringSubscriptionCephOsd}
config_settings:
map_merge:
- get_attr: [CephBase, role_data, config_settings]
diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml
new file mode 100644
index 00000000..18a4b780
--- /dev/null
+++ b/puppet/services/ceph-rgw.yaml
@@ -0,0 +1,79 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Ceph RadosGW service.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ AdminToken:
+ description: The keystone auth secret and db password.
+ type: string
+ hidden: true
+ CephRgwKey:
+ description: The cephx key for the radosgw client. Can be created
+ with ceph-authtool --gen-print-key.
+ type: string
+ hidden: true
+ SwiftPassword:
+ description: The password for the swift service account, used by the Ceph RGW services.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+
+resources:
+ CephBase:
+ type: ./ceph-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ceph RadosGW service.
+ value:
+ service_name: ceph_rgw
+ config_settings:
+ map_merge:
+ - get_attr: [CephBase, role_data, config_settings]
+ - tripleo::profile::base::ceph::rgw::rgw_key: {get_param: CephRgwKey}
+ tripleo::profile::base::ceph::rgw::keystone_admin_token: {get_param: AdminToken}
+ tripleo::profile::base::ceph::rgw::keystone_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ ceph::profile::params::frontend_type: 'civetweb'
+ ceph_rgw_civetweb_bind_address: {get_param: [ServiceNetMap, CephRgwNetwork]}
+ ceph::profile::params::rgw_frontends:
+ list_join:
+ - ''
+ - - 'civetweb port='
+ - '%{hiera("ceph_rgw_civetweb_bind_address")}'
+ - ':'
+ - {get_param: [EndpointMap, CephRgwInternal, port]}
+ tripleo.ceph_rgw.firewall_rules:
+ '122 ceph rgw':
+ dport: {get_param: [EndpointMap, CephRgwInternal, port]}
+ step_config: |
+ include ::tripleo::profile::base::ceph::rgw
+ service_config_settings:
+ keystone:
+ ceph::rgw::keystone::auth::public_url: {get_param: [EndpointMap, CephRgwPublic, uri]}
+ ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]}
+ ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]}
+ ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
+ ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion}
+ ceph::rgw::keystone::auth::tenant: 'service'
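For reference, the list_join above yields a single rgw_frontends string whose bind address is still a hiera interpolation resolved at Puppet run time. Assuming an illustrative bind address of 172.16.1.5 and a CephRgwInternal port of 8080 (an assumption, not taken from this patch), the resolved value would look roughly like:

    ceph::profile::params::rgw_frontends: 'civetweb port=172.16.1.5:8080'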
diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml
index 5df0739f..3c624e3a 100644
--- a/puppet/services/cinder-api.yaml
+++ b/puppet/services/cinder-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
OpenStack Cinder API service configured with Puppet
@@ -31,9 +31,31 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ MonitoringSubscriptionCinderApi:
+ default: 'overcloud-cinder-api'
+ type: string
+ CinderApiLoggingSource:
+ type: json
+ default:
+ tag: openstack.cinder.api
+ path: /var/log/cinder/cinder-api.log
+ CinderWorkers:
+ type: string
+ description: Set the number of workers for cinder::wsgi::apache
+ default: '"%{::os_workers}"'
+
+conditions:
+ cinder_workers_zero: {equals: [{get_param: CinderWorkers}, 0]}
resources:
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
CinderBase:
type: ./cinder-base.yaml
properties:
@@ -46,26 +68,20 @@ outputs:
description: Role data for the Cinder API role.
value:
service_name: cinder_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionCinderApi}
+ logging_source: {get_param: CinderApiLoggingSource}
+ logging_groups:
+ - cinder
config_settings:
map_merge:
- get_attr: [CinderBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
- cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
cinder::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
cinder::keystone::authtoken::password: {get_param: CinderPassword}
cinder::keystone::authtoken::project_name: 'service'
- cinder::keystone::auth::tenant: 'service'
- cinder::keystone::auth::public_url: {get_param: [EndpointMap, CinderPublic, uri]}
- cinder::keystone::auth::internal_url: {get_param: [EndpointMap, CinderInternal, uri]}
- cinder::keystone::auth::admin_url: {get_param: [EndpointMap, CinderAdmin, uri]}
- cinder::keystone::auth::public_url_v2: {get_param: [EndpointMap, CinderV2Public, uri]}
- cinder::keystone::auth::internal_url_v2: {get_param: [EndpointMap, CinderV2Internal, uri]}
- cinder::keystone::auth::admin_url_v2: {get_param: [EndpointMap, CinderV2Admin, uri]}
- cinder::keystone::auth::public_url_v3: {get_param: [EndpointMap, CinderV3Public, uri]}
- cinder::keystone::auth::internal_url_v3: {get_param: [EndpointMap, CinderV3Internal, uri]}
- cinder::keystone::auth::admin_url_v3: {get_param: [EndpointMap, CinderV3Admin, uri]}
- cinder::keystone::auth::password: {get_param: CinderPassword}
- cinder::keystone::auth::region: {get_param: KeystoneRegion}
cinder::api::enable_proxy_headers_parsing: true
+
cinder::api::nova_catalog_info: 'compute:Compute Service:internalURL'
# TODO(emilien) move it to puppet-cinder
cinder::config:
@@ -84,5 +100,40 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
cinder::api::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]}
+ cinder::wsgi::apache::ssl: false
+ cinder::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]}
+ cinder::wsgi::apache::servername:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, CinderApiNetwork]}
+ -
+ if:
+ - cinder_workers_zero
+ - {}
+ - cinder::wsgi::apache::workers: {get_param: CinderWorkers}
step_config: |
include ::tripleo::profile::base::cinder::api
+ service_config_settings:
+ keystone:
+ cinder::keystone::auth::tenant: 'service'
+ cinder::keystone::auth::public_url: {get_param: [EndpointMap, CinderPublic, uri]}
+ cinder::keystone::auth::internal_url: {get_param: [EndpointMap, CinderInternal, uri]}
+ cinder::keystone::auth::admin_url: {get_param: [EndpointMap, CinderAdmin, uri]}
+ cinder::keystone::auth::public_url_v2: {get_param: [EndpointMap, CinderV2Public, uri]}
+ cinder::keystone::auth::internal_url_v2: {get_param: [EndpointMap, CinderV2Internal, uri]}
+ cinder::keystone::auth::admin_url_v2: {get_param: [EndpointMap, CinderV2Admin, uri]}
+ cinder::keystone::auth::public_url_v3: {get_param: [EndpointMap, CinderV3Public, uri]}
+ cinder::keystone::auth::internal_url_v3: {get_param: [EndpointMap, CinderV3Internal, uri]}
+ cinder::keystone::auth::admin_url_v3: {get_param: [EndpointMap, CinderV3Admin, uri]}
+ cinder::keystone::auth::password: {get_param: CinderPassword}
+ cinder::keystone::auth::region: {get_param: KeystoneRegion}
+ mysql:
+ cinder::db::mysql::password: {get_param: CinderPassword}
+ cinder::db::mysql::user: cinder
+ cinder::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ cinder::db::mysql::dbname: cinder
+ cinder::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/cinder-backup.yaml b/puppet/services/cinder-backup.yaml
index f92fdfdb..80795457 100644
--- a/puppet/services/cinder-backup.yaml
+++ b/puppet/services/cinder-backup.yaml
@@ -30,6 +30,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCinderBackup:
+ default: 'overcloud-cinder-backup'
+ type: string
resources:
@@ -45,6 +48,7 @@ outputs:
description: Role data for the Cinder Backup role.
value:
service_name: cinder_backup
+ monitoring_subscription: {get_param: MonitoringSubscriptionCinderBackup}
config_settings:
map_merge:
- get_attr: [CinderBase, role_data, config_settings]
diff --git a/puppet/services/cinder-base.yaml b/puppet/services/cinder-base.yaml
index 0db17189..59c9b844 100644
--- a/puppet/services/cinder-base.yaml
+++ b/puppet/services/cinder-base.yaml
@@ -60,20 +60,12 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/cinder'
- cinder::db::mysql::password: {get_param: CinderPassword}
cinder::debug: {get_param: Debug}
cinder::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
cinder::rabbit_userid: {get_param: RabbitUserName}
cinder::rabbit_password: {get_param: RabbitPassword}
cinder::rabbit_port: {get_param: RabbitClientPort}
- cinder::db::mysql::user: cinder
- cinder::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- cinder::db::mysql::dbname: cinder
- cinder::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
cinder::rabbit_heartbeat_timeout_threshold: 60
- cinder::host: hostgroup
cinder::cron::db_purge::destination: '/dev/null'
cinder::db::database_db_max_retries: -1
cinder::db::database_max_retries: -1
diff --git a/puppet/services/cinder-scheduler.yaml b/puppet/services/cinder-scheduler.yaml
index 129706b1..94c263ea 100644
--- a/puppet/services/cinder-scheduler.yaml
+++ b/puppet/services/cinder-scheduler.yaml
@@ -18,6 +18,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCinderScheduler:
+ default: 'overcloud-cinder-scheduler'
+ type: string
+ CinderSchedulerLoggingSource:
+ type: json
+ default:
+ tag: openstack.cinder.scheduler
+ path: /var/log/cinder/cinder-scheduler.log
resources:
@@ -33,6 +41,10 @@ outputs:
description: Role data for the Cinder Scheduler role.
value:
service_name: cinder_scheduler
+ monitoring_subscription: {get_param: MonitoringSubscriptionCinderScheduler}
+ logging_source: {get_param: CinderSchedulerLoggingSource}
+ logging_groups:
+ - cinder
config_settings:
map_merge:
- get_attr: [CinderBase, role_data, config_settings]
diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml
index de7e6bab..82e16f39 100644
--- a/puppet/services/cinder-volume.yaml
+++ b/puppet/services/cinder-volume.yaml
@@ -56,6 +56,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCinderVolume:
+ default: 'overcloud-cinder-volume'
+ type: string
+ CinderVolumeLoggingSource:
+ type: json
+ default:
+ tag: openstack.cinder.volume
+ path: /var/log/cinder/cinder-volume.log
resources:
@@ -71,6 +79,10 @@ outputs:
description: Role data for the Cinder Volume role.
value:
service_name: cinder_volume
+ monitoring_subscription: {get_param: MonitoringSubscriptionCinderVolume}
+ logging_source: {get_param: CinderVolumeLoggingSource}
+ logging_groups:
+ - cinder
config_settings:
map_merge:
- get_attr: [CinderBase, role_data, config_settings]
@@ -95,10 +107,6 @@ outputs:
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_address:
- str_replace:
- template: "NETWORK_uri"
- params:
- NETWORK: {get_param: [ServiceNetMap, CinderIscsiNetwork]}
+ tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_address: {get_param: [ServiceNetMap, CinderIscsiNetwork]}
step_config: |
include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/database/mongodb.yaml b/puppet/services/database/mongodb.yaml
index 36962a34..01daeafe 100644
--- a/puppet/services/database/mongodb.yaml
+++ b/puppet/services/database/mongodb.yaml
@@ -19,6 +19,15 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MongoDbLoggingSource:
+ type: json
+ description: Fluentd logging configuration for mongodb.
+ default:
+ tag: database.mongodb
+ path: /var/log/mongodb/mongodb.log
+ format: >-
+ /(?<time>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+\+\d{4})
+ (?<message>.*)$/
resources:
MongoDbBase:
@@ -33,6 +42,9 @@ outputs:
description: Service mongodb using composable services.
value:
service_name: mongodb
+ logging_groups:
+ - mongodb
+ logging_source: {get_param: MongoDbLoggingSource}
config_settings:
map_merge:
- get_attr: [MongoDbBase, role_data, config_settings]
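The MongoDbLoggingSource format regex is intended to split each mongodb log line into a timestamp and the remaining message. An illustrative line (not from a real deployment) and the captures it would produce:

    # 2016-10-14T13:45:01.123+0000 [initandlisten] MongoDB starting : pid=1234
    time: '2016-10-14T13:45:01.123+0000'
    message: '[initandlisten] MongoDB starting : pid=1234'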
diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml
index b0eea481..094a7c9f 100644
--- a/puppet/services/database/mysql.yaml
+++ b/puppet/services/database/mysql.yaml
@@ -74,5 +74,11 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
mysql_bind_host: {get_param: [ServiceNetMap, MysqlNetwork]}
+ tripleo::profile::base::database::mysql::bind_address:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
step_config: |
include ::tripleo::profile::base::database::mysql
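The new bind_address resolves through a per-network FQDN fact. Assuming MysqlNetwork maps to internal_api (the usual default), the rendered hieradata would look roughly like:

    tripleo::profile::base::database::mysql::bind_address: "%{::fqdn_internal_api}"
    # hiera interpolates this at run time from the fqdn_internal_api fact,
    # e.g. overcloud-controller-0.internalapi.localdomain (illustrative)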
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index d2376af3..33abdbf9 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -18,125 +18,92 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- CephClientUserName:
- default: openstack
- type: string
Debug:
default: ''
description: Set to True to enable debugging on all services.
type: string
- GlanceNotifierStrategy:
- description: Strategy to use for Glance notification queue
- type: string
- default: noop
- GlanceLogFile:
- description: The filepath of the file to use for logging messages from Glance.
- type: string
- default: ''
GlancePassword:
description: The password for the glance service and db account, used by the glance services.
type: string
hidden: true
- GlanceBackend:
- default: swift
- description: The short name of the Glance backend to use. Should be one
- of swift, rbd, or file
- type: string
- constraints:
- - allowed_values: ['swift', 'file', 'rbd']
GlanceWorkers:
- default: 0
- description: Number of workers for Glance service.
- type: number
- GlanceRbdPoolName:
- default: images
- type: string
- RabbitPassword:
- description: The password for RabbitMQ
- type: string
- hidden: true
- RabbitUserName:
- default: guest
- description: The username for RabbitMQ
- type: string
- RabbitClientPort:
- default: 5672
- description: Set rabbit subscriber port, change this if using SSL
- type: number
- RabbitClientUseSSL:
- default: false
- description: >
- Rabbit client subscriber parameter to specify
- an SSL connection to the RabbitMQ host.
+ default: ''
+ description: |
+ Number of API worker processes for Glance. If left unset (empty string),
+ the option is left out of the configuration and a system-dependent
+ default is used instead (typically the number of processors). Note
+ that on hosts with many CPUs this results in a large number of
+ processes and excess memory consumption, so an explicit non-default
+ value is recommended on such systems.
type: string
- KeystoneRegion:
+ MonitoringSubscriptionGlanceApi:
+ default: 'overcloud-glance-api'
type: string
- default: 'regionOne'
- description: Keystone region for endpoint
+ GlanceApiLoggingSource:
+ type: json
+ default:
+ tag: openstack.glance.api
+ path: /var/log/glance/api.log
+
+resources:
+ GlanceBase:
+ type: ./glance-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
description: Role data for the Glance API role.
value:
service_name: glance_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionGlanceApi}
+ logging_source: {get_param: GlanceApiLoggingSource}
+ logging_groups:
+ - glance
config_settings:
- glance::api::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://glance:'
- - {get_param: GlancePassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/glance'
- glance::api::bind_port: {get_param: [EndpointMap, GlanceInternal, port]}
- glance::api::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- glance::api::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
- glance::api::registry_host:
- str_replace:
- template: "'REGISTRY_HOST'"
- params:
- REGISTRY_HOST: {get_param: [EndpointMap, GlanceRegistryInternal, host]}
- glance::api::keystone_password: {get_param: GlancePassword}
- glance::api::enable_proxy_headers_parsing: true
- glance::api::debug: {get_param: Debug}
- glance::api::workers: {get_param: GlanceWorkers}
- glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
- glance_log_file: {get_param: GlanceLogFile}
- glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
- glance::backend::swift::swift_store_user: service:glance
- glance::backend::swift::swift_store_key: {get_param: GlancePassword}
- glance::backend::swift::swift_store_create_container_on_put: true
- glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
- glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
- glance_backend: {get_param: GlanceBackend}
- glance::db::mysql::password: {get_param: GlancePassword}
- glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
- glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
- glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
- glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
- glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
- glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
- glance::keystone::auth::password: {get_param: GlancePassword }
- glance::keystone::auth::region: {get_param: KeystoneRegion}
- glance::registry::db::database_db_max_retries: -1
- glance::registry::db::database_max_retries: -1
- tripleo.glance_api.firewall_rules:
- '112 glance_api':
- dport:
- - 9292
- - 13292
- glance::keystone::auth::tenant: 'service'
- glance::api::keystone_tenant: 'service'
- glance::api::pipeline: 'keystone'
- glance::api::show_image_direct_url: true
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
- # internal_api -> IP
- # internal_api_uri -> [IP]
- # internal_api_subnet - > IP/CIDR
- glance::api::bind_host: {get_param: [ServiceNetMap, GlanceApiNetwork]}
+ map_merge:
+ - get_attr: [GlanceBase, role_data, config_settings]
+ - glance::api::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://glance:'
+ - {get_param: GlancePassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/glance'
+ glance::api::bind_port: {get_param: [EndpointMap, GlanceInternal, port]}
+ glance::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ glance::api::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ glance::api::registry_host:
+ str_replace:
+ template: "'REGISTRY_HOST'"
+ params:
+ REGISTRY_HOST: {get_param: [EndpointMap, GlanceRegistryInternal, host]}
+ glance::api::registry_client_protocol: {get_param: [EndpointMap, GlanceRegistryInternal, protocol] }
+ glance::api::authtoken::password: {get_param: GlancePassword}
+ glance::api::enable_proxy_headers_parsing: true
+ glance::api::debug: {get_param: Debug}
+ glance::api::workers: {get_param: GlanceWorkers}
+ tripleo.glance_api.firewall_rules:
+ '112 glance_api':
+ dport:
+ - 9292
+ - 13292
+ glance::api::authtoken::project_name: 'service'
+ glance::api::pipeline: 'keystone'
+ glance::api::show_image_direct_url: true
+ # NOTE: bind IP is found in Heat replacing the network name with the
+ # local node IP for the given network; replacement examples
+ # (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet - > IP/CIDR
+ glance::api::bind_host: {get_param: [ServiceNetMap, GlanceApiNetwork]}
step_config: |
include ::tripleo::profile::base::glance::api
+ service_config_settings:
+ get_attr: [GlanceBase, role_data, service_config_settings]
diff --git a/puppet/services/glance-base.yaml b/puppet/services/glance-base.yaml
new file mode 100644
index 00000000..3294fc0f
--- /dev/null
+++ b/puppet/services/glance-base.yaml
@@ -0,0 +1,110 @@
+heat_template_version: 2016-10-14
+
+description: >
+ OpenStack Glance Common settings with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ CephClientUserName:
+ default: openstack
+ type: string
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ GlanceNotifierStrategy:
+ description: Strategy to use for Glance notification queue
+ type: string
+ default: noop
+ GlanceLogFile:
+ description: The filepath of the file to use for logging messages from Glance.
+ type: string
+ default: ''
+ GlancePassword:
+ description: The password for the glance service and db account, used by the glance services.
+ type: string
+ hidden: true
+ GlanceBackend:
+ default: swift
+ description: The short name of the Glance backend to use. Should be one
+ of swift, rbd, or file
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd']
+ GlanceRbdPoolName:
+ default: images
+ type: string
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+
+outputs:
+ role_data:
+ description: Role data for the Glance common role.
+ value:
+ service_name: glance_base
+ config_settings:
+ glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
+ glance_log_file: {get_param: GlanceLogFile}
+ glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ glance::backend::swift::swift_store_user: service:glance
+ glance::backend::swift::swift_store_key: {get_param: GlancePassword}
+ glance::backend::swift::swift_store_create_container_on_put: true
+ glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+ glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
+ glance_backend: {get_param: GlanceBackend}
+ glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
+ glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
+ glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
+ glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ glance::notify::rabbitmq::notification_driver: messagingv2
+ glance::registry::db::database_db_max_retries: -1
+ glance::registry::db::database_max_retries: -1
+ service_config_settings:
+ keystone:
+ glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
+ glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
+ glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
+ glance::keystone::auth::password: {get_param: GlancePassword }
+ glance::keystone::auth::region: {get_param: KeystoneRegion}
+ glance::keystone::auth::tenant: 'service'
+ mysql:
+ glance::db::mysql::password: {get_param: GlancePassword}
+ glance::db::mysql::user: glance
+ glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ glance::db::mysql::dbname: glance
+ glance::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/glance-registry.yaml b/puppet/services/glance-registry.yaml
index 06ef9379..c45582d4 100644
--- a/puppet/services/glance-registry.yaml
+++ b/puppet/services/glance-registry.yaml
@@ -27,51 +27,74 @@ parameters:
type: string
hidden: true
GlanceWorkers:
- default: 0
- description: Number of workers for Glance service.
- type: number
+ default: ''
+ description: |
+ Number of worker processes for the Glance registry. If left unset
+ (empty string), the option is left out of the configuration and a
+ system-dependent default is used instead (typically the number of
+ processors). Note that on hosts with many CPUs this results in a
+ large number of processes and excess memory consumption, so an
+ explicit non-default value is recommended on such systems.
+ type: string
+ MonitoringSubscriptionGlanceRegistry:
+ default: 'overcloud-glance-registry'
+ type: string
+ GlanceRegistryLoggingSource:
+ type: json
+ default:
+ tag: openstack.glance.registry
+ path: /var/log/glance/registry.log
+
+resources:
+ GlanceBase:
+ type: ./glance-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
description: Role data for the Glance Registry role.
value:
service_name: glance_registry
+ monitoring_subscription: {get_param: MonitoringSubscriptionGlanceRegistry}
+ logging_source: {get_param: GlanceRegistryLoggingSource}
+ logging_groups:
+ - glance
config_settings:
- glance::registry::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://glance:'
- - {get_param: GlancePassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/glance'
- glance::registry::keystone_password: {get_param: GlancePassword}
- glance::registry::keystone_tenant: 'service'
- glance::registry::pipeline: 'keystone'
- glance::registry::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- glance::registry::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
- glance::registry::debug: {get_param: Debug}
- glance::registry::workers: {get_param: GlanceWorkers}
- glance::db::mysql::user: glance
- glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- glance::db::mysql::dbname: glance
- glance::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
- glance::registry::db::database_db_max_retries: -1
- glance::registry::db::database_max_retries: -1
+ map_merge:
+ - get_attr: [GlanceBase, role_data, config_settings]
- tripleo.glance_registry.firewall_rules:
- '112 glance_registry':
- dport:
- - 9191
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
- # internal_api -> IP
- # internal_api_uri -> [IP]
- # internal_api_subnet - > IP/CIDR
- glance::registry::bind_host: {get_param: [ServiceNetMap, GlanceRegistryNetwork]}
+ - glance::registry::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://glance:'
+ - {get_param: GlancePassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/glance'
+ glance::registry::authtoken::password: {get_param: GlancePassword}
+ glance::registry::authtoken::project_name: 'service'
+ glance::registry::pipeline: 'keystone'
+ glance::registry::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ glance::registry::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ glance::registry::debug: {get_param: Debug}
+ glance::registry::workers: {get_param: GlanceWorkers}
+ tripleo.glance_registry.firewall_rules:
+ '112 glance_registry':
+ dport:
+ - 9191
+ # NOTE: bind IP is found in Heat replacing the network name with the
+ # local node IP for the given network; replacement examples
+ # (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet - > IP/CIDR
+ glance::registry::bind_host: {get_param: [ServiceNetMap, GlanceRegistryNetwork]}
step_config: |
include ::tripleo::profile::base::glance::registry
+ service_config_settings:
+ get_attr: [GlanceBase, role_data, service_config_settings]
diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml
index ec42f3f5..e3397769 100644
--- a/puppet/services/gnocchi-api.yaml
+++ b/puppet/services/gnocchi-api.yaml
@@ -33,8 +33,17 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ MonitoringSubscriptionGnocchiApi:
+ default: 'overcloud-gnocchi-api'
+ type: string
+ GnocchiApiLoggingSource:
+ type: json
+ default:
+ tag: openstack.gnocchi.api
+ path: /var/log/gnocchi/app.log
resources:
+
GnocchiServiceBase:
type: ./gnocchi-base.yaml
properties:
@@ -42,13 +51,25 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
outputs:
role_data:
description: Role data for the Gnocchi role.
value:
service_name: gnocchi_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiApi}
+ logging_source: {get_param: GnocchiApiLoggingSource}
+ logging_groups:
+ - gnocchi
config_settings:
map_merge:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
- get_attr: [GnocchiServiceBase, role_data, config_settings]
- tripleo.gnocchi_api.firewall_rules:
'129 gnocchi-api':
@@ -56,18 +77,19 @@ outputs:
- 8041
- 13041
gnocchi::api::enabled: true
+ gnocchi::api::enable_proxy_headers_parsing: true
gnocchi::api::service_name: 'httpd'
- gnocchi::keystone::auth::admin_url: { get_param: [ EndpointMap, GnocchiAdmin, uri ] }
- gnocchi::keystone::auth::internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]}
- gnocchi::keystone::auth::password: {get_param: GnocchiPassword}
- gnocchi::keystone::auth::public_url: { get_param: [ EndpointMap, GnocchiPublic, uri ] }
- gnocchi::keystone::auth::region: {get_param: KeystoneRegion}
- gnocchi::keystone::auth::tenant: 'service'
gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword}
gnocchi::keystone::authtoken::project_name: 'service'
gnocchi::wsgi::apache::ssl: false
+ gnocchi::wsgi::apache::servername:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
tripleo::profile::base::gnocchi::api::gnocchi_backend: {get_param: GnocchiBackend}
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
@@ -83,3 +105,19 @@ outputs:
gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri]}
step_config: |
include ::tripleo::profile::base::gnocchi::api
+ service_config_settings:
+ keystone:
+ gnocchi::keystone::auth::admin_url: { get_param: [ EndpointMap, GnocchiAdmin, uri ] }
+ gnocchi::keystone::auth::internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]}
+ gnocchi::keystone::auth::password: {get_param: GnocchiPassword}
+ gnocchi::keystone::auth::public_url: { get_param: [ EndpointMap, GnocchiPublic, uri ] }
+ gnocchi::keystone::auth::region: {get_param: KeystoneRegion}
+ gnocchi::keystone::auth::tenant: 'service'
+ mysql:
+ gnocchi::db::mysql::password: {get_param: GnocchiPassword}
+ gnocchi::db::mysql::user: gnocchi
+ gnocchi::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ gnocchi::db::mysql::dbname: gnocchi
+ gnocchi::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml
index 9f114ac4..556baae0 100644
--- a/puppet/services/gnocchi-base.yaml
+++ b/puppet/services/gnocchi-base.yaml
@@ -56,6 +56,7 @@ outputs:
service_name: gnocchi_base
config_settings:
#Gnocchi engine
+ gnocchi_redis_password: {get_param: RedisPassword}
gnocchi::debug: {get_param: Debug}
gnocchi::db::database_connection:
list_join:
@@ -66,16 +67,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/gnocchi'
- gnocchi::db::mysql::password: {get_param: GnocchiPassword}
gnocchi::db::sync::extra_opts: '--skip-storage --create-legacy-resource-types'
- gnocchi::storage::coordination_url:
- list_join:
- - ''
- - - 'redis://:'
- - {get_param: RedisPassword}
- - '@'
- - "%{hiera('redis_vip')}"
- - ':6379/'
gnocchi::storage::swift::swift_user: 'service:gnocchi'
gnocchi::storage::swift::swift_auth_version: 2
gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword}
@@ -94,9 +86,3 @@ outputs:
gnocchi::statsd::project_id: '6c38cd8d-099a-4cb2-aecf-17be688e8616'
gnocchi::statsd::flush_delay: 10
gnocchi::statsd::archive_policy_name: 'low'
- gnocchi::db::mysql::user: gnocchi
- gnocchi::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- gnocchi::db::mysql::dbname: gnocchi
- gnocchi::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/gnocchi-metricd.yaml b/puppet/services/gnocchi-metricd.yaml
index 205d0552..1400bc98 100644
--- a/puppet/services/gnocchi-metricd.yaml
+++ b/puppet/services/gnocchi-metricd.yaml
@@ -18,6 +18,13 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionGnocchiMetricd:
+ default: 'overcloud-gnocchi-metricd'
+ type: string
+ GnocchiMetricdWorkers:
+ default: ''
+ description: Number of workers for Gnocchi MetricD
+ type: string
resources:
GnocchiServiceBase:
@@ -32,8 +39,10 @@ outputs:
description: Role data for the Gnocchi role.
value:
service_name: gnocchi_metricd
+ monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiMetricd}
config_settings:
map_merge:
- get_attr: [GnocchiServiceBase, role_data, config_settings]
+ - gnocchi::metricd::workers: {get_param: GnocchiMetricdWorkers}
step_config: |
include ::tripleo::profile::base::gnocchi::metricd
diff --git a/puppet/services/gnocchi-statsd.yaml b/puppet/services/gnocchi-statsd.yaml
index 018ad2b1..04339f46 100644
--- a/puppet/services/gnocchi-statsd.yaml
+++ b/puppet/services/gnocchi-statsd.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionGnocchiStatsd:
+ default: 'overcloud-gnocchi-statsd'
+ type: string
resources:
GnocchiServiceBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Gnocchi role.
value:
service_name: gnocchi_statsd
+ monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiStatsd}
config_settings:
map_merge:
- get_attr: [GnocchiServiceBase, role_data, config_settings]
diff --git a/puppet/services/haproxy-internal-tls-certmonger.yaml b/puppet/services/haproxy-internal-tls-certmonger.yaml
new file mode 100644
index 00000000..c6d53542
--- /dev/null
+++ b/puppet/services/haproxy-internal-tls-certmonger.yaml
@@ -0,0 +1,51 @@
+heat_template_version: 2016-10-14
+
+description: >
+ HAProxy deployment with TLS enabled, powered by certmonger
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the HAProxy internal TLS via certmonger role.
+ value:
+ service_name: haproxy_internal_tls_certmonger
+ config_settings:
+ generate_service_certificates: true
+ tripleo::haproxy::use_internal_certificates: true
+ certificates_specs:
+ map_merge:
+ repeat:
+ template:
+ haproxy-NETWORK:
+ service_pem: '/etc/pki/tls/certs/overcloud-haproxy-NETWORK.pem'
+ service_certificate: '/etc/pki/tls/certs/overcloud-haproxy-NETWORK.crt'
+ service_key: '/etc/pki/tls/private/overcloud-haproxy-NETWORK.key'
+ hostname: "%{hiera('cloud_name_NETWORK')}"
+ postsave_cmd: "" # TODO
+ principal: "haproxy/%{hiera('cloud_name_NETWORK')}"
+ for_each:
+ NETWORK:
+ # NOTE(jaosorior) Get unique network names to create
+ # certificates for those. We skip the tenant network since
+ # we don't need a certificate for that, and the external
+ # network will be handled in another template.
+ yaql:
+ expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
+ data:
+ map:
+ get_param: ServiceNetMap
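The repeat/for_each plus the yaql expression generate one certificate spec per distinct ServiceNetMap network, skipping external and tenant. For a map whose distinct values are, say, internal_api and storage (an assumption for illustration), the expansion would be roughly:

    certificates_specs:
      haproxy-internal_api:
        service_pem: '/etc/pki/tls/certs/overcloud-haproxy-internal_api.pem'
        service_certificate: '/etc/pki/tls/certs/overcloud-haproxy-internal_api.crt'
        service_key: '/etc/pki/tls/private/overcloud-haproxy-internal_api.key'
        hostname: "%{hiera('cloud_name_internal_api')}"
        postsave_cmd: ""
        principal: "haproxy/%{hiera('cloud_name_internal_api')}"
      haproxy-storage:
        service_pem: '/etc/pki/tls/certs/overcloud-haproxy-storage.pem'
        service_certificate: '/etc/pki/tls/certs/overcloud-haproxy-storage.crt'
        service_key: '/etc/pki/tls/private/overcloud-haproxy-storage.key'
        hostname: "%{hiera('cloud_name_storage')}"
        postsave_cmd: ""
        principal: "haproxy/%{hiera('cloud_name_storage')}"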
diff --git a/puppet/services/haproxy-public-tls-certmonger.yaml b/puppet/services/haproxy-public-tls-certmonger.yaml
new file mode 100644
index 00000000..1551d16a
--- /dev/null
+++ b/puppet/services/haproxy-public-tls-certmonger.yaml
@@ -0,0 +1,37 @@
+heat_template_version: 2016-10-14
+
+description: >
+ HAProxy deployment with TLS enabled, powered by certmonger
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the HAProxy public TLS via certmonger role.
+ value:
+ service_name: haproxy_public_tls_certmonger
+ config_settings:
+ generate_service_certificates: true
+ tripleo::haproxy::service_certificate: '/etc/pki/tls/certs/overcloud-haproxy-external.pem'
+ certificates_specs:
+ haproxy-external:
+ service_pem: '/etc/pki/tls/certs/overcloud-haproxy-external.pem'
+ service_certificate: '/etc/pki/tls/certs/overcloud-haproxy-external.crt'
+ service_key: '/etc/pki/tls/private/overcloud-haproxy-external.key'
+ hostname: "%{hiera('cloud_name_external')}"
+ postsave_cmd: "" # TODO
+ principal: "haproxy/%{hiera('cloud_name_external')}"
diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml
index c0e1c113..0813cb7e 100644
--- a/puppet/services/haproxy.yaml
+++ b/puppet/services/haproxy.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
HAproxy service configured with Puppet
@@ -44,43 +44,48 @@ parameters:
Specifies the interface where the public-facing virtual ip will be assigned.
This should be int_public when a VLAN is being used.
type: string
+ MonitoringSubscriptionHaproxy:
+ default: 'overcloud-haproxy'
+ type: string
+
+resources:
+
+ HAProxyPublicTLS:
+ type: OS::TripleO::Services::HAProxyPublicTLS
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+ HAProxyInternalTLS:
+ type: OS::TripleO::Services::HAProxyInternalTLS
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
description: Role data for the HAproxy role.
value:
service_name: haproxy
+ monitoring_subscription: {get_param: MonitoringSubscriptionHaproxy}
config_settings:
- tripleo.haproxy.firewall_rules:
- '107 haproxy stats':
- dport: 1993
- # TODO(emilien) make it composable to find which services are actually running
- tripleo::haproxy::keystone_admin: true
- tripleo::haproxy::keystone_public: true
- tripleo::haproxy::neutron: true
- tripleo::haproxy::cinder: true
- tripleo::haproxy::glance_api: true
- tripleo::haproxy::glance_registry: true
- tripleo::haproxy::nova_osapi: true
- tripleo::haproxy::nova_metadata: true
- tripleo::haproxy::nova_novncproxy: true
- tripleo::haproxy::mysql: true
- tripleo::haproxy::redis: true
- tripleo::haproxy::sahara: true
- tripleo::haproxy::swift_proxy_server: true
- tripleo::haproxy::ceilometer: true
- tripleo::haproxy::aodh: true
- tripleo::haproxy::gnocchi: true
- tripleo::haproxy::heat_api: true
- tripleo::haproxy::heat_cloudwatch: true
- tripleo::haproxy::heat_cfn: true
- tripleo::haproxy::horizon: true
- tripleo::haproxy::ironic: true
- tripleo::haproxy::haproxy_log_address: {get_param: HAProxySyslogAddress}
- tripleo::haproxy::haproxy_stats_user: {get_param: HAProxyStatsUser}
- tripleo::haproxy::haproxy_stats_password: {get_param: HAProxyStatsPassword}
- tripleo::haproxy::redis_password: {get_param: RedisPassword}
- tripleo::haproxy::control_virtual_interface: {get_param: ControlVirtualInterface}
- tripleo::haproxy::public_virtual_interface: {get_param: PublicVirtualInterface}
+ map_merge:
+ - get_attr: [HAProxyPublicTLS, role_data, config_settings]
+ - get_attr: [HAProxyInternalTLS, role_data, config_settings]
+ - tripleo.haproxy.firewall_rules:
+ '107 haproxy stats':
+ dport: 1993
+ tripleo::haproxy::haproxy_log_address: {get_param: HAProxySyslogAddress}
+ tripleo::haproxy::haproxy_stats_user: {get_param: HAProxyStatsUser}
+ tripleo::haproxy::haproxy_stats_password: {get_param: HAProxyStatsPassword}
+ tripleo::haproxy::redis_password: {get_param: RedisPassword}
+ tripleo::haproxy::control_virtual_interface: {get_param: ControlVirtualInterface}
+ tripleo::haproxy::public_virtual_interface: {get_param: PublicVirtualInterface}
+ tripleo::profile::base::haproxy::certificates_specs:
+ map_merge:
+ - get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
+ - get_attr: [HAProxyInternalTLS, role_data, certificates_specs]
step_config: |
include ::tripleo::profile::base::haproxy
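HAProxyPublicTLS and HAProxyInternalTLS are resolved through the resource registry, so TLS remains opt-in: presumably they map to OS::Heat::None by default and an environment points them at the certmonger templates above. A sketch of such an environment (paths and mapping are assumptions, not part of this patch):

    resource_registry:
      OS::TripleO::Services::HAProxyInternalTLS: ../puppet/services/haproxy-internal-tls-certmonger.yaml
      OS::TripleO::Services::HAProxyPublicTLS: ../puppet/services/haproxy-public-tls-certmonger.yaml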
diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml
index a15ea32d..1a86ec71 100644
--- a/puppet/services/heat-api-cfn.yaml
+++ b/puppet/services/heat-api-cfn.yaml
@@ -30,6 +30,14 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ MonitoringSubscriptionHeatApiCnf:
+ default: 'overcloud-heat-api-cfn'
+ type: string
+ HeatApiCfnLoggingSource:
+ type: json
+ default:
+ tag: openstack.heat.api.cfn
+ path: /var/log/heat/heat-api-cfn.log
resources:
HeatBase:
@@ -44,16 +52,14 @@ outputs:
description: Role data for the Heat CloudFormation API role.
value:
service_name: heat_api_cfn
+ monitoring_subscription: {get_param: MonitoringSubscriptionHeatApiCnf}
+ logging_source: {get_param: HeatApiCfnLoggingSource}
+ logging_groups:
+ - heat
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- heat::api_cfn::workers: {get_param: HeatWorkers}
- heat::keystone::auth_cfn::tenant: 'service'
- heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
- heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
- heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
- heat::keystone::auth_cfn::password: {get_param: HeatPassword}
- heat::keystone::auth::region: {get_param: KeystoneRegion}
tripleo.heat_api_cfn.firewall_rules:
'125 heat_cfn':
dport:
@@ -68,3 +74,11 @@ outputs:
heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
step_config: |
include ::tripleo::profile::base::heat::api_cfn
+ service_config_settings:
+ keystone:
+ heat::keystone::auth_cfn::tenant: 'service'
+ heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
+ heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
+ heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
+ heat::keystone::auth_cfn::password: {get_param: HeatPassword}
+ heat::keystone::auth_cfn::region: {get_param: KeystoneRegion}
diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml
index 6d645ee7..6dfeaaf3 100644
--- a/puppet/services/heat-api-cloudwatch.yaml
+++ b/puppet/services/heat-api-cloudwatch.yaml
@@ -22,6 +22,14 @@ parameters:
default: 0
description: Number of workers for Heat service.
type: number
+ MonitoringSubscriptionHeatApiCloudwatch:
+ default: 'overcloud-heat-api-cloudwatch'
+ type: string
+ HeatApiCloudwatchLoggingSource:
+ type: json
+ default:
+ tag: openstack.heat.api.cloudwatch
+ path: /var/log/heat/heat-api-cloudwatch.log
resources:
HeatBase:
@@ -36,6 +44,10 @@ outputs:
description: Role data for the Heat Cloudwatch API role.
value:
service_name: heat_api_cloudwatch
+ monitoring_subscription: {get_param: MonitoringSubscriptionHeatApiCloudwatch}
+ logging_source: {get_param: HeatApiCloudwatchLoggingSource}
+ logging_groups:
+ - heat
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml
index ec3b0e37..2ea96fc0 100644
--- a/puppet/services/heat-api.yaml
+++ b/puppet/services/heat-api.yaml
@@ -30,6 +30,14 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ MonitoringSubscriptionHeatApi:
+ default: 'overcloud-heat-api'
+ type: string
+ HeatApiLoggingSource:
+ type: json
+ default:
+ tag: openstack.heat.api
+ path: /var/log/heat/heat-api.log
resources:
HeatBase:
@@ -44,16 +52,14 @@ outputs:
description: Role data for the Heat API role.
value:
service_name: heat_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionHeatApi}
+ logging_source: {get_param: HeatApiLoggingSource}
+ logging_groups:
+ - heat
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- heat::api::workers: {get_param: HeatWorkers}
- heat::keystone::auth::tenant: 'service'
- heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
- heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
- heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
- heat::keystone::auth::password: {get_param: HeatPassword}
- heat::keystone::auth::region: {get_param: KeystoneRegion}
tripleo.heat_api.firewall_rules:
'125 heat_api':
dport:
@@ -68,3 +74,11 @@ outputs:
heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
step_config: |
include ::tripleo::profile::base::heat::api
+ service_config_settings:
+ keystone:
+ heat::keystone::auth::tenant: 'service'
+ heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
+ heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
+ heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
+ heat::keystone::auth::password: {get_param: HeatPassword}
+ heat::keystone::auth::region: {get_param: KeystoneRegion}
diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml
index 226d2a51..7eb58f56 100644
--- a/puppet/services/heat-base.yaml
+++ b/puppet/services/heat-base.yaml
@@ -32,6 +32,10 @@ parameters:
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
type: json
+ HeatPassword:
+ description: The password for the Heat service and db account, used by the Heat services.
+ type: string
+ hidden: true
DefaultPasswords:
default: {}
type: json
@@ -60,11 +64,13 @@ outputs:
key: 'context_is_admin'
value: 'role:admin'
heat::rabbit_heartbeat_timeout_threshold: 60
- heat::keystone_tenant: 'service'
+ heat::keystone::authtoken::project_name: 'service'
+ heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ heat::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ heat::keystone::authtoken::password: {get_param: HeatPassword}
heat::keystone::domain::domain_name: 'heat_stack'
heat::keystone::domain::domain_admin: 'heat_stack_domain_admin'
heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost'
- heat::auth_plugin: 'password'
heat::cron::purge_deleted::age: 30
heat::cron::purge_deleted::age_type: 'days'
heat::cron::purge_deleted::maxdelay: 3600
diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml
index b230ec1d..20415eef 100644
--- a/puppet/services/heat-engine.yaml
+++ b/puppet/services/heat-engine.yaml
@@ -40,6 +40,14 @@ parameters:
type: string
hidden: true
default: ''
+ MonitoringSubscriptionHeatEngine:
+ default: 'overcloud-heat-engine'
+ type: string
+ HeatEngineLoggingSource:
+ type: json
+ default:
+ tag: openstack.heat.engine
+ path: /var/log/heat/heat-engine.log
resources:
HeatBase:
@@ -54,6 +62,10 @@ outputs:
description: Role data for the Heat Engine role.
value:
service_name: heat_engine
+ monitoring_subscription: {get_param: MonitoringSubscriptionHeatEngine}
+ logging_source: {get_param: HeatEngineLoggingSource}
+ logging_groups:
+ - heat
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
@@ -71,16 +83,7 @@ outputs:
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/heat'
heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
- heat::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- heat::keystone_password: {get_param: HeatPassword}
- heat::db::mysql::password: {get_param: HeatPassword}
heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
- heat::db::mysql::user: heat
- heat::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- heat::db::mysql::dbname: heat
- heat::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
heat::engine::auth_encryption_key:
yaql:
expression: $.data.passwords.where($ != '').first()
@@ -90,3 +93,16 @@ outputs:
- {get_param: [DefaultPasswords, heat_auth_encryption_key]}
step_config: |
include ::tripleo::profile::base::heat::engine
+
+ service_config_settings:
+ mysql:
+ heat::db::mysql::password: {get_param: HeatPassword}
+ heat::db::mysql::user: heat
+ heat::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ heat::db::mysql::dbname: heat
+ heat::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+ keystone:
+ # This is needed because the keystone profile handles creating the domain
+ heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
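The yaql block above implements a fallback: it walks the passwords list in order and takes the first non-empty string, so an operator-supplied key wins over the one generated into DefaultPasswords. A generic sketch of the same pattern, with hypothetical parameter names:

    example::auth_encryption_key:                        # hypothetical hiera key
      yaql:
        expression: $.data.passwords.where($ != '').first()
        data:
          passwords:
            - {get_param: ExampleEncryptionKey}                    # may be ''
            - {get_param: [DefaultPasswords, example_encryption_key]}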
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index bdb171ae..1e08415c 100644
--- a/puppet/services/horizon.yaml
+++ b/puppet/services/horizon.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
Horizon service configured with Puppet
@@ -10,6 +10,10 @@ parameters:
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
type: json
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
DefaultPasswords:
default: {}
type: json
@@ -23,17 +27,30 @@ parameters:
description: A list of IP/Hostname for the server Horizon is running on.
Used for header checks.
type: comma_delimited_list
+ HorizonSecret:
+ description: Secret key for Django
+ type: string
+ hidden: true
+ default: ''
NeutronMechanismDrivers:
default: 'openvswitch'
description: |
The mechanism drivers for the Neutron tenant network.
type: comma_delimited_list
+ MemcachedIPv6:
+ default: false
+ description: Enable IPv6 features in Memcached.
+ type: boolean
+ MonitoringSubscriptionHorizon:
+ default: 'overcloud-horizon'
+ type: string
outputs:
role_data:
description: Role data for the Horizon role.
value:
service_name: horizon
+ monitoring_subscription: {get_param: MonitoringSubscriptionHorizon}
config_settings:
horizon::allowed_hosts: {get_param: HorizonAllowedHosts}
neutron::plugins::ml2::mechanism_drivers:
@@ -52,5 +69,16 @@ outputs:
add_listen: false
priority: 10
access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
+ horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]}
+ horizon::django_debug: {get_param: Debug}
+ horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ horizon::secret_key:
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: HorizonSecret}
+ - {get_param: [DefaultPasswords, horizon_secret]}
+ memcached_ipv6: {get_param: MemcachedIPv6}
step_config: |
include ::tripleo::profile::base::horizon
diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml
index d0516e1b..c8a2e833 100644
--- a/puppet/services/ironic-api.yaml
+++ b/puppet/services/ironic-api.yaml
@@ -22,6 +22,9 @@ parameters:
description: The password for the Ironic service and db account, used by the Ironic services
type: string
hidden: true
+ MonitoringSubscriptionIronicApi:
+ default: 'overcloud-ironic-api'
+ type: string
resources:
IronicBase:
@@ -36,30 +39,25 @@ outputs:
description: Role data for the Ironic API role.
value:
service_name: ironic_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionIronicApi}
config_settings:
map_merge:
- get_attr: [IronicBase, role_data, config_settings]
- # NOTE(dtantsur): the my_ip parameter is heavily overloaded in
- # ironic. It's used as a default value for e.g. TFTP server IP,
- # glance and neutron endpoints, virtual console IP. We override
- # the TFTP server IP in ironic-conductor.yaml as it should not be
- # the VIP, but rather a real IP of the controller.
- - ironic::my_ip: {get_param: [EndpointMap, MysqlInternal, host]}
- ironic::api::authtoken::password: {get_param: IronicPassword}
+ - ironic::api::authtoken::password: {get_param: IronicPassword}
ironic::api::authtoken::project_name: 'service'
ironic::api::authtoken::username: 'ironic'
ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
ironic::api::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- ironic::api::host_ip: {get_input: ironic_api_network}
+ # NOTE: bind IP is found in Heat replacing the network name with the
+ # local node IP for the given network; replacement examples
+ # (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet -> IP/CIDR
+ ironic::api::host_ip: {get_param: [ServiceNetMap, IronicApiNetwork]}
ironic::api::port: {get_param: [EndpointMap, IronicInternal, port]}
# This is used to build links in responses
ironic::api::public_endpoint: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
- ironic::keystone::auth::admin_url: {get_param: [EndpointMap, IronicAdmin, uri]}
- ironic::keystone::auth::internal_url: {get_param: [EndpointMap, IronicInternal, uri]}
- ironic::keystone::auth::public_url: {get_param: [EndpointMap, IronicPublic, uri]}
- ironic::keystone::auth::auth_name: 'ironic'
- ironic::keystone::auth::password: {get_param: IronicPassword }
- ironic::keystone::auth::tenant: 'service'
tripleo.ironic_api.firewall_rules:
'133 ironic api':
dport:
@@ -67,3 +65,19 @@ outputs:
- 13385
step_config: |
include ::tripleo::profile::base::ironic::api
+ service_config_settings:
+ keystone:
+ ironic::keystone::auth::admin_url: {get_param: [EndpointMap, IronicAdmin, uri_no_suffix]}
+ ironic::keystone::auth::internal_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
+ ironic::keystone::auth::public_url: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
+ ironic::keystone::auth::auth_name: 'ironic'
+ ironic::keystone::auth::password: {get_param: IronicPassword }
+ ironic::keystone::auth::tenant: 'service'
+ mysql:
+ ironic::db::mysql::password: {get_param: IronicPassword}
+ ironic::db::mysql::user: ironic
+ ironic::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ ironic::db::mysql::dbname: ironic
+ ironic::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/ironic-base.yaml b/puppet/services/ironic-base.yaml
index 2f242da8..0ff393c6 100644
--- a/puppet/services/ironic-base.yaml
+++ b/puppet/services/ironic-base.yaml
@@ -65,12 +65,5 @@ outputs:
ironic::rabbit_password: {get_param: RabbitPassword}
ironic::rabbit_port: {get_param: RabbitClientPort}
ironic::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- ironic::db::mysql::password: {get_param: IronicPassword}
- ironic::db::mysql::user: ironic
- ironic::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- ironic::db::mysql::dbname: ironic
- ironic::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
step_config: |
include ::tripleo::profile::base::ironic
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index 27479f79..4ac9fc30 100644
--- a/puppet/services/ironic-conductor.yaml
+++ b/puppet/services/ironic-conductor.yaml
@@ -18,10 +18,27 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ IronicCleaningDiskErase:
+ default: 'full'
+ description: Type of disk cleaning before and between deployments,
+ "full" for full cleaning, "metadata" to clean only disk
+ metadata (partition table).
+ type: string
IronicEnabledDrivers:
- default: ['pxe_ipmitool', 'agent_ipmitool']
+ default: ['pxe_ipmitool', 'pxe_drac', 'pxe_ilo']
description: Enabled Ironic drivers
type: comma_delimited_list
+ IronicIPXEEnabled:
+ default: true
+ description: Whether to use iPXE instead of PXE for deployment.
+ type: boolean
+ IronicIPXEPort:
+ default: 8088
+ description: Port to use for serving images when iPXE is used.
+ type: string
+ MonitoringSubscriptionIronicConductor:
+ default: 'overcloud-ironic-conductor'
+ type: string
resources:
IronicBase:
@@ -36,20 +53,48 @@ outputs:
description: Role data for the Ironic conductor role.
value:
service_name: ironic_conductor
+ monitoring_subscription: {get_param: MonitoringSubscriptionIronicConductor}
config_settings:
map_merge:
- get_attr: [IronicBase, role_data, config_settings]
# FIXME: I have no idea why neutron_url is in "api" manifest
- ironic::api::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
+ ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase}
+ ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers}
+ # We need an endpoint containing a real IP, not a VIP here
+ ironic_conductor_http_host: {get_param: [ServiceNetMap, IronicNetwork]}
+ ironic::conductor::http_url:
+ list_join:
+ - ''
+ - - 'http://'
+ - '%{hiera("ironic_conductor_http_host")}:'
+ - {get_param: IronicIPXEPort}
+ ironic::drivers::pxe::ipxe_enabled: {get_param: IronicIPXEEnabled}
ironic::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
- ironic::enabled_drivers: {get_param: IronicEnabledDrivers}
- # Prevent tftp_server from defaulting to my_ip setting, which is
- # controller VIP, not a real IP.
- ironic::drivers::pxe::tftp_server: {get_input: ironic_api_network}
+ # NOTE: bind IP is found in Heat replacing the network name with the
+ # local node IP for the given network; replacement examples
+ # (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet -> IP/CIDR
+ ironic::drivers::pxe::tftp_server: {get_param: [ServiceNetMap, IronicNetwork]}
+ # NOTE(dtantsur): UEFI only works with iPXE currently for us
+ ironic::drivers::pxe::uefi_pxe_config_template: '$pybasedir/drivers/modules/ipxe_config.template'
+ ironic::drivers::pxe::uefi_pxe_bootfile_name: 'ipxe.efi'
tripleo.ironic_conductor.firewall_rules:
'134 ironic conductor TFTP':
dport: 69
proto: udp
+ '135 ironic conductor HTTP':
+ dport: {get_param: IronicIPXEPort}
+ # NOTE(dtantsur): the my_ip parameter is heavily overloaded in
+ # ironic. It's used as a default value for e.g. TFTP server IP,
+ # glance and neutron endpoints, virtual console IP. We override
+ # the TFTP server IP in ironic-conductor.yaml as it should not be
+ # the VIP, but rather a real IP of the host.
+ ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
+ ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
+
step_config: |
include ::tripleo::profile::base::ironic::conductor
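The list_join above builds a URL whose host part is a hiera interpolation, so the final value is only resolved on each conductor node. Roughly, with the default IronicIPXEPort and an illustrative node IP, the resulting hieradata looks like:

    ironic_conductor_http_host: 172.16.2.10                              # example IP
    ironic::conductor::http_url: 'http://%{hiera("ironic_conductor_http_host")}:8088'
    # which Puppet interpolates on the node to: http://172.16.2.10:8088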
diff --git a/puppet/services/keepalived.yaml b/puppet/services/keepalived.yaml
index b783345b..38cfbe22 100644
--- a/puppet/services/keepalived.yaml
+++ b/puppet/services/keepalived.yaml
@@ -28,14 +28,21 @@ parameters:
Specifies the interface where the public-facing virtual ip will be assigned.
This should be int_public when a VLAN is being used.
type: string
+ MonitoringSubscriptionKeepalived:
+ default: 'overcloud-keepalived'
+ type: string
outputs:
role_data:
description: Role data for the Keepalived role.
value:
service_name: keepalived
+ monitoring_subscription: {get_param: MonitoringSubscriptionKeepalived}
config_settings:
tripleo::keepalived::control_virtual_interface: {get_param: ControlVirtualInterface}
tripleo::keepalived::public_virtual_interface: {get_param: PublicVirtualInterface}
+ tripleo.keepalived.firewall_rules:
+ '106 keepalived vrrp':
+ proto: vrrp
step_config: |
include ::tripleo::profile::base::keepalived
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
index 2f01578e..1fc88bf1 100644
--- a/puppet/services/kernel.yaml
+++ b/puppet/services/kernel.yaml
@@ -39,8 +39,12 @@ outputs:
net.netfilter.nf_conntrack_max:
value: 500000
# prevent neutron bridges from autoconfiguring ipv6 addresses
+ net.ipv6.conf.all.accept_ra:
+ value: 0
net.ipv6.conf.default.accept_ra:
value: 0
+ net.ipv6.conf.all.autoconf:
+ value: 0
net.ipv6.conf.default.autoconf:
value: 0
net.core.netdev_max_backlog:
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index c763c391..1f83b680 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -83,82 +83,146 @@ parameters:
KeystoneWorkers:
type: string
description: Set the number of workers for keystone::wsgi::apache
- default: '"%{::processorcount}"'
+ default: '"%{::os_workers}"'
+ MonitoringSubscriptionKeystone:
+ default: 'overcloud-keystone'
+ type: string
+ KeystoneCredential0:
+ type: string
+ description: The first Keystone credential key. Must be a valid key.
+ KeystoneCredential1:
+ type: string
+ description: The second Keystone credential key. Must be a valid key.
+ KeystoneLoggingSource:
+ type: json
+ default:
+ tag: openstack.keystone
+ path: /var/log/keystone/keystone.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
outputs:
role_data:
description: Role data for the Keystone role.
value:
service_name: keystone
+ monitoring_subscription: {get_param: MonitoringSubscriptionKeystone}
+ logging_source: {get_param: KeystoneLoggingSource}
+ logging_groups:
+ - keystone
config_settings:
- keystone::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://keystone:'
- - {get_param: AdminToken}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/keystone'
- keystone::admin_token: {get_param: AdminToken}
- keystone::roles::admin::password: {get_param: AdminPassword}
- keystone_ssl_certificate: {get_param: KeystoneSSLCertificate}
- keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
- keystone::enable_proxy_headers_parsing: true
- keystone::debug: {get_param: Debug}
- keystone::db::mysql::password: {get_param: AdminToken}
- keystone::rabbit_userid: {get_param: RabbitUserName}
- keystone::rabbit_password: {get_param: RabbitPassword}
- keystone::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- keystone::rabbit_port: {get_param: RabbitClientPort}
- keystone::notification_driver: {get_param: KeystoneNotificationDriver}
- keystone::notification_format: {get_param: KeystoneNotificationFormat}
- keystone::roles::admin::email: {get_param: AdminEmail}
- keystone::roles::admin::password: {get_param: AdminPassword}
- keystone::endpoint::public_url: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]}
- keystone::endpoint::internal_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
- keystone::endpoint::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- keystone::endpoint::region: {get_param: KeystoneRegion}
- keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
- keystone::public_endpoint: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]}
- keystone::db::mysql::user: keystone
- keystone::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- keystone::db::mysql::dbname: keystone
- keystone::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
- keystone::rabbit_heartbeat_timeout_threshold: 60
- keystone::cron::token_flush::maxdelay: 3600
- keystone::roles::admin::service_tenant: 'service'
- keystone::roles::admin::admin_tenant: 'admin'
- keystone::cron::token_flush::destination: '/dev/null'
- keystone::config::keystone_config:
- ec2/driver:
- value: 'keystone.contrib.ec2.backends.sql.Ec2'
- keystone::service_name: 'httpd'
- keystone::wsgi::apache::ssl: false
-
- keystone::wsgi::apache::workers: {get_param: KeystoneWorkers}
- # override via extraconfig:
- keystone::wsgi::apache::threads: 1
- keystone::db::database_db_max_retries: -1
- keystone::db::database_max_retries: -1
- tripleo.keystone.firewall_rules:
- '111 keystone':
- dport:
- - 5000
- - 13000
- - 35357
- - 13357
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
- # internal_api -> IP
- # internal_api_uri -> [IP]
- # internal_api_subnet - > IP/CIDR
- # NOTE: this applies to all 4 bind IP settings below...
- keystone::admin_bind_host: {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}
- keystone::public_bind_host: {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}
- keystone::wsgi::apache::bind_host: {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}
- keystone::wsgi::apache::admin_bind_host: {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}
+ map_merge:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - keystone::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://keystone:'
+ - {get_param: AdminToken}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/keystone'
+ keystone::admin_token: {get_param: AdminToken}
+ keystone::admin_password: {get_param: AdminPassword}
+ keystone::roles::admin::password: {get_param: AdminPassword}
+ keystone_ssl_certificate: {get_param: KeystoneSSLCertificate}
+ keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
+ keystone::enable_proxy_headers_parsing: true
+ keystone::enable_credential_setup: true
+ keystone::credential_keys:
+ '/etc/keystone/credential-keys/0':
+ content: {get_param: KeystoneCredential0}
+ '/etc/keystone/credential-keys/1':
+ content: {get_param: KeystoneCredential1}
+ keystone::debug: {get_param: Debug}
+ keystone::rabbit_userid: {get_param: RabbitUserName}
+ keystone::rabbit_password: {get_param: RabbitPassword}
+ keystone::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ keystone::rabbit_port: {get_param: RabbitClientPort}
+ keystone::notification_driver: {get_param: KeystoneNotificationDriver}
+ keystone::notification_format: {get_param: KeystoneNotificationFormat}
+ keystone::roles::admin::email: {get_param: AdminEmail}
+ keystone::roles::admin::password: {get_param: AdminPassword}
+ keystone::endpoint::public_url: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]}
+ keystone::endpoint::internal_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ keystone::endpoint::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ keystone::endpoint::region: {get_param: KeystoneRegion}
+ keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
+ keystone::rabbit_heartbeat_timeout_threshold: 60
+ keystone::cron::token_flush::maxdelay: 3600
+ keystone::roles::admin::service_tenant: 'service'
+ keystone::roles::admin::admin_tenant: 'admin'
+ keystone::cron::token_flush::destination: '/dev/null'
+ keystone::config::keystone_config:
+ ec2/driver:
+ value: 'keystone.contrib.ec2.backends.sql.Ec2'
+ keystone::service_name: 'httpd'
+ keystone::enable_ssl: {get_param: EnableInternalTLS}
+ keystone::wsgi::apache::ssl: {get_param: EnableInternalTLS}
+ keystone::wsgi::apache::servername:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}
+ keystone::wsgi::apache::servername_admin:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}
+ keystone::wsgi::apache::workers: {get_param: KeystoneWorkers}
+ # override via extraconfig:
+ keystone::wsgi::apache::threads: 1
+ keystone::db::database_db_max_retries: -1
+ keystone::db::database_max_retries: -1
+ tripleo.keystone.firewall_rules:
+ '111 keystone':
+ dport:
+ - 5000
+ - 13000
+ - 35357
+ - 13357
+ keystone::admin_bind_host:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}
+ keystone::public_bind_host:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}
+ # NOTE: bind IP is found in Heat replacing the network name with the
+ # local node IP for the given network; replacement examples
+ # (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet -> IP/CIDR
+ # NOTE: this applies to both bind IP settings below...
+ keystone::wsgi::apache::bind_host: {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}
+ keystone::wsgi::apache::admin_bind_host: {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}
step_config: |
include ::tripleo::profile::base::keystone
+ service_config_settings:
+ mysql:
+ keystone::db::mysql::password: {get_param: AdminToken}
+ keystone::db::mysql::user: keystone
+ keystone::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ keystone::db::mysql::dbname: keystone
+ keystone::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/logging/fluentd-base.yaml b/puppet/services/logging/fluentd-base.yaml
new file mode 100644
index 00000000..c8f67556
--- /dev/null
+++ b/puppet/services/logging/fluentd-base.yaml
@@ -0,0 +1,37 @@
+heat_template_version: 2016-04-08
+
+description: Fluentd base service
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: >
+ Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+
+outputs:
+ role_data:
+ description: Role data for the Fluentd role.
+ value:
+ service_name: fluentd_base
+ config_settings:
+ fluentd::package_name: fluentd
+ fluentd::service_name: fluentd
+ fluentd::config_file: /etc/fluentd/fluent.conf
+ fluentd::config_owner: fluentd
+ fluentd::config_group: fluentd
+ fluentd::config_path: /etc/fluentd/config.d
+ fluentd::plugin_provider: yum
+ fluentd::service_provider: systemd
+ fluentd::repo_install: false
diff --git a/puppet/services/logging/fluentd-client.yaml b/puppet/services/logging/fluentd-client.yaml
new file mode 100644
index 00000000..3ae7110f
--- /dev/null
+++ b/puppet/services/logging/fluentd-client.yaml
@@ -0,0 +1,64 @@
+heat_template_version: 2016-10-14
+
+description: Fluentd client configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: >
+ Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+ FluentdBase:
+ type: ./fluentd-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+ LoggingConfiguration:
+ type: OS::TripleO::LoggingConfiguration
+
+outputs:
+ role_data:
+ description: Role data for the Fluentd client role.
+ value:
+ service_name: fluentd_client
+ config_settings:
+ map_merge:
+ - get_attr: [FluentdBase, role_data, config_settings]
+ - tripleo::profile::base::logging::fluentd::fluentd_servers:
+ get_attr: [LoggingConfiguration, LoggingServers]
+ tripleo::profile::base::logging::fluentd::fluentd_filters:
+ yaql:
+ expression: >
+ $.data.filters.flatten().where($)
+ data:
+ filters:
+ - get_attr: [LoggingConfiguration, LoggingDefaultFilters]
+ - get_attr: [LoggingConfiguration, LoggingExtraFilters]
+ tripleo::profile::base::logging::fluentd::fluentd_pos_file_path:
+ get_attr: [LoggingConfiguration, LoggingPosFilePath]
+ tripleo::profile::base::logging::fluentd::fluentd_use_ssl:
+ get_attr: [LoggingConfiguration, LoggingUsesSSL]
+ tripleo::profile::base::logging::fluentd::fluentd_ssl_certificate:
+ get_attr: [LoggingConfiguration, LoggingSSLCertificate]
+ tripleo::profile::base::logging::fluentd::fluentd_ssl_key:
+ get_attr: [LoggingConfiguration, LoggingSSLKey]
+ tripleo::profile::base::logging::fluentd::fluentd_ssl_key_passphrase:
+ get_attr: [LoggingConfiguration, LoggingSSLKeyPassphrase]
+ tripleo::profile::base::logging::fluentd::fluentd_shared_key:
+ get_attr: [LoggingConfiguration, LoggingSharedKey]
+ step_config: |
+ include ::tripleo::profile::base::logging::fluentd
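The yaql expression above concatenates the default and extra filter lists and drops empty entries before handing them to the fluentd profile. With the shipped defaults plus one hypothetical extra filter, the resulting hieradata would look roughly like:

    tripleo::profile::base::logging::fluentd::fluentd_filters:
      - tag_pattern: '**'
        type: record_transformer
        record:
          host: '${hostname}'
      - tag_pattern: 'openstack.**'
        type: record_transformer
        record:
          component: '${tag_parts[1]}'
      - tag_pattern: 'openstack.heat.**'        # hypothetical extra filter
        type: record_transformer
        record:
          service: 'heat'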
diff --git a/puppet/services/logging/fluentd-config.yaml b/puppet/services/logging/fluentd-config.yaml
new file mode 100644
index 00000000..58b423fd
--- /dev/null
+++ b/puppet/services/logging/fluentd-config.yaml
@@ -0,0 +1,154 @@
+heat_template_version: 2016-10-14
+
+description: Fluentd logging configuration
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: >
+ Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ LoggingDefaultFormat:
+ description: >
+ Default format used to parse messages from log files.
+ type: string
+ default: >-
+ /(?<time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d+)
+ (?<pid>\d+)
+ (?<priority>\S+)
+ (?<message>.*)$/
+ LoggingPosFilePath:
+ description: >
+ Directory in which to place fluentd pos_file files (used to track
+ file position for the 'tail' input type).
+ type: string
+ default: /var/cache/fluentd
+ LoggingDefaultGroups:
+ description: >
+ Make fluentd user a member of these groups. Only override this parameter
+ if you want to modify the default list of groups. Use
+ LoggingExtraGroups to add the fluentd user to additional groups.
+ type: comma_delimited_list
+ default:
+ - root
+ LoggingExtraGroups:
+ description: >
+ Make fluentd user a member of these groups (in addition to
+ LoggingDefaultGroups and the groups provided by individual
+ composable services).
+ type: comma_delimited_list
+ default: []
+ LoggingServers:
+ description: |
+ A list of destinations to which fluentd will forward log messages. Expects
+ a list of dictionaries of the form:
+
+ - host: loghost1.example.com
+ port: 24224
+ - host: loghost2.example.com
+ port: 24224
+ type: json
+ default: []
+ LoggingDefaultFilters:
+ description: >
+ A list of fluentd default filters. This will be passed verbatim
+ to the 'filter' key of a fluentd::config resource. Only override this
+ if you do not want the default set of filters; use LoggingExtraFilters
+ if you just want to add additional filters.
+ type: json
+ default:
+ - tag_pattern: '**'
+ type: record_transformer
+ record:
+ host: '${hostname}'
+
+ - tag_pattern: 'openstack.**'
+ type: record_transformer
+ record:
+ component: '${tag_parts[1]}'
+ LoggingExtraFilters:
+ description: >
+ A list of additional fluentd filters. This will be passed
+ verbatim to the 'filter' key of a fluentd::config resource.
+ type: json
+ default: []
+ LoggingUsesSSL:
+ description: >
+ A boolean value indicating whether or not log messages should be forwarded
+ using the secure_forward plugin.
+ type: boolean
+ default: false
+ LoggingSSLCertificate:
+ description: >
+ PEM-encoded SSL CA certificate for fluentd.
+ type: string
+ default: ""
+ LoggingSSLKey:
+ description: >
+ PEM-encoded key for fluentd CA certificate (used by in_secure_forward).
+ type: string
+ default: ""
+ LoggingSSLKeyPassphrase:
+ description: >
+ Passphrase for LoggingSSLKey (used by in_secure_forward).
+ type: string
+ default: ""
+ LoggingSharedKey:
+ description: >
+ Shared secret for fluentd secure-forward plugin.
+ type: string
+ default: ""
+ LoggingDefaultSources:
+ description: >
+ A list of default logging sources for fluentd. You should only override
+ this parameter if you wish to disable the default logging sources. Use
+ LoggingExtraSources to define additional source configurations.
+ type: json
+ default: []
+ LoggingExtraSources:
+ description: >
+ A list of additional logging sources for fluentd. These will be combined
+ with the LoggingDefaultSources and any logging sources defined by
+ composable services.
+ type: json
+ default: []
+
+outputs:
+ LoggingDefaultFormat:
+ value: {get_param: LoggingDefaultFormat}
+ LoggingDefaultFilters:
+ value: {get_param: LoggingDefaultFilters}
+ LoggingExtraFilters:
+ value: {get_param: LoggingExtraFilters}
+ LoggingDefaultGroups:
+ value: {get_param: LoggingDefaultGroups}
+ LoggingExtraGroups:
+ value: {get_param: LoggingExtraGroups}
+ LoggingPosFilePath:
+ value: {get_param: LoggingPosFilePath}
+ LoggingSSLCertificate:
+ value: {get_param: LoggingSSLCertificate}
+ LoggingSSLKey:
+ value: {get_param: LoggingSSLKey}
+ LoggingSSLKeyPassphrase:
+ value: {get_param: LoggingSSLKeyPassphrase}
+ LoggingServers:
+ value: {get_param: LoggingServers}
+ LoggingSharedKey:
+ value: {get_param: LoggingSharedKey}
+ LoggingUsesSSL:
+ value: {get_param: LoggingUsesSSL}
+ LoggingDefaultSources:
+ value: {get_param: LoggingDefaultSources}
+ LoggingExtraSources:
+ value: {get_param: LoggingExtraSources}
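These outputs are plain pass-throughs of the parameters, so wiring up a deployment is a matter of parameter_defaults in an environment file. A hedged example pointing the clients at two hypothetical collectors over secure_forward:

    parameter_defaults:
      LoggingServers:
        - host: loghost1.example.com
          port: 24224
        - host: loghost2.example.com
          port: 24224
      LoggingUsesSSL: true
      LoggingSharedKey: changeme              # placeholder shared secret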
diff --git a/puppet/services/manila-api.yaml b/puppet/services/manila-api.yaml
index b3987747..4d3fd47c 100644
--- a/puppet/services/manila-api.yaml
+++ b/puppet/services/manila-api.yaml
@@ -26,6 +26,9 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ MonitoringSubscriptionManilaApi:
+ default: 'overcloud-manila-api'
+ type: string
resources:
ManilaBase:
@@ -40,6 +43,7 @@ outputs:
description: Role data for the Manila-api role.
value:
service_name: manila_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionManilaApi}
config_settings:
map_merge:
- get_attr: [ManilaBase, role_data, config_settings]
@@ -47,14 +51,6 @@ outputs:
manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
manila::keystone::authtoken::project_name: 'service'
- manila::keystone::auth::public_url: {get_param: [EndpointMap, ManilaV1Public, uri]}
- manila::keystone::auth::internal_url: {get_param: [EndpointMap, ManilaV1Internal, uri]}
- manila::keystone::auth::admin_url: {get_param: [EndpointMap, ManilaV1Admin, uri]}
- manila::keystone::auth::public_url_v2: {get_param: [EndpointMap, ManilaPublic, uri]}
- manila::keystone::auth::internal_url_v2: {get_param: [EndpointMap, ManilaInternal, uri]}
- manila::keystone::auth::admin_url_v2: {get_param: [EndpointMap, ManilaAdmin, uri]}
- manila::keystone::auth::password: {get_param: ManilaPassword }
- manila::keystone::auth::region: {get_param: KeystoneRegion }
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
# (eg. for internal_api):
@@ -62,6 +58,25 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
manila::api::bind_host: {get_param: [ServiceNetMap, ManilaApiNetwork]}
+ manila::api::enable_proxy_headers_parsing: true
step_config: |
include ::tripleo::profile::base::manila::api
-
+ service_config_settings:
+ keystone:
+ manila::keystone::auth::tenant: 'service'
+ manila::keystone::auth::public_url: {get_param: [EndpointMap, ManilaV1Public, uri]}
+ manila::keystone::auth::internal_url: {get_param: [EndpointMap, ManilaV1Internal, uri]}
+ manila::keystone::auth::admin_url: {get_param: [EndpointMap, ManilaV1Admin, uri]}
+ manila::keystone::auth::public_url_v2: {get_param: [EndpointMap, ManilaPublic, uri]}
+ manila::keystone::auth::internal_url_v2: {get_param: [EndpointMap, ManilaInternal, uri]}
+ manila::keystone::auth::admin_url_v2: {get_param: [EndpointMap, ManilaAdmin, uri]}
+ manila::keystone::auth::password: {get_param: ManilaPassword}
+ manila::keystone::auth::region: {get_param: KeystoneRegion}
+ mysql:
+ manila::db::mysql::password: {get_param: ManilaPassword}
+ manila::db::mysql::user: manila
+ manila::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ manila::db::mysql::dbname: manila
+ manila::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/manila-backend-cephfs.yaml b/puppet/services/manila-backend-cephfs.yaml
new file mode 100644
index 00000000..0fc39e2a
--- /dev/null
+++ b/puppet/services/manila-backend-cephfs.yaml
@@ -0,0 +1,57 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Manila CephFS backend
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ # CephFS Native backend params:
+ ManilaCephFSNativeBackendName:
+ type: string
+ default: cephfsnative
+ ManilaCephFSNativeDriverHandlesShareServers:
+ type: boolean
+ default: false
+ ManilaCephFSNativeShareBackendName:
+ type: string
+ default: 'cephfs'
+ ManilaCephFSNativeCephFSConfPath:
+ type: string
+ default: '/etc/ceph/ceph.conf'
+ ManilaCephFSNativeCephFSAuthId:
+ type: string
+ default: 'manila'
+ ManilaCephFSNativeCephFSClusterName:
+ type: string
+ default: 'ceph'
+ ManilaCephFSNativeCephFSEnableSnapshots:
+ type: boolean
+ default: true
+
+outputs:
+ role_data:
+ description: Role data for the Manila Cephfs backend.
+ value:
+ service_name: manila_backend_cephfs
+ config_settings:
+ manila::backend::cephfsnative::title: {get_param: ManilaCephFSNativeBackendName}
+ manila::backend::cephfsnative::driver_handles_share_servers: {get_param: ManilaCephFSNativeDriverHandlesShareServers}
+ manila::backend::cephfsnative::share_backend_name: {get_param: ManilaCephFSNativeShareBackendName}
+ manila::backend::cephfsnative::cephfs_conf_path: {get_param: ManilaCephFSNativeCephFSConfPath}
+ manila::backend::cephfsnative::cephfs_auth_id: {get_param: ManilaCephFSNativeCephFSAuthId}
+ manila::backend::cephfsnative::cephfs_cluster_name: {get_param: ManilaCephFSNativeCephFSClusterName}
+ manila::backend::cephfsnative::cephfs_enable_snapshots: {get_param: ManilaCephFSNativeCephFSEnableSnapshots}
+ step_config:
diff --git a/puppet/services/manila-backend-generic.yaml b/puppet/services/manila-backend-generic.yaml
new file mode 100644
index 00000000..c527666e
--- /dev/null
+++ b/puppet/services/manila-backend-generic.yaml
@@ -0,0 +1,89 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Manila generic backend.
+
+parameters:
+ ManilaGenericBackendName:
+ type: string
+ default: tripleo_generic
+ ManilaGenericDriverHandlesShareServers:
+ type: string
+ default: true
+ ManilaGenericSmbTemplateConfigPath:
+ type: string
+ default: '$state_path/smb.conf'
+ ManilaGenericVolumeNameTemplate:
+ type: string
+ default: 'manila-share-%s'
+ ManilaGenericVolumeSnapshotNameTemplate:
+ type: string
+ default: 'manila-snapshot-%s'
+ ManilaGenericShareMountPath:
+ type: string
+ default: '/shares'
+ ManilaGenericMaxTimeToCreateVolume:
+ type: string
+ default: '180'
+ ManilaGenericMaxTimeToAttach:
+ type: string
+ default: '120'
+ ManilaGenericServiceInstanceSmbConfigPath:
+ type: string
+ default: '$share_mount_path/smb.conf'
+ ManilaGenericShareVolumeFsType:
+ type: string
+ default: 'ext4'
+ ManilaGenericCinderVolumeType:
+ type: string
+ default: ''
+ ManilaServiceInstanceUser:
+ type: string
+ default: ''
+ ManilaServiceInstancePassword: #SET THIS via parameter_defaults
+ type: string
+ hidden: true
+ ManilaServiceInstanceFlavorId:
+ type: number
+ default: 1
+ ManilaServiceNetworkCidr:
+ type: string
+ default: '172.16.0.0/16'
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+
+outputs:
+ role_data:
+ description: Role data for the Manila Generic backend.
+ value:
+ service_name: manila_backend_generic
+ config_settings:
+ manila::backend::generic::title: {get_param: ManilaGenericBackendName}
+ manila::backend::generic::driver_handles_share_servers: {get_param: ManilaGenericDriverHandlesShareServers}
+ manila::backend::generic::smb_template_config_path: {get_param: ManilaGenericSmbTemplateConfigPath}
+ manila::backend::generic::volume_name_template: {get_param: ManilaGenericVolumeNameTemplate}
+ manila::backend::generic::volume_snapshot_name_template: {get_param: ManilaGenericVolumeSnapshotNameTemplate}
+ manila::backend::generic::share_mount_path: {get_param: ManilaGenericShareMountPath}
+ manila::backend::generic::max_time_to_create_volume: {get_param: ManilaGenericMaxTimeToCreateVolume}
+ manila::backend::generic::max_time_to_attach: {get_param: ManilaGenericMaxTimeToAttach}
+ manila::backend::generic::service_instance_smb_config_path: {get_param: ManilaGenericServiceInstanceSmbConfigPath}
+ manila::backend::generic::share_volume_fstype: {get_param: ManilaGenericShareVolumeFsType}
+ manila::backend::generic::cinder_volume_type: {get_param: ManilaGenericCinderVolumeType}
+ manila::service_instance::service_instance_user: {get_param: ManilaServiceInstanceUser}
+ manila::service_instance::service_instance_password: {get_param: ManilaServiceInstancePassword}
+ manila::service_instance::service_instance_flavor_id: {get_param: ManilaServiceInstanceFlavorId}
+ manila::service_instance::service_network_cidr: {get_param: ManilaServiceNetworkCidr}
+
+ step_config:
diff --git a/puppet/services/manila-backend-netapp.yaml b/puppet/services/manila-backend-netapp.yaml
new file mode 100644
index 00000000..e6d2f250
--- /dev/null
+++ b/puppet/services/manila-backend-netapp.yaml
@@ -0,0 +1,108 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Manila NetApp backend.
+
+parameters:
+ ManilaNetappDriverHandlesShareServers:
+ type: string
+ default: true
+ ManilaNetappBackendName:
+ type: string
+ default: tripleo_netapp
+ ManilaNetappLogin:
+ type: string
+ default: ''
+ ManilaNetappPassword:
+ type: string
+ default: ''
+ ManilaNetappServerHostname:
+ type: string
+ default: ''
+ ManilaNetappTransportType:
+ type: string
+ default: 'http'
+ ManilaNetappStorageFamily:
+ type: string
+ default: 'ontap_cluster'
+ ManilaNetappServerPort:
+ type: number
+ default: 80
+ ManilaNetappVolumeNameTemplate:
+ type: string
+ default: 'share_%(share_id)s'
+ ManilaNetappVserver:
+ type: string
+ default: ''
+ ManilaNetappVserverNameTemplate:
+ type: string
+ default: 'os_%s'
+ ManilaNetappLifNameTemplate:
+ type: string
+ default: 'os_%(net_allocation_id)s'
+ ManilaNetappAggrNameSearchPattern:
+ type: string
+ default: '(.*)'
+ ManilaNetappRootVolumeAggr:
+ type: string
+ default: ''
+ ManilaNetappRootVolume:
+ type: string
+ default: 'root'
+ ManilaNetappPortNameSearchPattern:
+ type: string
+ default: '(.*)'
+ ManilaNetappTraceFlags:
+ type: string
+ default: ''
+ ManilaNetappEnabledShareProtocols:
+ type: string
+ default: 'nfs3, nfs4.0'
+ ManilaNetappVolumeSnapshotReservePercent:
+ type: number
+ default: 5
+ ManilaNetappSnapmirrorQuiesceTimeout:
+ type: number
+ default: 3600
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+
+outputs:
+ role_data:
+ description: Role data for the Manila Netapp backend.
+ value:
+ service_name: manila_backend_netapp
+ config_settings:
+ manila::backend::netapp::title: {get_param: ManilaNetappBackendName}
+ manila::backend::netapp::netapp_login: {get_param: ManilaNetappLogin}
+ manila::backend::netapp::driver_handles_share_servers: {get_param: ManilaNetappDriverHandlesShareServers}
+ manila::backend::netapp::netapp_password: {get_param: ManilaNetappPassword}
+ manila::backend::netapp::netapp_server_hostname: {get_param: ManilaNetappServerHostname}
+ manila::backend::netapp::netapp_transport_type: {get_param: ManilaNetappTransportType}
+ manila::backend::netapp::netapp_storage_family: {get_param: ManilaNetappStorageFamily}
+ manila::backend::netapp::netapp_server_port: {get_param: ManilaNetappServerPort}
+ manila::backend::netapp::netapp_volume_name_template: {get_param: ManilaNetappVolumeNameTemplate}
+ manila::backend::netapp::netapp_vserver: {get_param: ManilaNetappVserver}
+ manila::backend::netapp::netapp_vserver_name_template: {get_param: ManilaNetappVserverNameTemplate}
+ manila::backend::netapp::netapp_lif_name_template: {get_param: ManilaNetappLifNameTemplate}
+ manila::backend::netapp::netapp_aggregate_name_search_pattern: {get_param: ManilaNetappAggrNameSearchPattern}
+ manila::backend::netapp::netapp_root_volume_aggregate: {get_param: ManilaNetappRootVolumeAggr}
+ manila::backend::netapp::netapp_root_volume: {get_param: ManilaNetappRootVolume}
+ manila::backend::netapp::netapp_port_name_search_pattern: {get_param: ManilaNetappPortNameSearchPattern}
+ manila::backend::netapp::netapp_trace_flags: {get_param: ManilaNetappTraceFlags}
+ manila::backend::netapp::netapp_enabled_share_protocols: {get_param: ManilaNetappEnabledShareProtocols}
+ manila::backend::netapp::netapp_volume_snapshot_reserve_percent: {get_param: ManilaNetappVolumeSnapshotReservePercent}
+ manila::backend::netapp::netapp_snapmirror_quiesce_timeout: {get_param: ManilaNetappSnapmirrorQuiesceTimeout}
+ step_config:
diff --git a/puppet/services/manila-base.yaml b/puppet/services/manila-base.yaml
index 78bf1c63..d228577a 100644
--- a/puppet/services/manila-base.yaml
+++ b/puppet/services/manila-base.yaml
@@ -40,55 +40,6 @@ parameters:
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
- # Config specific parameters, to be provided via parameter_defaults
- ManilaGenericEnableBackend:
- type: boolean
- default: true
- ManilaGenericBackendName:
- type: string
- default: tripleo_generic
- ManilaGenericDriverHandlesShareServers:
- type: string
- default: true
- ManilaGenericSmbTemplateConfigPath:
- type: string
- default: '$state_path/smb.conf'
- ManilaGenericVolumeNameTemplate:
- type: string
- default: 'manila-share-%s'
- ManilaGenericVolumeSnapshotNameTemplate:
- type: string
- default: 'manila-snapshot-%s'
- ManilaGenericShareMountPath:
- type: string
- default: '/shares'
- ManilaGenericMaxTimeToCreateVolume:
- type: string
- default: '180'
- ManilaGenericMaxTimeToAttach:
- type: string
- default: '120'
- ManilaGenericServiceInstanceSmbConfigPath:
- type: string
- default: '$share_mount_path/smb.conf'
- ManilaGenericShareVolumeFsType:
- type: string
- default: 'ext4'
- ManilaGenericCinderVolumeType:
- type: string
- default: ''
- ManilaGenericServiceInstanceUser:
- type: string
- default: ''
- ManilaGenericServiceInstancePassword: #SET THIS via parameter_defaults
- type: string
- hidden: true
- ManilaGenericServiceInstanceFlavorId:
- type: number
- default: 1
- ManilaGenericServiceNetworkCidr:
- type: string
- default: '172.16.0.0/16'
outputs:
role_data:
@@ -101,28 +52,5 @@ outputs:
manila::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
manila::rabbit_port: {get_param: RabbitClientPort}
manila::debug: {get_param: Debug}
- manila::db::mysql::user: manila
- manila::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- manila::db::mysql::dbname: manila
manila::db::database_db_max_retries: -1
manila::db::database_max_retries: -1
- manila_generic_enable_backend: {get_param: ManilaGenericEnableBackend}
- manila::backend::generic::title: {get_param: ManilaGenericBackendName}
- manila::backend::generic::driver_handles_share_servers: {get_param: ManilaGenericDriverHandlesShareServers}
- manila::backend::generic::smb_template_config_path: {get_param: ManilaGenericSmbTemplateConfigPath}
- manila::backend::generic::volume_name_template: {get_param: ManilaGenericVolumeNameTemplate}
- manila::backend::generic::volume_snapshot_name_template: {get_param: ManilaGenericVolumeSnapshotNameTemplate}
- manila::backend::generic::share_mount_path: {get_param: ManilaGenericShareMountPath}
- manila::backend::generic::max_time_to_create_volume: {get_param: ManilaGenericMaxTimeToCreateVolume}
- manila::backend::generic::max_time_to_attach: {get_param: ManilaGenericMaxTimeToAttach}
- manila::backend::generic::service_instance_smb_config_path: {get_param: ManilaGenericServiceInstanceSmbConfigPath}
- manila::backend::generic::share_volume_fstype: {get_param: ManilaGenericShareVolumeFsType}
- manila::backend::generic::cinder_volume_type: {get_param: ManilaGenericCinderVolumeType}
- manila::service_instance::service_instance_user: {get_param: ManilaGenericServiceInstanceUser}
- manila::service_instance::service_instance_password: {get_param: ManilaGenericServiceInstancePassword}
- manila::service_instance::service_instance_flavor_id: {get_param: ManilaGenericServiceInstanceFlavorId}
- manila::service_instance::service_network_cidr: {get_param: ManilaGenericServiceNetworkCidr}
- manila::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-
diff --git a/puppet/services/manila-scheduler.yaml b/puppet/services/manila-scheduler.yaml
index a5122ba0..474cc24f 100644
--- a/puppet/services/manila-scheduler.yaml
+++ b/puppet/services/manila-scheduler.yaml
@@ -30,6 +30,9 @@ parameters:
description: The password for the manila service account.
type: string
hidden: true
+ MonitoringSubscriptionManilaScheduler:
+ default: 'overcloud-manila-scheduler'
+ type: string
resources:
ManilaBase:
@@ -44,13 +47,13 @@ outputs:
description: Role data for the Manila-scheduler role.
value:
service_name: manila_scheduler
+ monitoring_subscription: {get_param: MonitoringSubscriptionManilaScheduler}
config_settings:
map_merge:
- get_attr: [ManilaBase, role_data, config_settings]
- manila::compute::nova::nova_admin_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
manila::compute::nova::nova_admin_password: {get_param: NovaPassword}
manila::compute::nova::nova_admin_tenant_name: 'service'
- manila::db::mysql::password: {get_param: ManilaPassword}
manila::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
manila::network::neutron::neutron_admin_auth_url: {get_param: [EndpointMap, NeutronAdmin, uri]}
manila::network::neutron::neutron_admin_password: {get_param: NeutronPassword}
@@ -65,4 +68,3 @@ outputs:
- '/manila'
step_config: |
include ::tripleo::profile::base::manila::scheduler
-
diff --git a/puppet/services/manila-share.yaml b/puppet/services/manila-share.yaml
index 184f3694..e42d2fae 100644
--- a/puppet/services/manila-share.yaml
+++ b/puppet/services/manila-share.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionManilaShare:
+ default: 'overcloud-manila-share'
+ type: string
resources:
ManilaBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Manila-share role.
value:
service_name: manila_share
+ monitoring_subscription: {get_param: MonitoringSubscriptionManilaShare}
config_settings:
map_merge:
- get_attr: [ManilaBase, role_data, config_settings]
diff --git a/puppet/services/memcached.yaml b/puppet/services/memcached.yaml
index 3b47261e..9e3f6375 100644
--- a/puppet/services/memcached.yaml
+++ b/puppet/services/memcached.yaml
@@ -18,12 +18,16 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionMemcached:
+ default: 'overcloud-memcached'
+ type: string
outputs:
role_data:
description: Role data for the Memcached role.
value:
service_name: memcached
+ monitoring_subscription: {get_param: MonitoringSubscriptionMemcached}
config_settings:
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
diff --git a/puppet/services/monitoring/sensu-base.yaml b/puppet/services/monitoring/sensu-base.yaml
new file mode 100644
index 00000000..d7350d07
--- /dev/null
+++ b/puppet/services/monitoring/sensu-base.yaml
@@ -0,0 +1,68 @@
+heat_template_version: 2016-04-08
+
+description: Sensu base service
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ MonitoringRabbitHost:
+ description: RabbitMQ host Sensu has to connect to.
+ type: string
+ default: ''
+ MonitoringRabbitPort:
+ default: 5672
+ description: Set RabbitMQ subscriber port, change this if using SSL.
+ type: number
+ MonitoringRabbitUseSSL:
+ default: false
+ description: >
+ RabbitMQ client subscriber parameter to specify an SSL connection
+ to the RabbitMQ host.
+ type: string
+ MonitoringRabbitPassword:
+ description: The RabbitMQ password used for monitoring purposes.
+ type: string
+ hidden: true
+ MonitoringRabbitUserName:
+ description: The RabbitMQ username used for monitoring purposes.
+ type: string
+ default: sensu
+ MonitoringRabbitVhost:
+ description: The RabbitMQ vhost used for monitoring purposes.
+ type: string
+ default: '/sensu'
+
+
+outputs:
+ role_data:
+ description: Role data for the Sensu role.
+ value:
+ service_name: sensu_base
+ config_settings:
+ sensu::enterprise: false
+ sensu::enterprise_dashboard: false
+ sensu::install_repo: false
+ sensu::manage_user: false
+ sensu::rabbitmq_host: {get_param: MonitoringRabbitHost}
+ sensu::rabbitmq_password: {get_param: MonitoringRabbitPassword}
+ sensu::rabbitmq_port: {get_param: MonitoringRabbitPort}
+ sensu::rabbitmq_ssl: {get_param: MonitoringRabbitUseSSL}
+ sensu::rabbitmq_user: {get_param: MonitoringRabbitUserName}
+ sensu::rabbitmq_vhost: {get_param: MonitoringRabbitVhost}
+ #sensu::redis_host: {get_param: MonitoringRedisHost}
+ #sensu::redis_password: {get_param: MonitoringRedisPassword}
+ sensu::sensu_plugin_provider: 'yum'
+ sensu::sensu_plugin_name: 'rubygem-sensu-plugin'
+ sensu::version: 'present'
diff --git a/puppet/services/monitoring/sensu-client.yaml b/puppet/services/monitoring/sensu-client.yaml
new file mode 100644
index 00000000..3f37e750
--- /dev/null
+++ b/puppet/services/monitoring/sensu-client.yaml
@@ -0,0 +1,49 @@
+heat_template_version: 2016-04-08
+
+description: Sensu client configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: >
+ Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ SensuClientCustomConfig:
+ default: {}
+ description: Hash containing custom sensu-client variables.
+ type: json
+ label: Custom configuration for Sensu Client variables
+
+resources:
+ SensuBase:
+ type: ./sensu-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Sensu client role.
+ value:
+ service_name: sensu_client
+ monitoring_subscription: all
+ config_settings:
+ map_merge:
+ - get_attr: [SensuBase, role_data, config_settings]
+ - sensu::api: false
+ sensu::client: true
+ sensu::server: false
+ sensu::client_custom: {get_param: SensuClientCustomConfig}
+ step_config: |
+ include ::tripleo::profile::base::monitoring::sensu
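SensuClientCustomConfig is passed straight through to the client configuration, so arbitrary client-side settings can be supplied alongside the required RabbitMQ connection details. A hedged environment snippet; the host, password, and custom keys are illustrative:

    parameter_defaults:
      MonitoringRabbitHost: 192.0.2.20
      MonitoringRabbitPassword: sensu_password          # placeholder
      SensuClientCustomConfig:
        api:
          host: 192.0.2.20
          port: 4567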
diff --git a/puppet/services/network/contrail-analytics.yaml b/puppet/services/network/contrail-analytics.yaml
new file mode 100644
index 00000000..1c2331fa
--- /dev/null
+++ b/puppet/services/network/contrail-analytics.yaml
@@ -0,0 +1,90 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Contrail Analytics service deployment using Puppet. This YAML file
+ creates the interface between the HOT template
+ and the Puppet manifest that actually installs
+ and configures Contrail Analytics.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ContrailAnalyticsHostIP:
+ description: host IP address of Analytics
+ type: string
+ ContrailAnalyticsRedisServerIp:
+ description: Redis server ip address
+ type: string
+ ContrailAnalyticsCollectorServerHttpPort:
+ description: Collector http port
+ type: number
+ default: 8089
+ ContrailAnalyticsCollectorSandeshPort:
+ description: Collector sandesh port
+ type: number
+ default: 8086
+ ContrailAnalyticsHttpServerPort:
+ description: Analytics http port
+ type: number
+ default: 8090
+ ContrailAnalyticsListenAddress:
+ default: '0.0.0.0'
+ description: IP address the Analytics API is listening on
+ type: string
+ ContrailAnalyticsListenPort:
+ default: 8082
+ description: Port the Analytics API is listening on
+ type: number
+ ContrailAnalyticsRedisServerPort:
+ description: Redis server port
+ type: number
+ default: 6379
+ ContrailAnalyticsRestApiIp:
+ description: IP address Analytics rest interface listens on
+ type: string
+ default: '0.0.0.0'
+ ContrailAnalyticsRestApiPort:
+ description: Analytics rest port
+ type: number
+ default: 8081
+
+resources:
+ ContrailBase:
+ type: ./contrail-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role Contrail Analytics using composable services.
+ value:
+ service_name: contrail_analytics
+ config_settings:
+ map_merge:
+ - get_attr: [ContrailBase, role_data, config_settings]
+ - contrail::analytics::collector_http_server_port: {get_param: ContrailAnalyticsCollectorServerHttpPort}
+ contrail::analytics::collector_sandesh_port: {get_param: ContrailAnalyticsCollectorSandeshPort}
+ contrail::analytics::host_ip: {get_param: ContrailAnalyticsHostIP}
+ contrail::analytics::http_server_port: {get_param: ContrailAnalyticsHttpServerPort}
+ contrail::analytics::listen_ip_address: {get_param: ContrailAnalyticsListenAddress}
+ contrail::analytics::listen_port: {get_param: ContrailAnalyticsListenPort}
+ contrail::analytics::redis_server: {get_param: ContrailAnalyticsRedisServerIp}
+ contrail::analytics::redis_server_port: {get_param: ContrailAnalyticsRedisServerPort}
+ contrail::analytics::rest_api_ip: {get_param: ContrailAnalyticsRestApiIp}
+ contrail::analytics::rest_api_port: {get_param: ContrailAnalyticsRestApiPort}
+ step_config: |
+ include ::tripleo::network::contrail::analytics
diff --git a/puppet/services/network/contrail-base.yaml b/puppet/services/network/contrail-base.yaml
new file mode 100644
index 00000000..03dbea5b
--- /dev/null
+++ b/puppet/services/network/contrail-base.yaml
@@ -0,0 +1,100 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Base parameters for all Contrail Services.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ AdminPassword:
+ description: Keystone admin user password
+ type: string
+ AdminTenantName:
+ description: Keystone admin tenant name
+ type: string
+ AdminToken:
+ description: Keystone admin token
+ type: string
+ AdminUser:
+ description: Keystone admin user name
+ type: string
+ AuthHost:
+ description: Keystone host IP address
+ type: string
+ AuthPort:
+ default: 35357
+ description: Keystone port
+ type: number
+ AuthProtocol:
+ default: 'http'
+ description: Keystone authentication protocol
+ type: string
+ ContrailDiscoveryServerIp:
+ description: Discovery server ip address
+ type: string
+ ContrailKafkaBrokerList:
+ description: List of kafka servers
+ type: comma_delimited_list
+ ContrailAuth:
+ default: 'keystone'
+ description: Keystone authentication method
+ type: string
+ ContrailCassandraServerList:
+ default: []
+ description: List of cassandra servers
+ type: comma_delimited_list
+ ContrailDiscoveryServerPort:
+ description: Discovery server port
+ type: number
+ default: 5998
+ ContrailInsecure:
+ default: false
+ description: Keystone insecure mode
+ type: boolean
+ ContrailMemcachedServer:
+ default: '127.0.0.1:12111'
+ description: Memcached server
+ type: string
+ ContrailMultiTenancy:
+ default: true
+ description: Turn on/off multi-tenancy
+ type: boolean
+ ContrailZkServerIp:
+ default: []
+ description: List of zookeeper servers
+ type: comma_delimited_list
+
+outputs:
+ role_data:
+ description: Shared role data for the Contrail services.
+ value:
+ service_name: contrail_base
+ config_settings:
+ contrail::admin_password: {get_param: AdminPassword}
+ contrail::admin_tenant_name: {get_param: AdminTenantName}
+ contrail::admin_token: {get_param: AdminToken}
+ contrail::admin_user: {get_param: AdminUser}
+ contrail::auth_host: {get_param: [EndpointMap, KeystoneInternal, host] }
+ contrail::auth_port: {get_param: [EndpointMap, KeystoneInternal, port] }
+ contrail::auth_protocol: {get_param: [EndpointMap, KeystoneInternal, protocol] }
+ contrail::disc_server_ip: {get_param: ContrailDiscoveryServerIp}
+ contrail::kafka_broker_list: {get_param: ContrailKafkaBrokerList}
+ contrail::auth: {get_param: ContrailAuth}
+ contrail::cassandra_server_list: {get_param: ContrailCassandraServerList}
+ contrail::disc_server_port: {get_param: ContrailDiscoveryServerPort}
+ contrail::insecure: {get_param: ContrailInsecure}
+ contrail::memcached_server: {get_param: ContrailMemcachedServer}
+ contrail::multi_tenancy: {get_param: ContrailMultiTenancy}
+ contrail::zk_server_ip: {get_param: ContrailZkServerIp}
diff --git a/puppet/services/network/contrail-config.yaml b/puppet/services/network/contrail-config.yaml
new file mode 100644
index 00000000..0987fc75
--- /dev/null
+++ b/puppet/services/network/contrail-config.yaml
@@ -0,0 +1,72 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Contrail Config service deployment using puppet. This YAML file
+ creates the interface between the HOT template
+ and the puppet manifest that actually installs
+ and configures Contrail Config.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ContrailConfigIfmapServerIp:
+ description: Ifmap server ip address
+ type: string
+ ContrailConfigIfmapUserName:
+ description: Ifmap user name
+ type: string
+ ContrailConfigIfmapUserPassword:
+ description: Ifmap user password
+ type: string
+ ContrailConfigRabbitServerIp:
+ description: RabbitMQ server IP address
+ type: string
+ ContrailConfigRedisServerIp:
+ description: Redis server ip address
+ type: string
+ ContrailConfigListenAddress:
+ default: '0.0.0.0'
+ description: IP address Config API is listening on
+ type: string
+ ContrailConfigListenPort:
+ default: 8082
+ description: Port Config API is listening on
+ type: number
+
+resources:
+ ContrailBase:
+ type: ./contrail-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Contrail Config service.
+ value:
+ service_name: contrail_config
+ config_settings:
+ map_merge:
+ - get_attr: [ContrailBase, role_data, config_settings]
+ - contrail::config::ifmap_password: {get_param: ContrailConfigIfmapUserPassword}
+ contrail::config::ifmap_server_ip: {get_param: ContrailConfigIfmapServerIp}
+ contrail::config::ifmap_username: {get_param: ContrailConfigIfmapUserName}
+ contrail::config::listen_ip_address: {get_param: ContrailConfigListenAddress}
+ contrail::config::listen_port: {get_param: ContrailConfigListenPort}
+ contrail::config::rabbit_server: {get_param: ContrailConfigRabbitServerIp}
+ contrail::config::redis_server: {get_param: ContrailConfigRedisServerIp}
+ step_config: |
+ include ::tripleo::network::contrail::config
diff --git a/puppet/services/network/contrail-control.yaml b/puppet/services/network/contrail-control.yaml
new file mode 100644
index 00000000..9356e9e9
--- /dev/null
+++ b/puppet/services/network/contrail-control.yaml
@@ -0,0 +1,54 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Contrail Control service deployment using puppet. This YAML file
+ creates the interface between the HOT template
+ and the puppet manifest that actually installs
+ and configures Contrail Control.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ContrailControlHostIP:
+ description: host IP address of Control
+ type: string
+ ContrailControlIfmapUserName:
+ description: Ifmap user name
+ type: string
+ ContrailControlIfmapUserPassword:
+ description: Ifmap user password
+ type: string
+
+resources:
+ ContrailBase:
+ type: ./contrail-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Contrail Control service.
+ value:
+ service_name: contrail_control
+ config_settings:
+ map_merge:
+ - get_attr: [ContrailBase, role_data, config_settings]
+ - contrail::control::host_ip: {get_param: ContrailControlHostIP}
+ contrail::control::ifmap_username: {get_param: ContrailControlIfmapUserName}
+ contrail::control::ifmap_password: {get_param: ContrailControlIfmapUserPassword}
+ step_config: |
+ include ::tripleo::network::contrail::control
diff --git a/puppet/services/network/contrail-database.yaml b/puppet/services/network/contrail-database.yaml
new file mode 100644
index 00000000..e5712618
--- /dev/null
+++ b/puppet/services/network/contrail-database.yaml
@@ -0,0 +1,51 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Contrail Database service deployment using puppet. This YAML file
+ creates the interface between the HOT template
+ and the puppet manifest that actually installs
+ and configures Contrail Database.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ContrailDatabaseHostIP:
+ description: host IP address of Database node
+ type: string
+ ContrailDatabaseMinDisk:
+ description: Minimum disk size for the database, in GB
+ type: number
+ default: 64
+
+resources:
+ ContrailBase:
+ type: ./contrail-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Contrail Database service.
+ value:
+ service_name: contrail_database
+ config_settings:
+ map_merge:
+ - get_attr: [ContrailBase, role_data, config_settings]
+ - contrail::database::host_ip: {get_param: ContrailDatabaseHostIP}
+ contrail::database::minimum_diskGB: {get_param: ContrailDatabaseMinDisk}
+ step_config: |
+ include ::tripleo::profile::contrail::database
diff --git a/puppet/services/network/contrail-webui.yaml b/puppet/services/network/contrail-webui.yaml
new file mode 100644
index 00000000..72b9e1c0
--- /dev/null
+++ b/puppet/services/network/contrail-webui.yaml
@@ -0,0 +1,69 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Contrail WebUI service deployment using puppet. This YAML file
+ creates the interface between the HOT template
+ and the puppet manifest that actually installs
+ and configures Contrail WebUI.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ContrailWebUiAnalyticsVip:
+ description: Contrail Analytics VIP
+ type: string
+ ContrailWebUiConfigVip:
+ description: Contrail Config VIP
+ type: string
+ ContrailWebUiNeutronVip:
+ description: Neutron VIP
+ type: string
+ ContrailWebuiHttpPort:
+ default: 8080
+ description: HTTP Port of Webui
+ type: number
+ ContrailWebuiHttpsPort:
+ default: 8143
+ description: HTTPS Port of Webui
+ type: number
+ ContrailWebUiRedisIp:
+ description: Redis IP
+ type: string
+ default: '127.0.0.1'
+
+resources:
+ ContrailBase:
+ type: ./contrail-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Contrail WebUI service.
+ value:
+ service_name: contrail_webui
+ config_settings:
+ map_merge:
+ - get_attr: [ContrailBase, role_data, config_settings]
+ - contrail::webui::contrail_analytics_vip: {get_param: ContrailWebUiAnalyticsVip}
+ contrail::webui::contrail_config_vip: {get_param: ContrailWebUiConfigVip}
+ contrail::webui::contrail_webui_http_port: {get_param: ContrailWebuiHttpPort}
+ contrail::webui::contrail_webui_https_port: {get_param: ContrailWebuiHttpsPort}
+ contrail::webui::neutron_vip: {get_param: ContrailWebUiNeutronVip}
+ contrail::webui::redis_ip: {get_param: ContrailWebUiRedisIp}
+ step_config: |
+ include ::tripleo::network::contrail::webui
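
As a usage sketch only, the five new Contrail service templates above would be wired into a deployment through an environment file. The registry key names, relative paths and addresses below are assumptions for illustration, not taken from this change:

    # Hypothetical environment file enabling the Contrail composable services.
    resource_registry:
      OS::TripleO::Services::ContrailConfig: ../puppet/services/network/contrail-config.yaml
      OS::TripleO::Services::ContrailControl: ../puppet/services/network/contrail-control.yaml
      OS::TripleO::Services::ContrailAnalytics: ../puppet/services/network/contrail-analytics.yaml
      OS::TripleO::Services::ContrailDatabase: ../puppet/services/network/contrail-database.yaml
      OS::TripleO::Services::ContrailWebUI: ../puppet/services/network/contrail-webui.yaml

    parameter_defaults:
      # Representative required parameters (no defaults in the templates above);
      # the remaining Contrail* and Keystone admin parameters from
      # contrail-base.yaml must also be supplied.
      ContrailConfigIfmapServerIp: 192.0.2.10
      ContrailConfigIfmapUserName: api-server
      ContrailConfigIfmapUserPassword: api-server
      ContrailConfigRabbitServerIp: 192.0.2.10
      ContrailConfigRedisServerIp: 192.0.2.10
      ContrailAnalyticsHostIP: 192.0.2.10
      ContrailAnalyticsRedisServerIp: 192.0.2.10
      ContrailControlHostIP: 192.0.2.10
      ContrailDatabaseHostIP: 192.0.2.10
      ContrailWebUiAnalyticsVip: 192.0.2.11
      ContrailWebUiConfigVip: 192.0.2.11
      ContrailWebUiNeutronVip: 192.0.2.11
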
diff --git a/puppet/services/neutron-api.yaml b/puppet/services/neutron-api.yaml
index 35ac32db..408eb795 100644
--- a/puppet/services/neutron-api.yaml
+++ b/puppet/services/neutron-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
OpenStack Neutron Server configured with Puppet
@@ -19,9 +19,16 @@ parameters:
via parameter_defaults in the resource registry.
type: json
NeutronWorkers:
- default: 0
- description: Number of workers for Neutron service.
- type: number
+ default: ''
+ description: |
+ Sets the number of API and RPC workers for the Neutron service. The
+ default value results in the configuration being left unset and a
+ system-dependent default will be chosen (usually the number of
+ processors). Please note that this can result in a large number of
+ processes and high memory consumption on systems with a large core count. On
+ such systems it is recommended that a non-default value be selected that
+ matches the load requirements.
+ type: string
NeutronPassword:
description: The password for the neutron service and db account, used by neutron agents.
type: string
@@ -30,10 +37,6 @@ parameters:
default: 'True'
description: Allow automatic l3-agent failover
type: string
- NeutronL3HA:
- default: false
- description: Whether to enable HA for virtual routers
- type: boolean
NovaPassword:
description: The password for the nova service and db account, used by nova-api.
type: string
@@ -46,6 +49,44 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ MonitoringSubscriptionNeutronServer:
+ default: 'overcloud-neutron-server'
+ type: string
+ NeutronApiLoggingSource:
+ type: json
+ default:
+ tag: openstack.neutron.api
+ path: /var/log/neutron/server.log
+ ControllerCount:
+ description: |
+ Under normal conditions, this should not be overridden manually and is
+ set at deployment time. The default value is present to allow the
+ template to be used in environments that do not override it.
+ default: 1
+ type: number
+
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Ocata cycle.
+ NeutronL3HA:
+ default: false
+ description: |
+ Whether to enable HA for virtual routers. While the default value is
+ 'false', L3 HA will be enabled automatically when more than one node
+ hosts controller configurations and DVR is disabled. This parameter is
+ being deprecated in Newton and is scheduled to be removed in Ocata.
+ Future releases will enable L3 HA by default if it is appropriate for the
+ deployment type. Alternate mechanisms will be available to override.
+ type: boolean
+
+parameter_groups:
+- label: deprecated
+ description: |
+ The following parameters are deprecated and will be removed. They should not
+ be relied on for new deployments. If you have concerns regarding deprecated
+ parameters, please contact the TripleO development team on IRC or the
+ OpenStack mailing list.
+ parameters:
+ - NeutronL3HA
resources:
@@ -56,15 +97,31 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+conditions:
+
+ auto_enable_l3_ha:
+ and:
+ - not:
+ equals:
+ - get_param: ControllerCount
+ - 1
+ - equals:
+ - get_param: NeutronEnableDVR
+ - false
+
outputs:
role_data:
description: Role data for the Neutron Server agent service.
value:
service_name: neutron_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionNeutronServer}
+ logging_source: {get_param: NeutronApiLoggingSource}
+ logging_groups:
+ - neutron
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- neutron::server::database_connection:
+ - neutron::server::database_connection:
list_join:
- ''
- - {get_param: [EndpointMap, MysqlInternal, protocol]}
@@ -73,43 +130,49 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ovs_neutron'
- neutron::keystone::auth::tenant: 'service'
- neutron::keystone::auth::public_url: {get_param: [EndpointMap, NeutronPublic, uri]}
- neutron::keystone::auth::internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] }
- neutron::keystone::auth::admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] }
- neutron::keystone::auth::password: {get_param: NeutronPassword}
- neutron::keystone::auth::region: {get_param: KeystoneRegion}
- neutron::server::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- neutron::server::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
neutron::server::api_workers: {get_param: NeutronWorkers}
+ neutron::server::rpc_workers: {get_param: NeutronWorkers}
neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
- neutron::server::l3_ha: {get_param: NeutronL3HA}
- neutron::server::password: {get_param: NeutronPassword}
+ neutron::server::l3_ha: {if: ["auto_enable_l3_ha", true, {get_param: NeutronL3HA}]}
+ neutron::server::enable_proxy_headers_parsing: true
+ neutron::keystone::authtoken::password: {get_param: NeutronPassword}
neutron::server::notifications::nova_url: { get_param: [ EndpointMap, NovaInternal, uri ] }
neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] }
neutron::server::notifications::tenant_name: 'service'
neutron::server::notifications::project_name: 'service'
neutron::server::notifications::password: {get_param: NovaPassword}
- neutron::server::project_name: 'service'
+ neutron::keystone::authtoken::project_name: 'service'
neutron::server::sync_db: true
- neutron::db::mysql::password: {get_param: NeutronPassword}
- neutron::db::mysql::user: neutron
- neutron::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- neutron::db::mysql::dbname: ovs_neutron
- neutron::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
- tripleo.neutron_server.firewall_rules:
- '114 neutron server':
+ tripleo.neutron_api.firewall_rules:
+ '114 neutron api':
dport:
- 9696
- 13696
- '118 neutron vxlan networks':
- proto: 'udp'
- dport: 4789
- '106 vrrp':
- proto: vrrp
neutron::server::router_distributed: {get_param: NeutronEnableDVR}
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet -> IP/CIDR
+ neutron::bind_host: {get_param: [ServiceNetMap, NeutronApiNetwork]}
step_config: |
include tripleo::profile::base::neutron::server
+ service_config_settings:
+ keystone:
+ neutron::keystone::auth::tenant: 'service'
+ neutron::keystone::auth::public_url: {get_param: [EndpointMap, NeutronPublic, uri]}
+ neutron::keystone::auth::internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] }
+ neutron::keystone::auth::admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] }
+ neutron::keystone::auth::password: {get_param: NeutronPassword}
+ neutron::keystone::auth::region: {get_param: KeystoneRegion}
+ mysql:
+ neutron::db::mysql::password: {get_param: NeutronPassword}
+ neutron::db::mysql::user: neutron
+ neutron::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ neutron::db::mysql::dbname: ovs_neutron
+ neutron::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml
index 39ffea24..6bb4ba08 100644
--- a/puppet/services/neutron-base.yaml
+++ b/puppet/services/neutron-base.yaml
@@ -33,7 +33,7 @@ parameters:
from neutron.core_plugins namespace.
type: string
NeutronServicePlugins:
- default: "router,qos"
+ default: "router,qos,trunk"
description: |
Comma-separated list of service plugin entrypoints to be loaded from the
neutron.service_plugins namespace.
@@ -48,6 +48,18 @@ parameters:
description: >
Remove configuration that is not generated by TripleO. Setting
to false may result in configuration remnants after updates/upgrades.
+ NeutronGlobalPhysnetMtu:
+ type: number
+ default: 1496
+ description: |
+ MTU of the underlying physical network. Neutron uses this value to
+ calculate MTU for all virtual network components. For flat and VLAN
+ networks, neutron uses this value without modification. For overlay
+ networks such as VXLAN, neutron automatically subtracts the overlay
+ protocol overhead from this value. The default value of 1496 is
+ currently in effect to compensate for some additional overhead when
+ deploying with some network configurations (e.g. network isolation over
+ single network interfaces)
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -87,3 +99,4 @@ outputs:
neutron::host: '"%{::fqdn}"' #NOTE: extra quoting is needed
neutron::db::database_db_max_retries: -1
neutron::db::database_max_retries: -1
+ neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu}
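
For deployments whose underlay supports larger frames, the new NeutronGlobalPhysnetMtu parameter can simply be raised; a sketch with an illustrative value:

    parameter_defaults:
      # Underlay MTU used by Neutron to derive per-network MTUs; overlay
      # protocol overhead (e.g. VXLAN) is subtracted automatically.
      NeutronGlobalPhysnetMtu: 9000
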
diff --git a/puppet/services/neutron-compute-plugin-ovn.yaml b/puppet/services/neutron-compute-plugin-ovn.yaml
new file mode 100644
index 00000000..95e05dd4
--- /dev/null
+++ b/puppet/services/neutron-compute-plugin-ovn.yaml
@@ -0,0 +1,45 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron Compute OVN agent
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ OVNDbHost:
+ description: IP address on which the OVN DB servers are listening
+ type: string
+ OVNSouthboundServerPort:
+ description: Port of the Southbound DB Server
+ type: number
+ default: 6642
+ OVNTunnelEncapType:
+ description: Tunnel encapsulation type
+ type: string
+ default: geneve
+
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Compute OVN agent
+ value:
+ service_name: neutron_compute_plugin_ovn
+ config_settings:
+ tripleo::profile::base::neutron::agents::ovn::ovn_db_host: {get_param: OVNDbHost}
+ ovn::southbound::port: {get_param: OVNSouthboundServerPort}
+ ovn::southbound::encap_type: {get_param: OVNTunnelEncapType}
+ ovn::controller::ovn_encap_ip: {get_param: [ServiceNetMap, NeutronApiNetwork]}
+ step_config: |
+ include ::tripleo::profile::base::neutron::agents::ovn
diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml
index 513cb2d4..2cd08f98 100644
--- a/puppet/services/neutron-dhcp.yaml
+++ b/puppet/services/neutron-dhcp.yaml
@@ -31,6 +31,14 @@ parameters:
default: false
description: If True, DHCP always provides metadata route to VM.
type: boolean
+ MonitoringSubscriptionNeutronDhcp:
+ default: 'overcloud-neutron-dhcp'
+ type: string
+ NeutronDhcpAgentLoggingSource:
+ type: json
+ default:
+ tag: openstack.neutron.agent.dhcp
+ path: /var/log/neutron/dhcp-agent.log
resources:
@@ -46,6 +54,10 @@ outputs:
description: Role data for the Neutron DHCP agent service.
value:
service_name: neutron_dhcp
+ monitoring_subscription: {get_param: MonitoringSubscriptionNeutronDhcp}
+ logging_source: {get_param: NeutronDhcpAgentLoggingSource}
+ logging_groups:
+ - neutron
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
diff --git a/puppet/services/neutron-l3-compute-dvr.yaml b/puppet/services/neutron-l3-compute-dvr.yaml
index 0f3c2a70..b6c29116 100644
--- a/puppet/services/neutron-l3-compute-dvr.yaml
+++ b/puppet/services/neutron-l3-compute-dvr.yaml
@@ -26,6 +26,14 @@ parameters:
description: Name of bridge used for external network traffic.
type: string
default: 'br-ex'
+ MonitoringSubscriptionNeutronL3Dvr:
+ default: 'overcloud-neutron-l3-dvr'
+ type: string
+ NeutronL3ComputeAgentLoggingSource:
+ type: json
+ default:
+ tag: openstack.neutron.agent.l3-compute
+ path: /var/log/neutron/l3-agent.log
resources:
@@ -41,6 +49,10 @@ outputs:
description: Role data for DVR L3 Agent on Compute Nodes
value:
service_name: neutron_l3_compute_dvr
+ monitoring_subscription: {get_param: MonitoringSubscriptionNeutronL3Dvr}
+ logging_source: {get_param: NeutronL3ComputeAgentLoggingSource}
+ logging_groups:
+ - neutron
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml
index 54beee6b..a89e3d75 100644
--- a/puppet/services/neutron-l3.yaml
+++ b/puppet/services/neutron-l3.yaml
@@ -34,6 +34,14 @@ parameters:
- allowed_values:
- legacy
- dvr_snat
+ MonitoringSubscriptionNeutronL3:
+ default: 'overcloud-neutron-l3-agent'
+ type: string
+ NeutronL3AgentLoggingSource:
+ type: json
+ default:
+ tag: openstack.neutron.agent.l3
+ path: /var/log/neutron/l3-agent.log
resources:
@@ -49,11 +57,18 @@ outputs:
description: Role data for the Neutron L3 agent service.
value:
service_name: neutron_l3
+ monitoring_subscription: {get_param: MonitoringSubscriptionNeutronL3}
+ logging_source: {get_param: NeutronL3AgentLoggingSource}
+ logging_groups:
+ - neutron
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
neutron::agents::l3::router_delete_namespaces: True
neutron::agents::l3::agent_mode : {get_param: NeutronL3AgentMode}
+ tripleo.neutron_l3.firewall_rules:
+ '106 neutron_l3 vrrp':
+ proto: vrrp
step_config: |
include tripleo::profile::base::neutron::l3
diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml
index b9ec277a..8be4c6d6 100644
--- a/puppet/services/neutron-metadata.yaml
+++ b/puppet/services/neutron-metadata.yaml
@@ -23,13 +23,28 @@ parameters:
type: string
hidden: true
NeutronWorkers:
- default: 0
- description: Number of workers for Neutron service.
- type: number
+ default: ''
+ description: |
+ Sets the number of worker processes for the neutron metadata agent. The
+ default value results in the configuration being left unset and a
+ system-dependent default will be chosen (usually the number of
+ processors). Please note that this can result in a large number of
+ processes and high memory consumption on systems with a large core count. On
+ such systems it is recommended that a non-default value be selected that
+ matches the load requirements.
+ type: string
NeutronPassword:
description: The password for the neutron service and db account, used by neutron agents.
type: string
hidden: true
+ MonitoringSubscriptionNeutronMetadata:
+ default: 'overcloud-neutron-metadata'
+ type: string
+ NeutronMetadataAgentLoggingSource:
+ type: json
+ default:
+ tag: openstack.neutron.agent.metadata
+ path: /var/log/neutron/metadata-agent.log
resources:
@@ -45,6 +60,10 @@ outputs:
description: Role data for the Neutron Metadata agent service.
value:
service_name: neutron_metadata
+ monitoring_subscription: {get_param: MonitoringSubscriptionNeutronMetadata}
+ logging_source: {get_param: NeutronMetadataAgentLoggingSource}
+ logging_groups:
+ - neutron
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
@@ -53,5 +72,6 @@ outputs:
neutron::agents::metadata::auth_password: {get_param: NeutronPassword}
neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
neutron::agents::metadata::auth_tenant: 'service'
+ neutron::agents::metadata::metadata_ip: '"%{hiera(\"nova_metadata_vip\")}"'
step_config: |
include tripleo::profile::base::neutron::metadata
diff --git a/puppet/services/neutron-midonet.yaml b/puppet/services/neutron-midonet.yaml
index 48830d81..0de256c0 100644
--- a/puppet/services/neutron-midonet.yaml
+++ b/puppet/services/neutron-midonet.yaml
@@ -40,12 +40,16 @@ parameters:
description: 'Whether enable Cassandra cluster on Controller'
type: boolean
default: false
+ MonitoringSubscriptionNeutronMidonet:
+ default: 'overcloud-neutron-midonet'
+ type: string
outputs:
role_data:
description: Role data for the Neutron Midonet plugin and services
value:
service_name: neutron_midonet
+ monitoring_subscription: {get_param: MonitoringSubscriptionNeutronMidonet}
config_settings:
tripleo::profile::base::neutron::midonet::admin_password: {get_param: AdminPassword}
tripleo::profile::base::neutron::midonet::keystone_admin_token: {get_param: AdminToken}
diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml
index 1b19f90f..cca0deee 100644
--- a/puppet/services/neutron-ovs-agent.yaml
+++ b/puppet/services/neutron-ovs-agent.yaml
@@ -53,6 +53,22 @@ parameters:
description: |
Enable ARP responder feature in the OVS Agent.
type: boolean
+ MonitoringSubscriptionNeutronOvs:
+ default: 'overcloud-neutron-ovs-agent'
+ type: string
+ NeutronOVSFirewallDriver:
+ default: ''
+ description: |
+ Configure the classname of the firewall driver to use for implementing
+ security groups. Possible values depend on system configuration. Some
+ examples are: noop, openvswitch, iptables_hybrid. The default value of an
+ empty string will result in a default supported configuration.
+ type: string
+ NeutronOpenVswitchAgentLoggingSource:
+ type: json
+ default:
+ tag: openstack.neutron.agent.openvswitch
+ path: /var/log/neutron/openvswitch-agent.log
resources:
@@ -68,10 +84,14 @@ outputs:
description: Role data for the Neutron OVS agent service.
value:
service_name: neutron_ovs_agent
+ monitoring_subscription: {get_param: MonitoringSubscriptionNeutronOvs}
+ logging_source: {get_param: NeutronOpenVswitchAgentLoggingSource}
+ logging_groups:
+ - neutron
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- neutron::agents::ml2::ovs::l2_population: {get_param: NeutronEnableL2Pop}
+ - neutron::agents::ml2::ovs::l2_population: {get_param: NeutronEnableL2Pop}
neutron::agents::ml2::ovs::enable_distributed_routing: {get_param: NeutronEnableDVR}
neutron::agents::ml2::ovs::arp_responder: {get_param: NeutronEnableARPResponder}
neutron::agents::ml2::ovs::bridge_mappings:
@@ -96,5 +116,12 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
+ neutron::agents::ml2::ovs::firewall_driver: {get_param: NeutronOVSFirewallDriver}
+ tripleo.neutron_ovs_agent.firewall_rules:
+ '118 neutron vxlan networks':
+ proto: 'udp'
+ dport: 4789
+ '136 neutron gre networks':
+ proto: 'gre'
step_config: |
include ::tripleo::profile::base::neutron::ovs
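
A sketch of selecting a specific OVS firewall driver through the new parameter; the driver name is illustrative, and the empty-string default keeps the distribution's supported configuration:

    parameter_defaults:
      # Use the native OVS conntrack-based firewall instead of the hybrid
      # iptables driver.
      NeutronOVSFirewallDriver: openvswitch
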
diff --git a/puppet/services/neutron-ovs-dpdk-agent.yaml b/puppet/services/neutron-ovs-dpdk-agent.yaml
index 1f1e14ab..fdfa1c03 100644
--- a/puppet/services/neutron-ovs-dpdk-agent.yaml
+++ b/puppet/services/neutron-ovs-dpdk-agent.yaml
@@ -19,13 +19,15 @@ parameters:
via parameter_defaults in the resource registry.
type: json
NeutronDpdkCoreList:
- default: ""
description: List of cores to be used for DPDK Poll Mode Driver
type: string
+ constraints:
+ - allowed_pattern: "'[0-9,-]+'"
NeutronDpdkMemoryChannels:
- default: ""
description: Number of memory channels to be used for DPDK
type: string
+ constraints:
+ - allowed_pattern: "[0-9]+"
NeutronDpdkSocketMemory:
default: ""
description: Memory allocated for each socket
@@ -63,7 +65,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronOvsAgent, role_data, config_settings]
- neutron::agents::ml2::ovs::enable_dpdk: true
+ - neutron::agents::ml2::ovs::enable_dpdk: true
neutron::agents::ml2::ovs::datapath_type: {get_param: NeutronDatapathType}
neutron::agents::ml2::ovs::vhostuser_socket_dir: {get_param: NeutronVhostuserSocketDir}
vswitch::dpdk::core_list: {get_param: NeutronDpdkCoreList}
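
Because NeutronDpdkCoreList and NeutronDpdkMemoryChannels no longer have defaults and carry allowed_pattern constraints (the core list pattern expects the surrounding single quotes to be part of the value), DPDK deployments must supply values such as these illustrative ones:

    parameter_defaults:
      NeutronDpdkCoreList: "'2,3,4-7'"    # quotes are part of the value
      NeutronDpdkMemoryChannels: "4"
      NeutronDpdkSocketMemory: "1024,1024"
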
diff --git a/puppet/services/neutron-plugin-ml2-ovn.yaml b/puppet/services/neutron-plugin-ml2-ovn.yaml
new file mode 100644
index 00000000..e98ed497
--- /dev/null
+++ b/puppet/services/neutron-plugin-ml2-ovn.yaml
@@ -0,0 +1,79 @@
+heat_template_version: 2016-04-08
+
+description: >
+ OpenStack Neutron ML2/OVN plugin configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OVNDbHost:
+ description: IP address on which the OVN DB servers are listening
+ type: string
+ OVNNorthboundServerPort:
+ description: Port of the OVN Northbound DB server
+ type: number
+ default: 6641
+ OVNDbConnectionTimeout:
+ description: Timeout in seconds for the OVSDB connection transaction
+ type: number
+ default: 60
+ OVNVifType:
+ description: Type of VIF to be used for ports
+ type: string
+ default: ovs
+ constraints:
+ - allowed_values:
+ - ovs
+ - vhostuser
+ OVNNeutronSyncMode:
+ description: The synchronization mode of OVN with Neutron DB
+ type: string
+ default: log
+ constraints:
+ - allowed_values:
+ - log
+ - 'off'
+ - repair
+ OVNQosDriver:
+ description: OVN notification driver for Neutron QOS service plugin
+ type: string
+ default: NULL
+
+resources:
+
+ NeutronMl2Base:
+ type: ./neutron-plugin-ml2.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron ML2/OVN plugin.
+ value:
+ service_name: neutron_plugin_ml2_ovn
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronMl2Base, role_data, config_settings]
+ - ovn::northbound::port: {get_param: OVNNorthboundServerPort}
+ tripleo::profile::base::neutron::plugins::ml2::ovn::ovn_db_host: {get_param: OVNDbHost}
+ neutron::plugins::ovn::ovsdb_connection_timeout: {get_param: OVNDbConnectionTimeout}
+ neutron::plugins::ovn::neutron_sync_mode: {get_param: OVNNeutronSyncMode}
+ neutron::plugins::ovn::ovn_l3_mode: true
+ neutron::plugins::ovn::vif_type: {get_param: OVNVifType}
+ neutron::server::qos_notification_drivers: {get_param: OVNQosDriver}
+ step_config: |
+ include ::tripleo::profile::base::neutron::plugins::ml2
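
A usage sketch for the new ML2/OVN plugin service; the registry key name and addresses are assumptions, not taken from this change:

    resource_registry:
      OS::TripleO::Services::NeutronCorePlugin: ../puppet/services/neutron-plugin-ml2-ovn.yaml

    parameter_defaults:
      OVNDbHost: 192.0.2.20        # where the OVN NB/SB ovsdb servers listen
      OVNNeutronSyncMode: repair   # allowed: log, off, repair
      OVNVifType: ovs              # allowed: ovs, vhostuser
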
diff --git a/puppet/services/neutron-plugin-opencontrail.yaml b/puppet/services/neutron-plugin-opencontrail.yaml
index 4e294965..098c9d05 100644
--- a/puppet/services/neutron-plugin-opencontrail.yaml
+++ b/puppet/services/neutron-plugin-opencontrail.yaml
@@ -59,7 +59,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- - neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions
+ - neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions,/usr/lib/python2.7/site-packages/neutron_lbaas/extensions
neutron::plugins::opencontrail::api_server_ip: {get_param: ContrailApiServerIp}
neutron::plugins::opencontrail::api_server_port: {get_param: ContrailApiServerPort}
diff --git a/puppet/services/neutron-sriov-agent.yaml b/puppet/services/neutron-sriov-agent.yaml
index 559500df..44f7f242 100644
--- a/puppet/services/neutron-sriov-agent.yaml
+++ b/puppet/services/neutron-sriov-agent.yaml
@@ -14,6 +14,11 @@ parameters:
DefaultPasswords:
default: {}
type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
NeutronPhysicalDevMappings:
description: >
List of <physical_network>:<physical device>
@@ -39,11 +44,15 @@ parameters:
Example "eth1:4096","eth2:128"
type: comma_delimited_list
default: ""
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
+
+resources:
+
+ NeutronBase:
+ type: ./neutron-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
@@ -51,8 +60,10 @@ outputs:
value:
service_name: neutron_sriov_agent
config_settings:
- neutron::agents::ml2::sriov::physical_device_mappings: {get_param: NeutronPhysicalDevMappings}
- neutron::agents::ml2::sriov::exclude_devices: {get_param: NeutronExcludeDevices}
- neutron::agents::ml2::sriov::number_of_vfs: {get_param: NeutronSriovNumVFs}
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ - neutron::agents::ml2::sriov::physical_device_mappings: {get_param: NeutronPhysicalDevMappings}
+ neutron::agents::ml2::sriov::exclude_devices: {get_param: NeutronExcludeDevices}
+ tripleo::host::sriov::number_of_vfs: {get_param: NeutronSriovNumVFs}
step_config: |
include ::tripleo::profile::base::neutron::sriov
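
With the SR-IOV agent now layered on neutron-base.yaml, only the device-specific parameters need to be supplied; a sketch with illustrative interface names:

    parameter_defaults:
      # <physical_network>:<physical device> pairs
      NeutronPhysicalDevMappings: "datacentre:eth1"
      # <interface>:<number of VFs>, consumed by tripleo::host::sriov
      NeutronSriovNumVFs: "eth1:8"
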
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index c2bd395e..ba7fb2e1 100644
--- a/puppet/services/nova-api.yaml
+++ b/puppet/services/nova-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
OpenStack Nova API service configured with Puppet
@@ -30,8 +30,39 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ NeutronMetadataProxySharedSecret:
+ description: Shared secret to prevent spoofing
+ type: string
+ hidden: true
+ InstanceNameTemplate:
+ default: 'instance-%08x'
+ description: Template string to be used to generate instance names
+ type: string
+ NovaEnableDBPurge:
+ default: true
+ description: |
+ Whether to create cron job for purging soft deleted rows in Nova database.
+ type: boolean
+ MonitoringSubscriptionNovaApi:
+ default: 'overcloud-nova-api'
+ type: string
+ NovaApiLoggingSource:
+ type: json
+ default:
+ tag: openstack.nova.api
+ path: /var/log/nova/nova-api.log
+
+conditions:
+ nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
resources:
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
NovaBase:
type: ./nova-base.yaml
properties:
@@ -44,36 +75,81 @@ outputs:
description: Role data for the Nova API service.
value:
service_name: nova_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionNovaApi}
+ logging_source: {get_param: NovaApiLoggingSource}
+ logging_groups:
+ - nova
config_settings:
map_merge:
- - get_attr: [NovaBase, role_data, config_settings]
+ - get_attr: [NovaBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - nova::cron::archive_deleted_rows::hour: '"*/12"'
+ nova::cron::archive_deleted_rows::destination: '"/dev/null"'
+ tripleo.nova_api.firewall_rules:
+ '113 nova_api':
+ dport:
+ - 6080
+ - 13080
+ - 8773
+ - 3773
+ - 8774
+ - 13774
+ - 8775
+ nova::keystone::authtoken::project_name: 'service'
+ nova::keystone::authtoken::password: {get_param: NovaPassword}
+ nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ nova::api::enabled: true
+ nova::api::default_floating_pool: 'public'
+ nova::api::sync_db_api: true
+ nova::api::enable_proxy_headers_parsing: true
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet -> IP/CIDR
+ nova::api::api_bind_address: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::api::service_name: 'httpd'
+ nova::wsgi::apache::ssl: false
+ nova::wsgi::apache::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::wsgi::apache::servername:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::wsgi::apache::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
+ nova::api::instance_name_template: {get_param: InstanceNameTemplate}
+ nova_enable_db_purge: {get_param: NovaEnableDBPurge}
+ -
+ if:
+ - nova_workers_zero
+ - {}
- nova::api::osapi_compute_workers: {get_param: NovaWorkers}
- nova::api::metadata_workers: {get_param: NovaWorkers}
- nova::cron::archive_deleted_rows::hour: '"*/12"'
- nova::cron::archive_deleted_rows::destination: '"/dev/null"'
- tripleo.nova_api.firewall_rules:
- '113 nova_api':
- dport:
- - 6080
- - 13080
- - 8773
- - 3773
- - 8774
- - 13774
- - 8775
- nova::keystone::authtoken::project_name: 'service'
- nova::keystone::authtoken::password: {get_param: NovaPassword}
- nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- nova::api::enabled: true
- nova::api::default_floating_pool: 'public'
- nova::api::sync_db_api: true
- nova::api::enable_proxy_headers_parsing: true
- nova::keystone::auth::tenant: 'service'
- nova::keystone::auth::public_url: {get_param: [EndpointMap, NovaPublic, uri]}
- nova::keystone::auth::internal_url: {get_param: [EndpointMap, NovaInternal, uri]}
- nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
- nova::keystone::auth::password: {get_param: NovaPassword}
- nova::keystone::auth::region: {get_param: KeystoneRegion}
+ nova::wsgi::apache::workers: {get_param: NovaWorkers}
step_config: |
include tripleo::profile::base::nova::api
+ service_config_settings:
+ keystone:
+ nova::keystone::auth::tenant: 'service'
+ nova::keystone::auth::public_url: {get_param: [EndpointMap, NovaPublic, uri]}
+ nova::keystone::auth::internal_url: {get_param: [EndpointMap, NovaInternal, uri]}
+ nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
+ nova::keystone::auth::password: {get_param: NovaPassword}
+ nova::keystone::auth::region: {get_param: KeystoneRegion}
+ mysql:
+ nova::db::mysql::password: {get_param: NovaPassword}
+ nova::db::mysql::user: nova
+ nova::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ nova::db::mysql::dbname: nova
+ nova::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+ nova::db::mysql_api::password: {get_param: NovaPassword}
+ nova::db::mysql_api::user: nova_api
+ nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ nova::db::mysql_api::dbname: nova_api
+ nova::db::mysql_api::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml
index 471ece34..8db00d8f 100644
--- a/puppet/services/nova-base.yaml
+++ b/puppet/services/nova-base.yaml
@@ -95,20 +95,6 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/nova_api'
- nova::db::mysql::password: {get_input: nova_password}
- nova::db::mysql::user: nova
- nova::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- nova::db::mysql::dbname: nova
- nova::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
- nova::db::mysql_api::password: {get_input: nova_password}
- nova::db::mysql_api::user: nova_api
- nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- nova::db::mysql_api::dbname: nova_api
- nova::db::mysql_api::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
nova::debug: {get_param: Debug}
nova::purge_config: {get_param: EnableConfigPurge}
nova::network::neutron::neutron_project_name: 'service'
@@ -123,18 +109,6 @@ outputs:
nova::notify_on_state_change: 'vm_and_task_state'
nova::notification_driver: messagingv2
nova::network::neutron::neutron_auth_type: 'v3password'
- nova::db::mysql::user: nova
- nova::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- nova::db::mysql::dbname: nova
- nova::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
- nova::db::mysql_api::user: nova_api
- nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- nova::db::mysql_api::dbname: nova_api
- nova::db::mysql_api::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
nova::db::database_db_max_retries: -1
nova::db::database_max_retries: -1
nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index 6bc1c187..f7f2510e 100644
--- a/puppet/services/nova-compute.yaml
+++ b/puppet/services/nova-compute.yaml
@@ -60,6 +60,21 @@ parameters:
Ex. NovaVcpuPinSet: ['4-12','^8'] will reserve cores from 4-12 excluding 8
type: comma_delimited_list
default: []
+ NovaReservedHostMemory:
+ description: >
+ Reserved RAM for host processes.
+ type: number
+ default: 2048
+ constraints:
+ - range: { min: 512 }
+ MonitoringSubscriptionNovaCompute:
+ default: 'overcloud-nova-compute'
+ type: string
+ NovaComputeLoggingSource:
+ type: json
+ default:
+ tag: openstack.nova.compute
+ path: /var/log/nova/nova-compute.log
resources:
NovaBase:
@@ -74,6 +89,10 @@ outputs:
description: Role data for the Nova Compute service.
value:
service_name: nova_compute
+ monitoring_subscription: {get_param: MonitoringSubscriptionNovaCompute}
+ logging_source: {get_param: NovaComputeLoggingSource}
+ logging_groups:
+ - nova
config_settings:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
@@ -84,6 +103,7 @@ outputs:
params:
JSON_PARAM: {get_param: NovaPCIPassthrough}
nova::compute::vcpu_pin_set: {get_param: NovaVcpuPinSet}
+ nova::compute::reserved_host_memory: {get_param: NovaReservedHostMemory}
# we manage migration in nova common puppet profile
nova::compute::libvirt::migration_support: false
tripleo::profile::base::nova::manage_migration: true
@@ -108,11 +128,6 @@ outputs:
# encryption work will obsolete the need to use TUNNELLED transport
# mode.
nova::migration::live_migration_tunnelled: {get_param: NovaEnableRbdBackend}
- # Changing the default from 512MB. The current templates can not deploy
- # overclouds with swap. On an idle compute node, we see ~1024MB of RAM
- # used. 2048 is suggested to account for other possible operations for
- # example openvswitch.
- nova::compute::reserved_host_memory: 2048
nova::compute::neutron::libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
@@ -121,7 +136,10 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
nova::compute::vncserver_proxyclient_address: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
- nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host]}
+ nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
+ nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
+ nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
+ nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
step_config: |
# TODO(emilien): figure how to deal with libvirt profile.
# We'll probably treat it like we do with Neutron plugins.
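
The previously hard-coded 2048 MB reservation is now exposed as NovaReservedHostMemory (minimum 512 MB); hosts running additional services such as openvswitch may want to raise it. A sketch with an illustrative value:

    parameter_defaults:
      # RAM (MB) withheld from Nova scheduling for host processes.
      NovaReservedHostMemory: 4096
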
diff --git a/puppet/services/nova-conductor.yaml b/puppet/services/nova-conductor.yaml
index 0b6169da..a10d9560 100644
--- a/puppet/services/nova-conductor.yaml
+++ b/puppet/services/nova-conductor.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
OpenStack Nova Conductor service configured with Puppet
@@ -22,6 +22,17 @@ parameters:
default: 0
description: Number of workers for Nova Conductor service.
type: number
+ MonitoringSubscriptionNovaConductor:
+ default: 'overcloud-nova-conductor'
+ type: string
+ NovaConductorLoggingSource:
+ type: json
+ default:
+ tag: openstack.nova.conductor
+ path: /var/log/nova/nova-conductor.log
+
+conditions:
+ nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
resources:
NovaBase:
@@ -36,9 +47,17 @@ outputs:
description: Role data for the Nova Conductor service.
value:
service_name: nova_conductor
+ monitoring_subscription: {get_param: MonitoringSubscriptionNovaConductor}
+ logging_source: {get_param: NovaConductorLoggingSource}
+ logging_groups:
+ - nova
config_settings:
map_merge:
- - get_attr: [NovaBase, role_data, config_settings]
+ - get_attr: [NovaBase, role_data, config_settings]
+ -
+ if:
+ - nova_workers_zero
+ - {}
- nova::conductor::workers: {get_param: NovaWorkers}
step_config: |
include tripleo::profile::base::nova::conductor
diff --git a/puppet/services/nova-consoleauth.yaml b/puppet/services/nova-consoleauth.yaml
index 67ff2ec3..85e60420 100644
--- a/puppet/services/nova-consoleauth.yaml
+++ b/puppet/services/nova-consoleauth.yaml
@@ -18,6 +18,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionNovaConsoleauth:
+ default: 'overcloud-nova-consoleauth'
+ type: string
+ NovaConsoleauthLoggingSource:
+ type: json
+ default:
+ tag: openstack.nova.consoleauth
+ path: /var/log/nova/nova-consoleauth.log
resources:
NovaBase:
@@ -32,6 +40,10 @@ outputs:
description: Role data for the Nova Consoleauth service.
value:
service_name: nova_consoleauth
+ monitoring_subscription: {get_param: MonitoringSubscriptionNovaConsoleauth}
+ logging_source: {get_param: NovaConsoleauthLoggingSource}
+ logging_groups:
+ - nova
config_settings:
get_attr: [NovaBase, role_data, config_settings]
step_config: |
diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml
index 1ebec974..31732580 100644
--- a/puppet/services/nova-libvirt.yaml
+++ b/puppet/services/nova-libvirt.yaml
@@ -21,6 +21,9 @@ parameters:
NovaComputeLibvirtType:
type: string
default: kvm
+ MonitoringSubscriptionNovaLibvirt:
+ default: 'overcloud-nova-libvirt'
+ type: string
resources:
NovaBase:
@@ -35,6 +38,7 @@ outputs:
description: Role data for the Libvirt service.
value:
service_name: nova_libvirt
+ monitoring_subscription: {get_param: MonitoringSubscriptionNovaLibvirt}
config_settings:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
@@ -46,6 +50,10 @@ outputs:
tripleo::profile::base::nova::libvirt_enabled: true
nova::compute::libvirt::services::libvirt_virt_type: {get_param: NovaComputeLibvirtType}
nova::compute::libvirt::libvirt_virt_type: {get_param: NovaComputeLibvirtType}
+ tripleo.nova_libvirt.firewall_rules:
+ '200 nova_libvirt':
+ dport:
+ - 16509
step_config: |
include tripleo::profile::base::nova::libvirt
diff --git a/puppet/services/nova-metadata.yaml b/puppet/services/nova-metadata.yaml
new file mode 100644
index 00000000..40931da6
--- /dev/null
+++ b/puppet/services/nova-metadata.yaml
@@ -0,0 +1,42 @@
+heat_template_version: 2016-10-14
+
+description: >
+ OpenStack Nova API service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NovaWorkers:
+ default: 0
+ description: Number of workers for Nova API service.
+ type: number
+
+conditions:
+ nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Metadata service.
+ value:
+ service_name: nova_metadata
+ config_settings:
+ map_merge:
+ - nova::api::metadata_listen: {get_param: [ServiceNetMap, NovaMetadataNetwork]}
+ -
+ if:
+ - nova_workers_zero
+ - {}
+ - nova::api::metadata_workers: {get_param: NovaWorkers}
+ step_config: ""
diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml
index c8f2591d..d89e3e11 100644
--- a/puppet/services/nova-scheduler.yaml
+++ b/puppet/services/nova-scheduler.yaml
@@ -29,6 +29,14 @@ parameters:
An array of filters used by Nova to filter a node.These filters will be
applied in the order they are listed, so place your most restrictive
filters first to make the filtering process more efficient.
+ MonitoringSubscriptionNovaScheduler:
+ default: 'overcloud-nova-scheduler'
+ type: string
+ NovaSchedulerLoggingSource:
+ type: json
+ default:
+ tag: openstack.nova.scheduler
+ path: /var/log/nova/nova-scheduler.log
resources:
NovaBase:
@@ -43,6 +51,10 @@ outputs:
description: Role data for the Nova Scheduler service.
value:
service_name: nova_scheduler
+ monitoring_subscription: {get_param: MonitoringSubscriptionNovaScheduler}
+ logging_source: {get_param: NovaSchedulerLoggingSource}
+ logging_groups:
+ - nova
config_settings:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
diff --git a/puppet/services/nova-vncproxy.yaml b/puppet/services/nova-vnc-proxy.yaml
index 0a1785d8..85d59ae6 100644
--- a/puppet/services/nova-vncproxy.yaml
+++ b/puppet/services/nova-vnc-proxy.yaml
@@ -18,6 +18,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionNovaVNCProxy:
+ default: 'overcloud-nova-vncproxy'
+ type: string
+ NovaVncproxyLoggingSource:
+ type: json
+ default:
+ tag: openstack.nova.vncproxy
+ path: /var/log/nova/nova-vncproxy.log
resources:
NovaBase:
@@ -31,20 +39,23 @@ outputs:
role_data:
description: Role data for the Nova Vncproxy service.
value:
- service_name: nova_vncproxy
+ service_name: nova_vnc_proxy
+ monitoring_subscription: {get_param: MonitoringSubscriptionNovaVNCProxy}
+ logging_source: {get_param: NovaVncproxyLoggingSource}
+ logging_groups:
+ - nova
config_settings:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- nova::vncproxy::enabled: true
nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
- # Remove brackets that may come if the IP address is IPv6.
- # For DNS names and IPv4, this will just get NovaVNCProxyPublic
- nova::vncproxy::common::vncproxy_host:
- str_replace:
- template: {get_param: [EndpointMap, NovaVNCProxyPublic, host]}
- params:
- '[': ''
- ']': ''
+ nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet -> IP/CIDR
+ nova::vncproxy::host: {get_param: [ServiceNetMap, NovaApiNetwork]}
step_config: |
include tripleo::profile::base::nova::vncproxy
diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml
index 64dd7663..318c898e 100644
--- a/puppet/services/opendaylight-api.yaml
+++ b/puppet/services/opendaylight-api.yaml
@@ -8,10 +8,6 @@ parameters:
default: 8081
description: Set opendaylight service port
type: number
- EnableOpenDaylightOnController:
- default: false
- description: Whether to install OpenDaylight on control nodes.
- type: boolean
OpenDaylightUsername:
default: 'admin'
description: The username for the opendaylight server.
@@ -33,14 +29,6 @@ parameters:
description: List of features to install with ODL
type: comma_delimited_list
default: ["odl-netvirt-openstack","odl-netvirt-ui"]
- OpenDaylightConnectionProtocol:
- description: L7 protocol used for REST access
- type: string
- default: 'http'
- OpenDaylightCheckURL:
- description: URL postfix to verify ODL has finished starting up
- type: string
- default: 'restconf/operational/network-topology:network-topology/topology/netvirt:1'
OpenDaylightApiVirtualIP:
type: string
default: ''
@@ -66,16 +54,11 @@ outputs:
service_name: opendaylight_api
config_settings:
opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
- odl_on_controller: {get_param: EnableOpenDaylightOnController}
- opendaylight_check_url: {get_param: OpenDaylightCheckURL}
opendaylight::username: {get_param: OpenDaylightUsername}
opendaylight::password: {get_param: OpenDaylightPassword}
opendaylight::enable_l3: {get_param: OpenDaylightEnableL3}
opendaylight::extra_features: {get_param: OpenDaylightFeatures}
opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP}
- opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpenDaylightApiNetwork]}
- tripleo::haproxy::opendaylight: true
step_config: |
include tripleo::profile::base::neutron::opendaylight
- include tripleo::profile::base::neutron::plugins::ovs::opendaylight
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index 8bcb72f7..268ca244 100644
--- a/puppet/services/opendaylight-ovs.yaml
+++ b/puppet/services/opendaylight-ovs.yaml
@@ -19,6 +19,11 @@ parameters:
OpenDaylightApiVirtualIP:
type: string
default: ''
+ OpenDaylightProviderMappings:
+ description: Mappings between logical networks and physical interfaces.
+ Required for VLAN deployments. For example physnet1 -> eth1.
+ type: comma_delimited_list
+ default: "datacentre:br-ex"
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
@@ -43,5 +48,11 @@ outputs:
opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
opendaylight_check_url: {get_param: OpenDaylightCheckURL}
opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
+ neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
+ neutron::plugins::ovs::opendaylight::provider_mappings:
+ str_replace:
+ template: MAPPINGS
+ params:
+ MAPPINGS: {get_param: OpenDaylightProviderMappings}
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
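
A minimal environment-file sketch for the new OpenDaylightProviderMappings parameter; the datacentre:br-ex entry restates the default above, while the second mapping is an illustrative assumption for a VLAN deployment:

parameter_defaults:
  OpenDaylightProviderMappings: 'datacentre:br-ex,tenant_vlan:br-vlan'
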
diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml
index ac5b85c8..abfb9c80 100644
--- a/puppet/services/pacemaker.yaml
+++ b/puppet/services/pacemaker.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
Pacemaker service configured with Puppet
@@ -18,12 +18,74 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionPacemaker:
+ default: 'overcloud-pacemaker'
+ type: string
+ CorosyncIPv6:
+ default: false
+ description: Enable IPv6 in Corosync
+ type: boolean
+ EnableFencing:
+ default: false
+ description: Whether to enable fencing in Pacemaker or not.
+ type: boolean
+ PcsdPassword:
+ type: string
+ description: The password for the 'pcsd' user for pacemaker.
+ hidden: true
+ default: ''
+ FencingConfig:
+ default: {}
+ description: |
+ Pacemaker fencing configuration. The JSON should have
+ the following structure:
+ {
+ "devices": [
+ {
+ "agent": "AGENT_NAME",
+ "host_mac": "HOST_MAC_ADDRESS",
+ "params": {"PARAM_NAME": "PARAM_VALUE"}
+ }
+ ]
+ }
+ For instance:
+ {
+ "devices": [
+ {
+ "agent": "fence_xvm",
+ "host_mac": "52:54:00:aa:bb:cc",
+ "params": {
+ "multicast_address": "225.0.0.12",
+ "port": "baremetal_0",
+ "manage_fw": true,
+ "manage_key_file": true,
+ "key_file": "/etc/fence_xvm.key",
+ "key_file_password": "abcdef"
+ }
+ }
+ ]
+ }
+ type: json
+ PacemakerLoggingSource:
+ type: json
+ default:
+ tag: system.pacemaker
+ path: /var/log/pacemaker.log,/var/log/cluster/corosync.log
+ format: >-
+ /^(?<time>[^ ]*\s*[^ ]* [^ ]*)
+ \[(?<pid>[^ ]*)\]
+ (?<host>[^ ]*)
+ (?<message>.*)$/
outputs:
role_data:
description: Role data for the Pacemaker role.
value:
service_name: pacemaker
+ monitoring_subscription: {get_param: MonitoringSubscriptionPacemaker}
+ logging_groups:
+ - haclient
+ logging_source: {get_param: PacemakerLoggingSource}
config_settings:
pacemaker::corosync::cluster_name: 'tripleo_cluster'
pacemaker::corosync::manage_fw: false
@@ -40,5 +102,15 @@ outputs:
'131 pacemaker udp':
proto: 'udp'
dport: 5405
+ corosync_ipv6: {get_param: CorosyncIPv6}
+ tripleo::fencing::config: {get_param: FencingConfig}
+ enable_fencing: {get_param: EnableFencing}
+ hacluster_pwd:
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: PcsdPassword}
+ - {get_param: [DefaultPasswords, pcsd_password]}
step_config: |
include ::tripleo::profile::base::pacemaker
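
A minimal environment-file sketch for the fencing parameters introduced above, reusing the fence_xvm example from the FencingConfig description; the password values are placeholders, not real secrets:

parameter_defaults:
  EnableFencing: true
  PcsdPassword: 'REPLACE_WITH_PCSD_PASSWORD'
  FencingConfig:
    devices:
      - agent: fence_xvm
        host_mac: '52:54:00:aa:bb:cc'
        params:
          multicast_address: 225.0.0.12
          port: baremetal_0
          manage_fw: true
          manage_key_file: true
          key_file: /etc/fence_xvm.key
          key_file_password: abcdef

Note that, per the yaql expression above, hacluster_pwd falls back to the generated pcsd_password from DefaultPasswords when PcsdPassword is left empty.
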
diff --git a/puppet/services/pacemaker/ceilometer-agent-central.yaml b/puppet/services/pacemaker/ceilometer-agent-central.yaml
index 78714878..5dcb62ca 100644
--- a/puppet/services/pacemaker/ceilometer-agent-central.yaml
+++ b/puppet/services/pacemaker/ceilometer-agent-central.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCeilometerCentral:
+ default: 'overcloud-ceilometer-agent-central'
+ type: string
resources:
CeilometerServiceBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Ceilometer Central Agent pacemaker role.
value:
service_name: ceilometer_agent_central
+ monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCentral}
config_settings:
map_merge:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/ceilometer-agent-notification.yaml b/puppet/services/pacemaker/ceilometer-agent-notification.yaml
index 6290203a..dbe14499 100644
--- a/puppet/services/pacemaker/ceilometer-agent-notification.yaml
+++ b/puppet/services/pacemaker/ceilometer-agent-notification.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCeilometerNotification:
+ default: 'overcloud-ceilometer-agent-notification'
+ type: string
resources:
CeilometerServiceBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Ceilometer Notification Agent pacemaker role.
value:
service_name: ceilometer_agent_notification
+ monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerNotification}
config_settings:
map_merge:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/ceilometer-api.yaml b/puppet/services/pacemaker/ceilometer-api.yaml
index d130a4bb..4b6c18f6 100644
--- a/puppet/services/pacemaker/ceilometer-api.yaml
+++ b/puppet/services/pacemaker/ceilometer-api.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCeilometerApi:
+ default: 'overcloud-ceilometer-api'
+ type: string
resources:
CeilometerServiceBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Ceilometer API pacemaker role.
value:
service_name: ceilometer_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerApi}
config_settings:
map_merge:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/ceilometer-collector.yaml b/puppet/services/pacemaker/ceilometer-collector.yaml
index 97da92e8..4c919515 100644
--- a/puppet/services/pacemaker/ceilometer-collector.yaml
+++ b/puppet/services/pacemaker/ceilometer-collector.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionCeilometerCollector:
+ default: 'overcloud-ceilometer-collector'
+ type: string
resources:
CeilometerServiceBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Ceilometer Collector pacemaker role.
value:
service_name: ceilometer_collector
+ monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCollector}
config_settings:
map_merge:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/cinder-api.yaml b/puppet/services/pacemaker/cinder-api.yaml
index 7c83037d..6823789e 100644
--- a/puppet/services/pacemaker/cinder-api.yaml
+++ b/puppet/services/pacemaker/cinder-api.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Cinder API role.
value:
service_name: cinder_api
+ monitoring_subscription: {get_attr: [CinderApiBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [CinderApiBase, role_data, logging_source]}
+ logging_groups: {get_attr: [CinderApiBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [CinderApiBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/cinder-backup.yaml b/puppet/services/pacemaker/cinder-backup.yaml
index 7e940c7e..2ebc7680 100644
--- a/puppet/services/pacemaker/cinder-backup.yaml
+++ b/puppet/services/pacemaker/cinder-backup.yaml
@@ -48,6 +48,7 @@ outputs:
description: Role data for the Cinder Backup role.
value:
service_name: cinder_backup
+ monitoring_subscription: {get_attr: [CinderBackupBase, role_data, monitoring_subscription]}
config_settings:
map_merge:
- get_attr: [CinderBackupBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/cinder-scheduler.yaml b/puppet/services/pacemaker/cinder-scheduler.yaml
index 6f26b412..15e44be2 100644
--- a/puppet/services/pacemaker/cinder-scheduler.yaml
+++ b/puppet/services/pacemaker/cinder-scheduler.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Cinder Scheduler role.
value:
service_name: cinder_scheduler
+ monitoring_subscription: {get_attr: [CinderSchedulerBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [CinderSchedulerBase, role_data, logging_source]}
+ logging_groups: {get_attr: [CinderSchedulerBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [CinderSchedulerBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/cinder-volume.yaml b/puppet/services/pacemaker/cinder-volume.yaml
index ffcdb529..d91a0181 100644
--- a/puppet/services/pacemaker/cinder-volume.yaml
+++ b/puppet/services/pacemaker/cinder-volume.yaml
@@ -33,10 +33,14 @@ outputs:
description: Role data for the Cinder Volume role.
value:
service_name: cinder_volume
+ monitoring_subscription: {get_attr: [CinderVolumeBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [CinderVolumeBase, role_data, logging_source]}
+ logging_groups: {get_attr: [CinderVolumeBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [CinderVolumeBase, role_data, config_settings]
- cinder::volume::manage_service: false
cinder::volume::enabled: false
+ cinder::host: hostgroup
step_config:
include ::tripleo::profile::pacemaker::cinder::volume
diff --git a/puppet/services/pacemaker/database/mongodb.yaml b/puppet/services/pacemaker/database/mongodb.yaml
index 64ae2e91..982b6064 100644
--- a/puppet/services/pacemaker/database/mongodb.yaml
+++ b/puppet/services/pacemaker/database/mongodb.yaml
@@ -22,7 +22,7 @@ parameters:
resources:
MongoDbBase:
- type: ../../database/mongodb-base.yaml
+ type: ../../database/mongodb.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
diff --git a/puppet/services/pacemaker/database/mysql.yaml b/puppet/services/pacemaker/database/mysql.yaml
index d555ed0a..7deaf0ca 100644
--- a/puppet/services/pacemaker/database/mysql.yaml
+++ b/puppet/services/pacemaker/database/mysql.yaml
@@ -35,6 +35,21 @@ outputs:
value:
service_name: mysql
config_settings:
- get_attr: [MysqlBase, role_data, config_settings]
+ map_merge:
+ - get_attr: [MysqlBase, role_data, config_settings]
+ - tripleo::profile::pacemaker::database::mysql::bind_address:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ # NOTE: bind IP is found in Heat replacing the network name with the
+ # local node IP for the given network; replacement examples
+ # (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+          # internal_api_subnet -> IP/CIDR
+ tripleo::profile::pacemaker::database::mysql::gmcast_listen_addr:
+ get_param: [ServiceNetMap, MysqlNetwork]
step_config: |
include ::tripleo::profile::pacemaker::database::mysql
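
Given the str_replace above and the same ServiceNetMap substitution noted earlier, and assuming MysqlNetwork maps to internal_api with a local address of 172.16.2.10 (both illustrative), the resulting hieradata would look roughly like:

tripleo::profile::pacemaker::database::mysql::bind_address: "%{::fqdn_internal_api}"
tripleo::profile::pacemaker::database::mysql::gmcast_listen_addr: 172.16.2.10
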
diff --git a/puppet/services/pacemaker/database/redis.yaml b/puppet/services/pacemaker/database/redis.yaml
index d9156e67..196754eb 100644
--- a/puppet/services/pacemaker/database/redis.yaml
+++ b/puppet/services/pacemaker/database/redis.yaml
@@ -21,7 +21,7 @@ parameters:
resources:
RedisBase:
- type: ../../database/redis-base.yaml
+ type: ../../database/redis.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
diff --git a/puppet/services/pacemaker/glance-api.yaml b/puppet/services/pacemaker/glance-api.yaml
index ef4ba79a..20a439f6 100644
--- a/puppet/services/pacemaker/glance-api.yaml
+++ b/puppet/services/pacemaker/glance-api.yaml
@@ -57,6 +57,9 @@ outputs:
description: Role data for the Glance role.
value:
service_name: glance_api
+ monitoring_subscription: {get_attr: [GlanceApiBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [GlanceApiBase, role_data, logging_source]}
+ logging_groups: {get_attr: [GlanceApiBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [GlanceApiBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/glance-registry.yaml b/puppet/services/pacemaker/glance-registry.yaml
index e417f09f..41f89fdd 100644
--- a/puppet/services/pacemaker/glance-registry.yaml
+++ b/puppet/services/pacemaker/glance-registry.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Glance role.
value:
service_name: glance_registry
+ monitoring_subscription: {get_attr: [GlanceRegistryBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [GlanceRegistryBase, role_data, logging_source]}
+ logging_groups: {get_attr: [GlanceRegistryBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [GlanceRegistryBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/gnocchi-api.yaml b/puppet/services/pacemaker/gnocchi-api.yaml
index 42c7131d..6a9161fa 100644
--- a/puppet/services/pacemaker/gnocchi-api.yaml
+++ b/puppet/services/pacemaker/gnocchi-api.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionGnocchiApi:
+ default: 'overcloud-gnocchi-api'
+ type: string
resources:
GnocchiServiceBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Gnocchi role.
value:
service_name: gnocchi_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiApi}
config_settings:
map_merge:
- get_attr: [GnocchiServiceBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/gnocchi-metricd.yaml b/puppet/services/pacemaker/gnocchi-metricd.yaml
index 177d7744..0f36b5d5 100644
--- a/puppet/services/pacemaker/gnocchi-metricd.yaml
+++ b/puppet/services/pacemaker/gnocchi-metricd.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionGnocchiMetricd:
+ default: 'overcloud-gnocchi-metricd'
+ type: string
resources:
GnocchiServiceBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Gnocchi role.
value:
service_name: gnocchi_metricd
+ monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiMetricd}
config_settings:
map_merge:
- get_attr: [GnocchiServiceBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/gnocchi-statsd.yaml b/puppet/services/pacemaker/gnocchi-statsd.yaml
index a247a514..b9afc590 100644
--- a/puppet/services/pacemaker/gnocchi-statsd.yaml
+++ b/puppet/services/pacemaker/gnocchi-statsd.yaml
@@ -18,6 +18,9 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionGnocchiStatsd:
+ default: 'overcloud-gnocchi-statsd'
+ type: string
resources:
GnocchiServiceBase:
@@ -32,6 +35,7 @@ outputs:
description: Role data for the Gnocchi role.
value:
service_name: gnocchi_statsd
+ monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiStatsd}
config_settings:
map_merge:
- get_attr: [GnocchiServiceBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/haproxy.yaml b/puppet/services/pacemaker/haproxy.yaml
index de028339..52104a71 100644
--- a/puppet/services/pacemaker/haproxy.yaml
+++ b/puppet/services/pacemaker/haproxy.yaml
@@ -32,6 +32,7 @@ outputs:
description: Role data for the HAproxy with pacemaker role.
value:
service_name: haproxy
+ monitoring_subscription: {get_attr: [LoadbalancerServiceBase, role_data, monitoring_subscription]}
config_settings:
map_merge:
- get_attr: [LoadbalancerServiceBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml
index 155a35ec..dd25905b 100644
--- a/puppet/services/pacemaker/heat-api-cfn.yaml
+++ b/puppet/services/pacemaker/heat-api-cfn.yaml
@@ -32,6 +32,9 @@ outputs:
description: Role data for the Heat CloudFormation API role.
value:
service_name: heat_api_cfn
+ monitoring_subscription: {get_attr: [HeatApiCfnBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [HeatApiCfnBase, role_data, logging_source]}
+ logging_groups: {get_attr: [HeatApiCfnBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [HeatApiCfnBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
index 85927650..18d2a0d5 100644
--- a/puppet/services/pacemaker/heat-api-cloudwatch.yaml
+++ b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
@@ -32,6 +32,9 @@ outputs:
description: Role data for the Heat Cloudwatch API role.
value:
service_name: heat_api_cloudwatch
+ monitoring_subscription: {get_attr: [HeatApiCloudwatchBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [HeatApiCloudwatchBase, role_data, logging_source]}
+ logging_groups: {get_attr: [HeatApiCloudwatchBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [HeatApiCloudwatchBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml
index d7220619..43122cb0 100644
--- a/puppet/services/pacemaker/heat-api.yaml
+++ b/puppet/services/pacemaker/heat-api.yaml
@@ -32,6 +32,9 @@ outputs:
description: Role data for the Heat API role.
value:
service_name: heat_api
+ monitoring_subscription: {get_attr: [HeatApiBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [HeatApiBase, role_data, logging_source]}
+ logging_groups: {get_attr: [HeatApiBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [HeatApiBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml
index 579f5f10..54bfdad2 100644
--- a/puppet/services/pacemaker/heat-engine.yaml
+++ b/puppet/services/pacemaker/heat-engine.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Heat engine role.
value:
service_name: heat_engine
+ monitoring_subscription: {get_attr: [HeatEngineBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [HeatEngineBase, role_data, logging_source]}
+ logging_groups: {get_attr: [HeatEngineBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [HeatEngineBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/horizon.yaml b/puppet/services/pacemaker/horizon.yaml
index bd1ff046..18de23ae 100644
--- a/puppet/services/pacemaker/horizon.yaml
+++ b/puppet/services/pacemaker/horizon.yaml
@@ -33,6 +33,7 @@ outputs:
description: Role data for the Horizon role.
value:
service_name: horizon
+ monitoring_subscription: {get_attr: [HorizonBase, role_data, monitoring_subscription]}
config_settings:
get_attr: [HorizonBase, role_data, config_settings]
step_config: |
diff --git a/puppet/services/pacemaker/keystone.yaml b/puppet/services/pacemaker/keystone.yaml
index 701f01f1..908b9bbd 100644
--- a/puppet/services/pacemaker/keystone.yaml
+++ b/puppet/services/pacemaker/keystone.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Keystone pacemaker role.
value:
service_name: keystone
+ monitoring_subscription: {get_attr: [KeystoneServiceBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [KeystoneServiceBase, role_data, logging_source]}
+ logging_groups: {get_attr: [KeystoneServiceBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [KeystoneServiceBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/manila-share.yaml b/puppet/services/pacemaker/manila-share.yaml
index 7479eb08..cabc31a0 100644
--- a/puppet/services/pacemaker/manila-share.yaml
+++ b/puppet/services/pacemaker/manila-share.yaml
@@ -33,6 +33,7 @@ outputs:
description: Role data for the manila-share pacemaker role.
value:
service_name: manila_share
+ monitoring_subscription: {get_attr: [ManilaShareBase, role_data, monitoring_subscription]}
config_settings:
map_merge:
- get_attr: [ManilaShareBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/memcached.yaml b/puppet/services/pacemaker/memcached.yaml
index e612d775..04b895b6 100644
--- a/puppet/services/pacemaker/memcached.yaml
+++ b/puppet/services/pacemaker/memcached.yaml
@@ -33,6 +33,7 @@ outputs:
description: Role data for the Memcached pacemaker role.
value:
service_name: memcached
+ monitoring_subscription: {get_attr: [MemcachedServiceBase, role_data, monitoring_subscription]}
config_settings:
map_merge:
- get_attr: [MemcachedServiceBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/neutron-dhcp.yaml b/puppet/services/pacemaker/neutron-dhcp.yaml
index f5f785e3..7fca73d6 100644
--- a/puppet/services/pacemaker/neutron-dhcp.yaml
+++ b/puppet/services/pacemaker/neutron-dhcp.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Neutron DHCP role.
value:
service_name: neutron_dhcp
+ monitoring_subscription: {get_attr: [NeutronDhcpBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [NeutronDhcpBase, role_data, logging_source]}
+ logging_groups: {get_attr: [NeutronDhcpBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [NeutronDhcpBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/neutron-l3.yaml b/puppet/services/pacemaker/neutron-l3.yaml
index 87176632..cdb87f50 100644
--- a/puppet/services/pacemaker/neutron-l3.yaml
+++ b/puppet/services/pacemaker/neutron-l3.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Neutron L3 role.
value:
service_name: neutron_l3
+ monitoring_subscription: {get_attr: [NeutronL3Base, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [NeutronL3Base, role_data, logging_source]}
+ logging_groups: {get_attr: [NeutronL3Base, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [NeutronL3Base, role_data, config_settings]
diff --git a/puppet/services/pacemaker/neutron-metadata.yaml b/puppet/services/pacemaker/neutron-metadata.yaml
index e00c2424..49a31eb5 100644
--- a/puppet/services/pacemaker/neutron-metadata.yaml
+++ b/puppet/services/pacemaker/neutron-metadata.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Neutron Metadata role.
value:
service_name: neutron_metadata
+ monitoring_subscription: {get_attr: [NeutronMetadataBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [NeutronMetadataBase, role_data, logging_source]}
+ logging_groups: {get_attr: [NeutronMetadataBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [NeutronMetadataBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/neutron-midonet.yaml b/puppet/services/pacemaker/neutron-midonet.yaml
index fb39ea44..fdd5dafb 100644
--- a/puppet/services/pacemaker/neutron-midonet.yaml
+++ b/puppet/services/pacemaker/neutron-midonet.yaml
@@ -33,6 +33,7 @@ outputs:
description: Role data for the Neutron Midonet plugin.
value:
service_name: neutron_midonet
+ monitoring_subscription: {get_attr: [NeutronMidonetBase, role_data, monitoring_subscription]}
config_settings:
map_merge:
- get_attr: [NeutronMidonetBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/neutron-ovs-agent.yaml b/puppet/services/pacemaker/neutron-ovs-agent.yaml
index 353c2958..a2bd7c83 100644
--- a/puppet/services/pacemaker/neutron-ovs-agent.yaml
+++ b/puppet/services/pacemaker/neutron-ovs-agent.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Neutron OVS agent service.
value:
service_name: neutron_ovs_agent
+ monitoring_subscription: {get_attr: [NeutronOvsBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [NeutronOvsBase, role_data, logging_source]}
+ logging_groups: {get_attr: [NeutronOvsBase, role_data, logging_groups]}
config_settings:
get_attr: [NeutronOvsBase, role_data, config_settings]
step_config: |
diff --git a/puppet/services/pacemaker/neutron-server.yaml b/puppet/services/pacemaker/neutron-server.yaml
index fc04e5ee..33bc2d99 100644
--- a/puppet/services/pacemaker/neutron-server.yaml
+++ b/puppet/services/pacemaker/neutron-server.yaml
@@ -37,6 +37,7 @@ outputs:
description: Role data for the Neutron Server.
value:
service_name: neutron_server
+ monitoring_subscription: {get_attr: [NeutronServerBase, role_data, monitoring_subscription]}
config_settings:
map_merge:
- get_attr: [NeutronServerBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/nova-api.yaml b/puppet/services/pacemaker/nova-api.yaml
index 709761e7..b86e438a 100644
--- a/puppet/services/pacemaker/nova-api.yaml
+++ b/puppet/services/pacemaker/nova-api.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Nova API role.
value:
service_name: nova_api
+ monitoring_subscription: {get_attr: [NovaApiBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [NovaApiBase, role_data, logging_source]}
+ logging_groups: {get_attr: [NovaApiBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [NovaApiBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/nova-conductor.yaml b/puppet/services/pacemaker/nova-conductor.yaml
index 7a2313c7..a0a766ec 100644
--- a/puppet/services/pacemaker/nova-conductor.yaml
+++ b/puppet/services/pacemaker/nova-conductor.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Nova Conductor role.
value:
service_name: nova_conductor
+ monitoring_subscription: {get_attr: [NovaConductorBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [NovaConductorBase, role_data, logging_source]}
+ logging_groups: {get_attr: [NovaConductorBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [NovaConductorBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/nova-consoleauth.yaml b/puppet/services/pacemaker/nova-consoleauth.yaml
index 77550c80..5d51eb47 100644
--- a/puppet/services/pacemaker/nova-consoleauth.yaml
+++ b/puppet/services/pacemaker/nova-consoleauth.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Nova Consoleauth role.
value:
service_name: nova_consoleauth
+ monitoring_subscription: {get_attr: [NovaConsoleauthBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [NovaConsoleauthBase, role_data, logging_source]}
+ logging_groups: {get_attr: [NovaConsoleauthBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [NovaConsoleauthBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/nova-scheduler.yaml b/puppet/services/pacemaker/nova-scheduler.yaml
index 2571ec43..8828ee11 100644
--- a/puppet/services/pacemaker/nova-scheduler.yaml
+++ b/puppet/services/pacemaker/nova-scheduler.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Nova Scheduler role.
value:
service_name: nova_scheduler
+ monitoring_subscription: {get_attr: [NovaSchedulerBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [NovaSchedulerBase, role_data, logging_source]}
+ logging_groups: {get_attr: [NovaSchedulerBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [NovaSchedulerBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/nova-vncproxy.yaml b/puppet/services/pacemaker/nova-vnc-proxy.yaml
index e536826e..ebe84a03 100644
--- a/puppet/services/pacemaker/nova-vncproxy.yaml
+++ b/puppet/services/pacemaker/nova-vnc-proxy.yaml
@@ -22,7 +22,7 @@ parameters:
resources:
NovaVncproxyBase:
- type: ../nova-vncproxy.yaml
+ type: ../nova-vnc-proxy.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
@@ -32,7 +32,10 @@ outputs:
role_data:
description: Role data for the Nova Vncproxy role.
value:
- service_name: nova_vncproxy
+ service_name: nova_vnc_proxy
+ monitoring_subscription: {get_attr: [NovaVncproxyBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [NovaVncproxyBase, role_data, logging_source]}
+ logging_groups: {get_attr: [NovaVncproxyBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [NovaVncproxyBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml
index 3eb01398..f3fa2d28 100644
--- a/puppet/services/pacemaker/rabbitmq.yaml
+++ b/puppet/services/pacemaker/rabbitmq.yaml
@@ -32,6 +32,7 @@ outputs:
description: Role data for the RabbitMQ pacemaker role.
value:
service_name: rabbitmq
+ monitoring_subscription: {get_attr: [RabbitMQServiceBase, role_data, monitoring_subscription]}
config_settings:
map_merge:
- get_attr: [RabbitMQServiceBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/sahara-api.yaml b/puppet/services/pacemaker/sahara-api.yaml
index e20b7a08..3dfb7d94 100644
--- a/puppet/services/pacemaker/sahara-api.yaml
+++ b/puppet/services/pacemaker/sahara-api.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Sahara API role.
value:
service_name: sahara_api
+ monitoring_subscription: {get_attr: [SaharaApiBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [SaharaApiBase, role_data, logging_source]}
+ logging_groups: {get_attr: [SaharaApiBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [SaharaApiBase, role_data, config_settings]
diff --git a/puppet/services/pacemaker/sahara-engine.yaml b/puppet/services/pacemaker/sahara-engine.yaml
index 07de74ca..a06d11b3 100644
--- a/puppet/services/pacemaker/sahara-engine.yaml
+++ b/puppet/services/pacemaker/sahara-engine.yaml
@@ -33,6 +33,9 @@ outputs:
description: Role data for the Sahara Engine role.
value:
service_name: sahara_engine
+ monitoring_subscription: {get_attr: [SaharaEngineBase, role_data, monitoring_subscription]}
+ logging_source: {get_attr: [SaharaEngineBase, role_data, logging_source]}
+ logging_groups: {get_attr: [SaharaEngineBase, role_data, logging_groups]}
config_settings:
map_merge:
- get_attr: [SaharaEngineBase, role_data, config_settings]
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index 06595b07..5387529d 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -38,12 +38,23 @@ parameters:
type: string
default: ''
hidden: true
+ RabbitHAQueues:
+ description:
+      The number of HA queues to be configured in rabbit. The default is 0, which will
+ be automatically overridden to CEIL(N/2) where N is the number of nodes running
+ rabbitmq.
+ default: 0
+ type: number
+ MonitoringSubscriptionRabbitmq:
+ default: 'overcloud-rabbitmq'
+ type: string
outputs:
role_data:
description: Role data for the RabbitMQ role.
value:
service_name: rabbitmq
+ monitoring_subscription: {get_param: MonitoringSubscriptionRabbitmq}
config_settings:
rabbitmq::file_limit: {get_param: RabbitFDLimit}
rabbitmq::default_user: {get_param: RabbitUserName}
@@ -54,7 +65,7 @@ outputs:
dport:
- 4369
- 5672
- - 35672
+ - 25672
rabbitmq::delete_guest_user: false
rabbitmq::wipe_db_on_cookie_change: true
rabbitmq::port: '5672'
@@ -62,13 +73,14 @@ outputs:
rabbitmq::repos_ensure: false
rabbitmq_environment:
RABBITMQ_NODENAME: "rabbit@%{::hostname}"
- RABBITMQ_SERVER_ERL_ARGS: '"+K true +A30 +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
+ RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
rabbitmq_kernel_variables:
- inet_dist_listen_min: '35672'
- inet_dist_listen_max: '35672'
+ inet_dist_listen_min: '25672'
+ inet_dist_listen_max: '25672'
rabbitmq_config_variables:
tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]'
cluster_partition_handling: 'pause_minority'
+ queue_master_locator: '<<"min-masters">>'
loopback_users: '[]'
rabbitmq::erlang_cookie:
yaql:
@@ -84,5 +96,7 @@ outputs:
# internal_api_uri -> [IP]
      # internal_api_subnet -> IP/CIDR
rabbitmq::node_ip_address: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
+
step_config: |
include ::tripleo::profile::base::rabbitmq
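
A minimal environment-file sketch for the new RabbitHAQueues parameter; 0 (the default) keeps the automatic CEIL(N/2) sizing described above, and the fixed value here is purely illustrative:

parameter_defaults:
  RabbitHAQueues: 2

The firewall rule and the inet_dist_listen_min/max settings above move together so that the Erlang distribution port stays consistent at 25672.
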
diff --git a/puppet/services/sahara-api.yaml b/puppet/services/sahara-api.yaml
index fae9c434..54e63df4 100644
--- a/puppet/services/sahara-api.yaml
+++ b/puppet/services/sahara-api.yaml
@@ -30,6 +30,14 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ MonitoringSubscriptionSaharaApi:
+ default: 'overcloud-sahara-api'
+ type: string
+ SaharaApiLoggingSource:
+ type: json
+ default:
+ tag: openstack.sahara.api
+ path: /var/log/sahara/sahara-api.log
resources:
SaharaBase:
@@ -44,16 +52,15 @@ outputs:
description: Role data for the Sahara API role.
value:
service_name: sahara_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionSaharaApi}
+ logging_source: {get_param: SaharaApiLoggingSource}
+ logging_groups:
+ - sahara
config_settings:
map_merge:
- get_attr: [SaharaBase, role_data, config_settings]
- sahara::port: {get_param: [EndpointMap, SaharaInternal, port]}
sahara::service::api::api_workers: {get_param: SaharaWorkers}
- sahara::keystone::auth::public_url: {get_param: [EndpointMap, SaharaPublic, uri]}
- sahara::keystone::auth::internal_url: {get_param: [EndpointMap, SaharaInternal, uri]}
- sahara::keystone::auth::admin_url: {get_param: [EndpointMap, SaharaAdmin, uri]}
- sahara::keystone::auth::password: {get_param: SaharaPassword }
- sahara::keystone::auth::region: {get_param: KeystoneRegion}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
@@ -67,3 +74,19 @@ outputs:
- 13386
step_config: |
include ::tripleo::profile::base::sahara::api
+ service_config_settings:
+ keystone:
+ sahara::keystone::auth::tenant: 'service'
+ sahara::keystone::auth::public_url: {get_param: [EndpointMap, SaharaPublic, uri]}
+ sahara::keystone::auth::internal_url: {get_param: [EndpointMap, SaharaInternal, uri]}
+ sahara::keystone::auth::admin_url: {get_param: [EndpointMap, SaharaAdmin, uri]}
+ sahara::keystone::auth::password: {get_param: SaharaPassword }
+ sahara::keystone::auth::region: {get_param: KeystoneRegion}
+ mysql:
+ sahara::db::mysql::password: {get_param: SaharaPassword}
+ sahara::db::mysql::user: sahara
+ sahara::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ sahara::db::mysql::dbname: sahara
+ sahara::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
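
The keystone auth and mysql database settings above move out of this role's config_settings into the new service_config_settings output; as aggregated later in this diff (puppet/services/services.yaml), each block is keyed by the service whose nodes should receive that hieradata. A shape-only sketch using keys from this file:

service_config_settings:
  keystone:   # applied on nodes running the keystone service
    sahara::keystone::auth::tenant: 'service'
  mysql:      # applied on nodes running the mysql service
    sahara::db::mysql::dbname: sahara
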
diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml
index c1ab8e8b..5fc8ed61 100644
--- a/puppet/services/sahara-base.yaml
+++ b/puppet/services/sahara-base.yaml
@@ -60,13 +60,6 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/sahara'
- sahara::db::mysql::password: {get_param: SaharaPassword}
- sahara::db::mysql::user: sahara
- sahara::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- sahara::db::mysql::dbname: sahara
- sahara::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
sahara::rabbit_password: {get_param: RabbitPassword}
sahara::rabbit_user: {get_param: RabbitUserName}
sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
@@ -85,6 +78,5 @@ outputs:
- storm
sahara::rpc_backend: rabbit
sahara::admin_tenant_name: 'service'
- sahara::keystone::auth::tenant: 'service'
sahara::db::database_db_max_retries: -1
sahara::db::database_max_retries: -1
diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml
index fcf4d485..287c1c05 100644
--- a/puppet/services/sahara-engine.yaml
+++ b/puppet/services/sahara-engine.yaml
@@ -18,6 +18,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MonitoringSubscriptionSaharaEngine:
+ default: 'overcloud-sahara-engine'
+ type: string
+ SaharaEngineLoggingSource:
+ type: json
+ default:
+ tag: openstack.sahara.engine
+ path: /var/log/sahara/sahara-engine.log
resources:
SaharaBase:
@@ -32,6 +40,10 @@ outputs:
description: Role data for the Sahara Engine role.
value:
service_name: sahara_engine
+ monitoring_subscription: {get_param: MonitoringSubscriptionSaharaEngine}
+ logging_source: {get_param: SaharaEngineLoggingSource}
+ logging_groups:
+ - sahara
config_settings:
map_merge:
- get_attr: [SaharaBase, role_data, config_settings]
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
index bb40001a..176fd235 100644
--- a/puppet/services/services.yaml
+++ b/puppet/services/services.yaml
@@ -39,6 +39,9 @@ resources:
EndpointMap: {get_param: EndpointMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ LoggingConfiguration:
+ type: OS::TripleO::LoggingConfiguration
+
outputs:
role_data:
description: Combined Role data for this set of services.
@@ -49,5 +52,58 @@ outputs:
yaql:
expression: list($.data.s_names.where($ != null))
data: {s_names: {get_attr: [ServiceChain, role_data, service_name]}}
+ monitoring_subscriptions:
+ yaql:
+ expression: list($.data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
+ data: {get_attr: [ServiceChain, role_data]}
+ logging_sources:
+ # Transform the individual logging_source configuration from
+ # each service in the chain into a global list, adding some
+ # default configuration at the same time.
+ yaql:
+ expression: >
+ let(
+ default_format => $.data.default_format,
+ pos_file_path => $.data.pos_file_path,
+ sources => $.data.sources.flatten()
+ ) ->
+ $sources.where($ != null).select({
+ 'type' => 'tail',
+ 'tag' => $.tag,
+ 'path' => $.path,
+ 'format' => $.get('format', $default_format),
+ 'pos_file' => $.get('pos_file', $pos_file_path + '/' + $.tag + '.pos')
+ })
+ data:
+ sources:
+ - {get_attr: [LoggingConfiguration, LoggingDefaultSources]}
+ - yaql:
+ expression: list($.data.where($ != null).select($.get('logging_source')).where($ != null))
+ data: {get_attr: [ServiceChain, role_data]}
+ - {get_attr: [LoggingConfiguration, LoggingExtraSources]}
+ default_format: {get_attr: [LoggingConfiguration, LoggingDefaultFormat]}
+ pos_file_path: {get_attr: [LoggingConfiguration, LoggingPosFilePath]}
+ logging_groups:
+ # Build a list of unique groups to which we should add the
+ # fluentd user.
+ yaql:
+ expression: >
+ set($.data.groups.flatten()).where($)
+ data:
+ groups:
+ - [{get_attr: [LoggingConfiguration, LoggingDefaultGroups]}]
+ - yaql:
+ expression: list($.data.where($ != null).select($.get('logging_groups')).where($ != null))
+ data: {get_attr: [ServiceChain, role_data]}
+ - [{get_attr: [LoggingConfiguration, LoggingExtraGroups]}]
config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
+ global_config_settings:
+ map_merge:
+ yaql:
+ expression: list($.data.where($ != null).select($.get('global_config_settings')).where($ != null))
+ data: {get_attr: [ServiceChain, role_data]}
+ service_config_settings:
+ yaql:
+ expression: $.data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
+ data: {get_attr: [ServiceChain, role_data]}
step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]}
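
To illustrate the logging_sources transformation above: feeding it the SaharaApiLoggingSource defined earlier in this diff, and assuming a LoggingPosFilePath of /var/cache/fluentd (illustrative), each aggregated entry would come out roughly as:

- type: tail
  tag: openstack.sahara.api
  path: /var/log/sahara/sahara-api.log
  # format falls back to LoggingDefaultFormat when the source does not set one
  format: "..."
  pos_file: /var/cache/fluentd/openstack.sahara.api.pos
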
diff --git a/puppet/services/snmp.yaml b/puppet/services/snmp.yaml
index e38ccf42..4d01632d 100644
--- a/puppet/services/snmp.yaml
+++ b/puppet/services/snmp.yaml
@@ -35,8 +35,8 @@ outputs:
value:
service_name: snmp
config_settings:
- snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
- snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
+ tripleo::profile::base::snmp::snmpd_user: {get_param: SnmpdReadonlyUserName}
+ tripleo::profile::base::snmp::snmpd_password: {get_param: SnmpdReadonlyUserPassword}
tripleo.snmp.firewall_rules:
'127 snmp':
dport: 161
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index cba08090..de8daea5 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -38,6 +38,17 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ MonitoringSubscriptionSwiftProxy:
+ default: 'overcloud-swift-proxy'
+ type: string
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
resources:
SwiftBase:
@@ -52,40 +63,32 @@ outputs:
description: Role data for the Swift proxy service.
value:
service_name: swift_proxy
+ monitoring_subscription: {get_param: MonitoringSubscriptionSwiftProxy}
config_settings:
map_merge:
- get_attr: [SwiftBase, role_data, config_settings]
- swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- swift::proxy::authtoken::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- swift::proxy::authtoken::admin_password: {get_param: SwiftPassword}
- swift::proxy::authtoken::admin_tenant_name: 'service'
+ swift::proxy::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ swift::proxy::authtoken::password: {get_param: SwiftPassword}
+ swift::proxy::authtoken::project_name: 'service'
swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
swift::proxy::workers: {get_param: SwiftWorkers}
- swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]}
- swift::keystone::auth::internal_url: {get_param: [EndpointMap, SwiftInternal, uri]}
- swift::keystone::auth::admin_url: {get_param: [EndpointMap, SwiftAdmin, uri]}
- swift::keystone::auth::public_url_s3: {get_param: [EndpointMap, SwiftS3Public, uri]}
- swift::keystone::auth::internal_url_s3: {get_param: [EndpointMap, SwiftS3Internal, uri]}
- swift::keystone::auth::admin_url_s3: {get_param: [EndpointMap, SwiftS3Admin, uri]}
- swift::keystone::auth::password: {get_param: SwiftPassword}
- swift::keystone::auth::region: {get_param: KeystoneRegion}
+ swift::proxy::ceilometer::rabbit_host: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
+ swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
tripleo.swift_proxy.firewall_rules:
'122 swift proxy':
dport:
- 8080
- 13808
- swift::keystone::auth::tenant: 'service'
- swift::keystone::auth::configure_s3_endpoint: false
- swift::keystone::auth::operator_roles:
- - admin
- - swiftoperator
- - ResellerAdmin
swift::proxy::keystone::operator_roles:
- admin
- swiftoperator
- ResellerAdmin
+ swift::proxy::versioned_writes::allow_versioned_writes: true
swift::proxy::pipeline:
+ - 'ceilometer'
- 'catch_errors'
- 'healthcheck'
- 'proxy-logging'
@@ -97,6 +100,7 @@ outputs:
- 'authtoken'
- 'keystone'
- 'staticweb'
+ - 'versioned_writes'
- 'proxy-logging'
- 'proxy-server'
swift::proxy::account_autocreate: true
@@ -109,3 +113,19 @@ outputs:
swift::proxy::proxy_local_net_ip: {get_param: [ServiceNetMap, SwiftProxyNetwork]}
step_config: |
include ::tripleo::profile::base::swift::proxy
+ service_config_settings:
+ keystone:
+ swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]}
+ swift::keystone::auth::internal_url: {get_param: [EndpointMap, SwiftInternal, uri]}
+ swift::keystone::auth::admin_url: {get_param: [EndpointMap, SwiftAdmin, uri]}
+ swift::keystone::auth::public_url_s3: {get_param: [EndpointMap, SwiftS3Public, uri]}
+ swift::keystone::auth::internal_url_s3: {get_param: [EndpointMap, SwiftS3Internal, uri]}
+ swift::keystone::auth::admin_url_s3: {get_param: [EndpointMap, SwiftS3Admin, uri]}
+ swift::keystone::auth::password: {get_param: SwiftPassword}
+ swift::keystone::auth::region: {get_param: KeystoneRegion}
+ swift::keystone::auth::tenant: 'service'
+ swift::keystone::auth::configure_s3_endpoint: false
+ swift::keystone::auth::operator_roles:
+ - admin
+ - swiftoperator
+ - ResellerAdmin
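
The proxy pipeline above now begins with the ceilometer middleware, which is why this template grows RabbitUserName and RabbitPassword parameters, and versioned object writes are enabled via the new versioned_writes entry. A minimal environment-file sketch for the new parameters (the password is a placeholder):

parameter_defaults:
  RabbitUserName: guest
  RabbitPassword: 'REPLACE_WITH_RABBIT_PASSWORD'
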
diff --git a/puppet/services/swift-ringbuilder.yaml b/puppet/services/swift-ringbuilder.yaml
index f41228e7..8ed4e9f4 100644
--- a/puppet/services/swift-ringbuilder.yaml
+++ b/puppet/services/swift-ringbuilder.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
OpenStack Swift Ringbuilder
@@ -34,6 +34,11 @@ parameters:
type: number
default: 3
description: How many replicas to use in the swift rings.
+ SwiftRawDisks:
+ default: {}
+ description: 'A hash of additional raw devices to use as Swift backend (eg. {sdb: {}})'
+ type: json
+
outputs:
role_data:
@@ -43,7 +48,18 @@ outputs:
config_settings:
tripleo::profile::base::swift::ringbuilder::build_ring: {get_param: SwiftRingBuild}
tripleo::profile::base::swift::ringbuilder::replicas: {get_param: SwiftReplicas}
- swift::ringbuilder::part_power: {get_param: SwiftPartPower}
- swift::ringbuilder::min_part_hours: {get_param: SwiftMinPartHours}
+ tripleo::profile::base::swift::ringbuilder::part_power: {get_param: SwiftPartPower}
+ tripleo::profile::base::swift::ringbuilder::min_part_hours: {get_param: SwiftMinPartHours}
+ tripleo::profile::base::swift::ringbuilder::raw_disk_prefix: 'r1z1-'
+ tripleo::profile::base::swift::ringbuilder::raw_disks:
+ yaql:
+ expression: $.data.raw_disk_lists.flatten()
+ data:
+ raw_disk_lists:
+ - [':%PORT%/d1']
+ - repeat:
+ template: ':%PORT%/DEVICE'
+ for_each:
+ DEVICE: {get_param: SwiftRawDisks}
step_config: |
include ::tripleo::profile::base::swift::ringbuilder
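
SwiftRawDisks feeds the repeat/yaql expression above; for each key in the hash it should add a ':%PORT%/<device>' entry alongside the default ':%PORT%/d1'. A sketch using the example from the parameter description (device names are illustrative):

parameter_defaults:
  SwiftRawDisks:
    sdb: {}
    sdc: {}
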
diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml
index 6c7c3c7a..7fbb8d90 100644
--- a/puppet/services/swift-storage.yaml
+++ b/puppet/services/swift-storage.yaml
@@ -30,6 +30,9 @@ parameters:
default: {}
description: 'A hash of additional raw devices to use as Swift backend (eg. {sdb: {}})'
type: json
+ MonitoringSubscriptionSwiftStorage:
+ default: 'overcloud-swift-storage'
+ type: string
# DEPRECATED options for compatibility with overcloud.yaml
# This should be removed and manipulation of the ControllerServices list
@@ -58,6 +61,7 @@ outputs:
description: Role data for the Swift Proxy role.
value:
service_name: swift_storage
+ monitoring_subscription: {get_param: MonitoringSubscriptionSwiftStorage}
config_settings:
map_merge:
- get_attr: [SwiftBase, role_data, config_settings]
@@ -83,6 +87,6 @@ outputs:
- healthcheck
- account-server
swift::storage::disks: {get_param: SwiftRawDisks}
- swift::storage::all::storage_local_net_ip: {get_param: [ServiceNetMap, SwiftMgmtNetwork]}
+ swift::storage::all::storage_local_net_ip: {get_param: [ServiceNetMap, SwiftStorageNetwork]}
step_config: |
include ::tripleo::profile::base::swift::storage
diff --git a/puppet/services/time/ntp.yaml b/puppet/services/time/ntp.yaml
index 7aa3706f..eb5237fe 100644
--- a/puppet/services/time/ntp.yaml
+++ b/puppet/services/time/ntp.yaml
@@ -32,7 +32,7 @@ outputs:
value:
service_name: ntp
config_settings:
- ntp::ntpservers: {get_param: NtpServer}
+ ntp::servers: {get_param: NtpServer}
tripleo.ntp.firewall_rules:
'105 ntp':
dport: 123
diff --git a/puppet/services/tripleo-firewall.yaml b/puppet/services/tripleo-firewall.yaml
index 14965b4f..7eb39905 100644
--- a/puppet/services/tripleo-firewall.yaml
+++ b/puppet/services/tripleo-firewall.yaml
@@ -18,11 +18,22 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ ManageFirewall:
+ default: true
+ description: Whether to manage IPtables rules.
+ type: boolean
+ PurgeFirewallRules:
+ default: false
+ description: Whether IPtables rules should be purged before setting up the new ones.
+ type: boolean
outputs:
role_data:
description: Role data for the TripleO firewall settings
value:
service_name: tripleo_firewall
+ config_settings:
+ tripleo::firewall::manage_firewall: {get_param: ManageFirewall}
+ tripleo::firewall::purge_firewall_rules: {get_param: PurgeFirewallRules}
step_config: |
include ::tripleo::firewall
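
With the firewall toggles now read by the service template itself, they can be overridden per deployment through an environment file; the values below simply restate the defaults above:

parameter_defaults:
  ManageFirewall: true
  PurgeFirewallRules: false
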
diff --git a/puppet/services/vip-hosts.yaml b/puppet/services/vip-hosts.yaml
new file mode 100644
index 00000000..a9d757ee
--- /dev/null
+++ b/puppet/services/vip-hosts.yaml
@@ -0,0 +1,56 @@
+heat_template_version: 2016-04-08
+
+description: >
+  If the deployer doesn't have a DNS server for the overcloud nodes, this will
+  populate the node names and IPs for the VIPs of the overcloud.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: role data for the VIP hosts role
+ value:
+ service_name: vip_hosts
+ config_settings:
+ tripleo::vip_hosts::hosts_spec:
+ external:
+ name: "%{hiera('cloud_name_external')}"
+ ip: "%{hiera('public_virtual_ip')}"
+ ensure: present
+ comment: FQDN of the external VIP
+ internal_api:
+ name: "%{hiera('cloud_name_internal_api')}"
+ ip: "%{hiera('internal_api_virtual_ip')}"
+ ensure: present
+ comment: FQDN of the internal api VIP
+ storage:
+ name: "%{hiera('cloud_name_storage')}"
+ ip: "%{hiera('storage_virtual_ip')}"
+ ensure: present
+ comment: FQDN of the storage VIP
+ storage_mgmt:
+ name: "%{hiera('cloud_name_storage_mgmt')}"
+ ip: "%{hiera('storage_mgmt_virtual_ip')}"
+ ensure: present
+ comment: FQDN of the storage mgmt VIP
+ ctlplane:
+ name: "%{hiera('cloud_name_ctlplane')}"
+ ip: "%{hiera('controller_virtual_ip')}"
+ ensure: present
+ comment: FQDN of the ctlplane VIP
+ step_config: |
+ include ::tripleo::vip_hosts
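
The new service is consumed by roles through the OS::TripleO::Services::VipHosts entry added to roles_data.yaml below; a hedged sketch of the corresponding resource_registry mapping (the exact path is an assumption, following the layout of the other puppet service mappings):

resource_registry:
  OS::TripleO::Services::VipHosts: puppet/services/vip-hosts.yaml
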
diff --git a/puppet/swift-devices-and-proxy-config.yaml b/puppet/swift-devices-and-proxy-config.yaml
deleted file mode 100644
index 14df831f..00000000
--- a/puppet/swift-devices-and-proxy-config.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Swift Devices and Proxy Config for Puppet'
-
-parameters:
- controller_swift_devices:
- type: comma_delimited_list
- object_store_swift_devices:
- type: comma_delimited_list
- controller_swift_proxy_memcaches:
- type: comma_delimited_list
-
-resources:
-
- SwiftDevicesAndProxyConfigImpl:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- hiera:
- datafiles:
- swift_devices_and_proxy:
- mapped_data:
- tripleo::profile::base::swift::ringbuilder::devices:
- list_join:
- - ", "
- - - list_join:
- - ", "
- - {get_param: controller_swift_devices}
- - list_join:
- - ", "
- - {get_param: object_store_swift_devices}
- swift::proxy::cache::memcache_servers:
- str_replace:
- template: "['SERVERS_LIST']"
- params:
- SERVERS_LIST:
- list_join:
- - "','"
- - {get_param: controller_swift_proxy_memcaches}
-
-outputs:
- config_id:
- description: The ID of the SwiftDevicesAndProxyConfigImpl resource.
- value:
- {get_resource: SwiftDevicesAndProxyConfigImpl}
diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml
deleted file mode 100644
index 859fad2c..00000000
--- a/puppet/swift-storage-post.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'OpenStack swift storage node post deployment for Puppet'
-
-parameters:
- ConfigDebug:
- default: false
- description: Whether to run config management (e.g. Puppet) in debug mode.
- type: boolean
- servers:
- type: json
- RoleData:
- type: json
- default: {}
- DeployIdentifier:
- type: string
- description: Value which changes if the node configuration may need to be re-applied
-
-resources:
-
- StorageArtifactsConfig:
- type: deploy-artifacts.yaml
-
- StorageArtifactsDeploy:
- type: OS::Heat::StructuredDeployments
- properties:
- servers: {get_param: servers}
- config: {get_resource: StorageArtifactsConfig}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- StoragePuppetConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- options:
- enable_debug: {get_param: ConfigDebug}
- enable_hiera: True
- enable_facter: False
- modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
- inputs:
- - name: step
- outputs:
- - name: result
- config:
- list_join:
- - ''
- - - get_file: manifests/overcloud_object.pp
- - {get_param: [RoleData, step_config]}
-
- StorageRingbuilderDeployment_Step2:
- type: OS::Heat::StructuredDeployments
- depends_on: StorageArtifactsDeploy
- properties:
- name: StorageRingbuilderDeployment_Step2
- servers: {get_param: servers}
- config: {get_resource: StoragePuppetConfig}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
-
- StorageRingbuilderDeployment_Step3:
- type: OS::Heat::StructuredDeployments
- depends_on: StorageRingbuilderDeployment_Step2
- properties:
- name: StorageRingbuilderDeployment_Step3
- servers: {get_param: servers}
- config: {get_resource: StoragePuppetConfig}
- input_values:
- step: 3
- update_identifier: {get_param: DeployIdentifier}
-
- StorageDeployment_Step4:
- type: OS::Heat::StructuredDeployments
- depends_on: StorageRingbuilderDeployment_Step3
- properties:
- name: StorageDeployment_Step4
- servers: {get_param: servers}
- config: {get_resource: StoragePuppetConfig}
- input_values:
- step: 4
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ExtraConfig:
- depends_on: StorageDeployment_Step4
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: servers}
-
diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml
deleted file mode 100644
index cbd7ea09..00000000
--- a/puppet/vip-config.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
- Configure hieradata for service -> virtual IP mappings.
-
-resources:
- VipConfigImpl:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- hiera:
- datafiles:
- vip_data:
- mapped_data:
- keystone_admin_api_vip: {get_input: keystone_admin_api_vip}
- keystone_public_api_vip: {get_input: keystone_public_api_vip}
- neutron_api_vip: {get_input: neutron_api_vip}
- # TODO: pass a `midonet_api_vip` var
- midonet_api_vip: {get_input: neutron_api_vip}
- cinder_api_vip: {get_input: cinder_api_vip}
- glance_api_vip: {get_input: glance_api_vip}
- glance_registry_vip: {get_input: glance_registry_vip}
- sahara_api_vip: {get_input: sahara_api_vip}
- swift_proxy_vip: {get_input: swift_proxy_vip}
- manila_api_vip: {get_input: manila_api_vip}
- nova_api_vip: {get_input: nova_api_vip}
- nova_metadata_vip: {get_input: nova_metadata_vip}
- ceilometer_api_vip: {get_input: ceilometer_api_vip}
- aodh_api_vip: {get_input: aodh_api_vip}
- gnocchi_api_vip: {get_input: gnocchi_api_vip}
- heat_api_vip: {get_input: heat_api_vip}
- horizon_vip: {get_input: horizon_vip}
- redis_vip: {get_input: redis_vip}
- mysql_vip: {get_input: mysql_vip}
- public_virtual_ip: {get_input: public_virtual_ip}
- controller_virtual_ip: {get_input: control_virtual_ip}
- internal_api_virtual_ip: {get_input: internal_api_virtual_ip}
- storage_virtual_ip: {get_input: storage_virtual_ip}
- storage_mgmt_virtual_ip: {get_input: storage_mgmt_virtual_ip}
- ironic_api_vip: {get_input: ironic_api_vip}
- opendaylight_api_vip: {get_input: opendaylight_api_vip}
- # public_virtual_ip and controller_virtual_ip are needed in
- # both HAproxy & keepalived.
- tripleo::haproxy::public_virtual_ip: {get_input: public_virtual_ip}
- tripleo::haproxy::controller_virtual_ip: {get_input: control_virtual_ip}
- tripleo::keepalived::public_virtual_ip: {get_input: public_virtual_ip}
- tripleo::keepalived::controller_virtual_ip: {get_input: control_virtual_ip}
- tripleo::keepalived::internal_api_virtual_ip: {get_input: internal_api_virtual_ip}
- tripleo::keepalived::storage_virtual_ip: {get_input: storage_virtual_ip}
- tripleo::keepalived::storage_mgmt_virtual_ip: {get_input: storage_mgmt_virtual_ip}
- tripleo::redis_notification::haproxy_monitor_ip: {get_input: control_virtual_ip}
-
-
-outputs:
- OS::stack_id:
- description: The VipConfigImpl resource.
- value: {get_resource: VipConfigImpl}
diff --git a/roles_data.yaml b/roles_data.yaml
new file mode 100644
index 00000000..86d0e4f5
--- /dev/null
+++ b/roles_data.yaml
@@ -0,0 +1,165 @@
+# Specifies which roles (groups of nodes) will be deployed
+# Note this is used as an input to the various *.j2.yaml
+# jinja2 templates, so that they are converted into *.yaml
+# during the plan creation (via a mistral action/workflow).
+#
+# The format is a list; each role definition supports the following keys:
+#
+# * name: (string) mandatory, name of the role, must be unique
+#
+# CountDefault: (number) optional, default number of nodes, defaults to 0
+# sets the default for the {{role.name}}Count parameter in overcloud.yaml
+#
+# HostnameFormatDefault: (string) optional, default format string for hostnames,
+# defaults to '%stackname%-{{role.name.lower()}}-%index%'
+# sets the default for the {{role.name}}HostnameFormat parameter in overcloud.yaml
+#
+# ServicesDefault: (list) optional default list of services to be deployed
+# on the role, defaults to an empty list. Sets the default for the
+# {{role.name}}Services parameter in overcloud.yaml
+
+- name: Controller
+ CountDefault: 1
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::CephRgw
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderBackup
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Core
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::GlanceRegistry
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronApi
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::Redis
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::MongoDb
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::NovaConsoleauth
+ - OS::TripleO::Services::NovaVncProxy
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::CeilometerApi
+ - OS::TripleO::Services::CeilometerCollector
+ - OS::TripleO::Services::CeilometerExpirer
+ - OS::TripleO::Services::CeilometerAgentCentral
+ - OS::TripleO::Services::CeilometerAgentNotification
+ - OS::TripleO::Services::Horizon
+ - OS::TripleO::Services::GnocchiApi
+ - OS::TripleO::Services::GnocchiMetricd
+ - OS::TripleO::Services::GnocchiStatsd
+ - OS::Tripleo::Services::ManilaApi
+ - OS::Tripleo::Services::ManilaScheduler
+ - OS::Tripleo::Services::ManilaBackendGeneric
+ - OS::Tripleo::Services::ManilaBackendNetapp
+ - OS::Tripleo::Services::ManilaBackendCephFs
+ - OS::Tripleo::Services::ManilaShare
+ - OS::TripleO::Services::AodhApi
+ - OS::TripleO::Services::AodhEvaluator
+ - OS::TripleO::Services::AodhNotifier
+ - OS::TripleO::Services::AodhListener
+ - OS::TripleO::Services::SaharaApi
+ - OS::TripleO::Services::SaharaEngine
+ - OS::TripleO::Services::IronicApi
+ - OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::NovaIronic
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::OpenDaylightApi
+ - OS::TripleO::Services::OpenDaylightOvs
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::VipHosts
+
+- name: Compute
+ CountDefault: 1
+ HostnameFormatDefault: '%stackname%-novacompute-%index%'
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephClient
+ - OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::ComputeNeutronCorePlugin
+ - OS::TripleO::Services::ComputeNeutronOvsAgent
+ - OS::TripleO::Services::ComputeCeilometerAgent
+ - OS::TripleO::Services::ComputeNeutronL3Agent
+ - OS::TripleO::Services::ComputeNeutronMetadataAgent
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::NeutronSriovAgent
+ - OS::TripleO::Services::OpenDaylightOvs
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::VipHosts
+
+- name: BlockStorage
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::BlockStorageCinderVolume
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::VipHosts
+
+- name: ObjectStorage
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::VipHosts
+
+- name: CephStorage
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::VipHosts
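
The header comments in roles_data.yaml above describe how each role entry feeds the *.j2.yaml templates: CountDefault falls back to 0, HostnameFormatDefault falls back to '%stackname%-{{role.name.lower()}}-%index%', and ServicesDefault falls back to an empty list. A minimal Python sketch of that defaulting logic, assuming a load_roles helper that is illustrative and not part of this patch:

# Hypothetical sketch: resolve the per-role defaults documented in the
# roles_data.yaml header before the jinja2 templates are rendered.
import yaml

def load_roles(path='roles_data.yaml'):
    with open(path) as f:
        roles = yaml.safe_load(f)
    resolved = []
    for role in roles:
        name = role['name']  # mandatory and must be unique
        resolved.append({
            'name': name,
            'CountDefault': role.get('CountDefault', 0),
            'HostnameFormatDefault': role.get(
                'HostnameFormatDefault',
                '%stackname%-{}-%index%'.format(name.lower())),
            'ServicesDefault': role.get('ServicesDefault', []),
        })
    return resolved

if __name__ == '__main__':
    for r in load_roles():
        print(r['name'], r['CountDefault'], r['HostnameFormatDefault'])

With the roles defined above, for example, the Compute role resolves to CountDefault 1, HostnameFormatDefault '%stackname%-novacompute-%index%' (explicitly set), and its listed ServicesDefault.
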
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index d75aeb4f..95c7d025 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -17,6 +17,8 @@ import traceback
import yaml
+required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords']
+
def exit_usage():
print('Usage %s <yaml file or directory>' % sys.argv[0])
sys.exit(1)
@@ -40,7 +42,6 @@ def validate_service(filename, tpl):
% filename)
return 1
if 'parameters' in tpl:
- required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords']
for param in required_params:
if param not in tpl['parameters']:
print('ERROR: parameter %s is required for %s.'
@@ -63,13 +64,16 @@ def validate(filename):
print(traceback.format_exc())
return 1
# yaml is OK, now walk the parameters and output a warning for unused ones
- for p in tpl.get('parameters', {}):
- str_p = '\'%s\'' % p
- in_resources = str_p in str(tpl.get('resources', {}))
- in_outputs = str_p in str(tpl.get('outputs', {}))
- if not in_resources and not in_outputs:
- print('Warning: parameter %s in template %s appears to be unused'
- % (p, filename))
+ if 'heat_template_version' in tpl:
+ for p in tpl.get('parameters', {}):
+ if p in required_params:
+ continue
+ str_p = '\'%s\'' % p
+ in_resources = str_p in str(tpl.get('resources', {}))
+ in_outputs = str_p in str(tpl.get('outputs', {}))
+ if not in_resources and not in_outputs:
+ print('Warning: parameter %s in template %s '
+ 'appears to be unused' % (p, filename))
return retval
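
The two hunks above hoist required_params to module scope, exempt those shared parameters from the unused-parameter warning, and only run that check on files that declare heat_template_version; the directory-walk change in the following hunk additionally skips *.j2.yaml sources. A self-contained sketch of the combined behaviour, with illustrative helper names rather than the patch's exact functions:

# Illustrative sketch of the validator's filtering after this change:
# *.j2.yaml inputs are skipped, and EndpointMap/ServiceNetMap/DefaultPasswords
# are never reported as unused.
import os
import yaml

REQUIRED_PARAMS = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords']

def warn_unused(path):
    with open(path) as f:
        tpl = yaml.safe_load(f)
    if not isinstance(tpl, dict) or 'heat_template_version' not in tpl:
        return  # jinja2 sources and non-heat YAML are not checked
    for p in tpl.get('parameters', {}):
        if p in REQUIRED_PARAMS:
            continue
        needle = "'%s'" % p
        if (needle not in str(tpl.get('resources', {})) and
                needle not in str(tpl.get('outputs', {}))):
            print('Warning: parameter %s in template %s '
                  'appears to be unused' % (p, path))

def walk(base_path):
    for subdir, _dirs, files in os.walk(base_path):
        for f in files:
            if f.endswith('.yaml') and not f.endswith('.j2.yaml'):
                warn_unused(os.path.join(subdir, f))
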
@@ -84,7 +88,7 @@ for base_path in path_args:
if os.path.isdir(base_path):
for subdir, dirs, files in os.walk(base_path):
for f in files:
- if f.endswith('.yaml'):
+ if f.endswith('.yaml') and not f.endswith('.j2.yaml'):
file_path = os.path.join(subdir, f)
failed = validate(file_path)
if failed: