-rw-r--r--.gitignore18
-rw-r--r--Gemfile24
-rw-r--r--README.rst76
-rw-r--r--Rakefile6
-rw-r--r--all-nodes-validation.yaml2
-rw-r--r--bootstrap-config.yaml2
-rw-r--r--capabilities-map.yaml61
-rw-r--r--ci/README.rst11
-rw-r--r--ci/common/net-config-multinode-os-net-config.yaml114
-rw-r--r--ci/common/net-config-multinode.yaml64
-rw-r--r--ci/environments/multinode-3nodes.yaml77
-rw-r--r--ci/environments/multinode.yaml47
-rw-r--r--ci/environments/multinode_major_upgrade.yaml47
-rw-r--r--ci/environments/scenario001-multinode.yaml93
-rw-r--r--ci/environments/scenario002-multinode.yaml54
-rw-r--r--ci/environments/scenario003-multinode.yaml53
-rw-r--r--ci/environments/scenario004-multinode.yaml63
-rw-r--r--ci/pingtests/scenario001-multinode.yaml186
-rw-r--r--ci/pingtests/scenario002-multinode.yaml158
-rw-r--r--ci/pingtests/scenario003-multinode.yaml154
-rw-r--r--ci/pingtests/scenario004-multinode.yaml127
-rw-r--r--ci/pingtests/tenantvm_floatingip.yaml142
-rw-r--r--ci/scripts/freeipa_setup.sh104
-rw-r--r--default_passwords.yaml2
-rw-r--r--deployed-server/README.rst13
-rw-r--r--deployed-server/ctlplane-port.yaml17
-rw-r--r--deployed-server/deployed-neutron-port.yaml67
-rw-r--r--deployed-server/deployed-server-bootstrap-centos.sh16
-rw-r--r--deployed-server/deployed-server-bootstrap-centos.yaml22
-rw-r--r--deployed-server/deployed-server-bootstrap-rhel.sh13
-rw-r--r--deployed-server/deployed-server-bootstrap-rhel.yaml22
-rw-r--r--deployed-server/deployed-server-config.yaml22
-rw-r--r--deployed-server/deployed-server-roles-data.yaml172
-rw-r--r--deployed-server/deployed-server.yaml80
-rwxr-xr-xdeployed-server/scripts/get-occ-config.sh60
-rw-r--r--docker/README-containers.md3
-rw-r--r--docker/compute-post.yaml349
-rw-r--r--docker/firstboot/install_docker_agents.yaml2
-rwxr-xr-x[-rw-r--r--]docker/firstboot/start_docker_agents.sh97
-rw-r--r--docker/post.j2.yaml207
-rw-r--r--docker/services/README.rst65
-rw-r--r--docker/services/neutron-ovs-agent.yaml84
-rw-r--r--docker/services/nova-compute.yaml81
-rw-r--r--docker/services/nova-libvirt.yaml82
-rw-r--r--docker/services/services.yaml75
-rw-r--r--environments/auditd.yaml119
-rw-r--r--environments/cinder-hpelefthand-config.yaml13
-rw-r--r--environments/cinder-iser.yaml19
-rw-r--r--environments/deployed-server-bootstrap-environment-centos.yaml7
-rw-r--r--environments/deployed-server-bootstrap-environment-rhel.yaml7
-rw-r--r--environments/deployed-server-environment.yaml4
-rw-r--r--environments/deployed-server-noop-ctlplane.yaml4
-rw-r--r--environments/docker-network-isolation.yaml4
-rw-r--r--environments/docker.yaml32
-rw-r--r--environments/enable-internal-tls.yaml15
-rw-r--r--environments/enable-swap-partition.yaml3
-rw-r--r--environments/enable-swap.yaml3
-rw-r--r--environments/external-loadbalancer-vip-v6.yaml21
-rw-r--r--environments/external-loadbalancer-vip.yaml19
-rw-r--r--environments/horizon_password_validation.yaml5
-rw-r--r--environments/host-config-pre-network.j2.yaml16
-rw-r--r--environments/hyperconverged-ceph.yaml23
-rw-r--r--environments/low-memory-usage.yaml3
-rw-r--r--environments/major-upgrade-all-in-one.yaml8
-rw-r--r--environments/major-upgrade-composable-steps.yaml3
-rw-r--r--environments/manila-cephfsnative-config.yaml8
-rw-r--r--environments/manila-generic-config.yaml8
-rw-r--r--environments/manila-netapp-config.yaml8
-rw-r--r--environments/network-environment.yaml7
-rw-r--r--environments/network-isolation-no-tunneling.yaml30
-rw-r--r--environments/network-isolation.yaml3
-rw-r--r--environments/neutron-ml2-fujitsu-cfab.yaml21
-rw-r--r--environments/neutron-ml2-fujitsu-fossw.yaml22
-rw-r--r--environments/neutron-ml2-ovn.yaml6
-rw-r--r--environments/neutron-nuage-config.yaml2
-rw-r--r--environments/neutron-opendaylight-l3.yaml14
-rw-r--r--environments/neutron-opendaylight.yaml6
-rw-r--r--environments/neutron-ovs-dvr.yaml19
-rw-r--r--environments/puppet-ceph-external.yaml3
-rw-r--r--environments/puppet-pacemaker.yaml5
-rw-r--r--environments/services/barbican.yaml4
-rw-r--r--environments/services/ceph-mds.yaml2
-rw-r--r--environments/services/ceph-rbdmirror.yaml2
-rw-r--r--environments/services/disable-ceilometer-api.yaml2
-rw-r--r--environments/services/ec2-api.yaml3
-rw-r--r--environments/services/etcd.yaml2
-rw-r--r--environments/services/panko.yaml2
-rw-r--r--environments/services/zaqar.yaml2
-rw-r--r--environments/sshd-banner.yaml13
-rw-r--r--environments/storage-environment.yaml22
-rw-r--r--environments/tls-endpoints-public-dns.yaml22
-rw-r--r--environments/tls-endpoints-public-ip.yaml22
-rw-r--r--environments/tls-everywhere-endpoints-dns.yaml22
-rw-r--r--environments/undercloud.yaml18
-rw-r--r--environments/updates/README.md3
-rw-r--r--environments/updates/update-from-deployed-server-newton.yaml2
-rw-r--r--environments/updates/update-from-keystone-admin-internal-api.yaml29
-rw-r--r--environments/use-dns-for-vips.yaml4
-rw-r--r--extraconfig/all_nodes/mac_hostname.j2.yaml2
-rw-r--r--extraconfig/all_nodes/random_string.j2.yaml2
-rw-r--r--extraconfig/all_nodes/swap-partition.j2.yaml19
-rw-r--r--extraconfig/all_nodes/swap.j2.yaml10
-rw-r--r--extraconfig/nova_metadata/krb-service-principals.yaml84
-rw-r--r--extraconfig/post_deploy/default.yaml2
-rw-r--r--extraconfig/post_deploy/example.yaml2
-rw-r--r--extraconfig/post_deploy/example_run_on_update.yaml2
-rwxr-xr-xextraconfig/post_deploy/undercloud_post.sh126
-rw-r--r--extraconfig/post_deploy/undercloud_post.yaml93
-rw-r--r--extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml2
-rw-r--r--extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration42
-rw-r--r--extraconfig/pre_network/ansible_host_config.ansible58
-rw-r--r--extraconfig/pre_network/config_then_reboot.yaml48
-rw-r--r--extraconfig/pre_network/host_config_and_reboot.role.j2.yaml100
-rw-r--r--extraconfig/tasks/major_upgrade_block_storage.sh13
-rw-r--r--extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml2
-rw-r--r--extraconfig/tasks/major_upgrade_ceph_storage.sh20
-rwxr-xr-xextraconfig/tasks/major_upgrade_check.sh19
-rw-r--r--extraconfig/tasks/major_upgrade_compute.sh20
-rwxr-xr-xextraconfig/tasks/major_upgrade_controller_pacemaker_1.sh185
-rwxr-xr-xextraconfig/tasks/major_upgrade_controller_pacemaker_2.sh211
-rwxr-xr-xextraconfig/tasks/major_upgrade_controller_pacemaker_3.sh76
-rwxr-xr-xextraconfig/tasks/major_upgrade_controller_pacemaker_4.sh17
-rwxr-xr-xextraconfig/tasks/major_upgrade_controller_pacemaker_5.sh8
-rwxr-xr-xextraconfig/tasks/major_upgrade_controller_pacemaker_6.sh15
-rw-r--r--extraconfig/tasks/major_upgrade_object_storage.sh15
-rw-r--r--extraconfig/tasks/major_upgrade_pacemaker.yaml89
-rw-r--r--extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml15
-rw-r--r--extraconfig/tasks/major_upgrade_pacemaker_migrations.sh21
-rw-r--r--extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml2
-rw-r--r--extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp8
-rwxr-xr-xextraconfig/tasks/pacemaker_common_functions.sh26
-rwxr-xr-xextraconfig/tasks/pacemaker_resource_restart.sh14
-rw-r--r--extraconfig/tasks/post_puppet_pacemaker.yaml2
-rw-r--r--extraconfig/tasks/post_puppet_pacemaker_restart.yaml2
-rw-r--r--extraconfig/tasks/pre_puppet_pacemaker.yaml2
-rwxr-xr-xextraconfig/tasks/yum_update.sh33
-rw-r--r--extraconfig/tasks/yum_update.yaml9
-rw-r--r--extraconfig/tasks/yum_update_noop.yaml2
-rw-r--r--firstboot/os-net-config-mappings.yaml6
-rw-r--r--firstboot/userdata_default.yaml2
-rw-r--r--firstboot/userdata_dev_rsync.yaml2
-rw-r--r--firstboot/userdata_example.yaml2
-rw-r--r--firstboot/userdata_heat_admin.yaml8
-rw-r--r--firstboot/userdata_root_password.yaml38
-rw-r--r--hosts-config.yaml25
-rw-r--r--net-config-bond.yaml72
-rw-r--r--net-config-bridge.yaml44
-rw-r--r--net-config-linux-bridge.yaml60
-rw-r--r--net-config-noop.yaml6
-rw-r--r--net-config-static-bridge-with-external-dhcp.yaml78
-rw-r--r--net-config-static-bridge.yaml67
-rw-r--r--net-config-static.yaml58
-rw-r--r--net-config-undercloud.yaml77
-rw-r--r--network/config/bond-with-vlans/ceph-storage.yaml121
-rw-r--r--network/config/bond-with-vlans/cinder-storage.yaml131
-rw-r--r--network/config/bond-with-vlans/compute-dpdk.yaml155
-rw-r--r--network/config/bond-with-vlans/compute.yaml131
-rw-r--r--network/config/bond-with-vlans/controller-no-external.yaml141
-rw-r--r--network/config/bond-with-vlans/controller-v6.yaml165
-rw-r--r--network/config/bond-with-vlans/controller.yaml153
-rw-r--r--network/config/bond-with-vlans/swift-storage.yaml131
-rw-r--r--network/config/multiple-nics/ceph-storage.yaml88
-rw-r--r--network/config/multiple-nics/cinder-storage.yaml101
-rw-r--r--network/config/multiple-nics/compute-dvr.yaml162
-rw-r--r--network/config/multiple-nics/compute.yaml108
-rw-r--r--network/config/multiple-nics/controller-v6.yaml155
-rw-r--r--network/config/multiple-nics/controller.yaml145
-rw-r--r--network/config/multiple-nics/swift-storage.yaml101
-rw-r--r--network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml95
-rw-r--r--network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml109
-rw-r--r--network/config/single-nic-linux-bridge-vlans/compute.yaml113
-rw-r--r--network/config/single-nic-linux-bridge-vlans/controller-v6.yaml155
-rw-r--r--network/config/single-nic-linux-bridge-vlans/controller.yaml152
-rw-r--r--network/config/single-nic-linux-bridge-vlans/swift-storage.yaml109
-rw-r--r--network/config/single-nic-vlans/ceph-storage.yaml87
-rw-r--r--network/config/single-nic-vlans/cinder-storage.yaml97
-rw-r--r--network/config/single-nic-vlans/compute.yaml97
-rw-r--r--network/config/single-nic-vlans/controller-no-external.yaml107
-rw-r--r--network/config/single-nic-vlans/controller-v6.yaml129
-rw-r--r--network/config/single-nic-vlans/controller.yaml117
-rw-r--r--network/config/single-nic-vlans/swift-storage.yaml97
-rwxr-xr-xnetwork/endpoints/build_endpoint_map.py7
-rw-r--r--network/endpoints/endpoint_data.yaml75
-rw-r--r--network/endpoints/endpoint_map.yaml1783
-rw-r--r--network/external.yaml2
-rw-r--r--network/external_v6.yaml2
-rw-r--r--network/internal_api.yaml2
-rw-r--r--network/internal_api_v6.yaml2
-rw-r--r--network/management.yaml2
-rw-r--r--network/management_v6.yaml2
-rw-r--r--network/networks.yaml2
-rw-r--r--network/ports/ctlplane_vip.yaml2
-rw-r--r--network/ports/external.yaml2
-rw-r--r--network/ports/external_from_pool.yaml2
-rw-r--r--network/ports/external_from_pool_v6.yaml2
-rw-r--r--network/ports/external_v6.yaml2
-rw-r--r--network/ports/from_service.yaml2
-rw-r--r--network/ports/from_service_v6.yaml2
-rw-r--r--network/ports/internal_api.yaml2
-rw-r--r--network/ports/internal_api_from_pool.yaml2
-rw-r--r--network/ports/internal_api_from_pool_v6.yaml2
-rw-r--r--network/ports/internal_api_v6.yaml2
-rw-r--r--network/ports/management.yaml2
-rw-r--r--network/ports/management_from_pool.yaml2
-rw-r--r--network/ports/management_from_pool_v6.yaml2
-rw-r--r--network/ports/management_v6.yaml2
-rw-r--r--network/ports/net_ip_list_map.yaml19
-rw-r--r--network/ports/net_ip_map.yaml2
-rw-r--r--network/ports/net_vip_map_external.yaml2
-rw-r--r--network/ports/net_vip_map_external_v6.yaml2
-rw-r--r--network/ports/noop.yaml2
-rw-r--r--network/ports/storage.yaml2
-rw-r--r--network/ports/storage_from_pool.yaml2
-rw-r--r--network/ports/storage_from_pool_v6.yaml2
-rw-r--r--network/ports/storage_mgmt.yaml2
-rw-r--r--network/ports/storage_mgmt_from_pool.yaml2
-rw-r--r--network/ports/storage_mgmt_from_pool_v6.yaml2
-rw-r--r--network/ports/storage_mgmt_v6.yaml2
-rw-r--r--network/ports/storage_v6.yaml2
-rw-r--r--network/ports/tenant.yaml2
-rw-r--r--network/ports/tenant_from_pool.yaml2
-rw-r--r--network/ports/tenant_from_pool_v6.yaml2
-rw-r--r--network/ports/tenant_v6.yaml2
-rw-r--r--network/ports/vip.yaml2
-rw-r--r--network/ports/vip_v6.yaml2
-rwxr-xr-xnetwork/scripts/run-os-net-config.sh148
-rw-r--r--network/service_net_map.j2.yaml77
-rw-r--r--network/storage.yaml2
-rw-r--r--network/storage_mgmt.yaml2
-rw-r--r--network/storage_mgmt_v6.yaml2
-rw-r--r--network/storage_v6.yaml2
-rw-r--r--network/tenant.yaml2
-rw-r--r--network/tenant_v6.yaml2
-rw-r--r--overcloud-resource-registry-puppet.j2.yaml55
-rw-r--r--overcloud.j2.yaml211
-rw-r--r--puppet/all-nodes-config.yaml249
-rw-r--r--puppet/blockstorage-role.yaml310
-rw-r--r--puppet/cephstorage-role.yaml309
-rw-r--r--puppet/compute-role.yaml322
-rw-r--r--puppet/config.role.j2.yaml17
-rw-r--r--puppet/controller-config-pacemaker.yaml41
-rw-r--r--puppet/controller-role.yaml345
-rw-r--r--puppet/deploy-artifacts.sh2
-rw-r--r--puppet/deploy-artifacts.yaml2
-rw-r--r--puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml2
-rw-r--r--puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml2
-rw-r--r--puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml2
-rw-r--r--puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml2
-rw-r--r--puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml2
-rw-r--r--puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml2
-rw-r--r--puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml2
-rw-r--r--puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml2
-rw-r--r--puppet/extraconfig/pre_deploy/controller/multiple.yaml2
-rw-r--r--puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml2
-rw-r--r--puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml2
-rw-r--r--puppet/extraconfig/pre_deploy/default.yaml2
-rw-r--r--puppet/extraconfig/pre_deploy/per_node.yaml2
-rw-r--r--puppet/extraconfig/tls/ca-inject.yaml2
-rw-r--r--puppet/extraconfig/tls/freeipa-enroll.yaml83
-rw-r--r--puppet/extraconfig/tls/tls-cert-inject.yaml2
-rw-r--r--puppet/major_upgrade_steps.j2.yaml147
-rw-r--r--puppet/manifests/overcloud_controller_pacemaker.pp21
-rw-r--r--puppet/manifests/overcloud_role.pp4
-rw-r--r--puppet/objectstorage-role.yaml311
-rw-r--r--puppet/post.j2.yaml93
-rw-r--r--puppet/role.role.j2.yaml327
-rw-r--r--puppet/services/README.rst66
-rw-r--r--puppet/services/aodh-api.yaml34
-rw-r--r--puppet/services/aodh-base.yaml4
-rw-r--r--puppet/services/aodh-evaluator.yaml9
-rw-r--r--puppet/services/aodh-listener.yaml9
-rw-r--r--puppet/services/aodh-notifier.yaml9
-rw-r--r--puppet/services/apache-internal-tls-certmonger.yaml45
-rw-r--r--puppet/services/apache.yaml8
-rw-r--r--puppet/services/auditd.yaml34
-rw-r--r--puppet/services/barbican-api.yaml152
-rw-r--r--puppet/services/ca-certs.yaml2
-rw-r--r--puppet/services/ceilometer-agent-central.yaml9
-rw-r--r--puppet/services/ceilometer-agent-compute.yaml19
-rw-r--r--puppet/services/ceilometer-agent-notification.yaml9
-rw-r--r--puppet/services/ceilometer-api.yaml27
-rw-r--r--puppet/services/ceilometer-base.yaml27
-rw-r--r--puppet/services/ceilometer-collector.yaml9
-rw-r--r--puppet/services/ceilometer-expirer.yaml6
-rw-r--r--puppet/services/ceph-base.yaml55
-rw-r--r--puppet/services/ceph-client.yaml2
-rw-r--r--puppet/services/ceph-external.yaml75
-rw-r--r--puppet/services/ceph-mds.yaml (renamed from puppet/services/pacemaker/ceilometer-api.yaml)30
-rw-r--r--puppet/services/ceph-mon.yaml34
-rw-r--r--puppet/services/ceph-osd.yaml46
-rw-r--r--puppet/services/ceph-rgw.yaml35
-rw-r--r--puppet/services/cinder-api.yaml41
-rw-r--r--puppet/services/cinder-backup.yaml2
-rw-r--r--puppet/services/cinder-base.yaml53
-rw-r--r--puppet/services/cinder-hpelefthand-iscsi.yaml56
-rw-r--r--puppet/services/cinder-scheduler.yaml9
-rw-r--r--puppet/services/cinder-volume.yaml17
-rw-r--r--puppet/services/database/mongodb-base.yaml2
-rw-r--r--puppet/services/database/mongodb.yaml9
-rw-r--r--puppet/services/database/mysql-internal-tls-certmonger.yaml47
-rw-r--r--puppet/services/database/mysql.yaml105
-rw-r--r--puppet/services/database/redis-base.yaml6
-rw-r--r--puppet/services/database/redis.yaml2
-rw-r--r--puppet/services/disabled/glance-registry.yaml30
-rw-r--r--puppet/services/ec2-api.yaml118
-rw-r--r--puppet/services/etcd.yaml58
-rw-r--r--puppet/services/glance-api.yaml59
-rw-r--r--puppet/services/glance-base.yaml22
-rw-r--r--puppet/services/glance-registry.yaml100
-rw-r--r--puppet/services/gnocchi-api.yaml30
-rw-r--r--puppet/services/gnocchi-base.yaml6
-rw-r--r--puppet/services/gnocchi-metricd.yaml11
-rw-r--r--puppet/services/gnocchi-statsd.yaml13
-rw-r--r--puppet/services/haproxy-internal-tls-certmonger.yaml37
-rw-r--r--puppet/services/haproxy-public-tls-certmonger.yaml6
-rw-r--r--puppet/services/haproxy.yaml30
-rw-r--r--puppet/services/heat-api-cfn.yaml23
-rw-r--r--puppet/services/heat-api-cloudwatch.yaml9
-rw-r--r--puppet/services/heat-api.yaml23
-rw-r--r--puppet/services/heat-base.yaml80
-rw-r--r--puppet/services/heat-engine.yaml45
-rw-r--r--puppet/services/horizon.yaml84
-rw-r--r--puppet/services/ironic-api.yaml6
-rw-r--r--puppet/services/ironic-base.yaml4
-rw-r--r--puppet/services/ironic-conductor.yaml11
-rw-r--r--puppet/services/keepalived.yaml48
-rw-r--r--puppet/services/kernel.yaml8
-rw-r--r--puppet/services/keystone.yaml106
-rw-r--r--puppet/services/logging/fluentd-base.yaml2
-rw-r--r--puppet/services/logging/fluentd-client.yaml2
-rw-r--r--puppet/services/logging/fluentd-config.yaml2
-rw-r--r--puppet/services/manila-api.yaml38
-rw-r--r--puppet/services/manila-backend-cephfs.yaml20
-rw-r--r--puppet/services/manila-backend-generic.yaml2
-rw-r--r--puppet/services/manila-backend-netapp.yaml2
-rw-r--r--puppet/services/manila-base.yaml26
-rw-r--r--puppet/services/manila-scheduler.yaml11
-rw-r--r--puppet/services/manila-share.yaml12
-rw-r--r--puppet/services/memcached.yaml2
-rw-r--r--puppet/services/mistral-api.yaml2
-rw-r--r--puppet/services/mistral-base.yaml4
-rw-r--r--puppet/services/mistral-engine.yaml2
-rw-r--r--puppet/services/mistral-executor.yaml2
-rw-r--r--puppet/services/monitoring/sensu-base.yaml19
-rw-r--r--puppet/services/monitoring/sensu-client.yaml2
-rw-r--r--puppet/services/network/contrail-analytics.yaml2
-rw-r--r--puppet/services/network/contrail-base.yaml2
-rw-r--r--puppet/services/network/contrail-config.yaml2
-rw-r--r--puppet/services/network/contrail-control.yaml2
-rw-r--r--puppet/services/network/contrail-database.yaml2
-rw-r--r--puppet/services/network/contrail-webui.yaml2
-rw-r--r--puppet/services/neutron-api.yaml83
-rw-r--r--puppet/services/neutron-base.yaml17
-rw-r--r--puppet/services/neutron-compute-plugin-midonet.yaml2
-rw-r--r--puppet/services/neutron-compute-plugin-nuage.yaml2
-rw-r--r--puppet/services/neutron-compute-plugin-opencontrail.yaml2
-rw-r--r--puppet/services/neutron-compute-plugin-ovn.yaml2
-rw-r--r--puppet/services/neutron-compute-plugin-plumgrid.yaml2
-rw-r--r--puppet/services/neutron-dhcp.yaml14
-rw-r--r--puppet/services/neutron-l3-compute-dvr.yaml2
-rw-r--r--puppet/services/neutron-l3.yaml28
-rw-r--r--puppet/services/neutron-metadata.yaml11
-rw-r--r--puppet/services/neutron-midonet.yaml2
-rw-r--r--puppet/services/neutron-ovs-agent.yaml36
-rw-r--r--puppet/services/neutron-ovs-dpdk-agent.yaml10
-rw-r--r--puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml73
-rw-r--r--puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml78
-rw-r--r--puppet/services/neutron-plugin-ml2-ovn.yaml13
-rw-r--r--puppet/services/neutron-plugin-ml2.yaml53
-rw-r--r--puppet/services/neutron-plugin-nuage.yaml2
-rw-r--r--puppet/services/neutron-plugin-opencontrail.yaml2
-rw-r--r--puppet/services/neutron-plugin-plumgrid.yaml4
-rw-r--r--puppet/services/neutron-sriov-agent.yaml7
-rw-r--r--puppet/services/nova-api.yaml74
-rw-r--r--puppet/services/nova-base.yaml183
-rw-r--r--puppet/services/nova-compute.yaml12
-rw-r--r--puppet/services/nova-conductor.yaml2
-rw-r--r--puppet/services/nova-consoleauth.yaml2
-rw-r--r--puppet/services/nova-ironic.yaml2
-rw-r--r--puppet/services/nova-libvirt.yaml17
-rw-r--r--puppet/services/nova-metadata.yaml2
-rw-r--r--puppet/services/nova-placement.yaml120
-rw-r--r--puppet/services/nova-scheduler.yaml4
-rw-r--r--puppet/services/nova-vnc-proxy.yaml7
-rw-r--r--puppet/services/octavia-api.yaml97
-rw-r--r--puppet/services/octavia-base.yaml62
-rw-r--r--puppet/services/opendaylight-api.yaml15
-rw-r--r--puppet/services/opendaylight-ovs.yaml19
-rw-r--r--puppet/services/ovn-dbs.yaml40
-rw-r--r--puppet/services/pacemaker.yaml8
-rw-r--r--puppet/services/pacemaker/ceilometer-agent-central.yaml45
-rw-r--r--puppet/services/pacemaker/ceilometer-agent-notification.yaml45
-rw-r--r--puppet/services/pacemaker/ceilometer-collector.yaml45
-rw-r--r--puppet/services/pacemaker/ceph-rbdmirror.yaml (renamed from puppet/services/pacemaker/gnocchi-api.yaml)28
-rw-r--r--puppet/services/pacemaker/cinder-api.yaml45
-rw-r--r--puppet/services/pacemaker/cinder-backup.yaml2
-rw-r--r--puppet/services/pacemaker/cinder-scheduler.yaml45
-rw-r--r--puppet/services/pacemaker/cinder-volume.yaml2
-rw-r--r--puppet/services/pacemaker/database/mongodb.yaml42
-rw-r--r--puppet/services/pacemaker/database/mysql.yaml10
-rw-r--r--puppet/services/pacemaker/database/redis.yaml2
-rw-r--r--puppet/services/pacemaker/glance-api.yaml74
-rw-r--r--puppet/services/pacemaker/glance-registry.yaml47
-rw-r--r--puppet/services/pacemaker/gnocchi-metricd.yaml47
-rw-r--r--puppet/services/pacemaker/gnocchi-statsd.yaml46
-rw-r--r--puppet/services/pacemaker/haproxy.yaml6
-rw-r--r--puppet/services/pacemaker/heat-api-cfn.yaml44
-rw-r--r--puppet/services/pacemaker/heat-api-cloudwatch.yaml44
-rw-r--r--puppet/services/pacemaker/heat-api.yaml44
-rw-r--r--puppet/services/pacemaker/heat-engine.yaml45
-rw-r--r--puppet/services/pacemaker/horizon.yaml41
-rw-r--r--puppet/services/pacemaker/keystone.yaml45
-rw-r--r--puppet/services/pacemaker/manila-share.yaml2
-rw-r--r--puppet/services/pacemaker/memcached.yaml42
-rw-r--r--puppet/services/pacemaker/neutron-dhcp.yaml46
-rw-r--r--puppet/services/pacemaker/neutron-l3.yaml46
-rw-r--r--puppet/services/pacemaker/neutron-metadata.yaml44
-rw-r--r--puppet/services/pacemaker/neutron-midonet.yaml41
-rw-r--r--puppet/services/pacemaker/neutron-ovs-agent.yaml42
-rw-r--r--puppet/services/pacemaker/neutron-plugin-ml2.yaml42
-rw-r--r--puppet/services/pacemaker/neutron-plugin-nuage.yaml40
-rw-r--r--puppet/services/pacemaker/neutron-plugin-opencontrail.yaml40
-rw-r--r--puppet/services/pacemaker/neutron-plugin-plumgrid.yaml40
-rw-r--r--puppet/services/pacemaker/neutron-server.yaml48
-rw-r--r--puppet/services/pacemaker/nova-api.yaml45
-rw-r--r--puppet/services/pacemaker/nova-conductor.yaml45
-rw-r--r--puppet/services/pacemaker/nova-consoleauth.yaml45
-rw-r--r--puppet/services/pacemaker/nova-scheduler.yaml45
-rw-r--r--puppet/services/pacemaker/nova-vnc-proxy.yaml45
-rw-r--r--puppet/services/pacemaker/rabbitmq.yaml2
-rw-r--r--puppet/services/pacemaker/sahara-api.yaml45
-rw-r--r--puppet/services/pacemaker/sahara-engine.yaml45
-rw-r--r--puppet/services/pacemaker_remote.yaml57
-rw-r--r--puppet/services/panko-api.yaml86
-rw-r--r--puppet/services/panko-base.yaml75
-rw-r--r--puppet/services/rabbitmq.yaml19
-rw-r--r--puppet/services/sahara-api.yaml6
-rw-r--r--puppet/services/sahara-base.yaml16
-rw-r--r--puppet/services/sahara-engine.yaml9
-rw-r--r--puppet/services/services.yaml39
-rw-r--r--puppet/services/snmp.yaml6
-rw-r--r--puppet/services/sshd.yaml (renamed from puppet/services/pacemaker/core.yaml)17
-rw-r--r--puppet/services/swift-base.yaml2
-rw-r--r--puppet/services/swift-proxy.yaml73
-rw-r--r--puppet/services/swift-ringbuilder.yaml17
-rw-r--r--puppet/services/swift-storage.yaml34
-rw-r--r--puppet/services/time/ntp.yaml4
-rw-r--r--puppet/services/time/timezone.yaml2
-rw-r--r--puppet/services/tripleo-firewall.yaml2
-rw-r--r--puppet/services/tripleo-packages.yaml14
-rw-r--r--puppet/services/vip-hosts.yaml56
-rw-r--r--puppet/services/zaqar.yaml66
-rw-r--r--puppet/upgrade_config.yaml58
-rw-r--r--releasenotes/notes/6.0.0-b52a14a71fc62788.yaml95
-rw-r--r--releasenotes/notes/composable-upgrades-d9ec7c634365e8e0.yaml14
-rw-r--r--releasenotes/notes/deployed-servers-fd47f18204cea105.yaml8
-rw-r--r--releasenotes/notes/puppet-auditd-6504295e8c6c7a3b.yaml9
-rw-r--r--releasenotes/source/_static/.placeholder0
-rw-r--r--releasenotes/source/conf.py264
-rw-r--r--releasenotes/source/index.rst18
-rw-r--r--releasenotes/source/unreleased.rst5
-rw-r--r--requirements.txt7
-rw-r--r--roles_data.yaml54
-rw-r--r--roles_data_undercloud.yaml35
-rwxr-xr-xscripts/hosts-config.sh39
-rw-r--r--setup.py11
-rw-r--r--test-requirements.txt10
-rwxr-xr-xtools/process-templates.py168
-rwxr-xr-xtools/releasenotes_tox.sh28
-rwxr-xr-xtools/tox_install.sh30
-rwxr-xr-xtools/yaml-nic-config-2-script.py219
-rwxr-xr-xtools/yaml-validate.py111
-rw-r--r--tox.ini11
473 files changed, 14545 insertions, 6572 deletions
diff --git a/.gitignore b/.gitignore
index 3d7aded8..cea6064d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -44,3 +44,21 @@ doc/_build
# Built by pbr (python setup.py sdist):
AUTHORS
ChangeLog
+
+extraconfig/all_nodes/mac_hostname.yaml
+extraconfig/all_nodes/random_string.yaml
+extraconfig/all_nodes/swap-partition.yaml
+extraconfig/all_nodes/swap.yaml
+extraconfig/tasks/major_upgrade_pacemaker_init.yaml
+network/service_net_map.yaml
+overcloud-resource-registry-puppet.yaml
+overcloud.yaml
+puppet/blockstorage-config.yaml
+puppet/cephstorage-config.yaml
+puppet/compute-config.yaml
+puppet/controller-config.yaml
+puppet/objectstorage-config.yaml
+puppet/post.yaml
+
+# Files created by releasenotes build
+releasenotes/build
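
The ignore entries above cover files rendered from the matching *.j2.yaml
jinja2 templates elsewhere in this tree. A minimal sketch of producing them
locally, assuming the tools/process-templates.py script touched by this change
runs with its default arguments:

    # Render the *.j2.yaml templates into the generated *.yaml files that
    # the new .gitignore entries exclude from version control.
    python tools/process-templates.py
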
diff --git a/Gemfile b/Gemfile
deleted file mode 100644
index 302ef415..00000000
--- a/Gemfile
+++ /dev/null
@@ -1,24 +0,0 @@
-source 'https://rubygems.org'
-
-group :development, :test do
- gem 'puppetlabs_spec_helper', :require => false
-
- gem 'puppet-lint', '~> 1.1'
- gem 'puppet-lint-absolute_classname-check'
- gem 'puppet-lint-absolute_template_path'
- gem 'puppet-lint-trailing_newline-check'
-
- # Puppet 4.x related lint checks
- gem 'puppet-lint-unquoted_string-check'
- gem 'puppet-lint-leading_zero-check'
- gem 'puppet-lint-variable_contains_upcase'
- gem 'puppet-lint-numericvariable'
-end
-
-if puppetversion = ENV['PUPPET_GEM_VERSION']
- gem 'puppet', puppetversion, :require => false
-else
- gem 'puppet', :require => false
-end
-
-# vim:ft=ruby
diff --git a/README.rst b/README.rst
index 36f9fba0..a9d8b382 100644
--- a/README.rst
+++ b/README.rst
@@ -1,3 +1,12 @@
+========================
+Team and repository tags
+========================
+
+.. image:: http://governance.openstack.org/badges/tripleo-heat-templates.svg
+ :target: http://governance.openstack.org/reference/tags/index.html
+
+.. Change things from this point on
+
======================
tripleo-heat-templates
======================
@@ -44,3 +53,70 @@ A description of the directory layout in TripleO Heat Templates.
* validation-scripts: validation scripts useful to all deployment
configurations
+
+
+Service testing matrix
+----------------------
+
+The CI scenario configurations are defined under `tripleo-heat-templates/ci/`
+and are exercised in CI according to the following table:
+
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| - | scenario001 | scenario002 | scenario003 | scenario004 | multinode-nonha |
++================+=============+=============+=============+=============+=================+
+| keystone | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| glance | file | swift | file | file | swift |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| cinder | rbd | iscsi | | | iscsi |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| heat | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| mysql | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| neutron | ovs | ovs | ovs | ovs | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| rabbitmq | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| mongodb | X | X | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| redis | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| haproxy | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| keepalived | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| memcached | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| pacemaker | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| nova | qemu | qemu | qemu | qemu | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| ntp | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| snmp | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| timezone | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| sahara | | | X | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| mistral | | | X | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| swift | | X | | | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| aodh | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| ceilometer | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| gnocchi | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| panko | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| barbican | | X | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| zaqar | | X | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| ec2api | | X | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| cephrgw | | X | | X | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
diff --git a/Rakefile b/Rakefile
deleted file mode 100644
index bca6a6c2..00000000
--- a/Rakefile
+++ /dev/null
@@ -1,6 +0,0 @@
-require 'puppetlabs_spec_helper/rake_tasks'
-require 'puppet-lint/tasks/puppet-lint'
-
-PuppetLint.configuration.fail_on_warnings = true
-PuppetLint.configuration.send('disable_80chars')
-PuppetLint.configuration.send('disable_autoloader_layout')
diff --git a/all-nodes-validation.yaml b/all-nodes-validation.yaml
index a7383375..65d01d0f 100644
--- a/all-nodes-validation.yaml
+++ b/all-nodes-validation.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Software Config to drive validations that occur on all nodes.
diff --git a/bootstrap-config.yaml b/bootstrap-config.yaml
index c87670e3..a3fdee96 100644
--- a/bootstrap-config.yaml
+++ b/bootstrap-config.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: 'Bootstrap Config'
parameters:
diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index ae747621..08c455f9 100644
--- a/capabilities-map.yaml
+++ b/capabilities-map.yaml
@@ -160,6 +160,16 @@ topics:
description: Deploy Mistral service
requires:
- overcloud-resource-registry-puppet.yaml
+ - title: Ceilometer Api
+ description:
+ environments:
+ - file: environments/services/disable-ceilometer-api.yaml
+ title: Ceilometer Api
+ description: Disable Ceilometer Api service. This service is
+ deprecated and will be removed in future releases. Please move
+ to using gnocchi/aodh/panko apis instead.
+ requires:
+ - overcloud-resource-registry-puppet.yaml
# - title: Network Interface Configuration
# description:
@@ -355,6 +365,16 @@ topics:
description: Enables PLUMgrid extensions
requires:
- overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-ml2-fujitsu-cfab.yaml
+ title: Fujitsu Neutron plugin for C-Fabric
+ description: Enable C-Fabric in the overcloud
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-ml2-fujitsu-fossw.yaml
+ title: Fujitsu Neutron plugin for FOS
+ description: Enable FOS in the overcloud
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- title: Nova Extensions
description:
@@ -399,12 +419,25 @@ topics:
via puppet
requires:
- overcloud-resource-registry-puppet.yaml
+ - file: environments/cinder-hpelefthand-config.yaml
+ title: Cinder HPELeftHandISCSI backend
+ description: >
+ Enables a Cinder HPELeftHandISCSI backend, configured
+ via puppet
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- file: environments/cinder-eqlx-config.yaml
title: Cinder EQLX backend
description: >
Enables a Cinder EQLX backend, configured via puppet
requires:
- overcloud-resource-registry-puppet.yaml
+ - file: environments/cinder-iser.yaml
+ title: Cinder iSER backend
+ description: >
+ Enable a Cinder iSER RDMA backend, configured via puppet
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- title: Externally managed Ceph
description: >
Enable the use of an externally managed Ceph cluster
@@ -504,3 +537,31 @@ topics:
description:
requires:
- overcloud-resource-registry-puppet.yaml
+
+ - title: Security Options
+ description: Security Hardening Options
+ environment_groups:
+ - title: SSH Banner Text
+ description: Enables population of SSH Banner Text
+ environments:
+ - file: environments/sshd-banner.yaml
+ title: SSH Banner Text
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Horizon Password Validation
+ description: Enable Horizon Password validation
+ environments:
+ - file: environments/horizon_password_validation.yaml
+ title: Horizon Password Validation
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: AuditD Rules
+ description: Management of AuditD rules
+ environments:
+ - file: environments/auditd.yaml
+ title: AuditD Rule Management
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
diff --git a/ci/README.rst b/ci/README.rst
new file mode 100644
index 00000000..44e8626d
--- /dev/null
+++ b/ci/README.rst
@@ -0,0 +1,11 @@
+=======================
+TripleO CI environments
+=======================
+
+TripleO CI environments are used exclusively for continuous integration
+and development purposes.
+They should not be used in production, and we do not guarantee that they work
+outside of TripleO CI.
+
+For more information about TripleO CI, please see:
+https://github.com/openstack-infra/tripleo-ci
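
As a usage illustration (the exact client flags are an assumption and may vary
by release), a CI environment from this directory is passed to the deployment
as an extra Heat environment file:

    # Sketch only: deploy a multinode CI overcloud using one of the
    # environments added in this change.
    openstack overcloud deploy \
      --templates /usr/share/openstack-tripleo-heat-templates \
      -e /usr/share/openstack-tripleo-heat-templates/ci/environments/multinode.yaml
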
diff --git a/ci/common/net-config-multinode-os-net-config.yaml b/ci/common/net-config-multinode-os-net-config.yaml
new file mode 100644
index 00000000..227c5da2
--- /dev/null
+++ b/ci/common/net-config-multinode-os-net-config.yaml
@@ -0,0 +1,114 @@
+heat_template_version: 2016-10-14
+
+description: >
+ Software Config to drive os-net-config for a simple bridge configured
+ with a static IP address for the ctlplane network.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet:
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ OvSBridgeMtu:
+ default: 1300
+ description: The mtu of the OvS bridge
+ type: number
+
+resources:
+
+ OsNetConfigImpl:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - |
+ #!/bin/bash
+ function network_config_hook {
+ primary_private_ip=$(cat /etc/nodepool/primary_node_private)
+ sed -i "s/primary_private_ip/$primary_private_ip/" /etc/os-net-config/config.json
+ subnode_private_ip=$(cat /etc/nodepool/node_private)
+ sed -i "s/subnode_private_ip/$subnode_private_ip/" /etc/os-net-config/config.json
+ # We start with an arbitrarily high vni key so that we don't
+ # overlap with Neutron created values. These will also match the
+ # values that we've been using previously from the devstack-gate
+ # code.
+ vni=1000002
+ subnode_index=$(grep -n $(cat /etc/nodepool/node_private) /etc/nodepool/sub_nodes_private | cut -d: -f1)
+ let vni+=$subnode_index
+ sed -i "s/vni/$vni/" /etc/os-net-config/config.json
+ export interface_name="br-ex_$primary_private_ip"
+ # Until we are fully migrated to os-net-config we need to clean
+ # up the old bridge first created by devstack-gate
+ ovs-vsctl del-br br-ex
+ }
+
+ -
+ str_replace:
+ template:
+ get_file: ../../network/scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: bridge_name
+ mtu:
+ get_param: OvSBridgeMtu
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ list_join:
+ - "/"
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ members:
+ - type: ovs_tunnel
+ name: interface_name
+ tunnel_type: vxlan
+ ovs_options:
+ - list_join:
+ - "="
+ - - key
+ - vni
+ - list_join:
+ - "="
+ - - remote_ip
+ - primary_private_ip
+ - list_join:
+ - "="
+ - - local_ip
+ - subnode_private_ip
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
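
To make the str_replace above easier to follow, this is roughly the
network_config that os-net-config ends up consuming after the parameters are
substituted and network_config_hook rewrites the placeholders; every address,
VNI and interface name below is an illustrative placeholder, not a value taken
from this change:

    # Illustrative effective os-net-config input (placeholder values only).
    network_config:
      - type: ovs_bridge
        name: br-ex                       # bridge_name, per the Heat input
        mtu: 1300                         # OvSBridgeMtu default
        use_dhcp: false
        addresses:
          - ip_netmask: 192.168.24.10/24  # ControlPlaneIp/ControlPlaneSubnetCidr
        members:
          - type: ovs_tunnel
            name: br-ex_172.16.0.5        # "br-ex_<primary_private_ip>"
            tunnel_type: vxlan
            ovs_options:
              - key=1000003               # 1000002 + subnode index
              - remote_ip=172.16.0.5
              - local_ip=172.16.0.6
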
diff --git a/ci/common/net-config-multinode.yaml b/ci/common/net-config-multinode.yaml
new file mode 100644
index 00000000..bf947d3e
--- /dev/null
+++ b/ci/common/net-config-multinode.yaml
@@ -0,0 +1,64 @@
+heat_template_version: ocata
+
+description: >
+ Software Config to drive os-net-config for a simple bridge configured
+ with a static IP address for the ctlplane network.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet:
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ str_replace:
+ template: |
+ #!/bin/bash
+ ip addr add CONTROLPLANEIP/CONTROLPLANESUBNETCIDR dev $bridge_name
+ params:
+ CONTROLPLANEIP: {get_param: ControlPlaneIp}
+ CONTROLPLANESUBNETCIDR: {get_param: ControlPlaneSubnetCidr}
+ inputs:
+ -
+ name: bridge_name
+ default: br-ex
+ description: bridge-name
+ type: String
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
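
For reference, the SoftwareConfig above renders to a short script; with an
example ControlPlaneIp of 192.168.24.12 and the default CIDR, the node would
execute roughly:

    #!/bin/bash
    # $bridge_name is the deployment input defined above (defaults to br-ex).
    ip addr add 192.168.24.12/24 dev $bridge_name
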
diff --git a/ci/environments/multinode-3nodes.yaml b/ci/environments/multinode-3nodes.yaml
new file mode 100644
index 00000000..f35a0804
--- /dev/null
+++ b/ci/environments/multinode-3nodes.yaml
@@ -0,0 +1,77 @@
+# Specifies which roles (groups of nodes) will be deployed
+# Note this is used as an input to the various *.j2.yaml
+# jinja2 templates, so that they are converted into *.yaml
+# during the plan creation (via a mistral action/workflow).
+#
+# The format is a list of roles, each described by the following keys:
+#
+# * name: (string) mandatory, name of the role, must be unique
+#
+# CountDefault: (number) optional, default number of nodes, defaults to 0
+# sets the default for the {{role.name}}Count parameter in overcloud.yaml
+#
+# HostnameFormatDefault: (string) optional default format string for hostname
+# defaults to '%stackname%-{{role.name.lower()}}-%index%'
+# sets the default for {{role.name}}HostnameFormat parameter in overcloud.yaml
+#
+# ServicesDefault: (list) optional default list of services to be deployed
+# on the role, defaults to an empty list. Sets the default for the
+# {{role.name}}Services parameter in overcloud.yaml
+
+- name: ControllerApi
+ CountDefault: 1
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::Core
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronApi
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::NovaConsoleauth
+ - OS::TripleO::Services::NovaVncProxy
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+
+- name: Controller
+ CountDefault: 1
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CinderBackup
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Core
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
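
A hedged usage sketch (assuming the deploy client in use supports a custom
roles file option): a roles definition such as this one is supplied at plan
creation time instead of the default roles_data.yaml, for example:

    # Deploy with the two-role split defined above (flags and paths shown
    # for illustration only).
    openstack overcloud deploy \
      --templates \
      -r ci/environments/multinode-3nodes.yaml
      # ... plus whatever -e environment files the CI job needs
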
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
new file mode 100644
index 00000000..11243c8a
--- /dev/null
+++ b/ci/environments/multinode.yaml
@@ -0,0 +1,47 @@
+resource_registry:
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+
+parameter_defaults:
+ ControllerServices:
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ ControllerExtraConfig:
+ nova::compute::libvirt::services::libvirt_virt_type: qemu
+ nova::compute::libvirt::libvirt_virt_type: qemu
+ # Required for Centos 7.3 and Qemu 2.6.0
+ nova::compute::libvirt::libvirt_cpu_mode: 'none'
+ SwiftCeilometerPipelineEnabled: False
diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml
new file mode 100644
index 00000000..520a0c65
--- /dev/null
+++ b/ci/environments/multinode_major_upgrade.yaml
@@ -0,0 +1,47 @@
+resource_registry:
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ OS::TripleO::Services::Core: multinode-core.yaml
+
+parameter_defaults:
+ ControllerServices:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Core
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::GlanceRegistry
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::SaharaApi
+ - OS::TripleO::Services::SaharaEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ ControllerExtraConfig:
+ nova::compute::libvirt::services::libvirt_virt_type: qemu
+ nova::compute::libvirt::libvirt_virt_type: qemu
+ # Required for Centos 7.3 and Qemu 2.6.0
+ nova::compute::libvirt::libvirt_cpu_mode: 'none'
+ heat::rpc_response_timeout: 600
+ SwiftCeilometerPipelineEnabled: False
diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml
new file mode 100644
index 00000000..f75c0fd3
--- /dev/null
+++ b/ci/environments/scenario001-multinode.yaml
@@ -0,0 +1,93 @@
+resource_registry:
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml
+ OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml
+ OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml
+ OS::TripleO::Services::PankoApi: /usr/share/openstack-tripleo-heat-templates/puppet/services/panko-api.yaml
+
+parameter_defaults:
+ ControllerServices:
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::MongoDb
+ - OS::TripleO::Services::Redis
+ - OS::TripleO::Services::AodhApi
+ - OS::TripleO::Services::AodhEvaluator
+ - OS::TripleO::Services::AodhNotifier
+ - OS::TripleO::Services::AodhListener
+ - OS::TripleO::Services::CeilometerApi
+ - OS::TripleO::Services::CeilometerCollector
+ - OS::TripleO::Services::CeilometerExpirer
+ - OS::TripleO::Services::CeilometerAgentCentral
+ - OS::TripleO::Services::CeilometerAgentNotification
+ - OS::TripleO::Services::GnocchiApi
+ - OS::TripleO::Services::GnocchiMetricd
+ - OS::TripleO::Services::GnocchiStatsd
+ - OS::TripleO::Services::PankoApi
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::CephClient
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderBackup
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ ControllerExtraConfig:
+ nova::compute::libvirt::services::libvirt_virt_type: qemu
+ nova::compute::libvirt::libvirt_virt_type: qemu
+ Debug: true
+ #NOTE(gfidente): not great but we need this to deploy on ext4
+ #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
+ ExtraConfig:
+ ceph::profile::params::osd_max_object_name_len: 256
+ ceph::profile::params::osd_max_object_namespace_len: 64
+ #NOTE: These ID's and keys should be regenerated for
+ # a production deployment. What is here is suitable for
+ # developer and CI testing only.
+ CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
+ CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
+ CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
+ CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+ NovaEnableRbdBackend: true
+ CinderEnableRbdBackend: true
+ CinderBackupBackend: ceph
+ GlanceBackend: rbd
+ GnocchiBackend: rbd
+ CinderEnableIscsiBackend: false
+ BannerText: |
+ ******************************************************************
+ * This system is for the use of authorized users only. Usage of *
+ * this system may be monitored and recorded by system personnel. *
+ * Anyone using this system expressly consents to such monitoring *
+ * and is advised that if such monitoring reveals possible *
+ * evidence of criminal activity, system personnel may provide *
+ * the evidence from such monitoring to law enforcement officials.*
+ ******************************************************************
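
Since the note above stresses that these IDs and keys are suitable for CI
only, a minimal sketch of generating fresh values for a real deployment
(standard Ceph tooling, shown as an assumption about the operator's workflow):

    # New cluster FSID for CephClusterFSID
    uuidgen
    # Fresh cephx secrets for CephMonKey, CephAdminKey and CephClientKey
    ceph-authtool --gen-print-key
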
diff --git a/ci/environments/scenario002-multinode.yaml b/ci/environments/scenario002-multinode.yaml
new file mode 100644
index 00000000..e611c6f9
--- /dev/null
+++ b/ci/environments/scenario002-multinode.yaml
@@ -0,0 +1,54 @@
+resource_registry:
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Services::BarbicanApi: ../../puppet/services/barbican-api.yaml
+ OS::TripleO::Services::Zaqar: ../../puppet/services/zaqar.yaml
+ OS::TripleO::Services::Ec2Api: ../../puppet/services/ec2-api.yaml
+
+parameter_defaults:
+ ControllerServices:
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderBackup
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::BarbicanApi
+ - OS::TripleO::Services::MongoDb
+ - OS::TripleO::Services::Zaqar
+ - OS::TripleO::Services::Ec2Api
+ ControllerExtraConfig:
+ nova::compute::libvirt::services::libvirt_virt_type: qemu
+ nova::compute::libvirt::libvirt_virt_type: qemu
+ Debug: true
+ SwiftCeilometerPipelineEnabled: false
diff --git a/ci/environments/scenario003-multinode.yaml b/ci/environments/scenario003-multinode.yaml
new file mode 100644
index 00000000..b26ee0a3
--- /dev/null
+++ b/ci/environments/scenario003-multinode.yaml
@@ -0,0 +1,53 @@
+resource_registry:
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Services::SaharaApi: ../../puppet/services/sahara-api.yaml
+ OS::TripleO::Services::SaharaEngine: ../../puppet/services/sahara-engine.yaml
+ OS::TripleO::Services::MistralApi: ../../puppet/services/mistral-api.yaml
+ OS::TripleO::Services::MistralEngine: ../../puppet/services/mistral-engine.yaml
+ OS::TripleO::Services::MistralExecutor: ../../puppet/services/mistral-executor.yaml
+
+parameter_defaults:
+ ControllerServices:
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::SaharaApi
+ - OS::TripleO::Services::SaharaEngine
+ - OS::TripleO::Services::MistralApi
+ - OS::TripleO::Services::MistralEngine
+ - OS::TripleO::Services::MistralExecutor
+ ControllerExtraConfig:
+ nova::compute::libvirt::services::libvirt_virt_type: qemu
+ nova::compute::libvirt::libvirt_virt_type: qemu
+ Debug: true
+ # we don't deploy Swift so we switch to file backend.
+ GlanceBackend: 'file'
+ KeystoneTokenProvider: 'fernet'
+ SwiftCeilometerPipelineEnabled: false
diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml
new file mode 100644
index 00000000..5b253a84
--- /dev/null
+++ b/ci/environments/scenario004-multinode.yaml
@@ -0,0 +1,63 @@
+resource_registry:
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml
+ OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml
+ OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml
+ OS::TripleO::Services::CephRgw: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-rgw.yaml
+ OS::TripleO::Services::SwiftProxy: OS::Heat::None
+ OS::TripleO::Services::SwiftStorage: OS::Heat::None
+ OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
+
+parameter_defaults:
+ ControllerServices:
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::CephClient
+ - OS::TripleO::Services::CephRgw
+ ControllerExtraConfig:
+ nova::compute::libvirt::services::libvirt_virt_type: qemu
+ nova::compute::libvirt::libvirt_virt_type: qemu
+ Debug: true
+ #NOTE(gfidente): not great but we need this to deploy on ext4
+ #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
+ ExtraConfig:
+ ceph::profile::params::osd_max_object_name_len: 256
+ ceph::profile::params::osd_max_object_namespace_len: 64
+ #NOTE: These IDs and keys should be regenerated for
+ # a production deployment. What is here is suitable for
+ # developer and CI testing only.
+ CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
+ CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
+ CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
+ CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+ SwiftCeilometerPipelineEnabled: false
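The NOTE above flags that the Ceph FSID and keys in this environment are for developer and CI use only. A minimal sketch of regenerating them for a production environment file, assuming the uuidgen and ceph-authtool utilities are available on the host (these commands are not part of the patch):

    # New cluster FSID for CephClusterFSID
    uuidgen
    # New keys for CephMonKey, CephAdminKey and CephClientKey (run once per key)
    ceph-authtool --gen-print-key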
diff --git a/ci/pingtests/scenario001-multinode.yaml b/ci/pingtests/scenario001-multinode.yaml
new file mode 100644
index 00000000..2651c0d0
--- /dev/null
+++ b/ci/pingtests/scenario001-multinode.yaml
@@ -0,0 +1,186 @@
+heat_template_version: ocata
+
+description: >
+ HOT template to create resources deployed by scenario001.
+parameters:
+ key_name:
+ type: string
+ description: Name of keypair to assign to servers
+ default: 'pingtest_key'
+ image:
+ type: string
+ description: Name of image to use for servers
+ default: 'pingtest_image'
+ public_net_name:
+ type: string
+ default: 'nova'
+ description: >
+ ID or name of public network for which floating IP addresses will be allocated
+ private_net_name:
+ type: string
+ description: Name of private network to be created
+ default: 'default-net'
+ private_net_cidr:
+ type: string
+ description: Private network address (CIDR notation)
+ default: '192.168.2.0/24'
+ private_net_gateway:
+ type: string
+ description: Private network gateway address
+ default: '192.168.2.1'
+ private_net_pool_start:
+ type: string
+ description: Start of private network IP address allocation pool
+ default: '192.168.2.100'
+ private_net_pool_end:
+ type: string
+ default: '192.168.2.200'
+ description: End of private network IP address allocation pool
+
+resources:
+
+ key_pair:
+ type: OS::Nova::KeyPair
+ properties:
+ save_private_key: true
+ name: {get_param: key_name }
+
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: private_net_name }
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: { get_param: private_net_cidr }
+ gateway_ip: { get_param: private_net_gateway }
+ allocation_pools:
+ - start: { get_param: private_net_pool_start }
+ end: { get_param: private_net_pool_end }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: public_net_name }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+ volume1:
+ type: OS::Cinder::Volume
+ properties:
+ name: Volume1
+ image: { get_param: image }
+ size: 1
+
+ server1:
+ type: OS::Nova::Server
+ depends_on: volume1
+ properties:
+ name: Server1
+ block_device_mapping:
+ - device_name: vda
+ volume_id: { get_resource: volume1 }
+ flavor: { get_resource: test_flavor }
+ key_name: { get_resource: key_pair }
+ networks:
+ - port: { get_resource: server1_port }
+
+ server1_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+
+ server1_floating_ip:
+ type: OS::Neutron::FloatingIP
+ # TODO: investigate why we need this depends_on and if we could
+ # replace it by router_id with get_resource: router_interface
+ depends_on: router_interface
+ properties:
+ floating_network: { get_param: public_net_name }
+ port_id: { get_resource: server1_port }
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ description: Add security group rules for server
+ name: pingtest-security-group
+ rules:
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: icmp
+
+ test_flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 512
+ vcpus: 1
+
+# Disabling this resource now
+# https://bugs.launchpad.net/tripleo/+bug/1646506
+# gnocchi_res_alarm:
+# type: OS::Aodh::GnocchiResourcesAlarm
+# properties:
+# description: Do stuff with gnocchi
+# metric: cpu_util
+# aggregation_method: mean
+# granularity: 60
+# evaluation_periods: 1
+# threshold: 50
+# alarm_actions: []
+# resource_type: instance
+# resource_id: { get_resource: server1 }
+# comparison_operator: gt
+
+ asg:
+ type: OS::Heat::AutoScalingGroup
+ properties:
+ max_size: 5
+ min_size: 1
+ resource:
+ type: OS::Heat::RandomString
+
+ scaleup_policy:
+ type: OS::Heat::ScalingPolicy
+ properties:
+ adjustment_type: change_in_capacity
+ auto_scaling_group_id: {get_resource: asg}
+ cooldown: 0
+ scaling_adjustment: 1
+
+ alarm:
+ type: OS::Aodh::Alarm
+ properties:
+ description: Scale-up if the average CPU > 50% for 1 minute
+ meter_name: test_meter
+ statistic: count
+ comparison_operator: ge
+ threshold: 1
+ period: 60
+ evaluation_periods: 1
+ alarm_actions:
+ - {get_attr: [scaleup_policy, alarm_url]}
+ matching_metadata:
+ metadata.metering.stack_id: {get_param: "OS::stack_id"}
+
+outputs:
+ server1_private_ip:
+ description: IP address of server1 in private network
+ value: { get_attr: [ server1, first_address ] }
+ server1_public_ip:
+ description: Floating IP address of server1 in public network
+ value: { get_attr: [ server1_floating_ip, floating_ip_address ] }
+ asg_size:
+ value: {get_attr: [asg, current_size]}
diff --git a/ci/pingtests/scenario002-multinode.yaml b/ci/pingtests/scenario002-multinode.yaml
new file mode 100644
index 00000000..da1ae60c
--- /dev/null
+++ b/ci/pingtests/scenario002-multinode.yaml
@@ -0,0 +1,158 @@
+heat_template_version: ocata
+
+description: >
+ HOT template to create resources deployed by scenario002.
+parameters:
+ key_name:
+ type: string
+ description: Name of keypair to assign to servers
+ default: 'pingtest_key'
+ image:
+ type: string
+ description: Name of image to use for servers
+ default: 'pingtest_image'
+ public_net_name:
+ type: string
+ default: 'nova'
+ description: >
+ ID or name of public network for which floating IP addresses will be allocated
+ private_net_name:
+ type: string
+ description: Name of private network to be created
+ default: 'default-net'
+ private_net_cidr:
+ type: string
+ description: Private network address (CIDR notation)
+ default: '192.168.2.0/24'
+ private_net_gateway:
+ type: string
+ description: Private network gateway address
+ default: '192.168.2.1'
+ private_net_pool_start:
+ type: string
+ description: Start of private network IP address allocation pool
+ default: '192.168.2.100'
+ private_net_pool_end:
+ type: string
+ default: '192.168.2.200'
+ description: End of private network IP address allocation pool
+
+resources:
+
+ key_pair:
+ type: OS::Nova::KeyPair
+ properties:
+ save_private_key: true
+ name: {get_param: key_name }
+
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: private_net_name }
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: { get_param: private_net_cidr }
+ gateway_ip: { get_param: private_net_gateway }
+ allocation_pools:
+ - start: { get_param: private_net_pool_start }
+ end: { get_param: private_net_pool_end }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: public_net_name }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+ luks_volume_type:
+ type: OS::Cinder::VolumeType
+ properties:
+ name: LUKS
+
+ encrypted_volume_type:
+ type: OS::Cinder::EncryptedVolumeType
+ properties:
+ volume_type: {get_resource: luks_volume_type}
+ provider: luks
+ cipher: aes-xts-plain64
+ control_location: front-end
+ key_size: 256
+
+ volume1:
+ type: OS::Cinder::Volume
+ depends_on: encrypted_volume_type
+ properties:
+ name: Volume1
+ image: { get_param: image }
+ size: 1
+ volume_type: {get_resource: luks_volume_type}
+
+ server1:
+ type: OS::Nova::Server
+ depends_on: volume1
+ properties:
+ name: Server1
+ block_device_mapping:
+ - device_name: vda
+ volume_id: { get_resource: volume1 }
+ flavor: { get_resource: test_flavor }
+ key_name: { get_resource: key_pair }
+ networks:
+ - port: { get_resource: server1_port }
+
+ server1_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+
+ server1_floating_ip:
+ type: OS::Neutron::FloatingIP
+ # TODO: investigate why we need this depends_on and if we could
+ # replace it by router_id with get_resource: router_interface
+ depends_on: router_interface
+ properties:
+ floating_network: { get_param: public_net_name }
+ port_id: { get_resource: server1_port }
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ description: Add security group rules for server
+ name: pingtest-security-group
+ rules:
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: icmp
+
+ test_flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 512
+ vcpus: 1
+
+ zaqar_queue:
+ type: OS::Zaqar::Queue
+ properties:
+ name: pingtest-queue
+
+outputs:
+ server1_private_ip:
+ description: IP address of server1 in private network
+ value: { get_attr: [ server1, first_address ] }
+ server1_public_ip:
+ description: Floating IP address of server1 in public network
+ value: { get_attr: [ server1_floating_ip, floating_ip_address ] }
diff --git a/ci/pingtests/scenario003-multinode.yaml b/ci/pingtests/scenario003-multinode.yaml
new file mode 100644
index 00000000..c3ceadaf
--- /dev/null
+++ b/ci/pingtests/scenario003-multinode.yaml
@@ -0,0 +1,154 @@
+heat_template_version: ocata
+
+description: >
+ HOT template to create resources deployed by scenario003.
+parameters:
+ key_name:
+ type: string
+ description: Name of keypair to assign to servers
+ default: 'pingtest_key'
+ image:
+ type: string
+ description: Name of image to use for servers
+ default: 'pingtest_image'
+ public_net_name:
+ type: string
+ default: 'nova'
+ description: >
+ ID or name of public network for which floating IP addresses will be allocated
+ private_net_name:
+ type: string
+ description: Name of private network to be created
+ default: 'default-net'
+ private_net_cidr:
+ type: string
+ description: Private network address (CIDR notation)
+ default: '192.168.2.0/24'
+ private_net_gateway:
+ type: string
+ description: Private network gateway address
+ default: '192.168.2.1'
+ private_net_pool_start:
+ type: string
+ description: Start of private network IP address allocation pool
+ default: '192.168.2.100'
+ private_net_pool_end:
+ type: string
+ default: '192.168.2.200'
+ description: End of private network IP address allocation pool
+
+resources:
+
+ key_pair:
+ type: OS::Nova::KeyPair
+ properties:
+ save_private_key: true
+ name: {get_param: key_name }
+
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: private_net_name }
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: { get_param: private_net_cidr }
+ gateway_ip: { get_param: private_net_gateway }
+ allocation_pools:
+ - start: { get_param: private_net_pool_start }
+ end: { get_param: private_net_pool_end }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: public_net_name }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+ server1:
+ type: OS::Nova::Server
+ properties:
+ name: Server1
+ flavor: { get_resource: test_flavor }
+ image: { get_param: image }
+ key_name: { get_resource: key_pair }
+ networks:
+ - port: { get_resource: server1_port }
+
+ server1_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+
+ server1_floating_ip:
+ type: OS::Neutron::FloatingIP
+ # TODO: investigate why we need this depends_on and if we could
+ # replace it by router_id with get_resource: router_interface
+ depends_on: router_interface
+ properties:
+ floating_network: { get_param: public_net_name }
+ port_id: { get_resource: server1_port }
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ description: Add security group rules for server
+ name: pingtest-security-group
+ rules:
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: icmp
+
+ test_flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 512
+ vcpus: 1
+
+ sahara-image:
+ type: OS::Sahara::ImageRegistry
+ properties:
+ image: { get_param: image }
+ username: cirros
+ tags:
+ - tripleo
+
+ mistral_workflow:
+ type: OS::Mistral::Workflow
+ properties:
+ type: direct
+ name: test_workflow
+ description: Just testing workflow resource.
+ input:
+ phrase: Hello!
+ output:
+ out: <% $.word %>
+ tasks:
+ - name: hello
+ action: std.echo output=<% $.phrase %>
+ publish:
+ word: <% $.hello %>
+
+outputs:
+ server1_private_ip:
+ description: IP address of server1 in private network
+ value: { get_attr: [ server1, first_address ] }
+ server1_public_ip:
+ description: Floating IP address of server1 in public network
+ value: { get_attr: [ server1_floating_ip, floating_ip_address ] }
+ exec:
+ description: Mistral output verifying execution
+ value: { get_attr: [mistral_workflow, executions]}
\ No newline at end of file
diff --git a/ci/pingtests/scenario004-multinode.yaml b/ci/pingtests/scenario004-multinode.yaml
new file mode 100644
index 00000000..a188fd1c
--- /dev/null
+++ b/ci/pingtests/scenario004-multinode.yaml
@@ -0,0 +1,127 @@
+heat_template_version: ocata
+
+description: >
+ HOT template to create resources deployed by scenario004.
+parameters:
+ key_name:
+ type: string
+ description: Name of keypair to assign to servers
+ default: 'pingtest_key'
+ image:
+ type: string
+ description: Name of image to use for servers
+ default: 'pingtest_image'
+ public_net_name:
+ type: string
+ default: 'nova'
+ description: >
+ ID or name of public network for which floating IP addresses will be allocated
+ private_net_name:
+ type: string
+ description: Name of private network to be created
+ default: 'default-net'
+ private_net_cidr:
+ type: string
+ description: Private network address (CIDR notation)
+ default: '192.168.2.0/24'
+ private_net_gateway:
+ type: string
+ description: Private network gateway address
+ default: '192.168.2.1'
+ private_net_pool_start:
+ type: string
+ description: Start of private network IP address allocation pool
+ default: '192.168.2.100'
+ private_net_pool_end:
+ type: string
+ default: '192.168.2.200'
+ description: End of private network IP address allocation pool
+
+resources:
+
+ key_pair:
+ type: OS::Nova::KeyPair
+ properties:
+ save_private_key: true
+ name: {get_param: key_name }
+
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: private_net_name }
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: { get_param: private_net_cidr }
+ gateway_ip: { get_param: private_net_gateway }
+ allocation_pools:
+ - start: { get_param: private_net_pool_start }
+ end: { get_param: private_net_pool_end }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: public_net_name }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+ server1:
+ type: OS::Nova::Server
+ properties:
+ name: Server1
+ flavor: { get_resource: test_flavor }
+ image: { get_param: image }
+ key_name: { get_resource: key_pair }
+ networks:
+ - port: { get_resource: server1_port }
+
+ server1_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+
+ server1_floating_ip:
+ type: OS::Neutron::FloatingIP
+ # TODO: investigate why we need this depends_on and if we could
+ # replace it by router_id with get_resource: router_interface
+ depends_on: router_interface
+ properties:
+ floating_network: { get_param: public_net_name }
+ port_id: { get_resource: server1_port }
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ description: Add security group rules for server
+ name: pingtest-security-group
+ rules:
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: icmp
+
+ test_flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 512
+ vcpus: 1
+
+outputs:
+ server1_private_ip:
+ description: IP address of server1 in private network
+ value: { get_attr: [ server1, first_address ] }
+ server1_public_ip:
+ description: Floating IP address of server1 in public network
+ value: { get_attr: [ server1_floating_ip, floating_ip_address ] }
diff --git a/ci/pingtests/tenantvm_floatingip.yaml b/ci/pingtests/tenantvm_floatingip.yaml
new file mode 100644
index 00000000..0f31bc16
--- /dev/null
+++ b/ci/pingtests/tenantvm_floatingip.yaml
@@ -0,0 +1,142 @@
+heat_template_version: 2013-05-23
+
+description: >
+ This template resides in tripleo-ci for Mitaka CI jobs only.
+ For Newton and beyond, please look in THT.
+ HOT template to create a new neutron network plus a router to the public
+ network, and for deploying a server into the new network. The template also
+ assigns a floating IP address and sets security group rules. ADAPTED FROM
+ https://raw.githubusercontent.com/openstack/heat-templates/master/hot/servers_in_new_neutron_net.yaml
+parameters:
+ key_name:
+ type: string
+ description: Name of keypair to assign to servers
+ default: 'pingtest_key'
+ image:
+ type: string
+ description: Name of image to use for servers
+ default: 'pingtest_image'
+ public_net_name:
+ type: string
+ default: 'nova'
+ description: >
+ ID or name of public network for which floating IP addresses will be allocated
+ private_net_name:
+ type: string
+ description: Name of private network to be created
+ default: 'default-net'
+ private_net_cidr:
+ type: string
+ description: Private network address (CIDR notation)
+ default: '192.168.2.0/24'
+ private_net_gateway:
+ type: string
+ description: Private network gateway address
+ default: '192.168.2.1'
+ private_net_pool_start:
+ type: string
+ description: Start of private network IP address allocation pool
+ default: '192.168.2.100'
+ private_net_pool_end:
+ type: string
+ default: '192.168.2.200'
+ description: End of private network IP address allocation pool
+
+resources:
+
+ key_pair:
+ type: OS::Nova::KeyPair
+ properties:
+ save_private_key: true
+ name: {get_param: key_name }
+
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: private_net_name }
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: { get_param: private_net_cidr }
+ gateway_ip: { get_param: private_net_gateway }
+ allocation_pools:
+ - start: { get_param: private_net_pool_start }
+ end: { get_param: private_net_pool_end }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: public_net_name }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+ volume1:
+ type: OS::Cinder::Volume
+ properties:
+ name: Volume1
+ image: { get_param: image }
+ size: 1
+
+ server1:
+ type: OS::Nova::Server
+ depends_on: volume1
+ properties:
+ name: Server1
+ block_device_mapping:
+ - device_name: vda
+ volume_id: { get_resource: volume1 }
+ flavor: { get_resource: test_flavor }
+ key_name: { get_resource: key_pair }
+ networks:
+ - port: { get_resource: server1_port }
+
+ server1_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+
+ server1_floating_ip:
+ type: OS::Neutron::FloatingIP
+ # TODO: investigate why we need this depends_on and if we could
+ # replace it by router_id with get_resource: router_interface
+ depends_on: router_interface
+ properties:
+ floating_network: { get_param: public_net_name }
+ port_id: { get_resource: server1_port }
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ description: Add security group rules for server
+ name: pingtest-security-group
+ rules:
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: icmp
+
+ test_flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 512
+ vcpus: 1
+
+outputs:
+ server1_private_ip:
+ description: IP address of server1 in private network
+ value: { get_attr: [ server1, first_address ] }
+ server1_public_ip:
+ description: Floating IP address of server1 in public network
+ value: { get_attr: [ server1_floating_ip, floating_ip_address ] }
diff --git a/ci/scripts/freeipa_setup.sh b/ci/scripts/freeipa_setup.sh
new file mode 100644
index 00000000..a36493a1
--- /dev/null
+++ b/ci/scripts/freeipa_setup.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+#
+# Used environment variables:
+#
+# - Hostname
+# - FreeIPAIP
+# - DirectoryManagerPassword
+# - AdminPassword
+# - UndercloudFQDN
+# - HostsSecret
+# - ProvisioningCIDR: If set, it adds the given CIDR to the provisioning
+# interface (which is hardcoded to eth1)
+#
+set -eux
+
+if [ -f ~/freeipa-setup.env ]; then
+ source ~/freeipa-setup.env
+elif [ -f "/tmp/freeipa-setup.env" ]; then
+ source /tmp/freeipa-setup.env
+fi
+
+if [ -n "$ProvisioningCIDR" ]; then
+ # Add address to provisioning network interface
+ ip link set dev eth1 up
+ ip addr add $ProvisioningCIDR dev eth1
+fi
+
+# Set DNS servers
+echo "nameserver 8.8.8.8" >> /etc/resolv.conf
+echo "nameserver 8.8.4.4" >> /etc/resolv.conf
+
+yum -q -y remove openstack-dashboard
+
+# Install the needed packages
+yum -q install -y ipa-server ipa-server-dns epel-release rng-tools mod_nss git
+yum -q install -y haveged
+
+# Prepare hostname
+hostnamectl set-hostname --static $Hostname
+
+echo $FreeIPAIP `hostname` | tee -a /etc/hosts
+
+# Set iptables rules
+cat << EOF > freeipa-iptables-rules.txt
+# Firewall configuration written by system-config-firewall
+# Manual customization of this file is not recommended.
+*filter
+:INPUT ACCEPT [0:0]
+:FORWARD ACCEPT [0:0]
+:OUTPUT ACCEPT [0:0]
+-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
+-A INPUT -p icmp -j ACCEPT
+-A INPUT -i lo -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT
+#TCP ports for FreeIPA
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 80 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 443 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 389 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 636 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 88 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 464 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 53 -j ACCEPT
+#UDP ports for FreeIPA
+-A INPUT -m state --state NEW -m udp -p udp --dport 88 -j ACCEPT
+-A INPUT -m state --state NEW -m udp -p udp --dport 464 -j ACCEPT
+-A INPUT -m state --state NEW -m udp -p udp --dport 123 -j ACCEPT
+-A INPUT -m state --state NEW -m udp -p udp --dport 53 -j ACCEPT
+-A INPUT -j REJECT --reject-with icmp-host-prohibited
+-A FORWARD -j REJECT --reject-with icmp-host-prohibited
+COMMIT
+EOF
+
+iptables-restore < freeipa-iptables-rules.txt
+
+# Entropy generation; otherwise, ipa-server-install will lag.
+chkconfig haveged on
+systemctl start haveged
+
+# Remove conflicting httpd configuration
+rm -f /etc/httpd/conf.d/ssl.conf
+
+# Set up FreeIPA
+ipa-server-install -U -r `hostname -d|tr "[a-z]" "[A-Z]"` \
+ -p $DirectoryManagerPassword -a $AdminPassword \
+ --hostname `hostname -f`
+
+# Authenticate
+echo $AdminPassword | kinit admin
+
+# Verify we have TGT
+klist
+
+if [ "$?" = '1' ]; then
+ exit 1
+fi
+
+# Create undercloud host
+ipa host-add $UndercloudFQDN --password=$HostsSecret --force
+
+# Create overcloud nodes and services
+git clone https://github.com/JAORMX/freeipa-tripleo-incubator.git
+cd freeipa-tripleo-incubator
+python create_ipa_tripleo_host_setup.py -w $HostsSecret -d $(hostname -d) \
+ --controller-count 1 --compute-count 1
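The header of this script lists the environment variables it expects, sourced from ~/freeipa-setup.env or /tmp/freeipa-setup.env. A sketch of such a file, with placeholder values only (none of these values come from the patch):

    # /tmp/freeipa-setup.env -- illustrative values, replace before use
    Hostname=ipa.example.test
    FreeIPAIP=192.168.24.250
    DirectoryManagerPassword=ChangeMe123
    AdminPassword=ChangeMe456
    UndercloudFQDN=undercloud.example.test
    HostsSecret=RandomHostsSecret
    ProvisioningCIDR=192.168.24.250/24   # optional; adds this address to eth1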
diff --git a/default_passwords.yaml b/default_passwords.yaml
index 7a47f443..c85881e5 100644
--- a/default_passwords.yaml
+++ b/default_passwords.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: Passwords we manage at the top level
diff --git a/deployed-server/README.rst b/deployed-server/README.rst
index ce74e77b..e4d8299b 100644
--- a/deployed-server/README.rst
+++ b/deployed-server/README.rst
@@ -119,10 +119,15 @@ from the deployment command, the script should be ready to run:
[NovaCompute]: CREATE_IN_PROGRESS state changed
The user running the script must be able to ssh as root to each server. Define
-the hostnames of the deployed servers you intend to use for each role type::
-
- export controller_hosts="controller0 controller1 controller2"
- export compute_hosts="compute0"
+the names of your custom roles (if applicable) and the hostnames or IP addresses
+of the deployed servers you intend to use for each role type. For each role name, a
+corresponding <role-name>_hosts variable should also be defined, e.g.::
+
+ export OVERCLOUD_ROLES="Controller NetworkNode StorageNode Compute"
+ export Controller_hosts="10.0.0.1 10.0.0.2 10.0.0.3"
+ export NetworkNode_hosts="10.0.0.4 10.0.0.5 10.0.0.6"
+ export StorageNode_hosts="10.0.0.7 10.0.0.8"
+ export Compute_hosts="10.0.0.9 10.0.0.10 10.0.0.11"
Then run the script on the undercloud with a stackrc file sourced, and
the script will copy the needed os-collect-config.conf configuration to each
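Putting the above together, a run of the script might look like the following sketch; the role names match the script defaults added later in this patch, while the addresses are placeholders:

    source ~/stackrc
    export OVERCLOUD_ROLES="Controller Compute"
    export Controller_hosts="10.0.0.1 10.0.0.2 10.0.0.3"
    export Compute_hosts="10.0.0.9"
    ./deployed-server/scripts/get-occ-config.sh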
diff --git a/deployed-server/ctlplane-port.yaml b/deployed-server/ctlplane-port.yaml
index eb10fba0..7b5cdf11 100644
--- a/deployed-server/ctlplane-port.yaml
+++ b/deployed-server/ctlplane-port.yaml
@@ -1,8 +1,14 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
parameters:
- Hostname:
+ network:
type: string
+ default: ctlplane
+ name:
+ type: string
+ replacement_policy:
+ type: string
+ default: AUTO
resources:
@@ -13,11 +19,10 @@ resources:
name:
list_join:
- '-'
- - - {get_param: Hostname}
- - ctlplane
+ - - {get_param: name}
- port
replacement_policy: AUTO
outputs:
- ip_address:
- value: {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]}
+ fixed_ips:
+ value: {get_attr: [ControlPlanePort, fixed_ips]}
diff --git a/deployed-server/deployed-neutron-port.yaml b/deployed-server/deployed-neutron-port.yaml
new file mode 100644
index 00000000..bddf8bc1
--- /dev/null
+++ b/deployed-server/deployed-neutron-port.yaml
@@ -0,0 +1,67 @@
+heat_template_version: ocata
+
+description: "
+ A fake OS::Neutron::Port stack which outputs fixed_ips and subnets based on
+ the input from the DeployedServerPortMap (set via parameter_defaults). This
+ lookup requires the use of port naming conventions. In order for this to work
+ with deployed-server the keys should be <hostname>-<network>.
+ Example:
+ parameter_defaults:
+ DeployedServerPortMap:
+ gatsby-ctlplane:
+ fixed_ips:
+ - ip_address: 127.0.0.1
+ subnets:
+ - cidr: 24"
+
+parameters:
+ name:
+ default: ''
+ type: string
+ network:
+ default: ''
+ type: string
+ fixed_ips:
+ default: ''
+ type: comma_delimited_list
+ replacement_policy:
+ default: ''
+ type: string
+ DeployedServerPortMap:
+ default: {}
+ type: json
+
+
+outputs:
+ fixed_ips:
+ value:
+ {get_param: [DeployedServerPortMap, {get_param: name}, fixed_ips]}
+ subnets:
+ value:
+ {get_param: [DeployedServerPortMap, {get_param: name}, subnets]}
+ name:
+ value: {get_param: name}
+ status:
+ value: DOWN
+ allowed_address_pairs:
+ value: {}
+ device_id:
+ value: ''
+ device_owner:
+ value: {get_param: network}
+ dns_assignment:
+ value: ''
+ port_security_enabled:
+ value: False
+ admin_state_up:
+ value: False
+ security_groups:
+ value: {}
+ network_id:
+ value: ''
+ tenant_id:
+ value: ''
+ qos_policy_id:
+ value: ''
+ mac_address:
+ value: ''
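Given the port naming convention described in this template (<hostname>-<network>), a slightly fuller environment sketch might look as follows; the hostnames and addresses are placeholders, and the subnet entries simply mirror the inline example above:

    parameter_defaults:
      DeployedServerPortMap:
        controller0-ctlplane:
          fixed_ips:
            - ip_address: 192.168.24.9
          subnets:
            - cidr: 24
        compute0-ctlplane:
          fixed_ips:
            - ip_address: 192.168.24.10
          subnets:
            - cidr: 24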
diff --git a/deployed-server/deployed-server-bootstrap-centos.sh b/deployed-server/deployed-server-bootstrap-centos.sh
new file mode 100644
index 00000000..7266ca57
--- /dev/null
+++ b/deployed-server/deployed-server-bootstrap-centos.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -eux
+
+yum install -y \
+ jq \
+ python-ipaddr \
+ openstack-puppet-modules \
+ os-net-config \
+ openvswitch \
+ python-heat-agent*
+
+ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
+
+setenforce 0
+sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
diff --git a/deployed-server/deployed-server-bootstrap-centos.yaml b/deployed-server/deployed-server-bootstrap-centos.yaml
new file mode 100644
index 00000000..c1740d78
--- /dev/null
+++ b/deployed-server/deployed-server-bootstrap-centos.yaml
@@ -0,0 +1,22 @@
+heat_template_version: ocata
+
+description: 'Deployed Server Bootstrap Config'
+
+parameters:
+
+ server:
+ type: string
+
+resources:
+
+ DeployedServerBootstrapConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: deployed-server-bootstrap-centos.sh}
+
+ DeployedServerBootstrapDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ config: {get_resource: DeployedServerBootstrapConfig}
+ server: {get_param: server}
diff --git a/deployed-server/deployed-server-bootstrap-rhel.sh b/deployed-server/deployed-server-bootstrap-rhel.sh
new file mode 100644
index 00000000..36ff0077
--- /dev/null
+++ b/deployed-server/deployed-server-bootstrap-rhel.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -eux
+
+yum install -y \
+ jq \
+ python-ipaddr \
+ openstack-puppet-modules \
+ os-net-config \
+ openvswitch \
+ python-heat-agent*
+
+ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
diff --git a/deployed-server/deployed-server-bootstrap-rhel.yaml b/deployed-server/deployed-server-bootstrap-rhel.yaml
new file mode 100644
index 00000000..2d2f5156
--- /dev/null
+++ b/deployed-server/deployed-server-bootstrap-rhel.yaml
@@ -0,0 +1,22 @@
+heat_template_version: ocata
+
+description: 'Deployed Server Bootstrap Config'
+
+parameters:
+
+ server:
+ type: string
+
+resources:
+
+ DeployedServerBootstrapConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: deployed-server-bootstrap-rhel.sh}
+
+ DeployedServerBootstrapDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ config: {get_resource: DeployedServerBootstrapConfig}
+ server: {get_param: server}
diff --git a/deployed-server/deployed-server-config.yaml b/deployed-server/deployed-server-config.yaml
deleted file mode 100644
index 8c59dc72..00000000
--- a/deployed-server/deployed-server-config.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-heat_template_version: 2014-10-16
-parameters:
- user_data_format:
- type: string
- default: SOFTWARE_CONFIG
-
-resources:
- # We just need something which returns a unique ID, but we can't
- # use RandomString because RefId returns the value, not the physical
- # resource ID, SoftwareConfig should work as it returns a UUID
- deployed-server-config:
- type: OS::Heat::SoftwareConfig
-
-outputs:
- # FIXME(shardy) this is needed because TemplateResource returns an
- # ARN not a UUID, which overflows the Deployment server_id column..
- user_data_format:
- value: SOFTWARE_CONFIG
- OS::stack_id:
- value: {get_resource: deployed-server-config}
-
-
diff --git a/deployed-server/deployed-server-roles-data.yaml b/deployed-server/deployed-server-roles-data.yaml
new file mode 100644
index 00000000..9795a00f
--- /dev/null
+++ b/deployed-server/deployed-server-roles-data.yaml
@@ -0,0 +1,172 @@
+# Specifies which roles (groups of nodes) will be deployed
+# Note this is used as an input to the various *.j2.yaml
+# jinja2 templates, so that they are converted into *.yaml
+# during the plan creation (via a mistral action/workflow).
+#
+# The format is a list, with the following format:
+#
+# * name: (string) mandatory, name of the role, must be unique
+#
+# CountDefault: (number) optional, default number of nodes, defaults to 0
+# sets the default for the {{role.name}}Count parameter in overcloud.yaml
+#
+# HostnameFormatDefault: (string) optional default format string for hostname
+# defaults to '%stackname%-{{role.name.lower()}}-%index%'
+# sets the default for {{role.name}}HostnameFormat parameter in overcloud.yaml
+#
+# disable_constraints: (boolean) optional, whether to disable Nova and Glance
+# constraints for each role specified in the templates.
+#
+# ServicesDefault: (list) optional default list of services to be deployed
+# on the role, defaults to an empty list. Sets the default for the
+# {{role.name}}Services parameter in overcloud.yaml
+
+- name: ControllerDeployedServer
+ CountDefault: 1
+ disable_constraints: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::CephRgw
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderBackup
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronApi
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::Redis
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::MongoDb
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::NovaConsoleauth
+ - OS::TripleO::Services::NovaVncProxy
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::CeilometerApi
+ - OS::TripleO::Services::CeilometerCollector
+ - OS::TripleO::Services::CeilometerExpirer
+ - OS::TripleO::Services::CeilometerAgentCentral
+ - OS::TripleO::Services::CeilometerAgentNotification
+ - OS::TripleO::Services::Horizon
+ - OS::TripleO::Services::GnocchiApi
+ - OS::TripleO::Services::GnocchiMetricd
+ - OS::TripleO::Services::GnocchiStatsd
+ - OS::TripleO::Services::ManilaApi
+ - OS::TripleO::Services::ManilaScheduler
+ - OS::TripleO::Services::ManilaBackendGeneric
+ - OS::TripleO::Services::ManilaBackendNetapp
+ - OS::TripleO::Services::ManilaBackendCephFs
+ - OS::TripleO::Services::ManilaShare
+ - OS::TripleO::Services::AodhApi
+ - OS::TripleO::Services::AodhEvaluator
+ - OS::TripleO::Services::AodhNotifier
+ - OS::TripleO::Services::AodhListener
+ - OS::TripleO::Services::SaharaApi
+ - OS::TripleO::Services::SaharaEngine
+ - OS::TripleO::Services::IronicApi
+ - OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::NovaIronic
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::OpenDaylightApi
+ - OS::TripleO::Services::OpenDaylightOvs
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::BarbicanApi
+ - OS::TripleO::Services::PankoApi
+ - OS::TripleO::Services::Zaqar
+ - OS::TripleO::Services::OVNDBs
+
+- name: ComputeDeployedServer
+ CountDefault: 1
+ HostnameFormatDefault: '%stackname%-novacompute-%index%'
+ disable_constraints: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephClient
+ - OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::ComputeNeutronCorePlugin
+ - OS::TripleO::Services::ComputeNeutronOvsAgent
+ - OS::TripleO::Services::ComputeCeilometerAgent
+ - OS::TripleO::Services::ComputeNeutronL3Agent
+ - OS::TripleO::Services::ComputeNeutronMetadataAgent
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::NeutronSriovAgent
+ - OS::TripleO::Services::OpenDaylightOvs
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+
+- name: BlockStorageDeployedServer
+ disable_constraints: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::BlockStorageCinderVolume
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+
+- name: ObjectStorageDeployedServer
+ disable_constraints: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+
+- name: CephStorageDeployedServer
+ disable_constraints: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
diff --git a/deployed-server/deployed-server.yaml b/deployed-server/deployed-server.yaml
index da5698e5..1e8afb25 100644
--- a/deployed-server/deployed-server.yaml
+++ b/deployed-server/deployed-server.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
parameters:
image:
type: string
@@ -21,7 +21,7 @@ parameters:
default: ''
name:
type: string
- default: ''
+ default: 'deployed-server'
image_update_policy:
type: string
default: ''
@@ -38,28 +38,52 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
- # We just need something which returns a unique ID, but we can't
- # use RandomString because RefId returns the value, not the physical
- # resource ID, SoftwareConfig should work as it returns a UUID
deployed-server:
- type: OS::TripleO::DeployedServerConfig
+ type: OS::Heat::DeployedServer
+ properties:
+ name: {get_param: name}
+ software_config_transport: {get_param: software_config_transport}
+
+ UpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ UpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
properties:
- user_data_format: SOFTWARE_CONFIG
+ name: UpgradeInitDeployment
+ server: {get_resource: deployed-server}
+ config: {get_resource: UpgradeInitConfig}
+
InstanceIdConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: apply-config
config:
- instance-id: {get_attr: [deployed-server, "OS::stack_id"]}
+ instance-id: {get_resource: deployed-server}
InstanceIdDeployment:
type: OS::Heat::StructuredDeployment
properties:
config: {get_resource: InstanceIdConfig}
server: {get_resource: deployed-server}
+ depends_on: UpgradeInitDeployment
HostsEntryConfig:
type: OS::Heat::SoftwareConfig
@@ -69,21 +93,10 @@ resources:
#!/bin/bash
set -eux
mkdir -p $heat_outputs_path
- host=$(hostnamectl --static)
- echo -n "$host " > $heat_outputs_path.hosts_entry
- host_ip=$(python -c "import socket; print socket.gethostbyname(\"$host\")")
- echo -n "$host_ip " >> $heat_outputs_path.hosts_entry
- echo >> $heat_outputs_path.hosts_entry
- cat $heat_outputs_path.hosts_entry
- echo -n $host_ip > $heat_outputs_path.ip_address
- cat $heat_outputs_path.ip_address
+ host=$(hostname -s)
echo -n $host > $heat_outputs_path.hostname
cat $heat_outputs_path.hostname
outputs:
- - name: hosts_entry
- description: hosts_entry
- - name: ip_address
- description: ip_address
- name: hostname
description: hostname
@@ -93,23 +106,28 @@ resources:
config: {get_resource: HostsEntryConfig}
server: {get_resource: deployed-server}
+ DeployedServerBootstrapConfig:
+ type: OS::TripleO::DeployedServer::Bootstrap
+ properties:
+ server: {get_resource: deployed-server}
+
ControlPlanePort:
type: OS::TripleO::DeployedServer::ControlPlanePort
properties:
- Hostname: {get_attr: [HostsEntryDeployment, hostname]}
+ network: ctlplane
+ name:
+ list_join:
+ - '-'
+ - - {get_attr: [HostsEntryDeployment, hostname]}
+ - ctlplane
+ replacement_policy: AUTO
outputs:
- # FIXME(shardy) this is needed because TemplateResource returns an
- # ARN not a UUID, which overflows the Deployment server_id column..
OS::stack_id:
- value: {get_attr: [deployed-server, "OS::stack_id"]}
+ value: {get_resource: deployed-server}
networks:
value:
ctlplane:
- - {get_attr: [ControlPlanePort, ip_address]}
+ - {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]}
name:
- value: {get_attr: [HostsEntryDeployment, hostname]}
- hosts_entry:
- value: {get_attr: [HostsEntryDeployment, hosts_entry]}
- ip_address:
- value: {get_attr: [HostsEntryDeployment, ip_address]}
+ value: {get_attr: [HostsEntryDeployment, hostname]}
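The new UpgradeInitCommand parameter accepts a script snippet that runs on every node before the upgrade process starts, e.g. a repository switch. A sketch of how it could be set via parameter_defaults; the repository names are placeholders, not taken from this patch:

    parameter_defaults:
      UpgradeInitCommand: |
        set -e
        # Illustrative repository switch; adjust to the actual repositories in use.
        yum-config-manager --disable 'openstack-old-*'
        yum-config-manager --enable 'openstack-new-*'
        yum clean all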
diff --git a/deployed-server/scripts/get-occ-config.sh b/deployed-server/scripts/get-occ-config.sh
index 2c01174e..6c196f97 100755
--- a/deployed-server/scripts/get-occ-config.sh
+++ b/deployed-server/scripts/get-occ-config.sh
@@ -11,28 +11,36 @@ OBJECTSTORAGE_HOSTS=${OBJECTSTORAGE_HOSTS:-""}
CEPHSTORAGE_HOSTS=${CEPHSTORAGE_HOSTS:-""}
SUBNODES_SSH_KEY=${SUBNODES_SSH_KEY:-"~/.ssh/id_rsa"}
SSH_OPTIONS="-tt -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=Verbose -o PasswordAuthentication=no -o ConnectionAttempts=32"
+OVERCLOUD_ROLES=${OVERCLOUD_ROLES:-"Controller Compute BlockStorage ObjectStorage CephStorage"}
+
+# Set the _hosts vars for the default roles based on the old var names that
+# were all caps for backwards compatibility.
+Controller_hosts=${Controller_hosts:-"$CONTROLLER_HOSTS"}
+Compute_hosts=${Compute_hosts:-"$COMPUTE_HOSTS"}
+BlockStorage_hosts=${BlockStorage_hosts:-"$BLOCKSTORAGE_HOSTS"}
+ObjectStorage_hosts=${ObjectStorage_hosts:-"$OBJECTSTORAGE_HOSTS"}
+CephStorage_hosts=${CephStorage_hosts:-"$CEPHSTORAGE_HOSTS"}
+
+# Set the _hosts_a vars for each role defined
+for role in $OVERCLOUD_ROLES; do
+ eval hosts=\${${role}_hosts}
+ read -a ${role}_hosts_a <<< $hosts
+done
-read -a Controller_hosts_a <<< $CONTROLLER_HOSTS
-read -a Compute_hosts_a <<< $COMPUTE_HOSTS
-read -a BlockStorage_hosts_a <<< $BLOCKSTORAGE_HOSTS
-read -a ObjectStorage_hosts_a <<< $OBJECTSTORAGE_HOSTS
-read -a CephStorage_hosts_a <<< $CEPHSTORAGE_HOSTS
-
-roles="Controller Compute BlockStorage ObjectStorage CephStorage"
admin_user_id=$(openstack user show admin -c id -f value)
admin_project_id=$(openstack project show admin -c id -f value)
function check_stack {
- local stack_to_check=$1
+ local stack_to_check=${1:-""}
- if [ "$stack_to_check" = "|" ]; then
+ if [ "$stack_to_check" = "" ]; then
echo Stack not created
return 1
fi
echo Checking if $1 stack is created
set +e
- heat resource-list $stack_to_check
+ openstack stack resource list $stack_to_check
rc=$?
set -e
@@ -44,18 +52,18 @@ function check_stack {
}
-for role in $roles; do
+for role in $OVERCLOUD_ROLES; do
while ! check_stack overcloud; do
sleep $SLEEP_TIME
done
- rg_stack=$(heat resource-list overcloud | grep " $role " | awk '{print $4}')
+ rg_stack=$(openstack stack resource show overcloud $role -c physical_resource_id -f value)
while ! check_stack $rg_stack; do
sleep $SLEEP_TIME
- rg_stack=$(heat resource-list overcloud | grep " $role " | awk '{print $4}')
+ rg_stack=$(openstack stack resource show overcloud $role -c physical_resource_id -f value)
done
- stacks=$(heat resource-list $rg_stack | grep OS::TripleO::$role | awk '{print $4}')
+ stacks=$(openstack stack resource list $rg_stack -c physical_resource_id -f value)
i=0
@@ -65,30 +73,32 @@ for role in $roles; do
server_resource_name="NovaCompute"
fi
- server_stack=$(heat resource-list $stack | grep " $server_resource_name " | awk '{print $4}')
+ server_stack=$(openstack stack resource show $stack $server_resource_name -c physical_resource_id -f value)
while ! check_stack $server_stack; do
sleep $SLEEP_TIME
- server_stack=$(heat resource-list $stack | grep " $server_resource_name " | awk '{print $4}')
+ server_stack=$(openstack stack resource show $stack $server_resource_name -c physical_resource_id -f value)
done
- deployed_server_stack=$(heat resource-list $server_stack | grep "deployed-server" | awk '{print $4}')
+ while true; do
+ deployed_server_metadata_url=$(openstack stack resource metadata $server_stack deployed-server | jq -r '.["os-collect-config"].request.metadata_url')
+ if [ "$deployed_server_metadata_url" = "null" ]; then
+ continue
+ else
+ break
+ fi
+ done
echo "======================"
echo "$role$i os-collect-config.conf configuration:"
config="
[DEFAULT]
-collectors=heat
+collectors=request
command=os-refresh-config
polling_interval=30
-[heat]
-user_id=$admin_user_id
-password=$OS_PASSWORD
-auth_url=$OS_AUTH_URL
-project_id=$admin_project_id
-stack_id=$deployed_server_stack
-resource_name=deployed-server-config"
+[request]
+metadata_url=$deployed_server_metadata_url"
echo "$config"
echo "======================"
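The config block printed by the script is meant to become the os-collect-config configuration on each deployed server. Applying it by hand would look roughly like the sketch below; the /etc/os-collect-config.conf path and the restart command are assumptions, not part of this patch:

    cat > /etc/os-collect-config.conf <<'EOF'
    [DEFAULT]
    collectors=request
    command=os-refresh-config
    polling_interval=30
    [request]
    metadata_url=<metadata_url value printed by the script>
    EOF
    systemctl restart os-collect-config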
diff --git a/docker/README-containers.md b/docker/README-containers.md
index ff062a93..5a9f6f3c 100644
--- a/docker/README-containers.md
+++ b/docker/README-containers.md
@@ -20,6 +20,9 @@ glance image-create --name atomic-image --file Fedora-Cloud-Atomic-22-20150521.x
You can use the tripleo.sh script up until the point of running the Overcloud.
https://github.com/openstack/tripleo-common/blob/master/scripts/tripleo.sh
+You will want to set up the runtime puppet script delivery system described here:
+http://hardysteven.blogspot.ca/2016/08/tripleo-deploy-artifacts-and-puppet.html
+
Create the Overcloud:
```
$ openstack overcloud deploy --templates=tripleo-heat-templates -e tripleo-heat-templates/environments/docker.yaml -e tripleo-heat-templates/environments/docker-network.yaml --libvirt-type=qemu
diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml
deleted file mode 100644
index 60b831be..00000000
--- a/docker/compute-post.yaml
+++ /dev/null
@@ -1,349 +0,0 @@
-heat_template_version: 2015-10-15
-description: >
- OpenStack compute node post deployment for Docker.
-
-parameters:
- servers:
- type: json
- DeployIdentifier:
- type: string
- description: Value which changes if the node configuration may need to be re-applied
- DockerNamespace:
- type: string
- default: tripleoupstream
- DockerComputeImage:
- type: string
- DockerComputeDataImage:
- type: string
- DockerLibvirtImage:
- type: string
- DockerOpenvswitchImage:
- type: string
- DockerOvsVswitchdImage:
- type: string
- DockerOpenvswitchDBImage:
- type: string
- LibvirtConfig:
- type: string
- default: "/etc/libvirt/libvirtd.conf"
- NovaConfig:
- type: string
- default: "/etc/nova/nova.conf,/etc/nova/rootwrap.conf"
- NeutronOpenvswitchAgentConfig:
- type: string
- default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
- NeutronOpenvswitchAgentPluginVolume:
- type: string
- default: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/ovs_neutron_plugin.ini:ro"
- NeutronOpenvswitchAgentOvsVolume:
- type: string
- default: " "
- StepConfig:
- type: string
- description: Config manifests that will be used to step through the deployment.
- default: ''
- RoleData:
- type: json
- default: {}
-
-
-resources:
-
- ComputePuppetConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- options:
- enable_hiera: True
- enable_facter: False
- tags: package,file,concat,file_line,nova_config,neutron_config,neutron_agent_ovs,neutron_plugin_ml2
- inputs:
- - name: tripleo::packages::enable_install
- type: Boolean
- default: True
- outputs:
- - name: result
- config:
- list_join:
- - ''
- - - get_file: ../puppet/manifests/overcloud_compute.pp
- - {get_param: StepConfig}
-
-
- ComputePuppetDeployment:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: ComputePuppetDeployment
- servers: {get_param: servers}
- config: {get_resource: ComputePuppetConfig}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
- tripleo::packages::enable_install: True
-
- CopyEtcConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- outputs:
- - name: result
- config: {get_file: ./copy-etc.sh}
-
- CopyEtcDeployment:
- type: OS::Heat::SoftwareDeployments
- depends_on: ComputePuppetDeployment
- properties:
- name: CopyEtcDeployment
- config: {get_resource: CopyEtcConfig}
- servers: {get_param: servers}
-
- CopyJsonConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- inputs:
- - name: libvirt_config
- - name: nova_config
- - name: neutron_openvswitch_agent_config
- config: |
- #!/bin/python
- import json
- import os
-
- data = {}
- file_perms = '600'
- libvirt_perms = '644'
-
- libvirt_config = os.getenv('libvirt_config').split(',')
- nova_config = os.getenv('nova_config').split(',')
- neutron_openvswitch_agent_config = os.getenv('neutron_openvswitch_agent_config').split(',')
-
- # Command, Config_files, Owner, Perms
- services = {'nova-libvirt': ['/usr/sbin/libvirtd', libvirt_config, 'root', libvirt_perms],
- 'nova-compute': ['/usr/bin/nova-compute', nova_config, 'nova', file_perms],
- 'neutron-openvswitch-agent': ['/usr/bin/neutron-openvswitch-agent', neutron_openvswitch_agent_config, 'neutron', file_perms],
- 'ovs-vswitchd': ['/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/openvswitch/ovs-vswitchd.log'],
- 'ovsdb-server': ['/usr/sbin/ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --log-file=/var/log/openvswitch/ovsdb-server.log']
- }
-
-
- def build_config_files(config, owner, perms):
- config_source = '/var/lib/kolla/config_files/'
- config_files_dict = {}
- source = os.path.basename(config)
- dest = config
- config_files_dict.update({'source': config_source + source,
- 'dest': dest,
- 'owner': owner,
- 'perm': perms})
- return config_files_dict
-
-
- for service in services:
- if service != 'ovs-vswitchd' and service != 'ovsdb-server':
- command = services.get(service)[0]
- config_files = services.get(service)[1]
- owner = services.get(service)[2]
- perms = services.get(service)[3]
- config_files_list = []
- for config_file in config_files:
- if service == 'nova-libvirt':
- command = command + ' --config ' + config_file
- else:
- command = command + ' --config-file ' + config_file
- data['command'] = command
- config_files_dict = build_config_files(config_file, owner, perms)
- config_files_list.append(config_files_dict)
- data['config_files'] = config_files_list
- else:
- data['command'] = services.get(service)[0]
- data['config_files'] = []
-
- json_config_dir = '/var/lib/etc-data/json-config/'
- with open(json_config_dir + service + '.json', 'w') as json_file:
- json.dump(data, json_file, sort_keys=True, indent=4, separators=(',', ': '))
-
- CopyJsonDeployment:
- type: OS::Heat::SoftwareDeployments
- depends_on: CopyEtcDeployment
- properties:
- name: CopyJsonDeployment
- config: {get_resource: CopyJsonConfig}
- servers: {get_param: servers}
- input_values:
- libvirt_config: {get_param: LibvirtConfig}
- nova_config: {get_param: NovaConfig}
- neutron_openvswitch_agent_config: {get_param: NeutronOpenvswitchAgentConfig}
-
- NovaComputeContainersDeploymentOVS:
- type: OS::Heat::StructuredDeployments
- depends_on: CopyJsonDeployment
- properties:
- name: NovaComputeContainersDeploymentOVS
- config: {get_resource: NovaComputeContainersConfigOVS}
- servers: {get_param: servers}
-
- NovaComputeContainersConfigOVS:
- type: OS::Heat::StructuredConfig
- properties:
- group: docker-cmd
- config:
- ovsvswitchd:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerOvsVswitchdImage} ]
- net: host
- privileged: true
- restart: always
- volumes:
- - /run:/run
- - /lib/modules:/lib/modules:ro
- - /var/lib/etc-data/json-config/ovs-vswitchd.json:/var/lib/kolla/config_files/config.json
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
-
- openvswitchdb:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchDBImage} ]
- net: host
- restart: always
- volumes:
- - /run:/run
- - /var/lib/etc-data/json-config/ovsdb-server.json:/var/lib/kolla/config_files/config.json
- - /etc/openvswitchd:/etc/openvswitchd
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
-
- NovaComputeContainersDeploymentNetconfig:
- type: OS::Heat::SoftwareDeployments
- depends_on: NovaComputeContainersDeploymentOVS
- properties:
- name: NovaComputeContainersDeploymentNetconfig
- config: {get_resource: NovaComputeContainersConfigNetconfig}
- servers: {get_param: servers}
-
- # We run os-net-config here because we depend on the ovs containers to be up
- # and running before we configure the network. This allows explicit timing
- # of the network configuration.
- NovaComputeContainersConfigNetconfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- outputs:
- - name: result
- config: |
- #!/bin/bash
- /usr/local/bin/run-os-net-config
-
- LibvirtContainersDeployment:
- type: OS::Heat::StructuredDeployments
- depends_on: [CopyJsonDeployment, CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig]
- properties:
- name: LibvirtContainersDeployment
- config: {get_resource: LibvirtContainersConfig}
- servers: {get_param: servers}
-
- LibvirtContainersConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: docker-cmd
- config:
- computedata:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerComputeDataImage} ]
- container_name: computedata
- volumes:
- - /var/lib/nova/instances
- - /var/lib/libvirt
-
- libvirt:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
- net: host
- pid: host
- privileged: true
- restart: always
- volumes:
- - /run:/run
- - /lib/modules:/lib/modules:ro
- - /dev:/dev
- - /lib/udev:/lib/udev
- - /sys/fs/cgroup:/sys/fs/cgroup
- - /var/lib/etc-data/json-config/nova-libvirt.json:/var/lib/kolla/config_files/config.json
- - /var/lib/etc-data/libvirt/libvirtd.conf:/var/lib/kolla/config_files/libvirtd.conf
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- volumes_from:
- - computedata
-
- NovaComputeContainersDeployment:
- type: OS::Heat::StructuredDeployments
- depends_on: [CopyJsonDeployment, CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig, LibvirtContainersDeployment]
- properties:
- name: NovaComputeContainersDeployment
- config: {get_resource: NovaComputeContainersConfig}
- servers: {get_param: servers}
-
- NovaComputeContainersConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: docker-cmd
- config:
- neutronovsagent:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
- net: host
- pid: host
- privileged: true
- restart: always
- volumes:
- str_split:
- - ","
- - list_join:
- - ","
- - [ "/run:/run", "/lib/modules:/lib/modules:ro",
- "/var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json",
- "/var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro",
- "/var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro",
- {get_param: NeutronOpenvswitchAgentPluginVolume},
- {get_param: NeutronOpenvswitchAgentOvsVolume} ]
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- volumes_from:
- - computedata
-
- novacompute:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerComputeImage} ]
- net: host
- privileged: true
- restart: always
- volumes:
- - /run:/run
- - /lib/modules:/lib/modules:ro
- - /dev:/dev
- - /lib/udev:/lib/udev
- - /etc/iscsi:/etc/iscsi
- - /var/lib/etc-data/json-config/nova-compute.json:/var/lib/kolla/config_files/config.json
- - /var/lib/etc-data/nova/nova.conf:/var/lib/kolla/config_files/nova.conf:ro
- - /var/lib/etc-data/nova/rootwrap.conf:/var/lib/kolla/config_files/rootwrap.conf:ro
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- volumes_from:
- - computedata
-
- ExtraConfig:
- depends_on: NovaComputeContainersDeployment
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: servers}
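For reference, the removed copy-json logic above wrote one kolla config file per service into /var/lib/etc-data/json-config/. Assuming NovaConfig listed /etc/nova/nova.conf and /etc/nova/rootwrap.conf and that file_perms resolved to '0600' (both assumptions, consistent with the new service templates later in this change), the generated nova-compute.json was equivalent to the following sketch:

    # Sketch of /var/lib/etc-data/json-config/nova-compute.json, shown in YAML
    # form; the config file list and perms are assumptions, not a verbatim copy.
    command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
    config_files:
      - source: /var/lib/kolla/config_files/nova.conf
        dest: /etc/nova/nova.conf
        owner: nova
        perm: '0600'
      - source: /var/lib/kolla/config_files/rootwrap.conf
        dest: /etc/nova/rootwrap.conf
        owner: nova
        perm: '0600'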
diff --git a/docker/firstboot/install_docker_agents.yaml b/docker/firstboot/install_docker_agents.yaml
index f6d61e2d..41a87406 100644
--- a/docker/firstboot/install_docker_agents.yaml
+++ b/docker/firstboot/install_docker_agents.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
parameters:
DockerAgentImage:
diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh
index acb44ce5..1c5cc18d 100644..100755
--- a/docker/firstboot/start_docker_agents.sh
+++ b/docker/firstboot/start_docker_agents.sh
@@ -1,72 +1,56 @@
#!/bin/bash
set -eux
-/sbin/setenforce 0
-/sbin/modprobe ebtables
-
-# CentOS sets ptmx to 000. Without it being 666, we can't use Cinder volumes
-chmod 666 /dev/pts/ptmx
-
-# We need hostname -f to return in a centos container for the puppet hook
-HOSTNAME=$(hostname)
-echo "127.0.0.1 $HOSTNAME.localdomain $HOSTNAME" >> /etc/hosts
-
-# update docker for local insecure registry(optional)
-# Note: This is different for different docker versions
-# For older docker versions < 1.4.x use commented line
-#echo "OPTIONS='--insecure-registry $docker_registry'" >> /etc/sysconfig/docker
-#echo "ADD_REGISTRY='--registry-mirror $docker_registry'" >> /etc/sysconfig/docker
+# TODO remove this when built image includes docker
+if [ ! -f "/usr/bin/docker" ]; then
+ yum -y install docker
+fi
# Local docker registry 1.8
# NOTE(mandre) $docker_namespace_is_registry is not a bash variable but is
# a place holder for text replacement done via heat
-if [ "$docker_namespace_is_registry" = True ]; then
+if [ "$docker_namespace_is_registry" = "True" ]; then
/usr/bin/systemctl stop docker.service
# if namespace is used with local registry, trim all namespacing
trim_var=$docker_registry
registry_host="${trim_var%%/*}"
/bin/sed -i -r "s/^[# ]*INSECURE_REGISTRY *=.+$/INSECURE_REGISTRY='--insecure-registry $registry_host'/" /etc/sysconfig/docker
- /usr/bin/systemctl start --no-block docker.service
fi
-/usr/bin/docker pull $agent_image &
-DOCKER_PULL_PID=$!
-
mkdir -p /var/lib/etc-data/json-config #FIXME: this should be a docker data container
# NOTE(flaper87): Heat Agent required mounts
-AGENT_COMMAND_MOUNTS="-v /var/lib/etc-data:/var/lib/etc-data \
- -v /run:/run \
- -v /etc:/host/etc \
- -v /usr/bin/atomic:/usr/bin/atomic \
- -v /var/lib/dhclient:/var/lib/dhclient \
- -v /var/lib/cloud:/var/lib/cloud \
- -v /var/lib/heat-cfntools:/var/lib/heat-cfntools \
- -v /etc/sysconfig/docker:/etc/sysconfig/docker \
- -v /usr/lib64/libseccomp.so.2:/usr/lib64/libseccomp.so.2"
-
-
-# NOTE(flaper87): Some of these commands may not be present depending on the
-# atomic version.
-for docker_cmd in docker docker-current docker-latest; do
- if [ -f "/usr/bin/$docker_cmd" ]; then
- AGENT_COMMAND_MOUNTS+=" -v /usr/bin/$docker_cmd:/usr/bin/$docker_cmd"
- fi
-done
+AGENT_COMMAND_MOUNTS="\
+-v /var/lib/etc-data:/var/lib/etc-data \
+-v /run:/run \
+-v /etc/hosts:/etc/hosts \
+-v /etc:/host/etc \
+-v /var/lib/dhclient:/var/lib/dhclient \
+-v /var/lib/cloud:/var/lib/cloud \
+-v /var/lib/heat-cfntools:/var/lib/heat-cfntools \
+-v /var/lib/os-collect-config:/var/lib/os-collect-config \
+-v /var/lib/os-apply-config-deployments:/var/lib/os-apply-config-deployments \
+-v /var/lib/heat-config:/var/lib/heat-config \
+-v /etc/sysconfig/docker:/etc/sysconfig/docker \
+-v /etc/sysconfig/network-scripts:/etc/sysconfig/network-scripts \
+-v /usr/lib64/libseccomp.so.2:/usr/lib64/libseccomp.so.2 \
+-v /usr/bin/docker:/usr/bin/docker \
+-v /usr/bin/docker-current:/usr/bin/docker-current \
+-v /var/lib/os-collect-config:/var/lib/os-collect-config"
# heat-docker-agents service
cat <<EOF > /etc/systemd/system/heat-docker-agents.service
-
[Unit]
Description=Heat Docker Agent Container
After=docker.service
Requires=docker.service
+Before=os-collect-config.service
+Conflicts=os-collect-config.service
[Service]
User=root
-Restart=on-failure
-ExecStartPre=-/usr/bin/docker kill heat-agents
-ExecStartPre=-/usr/bin/docker rm heat-agents
+Restart=always
+ExecStartPre=-/usr/bin/docker rm -f heat-agents
ExecStart=/usr/bin/docker run --name heat-agents --privileged --net=host \
$AGENT_COMMAND_MOUNTS \
--entrypoint=/usr/bin/os-collect-config $agent_image
@@ -74,35 +58,12 @@ ExecStop=/usr/bin/docker stop heat-agents
[Install]
WantedBy=multi-user.target
-
EOF
# enable and start heat-docker-agents
-chmod 0640 /etc/systemd/system/heat-docker-agents.service
/usr/bin/systemctl enable heat-docker-agents.service
/usr/bin/systemctl start --no-block heat-docker-agents.service
-# Disable NetworkManager and let the ifup/down scripts work properly.
-/usr/bin/systemctl disable NetworkManager
-/usr/bin/systemctl stop NetworkManager
-
-# Atomic's root partition & logical volume defaults to 3G. In order to launch
-# larger VMs, we need to enlarge the root logical volume and scale down the
-# docker_pool logical volume. We are allocating 80% of the disk space for
-# vm data and the remaining 20% for docker images.
-ATOMIC_ROOT='/dev/mapper/atomicos-root'
-ROOT_DEVICE=`pvs -o vg_name,pv_name --no-headings | grep atomicos | awk '{ print $2}'`
-
-growpart $( echo "${ROOT_DEVICE}" | sed -r 's/([^0-9]*)([0-9]+)/\1 \2/' )
-pvresize "${ROOT_DEVICE}"
-lvresize -l +80%FREE "${ATOMIC_ROOT}"
-xfs_growfs "${ATOMIC_ROOT}"
-
-cat <<EOF > /etc/sysconfig/docker-storage-setup
-GROWPART=true
-AUTO_EXTEND_POOL=yes
-POOL_AUTOEXTEND_PERCENT=30
-POOL_AUTOEXTEND_THRESHOLD=70
-EOF
-
-wait $DOCKER_PULL_PID
+# Disable libvirtd
+/usr/bin/systemctl disable libvirtd.service
+/usr/bin/systemctl stop libvirtd.service
diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml
new file mode 100644
index 00000000..865c74e5
--- /dev/null
+++ b/docker/post.j2.yaml
@@ -0,0 +1,207 @@
+heat_template_version: ocata
+
+description: >
+ Post-deploy configuration steps via puppet for all roles,
+ as defined in ../roles_data.yaml
+
+parameters:
+ servers:
+ type: json
+ description: Mapping of Role name, e.g. Controller, to a list of servers
+
+ role_data:
+ type: json
+ description: Mapping of Role name, e.g. Controller, to the per-role data
+
+ DeployIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting this to a unique value will re-run any deployment tasks which
+ perform configuration on a Heat stack-update.
+
+resources:
+
+{% for role in roles %}
+ # Post deployment steps for all roles
+ # A single config is re-applied with an incrementing step number
+ # {{role.name}} Role steps
+ {{role.name}}ArtifactsConfig:
+ type: ../puppet/deploy-artifacts.yaml
+
+ {{role.name}}ArtifactsDeploy:
+ type: OS::Heat::StructuredDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ArtifactsConfig}
+
+ {{role.name}}PreConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PreConfig
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Config:
+ type: OS::TripleO::{{role.name}}Config
+ properties:
+ StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
+ {% if role.name.lower() == 'compute' %}
+ PuppetTags: {get_param: [role_data, {{role.name}}, puppet_tags]}
+ {% endif %}
+
+ # Step through a series of configuration steps
+ {{role.name}}Deployment_Step1:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ properties:
+ name: {{role.name}}Deployment_Step1
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 1
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Deployment_Step2:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step1
+ {% endfor %}
+ properties:
+ name: {{role.name}}Deployment_Step2
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 2
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Deployment_Step3:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step2
+ {% endfor %}
+ properties:
+ name: {{role.name}}Deployment_Step3
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 3
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Deployment_Step4:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step3
+ {% endfor %}
+ properties:
+ name: {{role.name}}Deployment_Step4
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 4
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Deployment_Step5:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step4
+ {% endfor %}
+ properties:
+ name: {{role.name}}Deployment_Step5
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 5
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}PostConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PostConfig
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ {% endfor %}
+ properties:
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ # Note, this should come last, so use depends_on to ensure
+ # this is created after any other resources.
+ {{role.name}}ExtraConfigPost:
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}PostConfig
+ {% endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+
+ {% if role.name.lower() == 'compute' %}
+ CopyEtcConfig:
+ type: OS::Heat::SoftwareConfig
+ depends_on: {{role.name}}PostConfig
+ properties:
+ group: script
+ outputs:
+ - name: result
+ config: {get_file: ../docker/copy-etc.sh}
+
+ CopyEtcDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ name: CopyEtcDeployment
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: CopyEtcConfig}
+
+ {{role.name}}KollaJsonConfig:
+ type: OS::Heat::StructuredConfig
+ depends_on: CopyEtcDeployment
+ properties:
+ group: json-file
+ config:
+ {get_param: [role_data, {{role.name}}, kolla_config]}
+
+ {{role.name}}KollaJsonDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ name: {{role.name}}KollaJsonDeployment
+ config: {get_resource: {{role.name}}KollaJsonConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+
+ {{role.name}}ContainersConfig_Step1:
+ type: OS::Heat::StructuredConfig
+ depends_on: {{role.name}}KollaJsonDeployment
+ properties:
+ group: docker-cmd
+ config:
+ {get_param: [role_data, {{role.name}}, docker_config, step_1]}
+
+ {{role.name}}ContainersConfig_Step2:
+ type: OS::Heat::StructuredConfig
+ depends_on: {{role.name}}KollaJsonDeployment
+ properties:
+ group: docker-cmd
+ config:
+ {get_param: [role_data, {{role.name}}, docker_config, step_2]}
+
+ {{role.name}}ContainersDeployment_Step1:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ properties:
+ name: {{role.name}}ContainersDeployment_Step1
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ContainersConfig_Step1}
+
+ {{role.name}}ContainersDeployment_Step2:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on: {{role.name}}ContainersDeployment_Step1
+ properties:
+ name: {{role.name}}ContainersDeployment_Step2
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ContainersConfig_Step2}
+ {% endif %}
+{% endfor %}
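Each {{role.name}}Deployment_StepN above depends on step N-1 of every role, so configuration steps are serialized across the whole deployment. For a deployment with two roles, Controller and Compute (role names illustrative), the Compute step 2 resource renders to roughly:

    ComputeDeployment_Step2:
      type: OS::Heat::StructuredDeploymentGroup
      depends_on:
        - ControllerDeployment_Step1
        - ComputeDeployment_Step1
      properties:
        name: ComputeDeployment_Step2
        servers: {get_param: [servers, Compute]}
        config: {get_resource: ComputeConfig}
        input_values:
          step: 2
          update_identifier: {get_param: DeployIdentifier}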
diff --git a/docker/services/README.rst b/docker/services/README.rst
new file mode 100644
index 00000000..60719bfc
--- /dev/null
+++ b/docker/services/README.rst
@@ -0,0 +1,65 @@
+========
+services
+========
+
+A TripleO nested stack Heat template that encapsulates generic configuration
+data to configure a specific service. This generally includes everything
+needed to configure the service excluding the local bind ports which
+are still managed in the per-node role templates directly (controller.yaml,
+compute.yaml, etc.). All other (global) service settings go into
+the puppet/service templates.
+
+Input Parameters
+----------------
+
+Each service may define its own input parameters and defaults.
+Operators will use the parameter_defaults section of any Heat
+environment to set per service parameters.
+
+Config Settings
+---------------
+
+Each service may define a config_settings output variable which returns
+Hiera settings to be configured.
+
+Steps
+-----
+
+Each service may define an output variable which returns a puppet manifest
+snippet that will run at each of the following steps. Earlier manifests
+are re-asserted when applying later ones.
+
+ * config_settings: Custom hiera settings for this service. These are
+ used to generate configs.
+
+ * kolla_config: Contains YAML that represents how to map config files
+ into the kolla container. This config file is typically mapped into
+ the container itself at the /var/lib/kolla/config_files/config.json
+ location and drives how kolla's external config mechanisms work.
+
+ * step_config: A puppet manifest that is used to step through the deployment
+ sequence. Each sequence is given a "step" (via hiera('step')) that provides
+ information about when puppet classes should activate themselves.
+
+ * docker_compose:
+
+ * container_name:
+
+ * volumes:
+
+Steps correlate to the following:
+
+ 1) Service configuration generation with puppet.
+
+ 2) Early OpenStack service setup (database init?)
+
+ 3) Early containerized networking services startup (OVS)
+
+ 4) Network configuration
+
+ 5) General OpenStack Services
+
+ 6) Service activation (Pacemaker)
+
+ 7) Fencing (Pacemaker)
+
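As a minimal sketch of the contract described in this README (the service name, image and paths below are illustrative, not an existing template), a containerized service would expose role_data along these lines, mirroring the nova-compute and neutron-ovs-agent templates added further down in this change:

    outputs:
      role_data:
        description: Role data for the example service
        value:
          config_settings: {get_attr: [ExampleServiceBase, role_data, config_settings]}
          step_config: {get_attr: [ExampleServiceBase, role_data, step_config]}
          puppet_tags: example_config
          kolla_config:
            /var/lib/etc-data/json-config/example.json:
              command: /usr/bin/example-service --config-file /etc/example/example.conf
              config_files:
                - source: /var/lib/kolla/config_files/example.conf
                  dest: /etc/example/example.conf
                  owner: example
                  perm: '0600'
          docker_config:
            step_1:
              example_service:
                image:
                  list_join:
                    - '/'
                    - [ {get_param: DockerNamespace}, {get_param: DockerExampleImage} ]
                net: host
                restart: always
                volumes:
                  - /var/lib/etc-data/json-config/example.json:/var/lib/kolla/config_files/config.json
                environment:
                  - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
            step_2: {}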
diff --git a/docker/services/neutron-ovs-agent.yaml b/docker/services/neutron-ovs-agent.yaml
new file mode 100644
index 00000000..0a061f6c
--- /dev/null
+++ b/docker/services/neutron-ovs-agent.yaml
@@ -0,0 +1,84 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Neutron openvswitch service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerOpenvswitchImage:
+ description: image
+ default: 'centos-binary-neutron-openvswitch-agent'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+ NeutronOvsAgentBase:
+ type: ../../puppet/services/neutron-ovs-agent.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for Neutron openvswitch service
+ value:
+ config_settings: {get_attr: [NeutronOvsAgentBase, role_data, config_settings]}
+ step_config: {get_attr: [NeutronOvsAgentBase, role_data, step_config]}
+ puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2
+ kolla_config:
+ /var/lib/etc-data/json-config/neutron-openvswitch-agent.json:
+ command: /usr/bin/neutron-openvswitch-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
+ config_files:
+ - dest: /etc/neutron/neutron.conf
+ owner: neutron
+ perm: '0600'
+ source: /var/lib/kolla/config_files/neutron.conf
+ - dest: /etc/neutron/plugins/ml2/openvswitch_agent.ini
+ owner: neutron
+ perm: '0600'
+ source: /var/lib/kolla/config_files/openvswitch_agent.ini
+ - dest: /etc/neutron/plugins/ml2/ml2_conf.ini
+ owner: neutron
+ perm: '0600'
+ source: /var/lib/kolla/config_files/ml2_conf.ini
+ docker_config:
+ step_1:
+ neutronovsagent:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
+ net: host
+ pid: host
+ privileged: true
+ restart: always
+ volumes:
+ - /var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro
+ - /var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro
+ - /var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - logs:/var/log/kolla/
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_2: {}
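The image parameters of these service templates follow the usual Heat override pattern; for example, to pull the agent image from a local registry instead of the default namespace (the registry address is illustrative, matching the DockerNamespace comment further below in environments/docker.yaml), an environment file could set:

    parameter_defaults:
      DockerNamespace: "192.0.2.1:8787/tripleoupstream"
      DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:newton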
diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml
new file mode 100644
index 00000000..e765609e
--- /dev/null
+++ b/docker/services/nova-compute.yaml
@@ -0,0 +1,81 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Nova Compute service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaComputeImage:
+ description: image
+ default: 'centos-binary-nova-compute'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+ NovaComputeBase:
+ type: ../../puppet/services/nova-compute.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Compute service.
+ value:
+ config_settings: {get_attr: [NovaComputeBase, role_data, config_settings]}
+ step_config: {get_attr: [NovaComputeBase, role_data, step_config]}
+ puppet_tags: nova_config,nova_paste_api_ini
+ kolla_config:
+ /var/lib/etc-data/json-config/nova-compute.json:
+ command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
+ config_files:
+ - dest: /etc/nova/nova.conf
+ owner: nova
+ perm: '0600'
+ source: /var/lib/kolla/config_files/nova.conf
+ - dest: /etc/nova/rootwrap.conf
+ owner: nova
+ perm: '0600'
+ source: /var/lib/kolla/config_files/rootwrap.conf
+ docker_config:
+ step_1:
+ novacompute:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+ net: host
+ privileged: true
+ user: root
+ restart: always
+ volumes:
+ - /var/lib/etc-data/json-config/nova-compute.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/etc-data/nova/nova.conf:/var/lib/kolla/config_files/nova.conf:ro
+ - /var/lib/etc-data/nova/rootwrap.conf:/var/lib/kolla/config_files/rootwrap.conf:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /dev:/dev
+ - logs:/var/log/kolla/
+ - /etc/iscsi:/etc/iscsi
+ - libvirtd:/var/lib/libvirt
+ - nova_compute:/var/lib/nova/
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_2: {}
diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml
new file mode 100644
index 00000000..004d624a
--- /dev/null
+++ b/docker/services/nova-libvirt.yaml
@@ -0,0 +1,82 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Libvirt Service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerLibvirtImage:
+ description: image
+ default: 'centos-binary-libvirt'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+ NovaLibvirtBase:
+ type: ../../puppet/services/nova-libvirt.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Libvirt service.
+ value:
+ config_settings: {get_attr: [NovaLibvirtBase, role_data, config_settings]}
+ step_config: {get_attr: [NovaLibvirtBase, role_data, step_config]}
+ puppet_tags: nova_config
+ kolla_config:
+ /var/lib/etc-data/json-config/nova-libvirt.json:
+ command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
+ config_files:
+ - dest: /etc/libvirt/libvirtd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/libvirtd.conf
+ docker_config:
+ step_1:
+ nova_libvirt:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
+ net: host
+ pid: host
+ privileged: true
+ restart: always
+ volumes:
+ - /var/lib/etc-data/json-config/nova-libvirt.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/etc-data/libvirt/libvirtd.conf:/var/lib/kolla/config_files/libvirtd.conf
+ # NOTE(mandre) Ideally the qemu.conf file is mounted in
+ # /var/lib/kolla/config_files and copied to the right place but
+ # copy-json.py doesn't allow us to do that without appending the
+ # file as an additional config on the CLI
+ - /var/lib/etc-data/libvirt/qemu.conf:/etc/libvirt/qemu.conf:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /dev:/dev
+ - /sys/fs/cgroup:/sys/fs/cgroup
+ - logs:/var/log/kolla/
+ - libvirtd:/var/lib/libvirt
+ - nova_compute:/var/lib/nova/
+ - nova_libvirt_qemu:/etc/libvirt/qemu
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_2: {}
diff --git a/docker/services/services.yaml b/docker/services/services.yaml
new file mode 100644
index 00000000..8c31107f
--- /dev/null
+++ b/docker/services/services.yaml
@@ -0,0 +1,75 @@
+heat_template_version: ocata
+
+description: >
+ Utility stack to convert an array of services into a set of combined
+ role configs.
+
+parameters:
+ Services:
+ default: []
+ description: |
+ List of nested stack service templates.
+ type: comma_delimited_list
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ DefaultPasswords:
+ default: {}
+ description: Mapping of service -> default password. Used to help
+ pass top level passwords managed by Heat into services.
+ type: json
+
+resources:
+
+ PuppetServices:
+ type: ../../puppet/services/services.yaml
+ properties:
+ Services: {get_param: Services}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ EndpointMap: {get_param: EndpointMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+ ServiceChain:
+ type: OS::Heat::ResourceChain
+ properties:
+ resources: {get_param: Services}
+ concurrent: true
+ resource_properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ EndpointMap: {get_param: EndpointMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Combined Role data for this set of services.
+ value:
+ service_names:
+ {get_attr: [PuppetServices, role_data, service_names]}
+ monitoring_subscriptions:
+ {get_attr: [PuppetServices, role_data, monitoring_subscriptions]}
+ logging_sources:
+ {get_attr: [PuppetServices, role_data, logging_sources]}
+ logging_groups:
+ {get_attr: [PuppetServices, role_data, logging_groups]}
+ service_config_settings:
+ {get_attr: [PuppetServices, role_data, service_config_settings]}
+ config_settings:
+ {get_attr: [PuppetServices, role_data, config_settings]}
+ global_config_settings:
+ {get_attr: [PuppetServices, role_data, global_config_settings]}
+ step_config:
+ {get_attr: [PuppetServices, role_data, step_config]}
+ puppet_tags: {list_join: [",", {get_attr: [ServiceChain, role_data, puppet_tags]}]}
+ kolla_config:
+ map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
+ docker_config:
+ step_1: {map_merge: {get_attr: [ServiceChain, role_data, docker_config, step_1]}}
+ step_2: {map_merge: {get_attr: [ServiceChain, role_data, docker_config, step_2]}}
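The map_merge calls above combine each service's docker_config into a single per-step map for the role, so the docker-cmd hook receives one container set per step. With the nova-compute and nova-libvirt templates from this change, the merged output has this shape (container bodies elided):

    docker_config:
      step_1:
        novacompute:
          # container definition from nova-compute.yaml
        nova_libvirt:
          # container definition from nova-libvirt.yaml
      step_2: {}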
diff --git a/environments/auditd.yaml b/environments/auditd.yaml
new file mode 100644
index 00000000..b358c98a
--- /dev/null
+++ b/environments/auditd.yaml
@@ -0,0 +1,119 @@
+resource_registry:
+ OS::TripleO::Services::AuditD: ../puppet/services/auditd.yaml
+
+parameter_defaults:
+ AuditdRules:
+ 'Record attempts to alter time through adjtimex':
+ content: '-a always,exit -F arch=b64 -S adjtimex -k audit_time_rules'
+ order : 1
+ 'Record attempts to alter time through settimeofday':
+ content: '-a always,exit -F arch=b64 -S settimeofday -k audit_time_rules'
+ order : 2
+ 'Record Attempts to Alter Time Through stime':
+ content: '-a always,exit -F arch=b64 -S stime -k audit_time_rules'
+ order : 3
+ 'Record Attempts to Alter Time Through clock_settime':
+ content: '-a always,exit -F arch=b64 -S clock_settime -k audit_time_rules'
+ order : 4
+ 'Record Attempts to Alter the localtime File':
+ content: '-w /etc/localtime -p wa -k audit_time_rules'
+ order : 5
+ 'Record Events that Modify the Systems Discretionary Access Controls - chmod':
+ content: '-a always,exit -F arch=b64 -S chmod -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 5
+ 'Record Events that Modify the Systems Discretionary Access Controls - chown':
+ content: '-a always,exit -F arch=b64 -S chown -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 6
+ 'Record Events that Modify the Systems Discretionary Access Controls - fchmod':
+ content: '-a always,exit -F arch=b64 -S fchmod -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 7
+ 'Record Events that Modify the Systems Discretionary Access Controls - fchmodat':
+ content: '-a always,exit -F arch=b64 -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 8
+ 'Record Events that Modify the Systems Discretionary Access Controls - fchown':
+ content: '-a always,exit -F arch=b64 -S fchown -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 9
+ 'Record Events that Modify the Systems Discretionary Access Controls - fchownat':
+ content: '-a always,exit -F arch=b64 -S fchownat -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 10
+ 'Record Events that Modify the Systems Discretionary Access Controls - fremovexattr':
+ content: '-a always,exit -F arch=b64 -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 11
+ 'Record Events that Modify the Systems Discretionary Access Controls - fsetxattr':
+ content: '-a always,exit -F arch=b64 -S fsetxattr -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 12
+ 'Record Events that Modify the Systems Discretionary Access Controls - lchown':
+ content: '-a always,exit -F arch=b64 -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 13
+ 'Record Events that Modify the Systems Discretionary Access Controls - lremovexattr':
+ content: '-a always,exit -F arch=b64 -S lremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 14
+ 'Record Events that Modify the Systems Discretionary Access Controls - lsetxattr':
+ content: '-a always,exit -F arch=b64 -S lsetxattr -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 15
+ 'Record Events that Modify the Systems Discretionary Access Controls - removexattr':
+ content: '-a always,exit -F arch=b64 -S removexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 16
+ 'Record Events that Modify the Systems Discretionary Access Controls - setxattr':
+ content: '-a always,exit -F arch=b64 -S setxattr -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 17
+ 'Record Events that Modify User/Group Information - /etc/group':
+ content: '-w /etc/group -p wa -k audit_rules_usergroup_modification'
+ order : 18
+ 'Record Events that Modify User/Group Information - /etc/passwd':
+ content: '-w /etc/passwd -p wa -k audit_rules_usergroup_modification'
+ order : 19
+ 'Record Events that Modify User/Group Information - /etc/gshadow':
+ content: '-w /etc/gshadow -p wa -k audit_rules_usergroup_modification'
+ order : 20
+ 'Record Events that Modify User/Group Information - /etc/shadow':
+ content: '-w /etc/shadow -p wa -k audit_rules_usergroup_modification'
+ order : 21
+ 'Record Events that Modify User/Group Information - /etc/opasswd':
+ content: '-w /etc/opasswd -p wa -k audit_rules_usergroup_modification'
+ order : 22
+ 'Record Events that Modify the Systems Network Environment - sethostname / setdomainname':
+ content: '-a always,exit -F arch=b64 -S sethostname -S setdomainname -k audit_rules_networkconfig_modification'
+ order : 23
+ 'Record Events that Modify the Systems Network Environment - /etc/issue':
+ content: '-w /etc/issue -p wa -k audit_rules_networkconfig_modification'
+ order : 24
+ 'Record Events that Modify the Systems Network Environment - /etc/issue.net':
+ content: '-w /etc/issue.net -p wa -k audit_rules_networkconfig_modification'
+ order : 25
+ 'Record Events that Modify the Systems Network Environment - /etc/hosts':
+ content: '-w /etc/hosts -p wa -k audit_rules_networkconfig_modification'
+ order : 26
+ 'Record Events that Modify the Systems Network Environment - /etc/sysconfig/network':
+ content: '-w /etc/sysconfig/network -p wa -k audit_rules_networkconfig_modification'
+ order : 27
+ 'Record Events that Modify the Systems Mandatory Access Controls':
+ content: '-w /etc/selinux/ -p wa -k MAC-policy'
+ order : 28
+ 'Ensure auditd Collects Unauthorized Access Attempts to Files (unsuccessful / EACCES)':
+ content: '-a always,exit -F arch=b64 -S creat -S open -S openat -S open_by_handle_at -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access'
+ order : 29
+ 'Ensure auditd Collects Unauthorized Access Attempts to Files (unsuccessful / EPERM)':
+ content: '-a always,exit -F arch=b64 -S creat -S open -S openat -S open_by_handle_at -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access'
+ order : 30
+ 'Ensure auditd Collects Information on the Use of Privileged Commands':
+ content: '-a always,exit -F path=SETUID_PROG_PATH -F perm=x -F auid>=1000 -F auid!=4294967295 -k privileged'
+ order : 31
+ 'Ensure auditd Collects Information on Exporting to Media (successful)':
+ content: '-a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k export'
+ order : 32
+ 'Ensure auditd Collects File Deletion Events by User':
+ content: '-a always,exit -F arch=b64 -S rmdir -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete'
+ order : 33
+ 'Ensure auditd Collects System Administrator Actions':
+ content: '-w /etc/sudoers -p wa -k actions'
+ order : 34
+ 'Ensure auditd Collects Information on Kernel Module Loading and Unloading (insmod)':
+ content: '-w /usr/sbin/insmod -p x -k modules'
+ order : 35
+ 'Ensure auditd Collects Information on Kernel Module Loading and Unloading (rmmod)':
+ content: '-w /usr/sbin/rmmod -p x -k modules'
+ order : 36
+ 'Ensure auditd Collects Information on Kernel Module Loading and Unloading (modprobe)':
+ content: '-w /usr/sbin/modprobe -p x -k modules'
+ order : 37
diff --git a/environments/cinder-hpelefthand-config.yaml b/environments/cinder-hpelefthand-config.yaml
new file mode 100644
index 00000000..90d0261e
--- /dev/null
+++ b/environments/cinder-hpelefthand-config.yaml
@@ -0,0 +1,13 @@
+# A Heat environment file which can be used to enable a
+# Cinder HPELeftHandISCSI backend, configured via puppet.
+resource_registry:
+ OS::TripleO::Services::CinderHPELeftHandISCSI: ../puppet/services/cinder-hpelefthand-iscsi.yaml
+
+parameter_defaults:
+ CinderHPELeftHandISCSIApiUrl: ''
+ CinderHPELeftHandISCSIUserName: ''
+ CinderHPELeftHandISCSIPassword: ''
+ CinderHPELeftHandISCSIBackendName: 'tripleo_hpelefthand'
+ CinderHPELeftHandISCSIChapEnabled: false
+ CinderHPELeftHandClusterName: ''
+ CinderHPELeftHandDebug: false
diff --git a/environments/cinder-iser.yaml b/environments/cinder-iser.yaml
new file mode 100644
index 00000000..5eae7c04
--- /dev/null
+++ b/environments/cinder-iser.yaml
@@ -0,0 +1,19 @@
+parameter_defaults:
+
+ ## Whether to enable iscsi backend for Cinder.
+ CinderEnableIscsiBackend: true
+ CinderISCSIProtocol: 'iser'
+ CinderISCSIHelper: 'lioadm'
+
+ ## Whether to enable rbd (Ceph) backend for Cinder.
+ CinderEnableRbdBackend: false
+
+ ## Whether to enable NFS backend for Cinder.
+ CinderEnableNfsBackend: false
+
+ ## Whether to enable rbd (Ceph) backend for Nova ephemeral storage.
+ NovaEnableRbdBackend: false
+
+ ## Glance backend can be either 'rbd' (Ceph), 'swift' or 'file'.
+ ## GlanceBackend: swift
+
diff --git a/environments/deployed-server-bootstrap-environment-centos.yaml b/environments/deployed-server-bootstrap-environment-centos.yaml
new file mode 100644
index 00000000..ebcdfc2b
--- /dev/null
+++ b/environments/deployed-server-bootstrap-environment-centos.yaml
@@ -0,0 +1,7 @@
+# An environment that can be used with the deployed-server.yaml template to do
+# initial bootstrapping of the deployed servers.
+resource_registry:
+ OS::TripleO::DeployedServer::Bootstrap: ../deployed-server/deployed-server-bootstrap-centos.yaml
+
+parameter_defaults:
+ EnablePackageInstall: True
diff --git a/environments/deployed-server-bootstrap-environment-rhel.yaml b/environments/deployed-server-bootstrap-environment-rhel.yaml
new file mode 100644
index 00000000..f614a91a
--- /dev/null
+++ b/environments/deployed-server-bootstrap-environment-rhel.yaml
@@ -0,0 +1,7 @@
+# An environment that can be used with the deployed-server.yaml template to do
+# initial bootstrapping of the deployed servers.
+resource_registry:
+ OS::TripleO::DeployedServer::Bootstrap: ../deployed-server/deployed-server-bootstrap-rhel.yaml
+
+parameter_defaults:
+ EnablePackageInstall: True
diff --git a/environments/deployed-server-environment.yaml b/environments/deployed-server-environment.yaml
index c63d399a..7bc1bd9b 100644
--- a/environments/deployed-server-environment.yaml
+++ b/environments/deployed-server-environment.yaml
@@ -1,4 +1,4 @@
resource_registry:
OS::TripleO::Server: ../deployed-server/deployed-server.yaml
- OS::TripleO::DeployedServerConfig: ../deployed-server/deployed-server-config.yaml
- OS::TripleO::DeployedServer::ControlPlanePort: ../deployed-server/ctlplane-port.yaml
+ OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port
+ OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
diff --git a/environments/deployed-server-noop-ctlplane.yaml b/environments/deployed-server-noop-ctlplane.yaml
index cfda314d..8835d5b1 100644
--- a/environments/deployed-server-noop-ctlplane.yaml
+++ b/environments/deployed-server-noop-ctlplane.yaml
@@ -1,4 +1,4 @@
resource_registry:
+ OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
OS::TripleO::Server: ../deployed-server/deployed-server.yaml
- OS::TripleO::DeployedServerConfig: ../deployed-server/deployed-server-config.yaml
- OS::TripleO::DeployedServer::ControlPlanePort: OS::Heat::None
+ OS::TripleO::DeployedServer::ControlPlanePort: ../deployed-server/deployed-neutron-port.yaml
diff --git a/environments/docker-network-isolation.yaml b/environments/docker-network-isolation.yaml
deleted file mode 100644
index 87c81d0b..00000000
--- a/environments/docker-network-isolation.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-parameter_defaults:
- NeutronOpenvswitchAgentConfig: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/openvswitch_agent.ini"
- NeutronOpenvswitchAgentPluginVolume: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro"
- NeutronOpenvswitchAgentOvsVolume: "/var/lib/etc-data/neutron/conf.d/neutron-openvswitch-agent:/etc/neutron/conf.d/neutron-openvswitch-agent:ro"
diff --git a/environments/docker.yaml b/environments/docker.yaml
index c03d8511..4f5b36b4 100644
--- a/environments/docker.yaml
+++ b/environments/docker.yaml
@@ -1,20 +1,30 @@
resource_registry:
# Docker container with heat agents for containerized compute node.
- OS::TripleO::ComputePostDeployment: ../docker/compute-post.yaml
- OS::TripleO::NodeUserData: ../docker/firstboot/install_docker_agents.yaml
+ OS::TripleO::Compute::NodeUserData: ../docker/firstboot/install_docker_agents.yaml
+ OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml
+ OS::TripleO::Services::ComputeNeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
+ OS::TripleO::Services::NovaCompute: ../docker/services/nova-compute.yaml
+ # NOTE (dprince) here we set new roles to be docker enabled as we add support
+ #OS::TripleO::ComputePostDeploySteps: ../docker/post.yaml
+ # NOTE (mandre) Defining per role post deploy steps doesn't work yet
+ # Set a global PostDeploySteps that works for both containerized and
+ # non-containerized roles
+ OS::TripleO::PostDeploySteps: ../docker/post.yaml
+ OS::TripleO::Services: ../docker/services/services.yaml
parameter_defaults:
- NovaImage: atomic-image
# Defaults to 'tripleoupstream'. Specify a local docker registry
# Example: 192.0.2.1:8787/tripleoupstream
DockerNamespace: tripleoupstream
# Enable local Docker registry
DockerNamespaceIsRegistry: false
- # Compute Node Images
- DockerComputeImage: centos-binary-nova-compute:latest
- DockerAgentImage: heat-docker-agents:latest
- DockerComputeDataImage: centos-binary-data:latest
- DockerLibvirtImage: centos-binary-nova-libvirt:latest
- DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:latest
- DockerOvsVswitchdImage: centos-binary-openvswitch-vswitchd:latest
- DockerOpenvswitchDBImage: centos-binary-openvswitch-db-server:latest
+ DockerAgentImage: heat-docker-agents:newton
+ # Docker containers
+ DockerNovaComputeImage: centos-binary-nova-compute:newton
+ DockerLibvirtImage: centos-binary-nova-libvirt:newton
+ DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:newton
+
+ ComputeServices:
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::ComputeNeutronOvsAgent
diff --git a/environments/enable-internal-tls.yaml b/environments/enable-internal-tls.yaml
index 5116c6da..ff4ecfbe 100644
--- a/environments/enable-internal-tls.yaml
+++ b/environments/enable-internal-tls.yaml
@@ -2,5 +2,18 @@
# TLS for the internal network via certmonger
parameter_defaults:
EnableInternalTLS: true
+
+ # Required for novajoin to enroll the overcloud nodes
+ ServerMetadata:
+ ipa_enroll: True
+
resource_registry:
- OS::TripleO::Services::ApacheTLS: ../../puppet/services/apache-internal-tls-certmonger.yaml
+ OS::TripleO::Services::HAProxyInternalTLS: ../puppet/services/haproxy-internal-tls-certmonger.yaml
+ OS::TripleO::Services::ApacheTLS: ../puppet/services/apache-internal-tls-certmonger.yaml
+ OS::TripleO::Services::MySQLTLS: ../puppet/services/database/mysql-internal-tls-certmonger.yaml
+ # We use apache as a TLS proxy
+ OS::TripleO::Services::TLSProxyBase: ../puppet/services/apache.yaml
+
+ # Creates nova metadata that will create the extra service principals per
+ # node.
+ OS::TripleO::ServiceServerMetadataHook: ../extraconfig/nova_metadata/krb-service-principals.yaml
diff --git a/environments/enable-swap-partition.yaml b/environments/enable-swap-partition.yaml
new file mode 100644
index 00000000..71b70ec9
--- /dev/null
+++ b/environments/enable-swap-partition.yaml
@@ -0,0 +1,3 @@
+# Use this environment to create a swap partition in all Overcloud nodes
+resource_registry:
+ OS::TripleO::AllNodesExtraConfig: ../extraconfig/all_nodes/swap-partition.yaml
diff --git a/environments/enable-swap.yaml b/environments/enable-swap.yaml
new file mode 100644
index 00000000..9ba08642
--- /dev/null
+++ b/environments/enable-swap.yaml
@@ -0,0 +1,3 @@
+# Use this environment to create a swap file in all Overcloud nodes
+resource_registry:
+ OS::TripleO::AllNodesExtraConfig: ../extraconfig/all_nodes/swap.yaml
diff --git a/environments/external-loadbalancer-vip-v6.yaml b/environments/external-loadbalancer-vip-v6.yaml
index 5a2ef505..fbd1fb98 100644
--- a/environments/external-loadbalancer-vip-v6.yaml
+++ b/environments/external-loadbalancer-vip-v6.yaml
@@ -1,29 +1,24 @@
resource_registry:
- OS::TripleO::Network::Ports::NetVipMap: ../network/ports/net_vip_map_external_v6.yaml
- OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/from_service_v6.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool_v6.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool_v6.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool_v6.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool_v6.yaml
# OVS doesn't support IPv6 endpoints for tunneling yet, so this remains IPv4 for now.
OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+ # Management network is optional and disabled by default
+ #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management_from_pool_v6.yaml
parameter_defaults:
# When using an external loadbalancer set the following in parameter_defaults
# to control your VIPs (currently one per network)
# NOTE: we will eventually move to one VIP per service
#
- ControlPlaneIP: 192.0.2.251
- ExternalNetworkVip: 2001:db8:fd00:1000:0000:0000:0000:0005
- InternalApiNetworkVip: fd00:fd00:fd00:2000:0000:0000:0000:0005
- StorageNetworkVip: fd00:fd00:fd00:3000:0000:0000:0000:0005
- StorageMgmtNetworkVip: fd00:fd00:fd00:4000:0000:0000:0000:0005
- ServiceVips:
- redis: fd00:fd00:fd00:2000:0000:0000:0000:0006
+ ControlFixedIPs: [{'ip_address':'192.0.2.251'}]
+ PublicVirtualFixedIPs: [{'ip_address':'2001:db8:fd00:1000:0000:0000:0000:0005'}]
+ InternalApiVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0005'}]
+ StorageVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:3000:0000:0000:0000:0005'}]
+ StorageMgmtVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:4000:0000:0000:0000:0005'}]
+ RedisVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0006'}]
ControllerIPs:
external:
- 2001:db8:fd00:1000:0000:0000:0000:0007
diff --git a/environments/external-loadbalancer-vip.yaml b/environments/external-loadbalancer-vip.yaml
index 8656ba1a..1759c04c 100644
--- a/environments/external-loadbalancer-vip.yaml
+++ b/environments/external-loadbalancer-vip.yaml
@@ -1,10 +1,4 @@
resource_registry:
- OS::TripleO::Network::Ports::NetVipMap: ../network/ports/net_vip_map_external.yaml
- OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/from_service.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
@@ -18,13 +12,12 @@ parameter_defaults:
# to control your VIPs (currently one per network)
# NOTE: we will eventually move to one VIP per service
#
- ControlPlaneIP: 192.0.2.251
- ExternalNetworkVip: 10.0.0.251
- InternalApiNetworkVip: 172.16.2.251
- StorageNetworkVip: 172.16.1.251
- StorageMgmtNetworkVip: 172.16.3.251
- ServiceVips:
- redis: 172.16.2.252
+ ControlFixedIPs: [{'ip_address':'192.0.2.251'}]
+ PublicVirtualFixedIPs: [{'ip_address':'10.0.0.251'}]
+ InternalApiVirtualFixedIPs: [{'ip_address':'172.16.2.251'}]
+ StorageVirtualFixedIPs: [{'ip_address':'172.16.1.251'}]
+ StorageMgmtVirtualFixedIPs: [{'ip_address':'172.16.3.251'}]
+ RedisVirtualFixedIPs: [{'ip_address':'172.16.2.252'}]
ControllerIPs:
external:
- 10.0.0.253
diff --git a/environments/horizon_password_validation.yaml b/environments/horizon_password_validation.yaml
new file mode 100644
index 00000000..1a0f92cc
--- /dev/null
+++ b/environments/horizon_password_validation.yaml
@@ -0,0 +1,5 @@
+# Use this environment to pass in a validation regex for Horizon's password
+# validation checks.
+parameter_defaults:
+ HorizonPasswordValidator: '.*'
+ HorizonPasswordValidatorHelp: 'Your password does not meet the requirements.'
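The '.*' default above accepts any password; a deployment that wants real constraints would substitute its own pattern, for instance (the regex below is illustrative only):

    parameter_defaults:
      # Require at least eight characters including one digit and one uppercase letter
      HorizonPasswordValidator: '^(?=.*\d)(?=.*[A-Z]).{8,}$'
      HorizonPasswordValidatorHelp: 'Password must be at least 8 characters long and contain a digit and an uppercase letter.'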
diff --git a/environments/host-config-pre-network.j2.yaml b/environments/host-config-pre-network.j2.yaml
new file mode 100644
index 00000000..fe1302b5
--- /dev/null
+++ b/environments/host-config-pre-network.j2.yaml
@@ -0,0 +1,16 @@
+resource_registry:
+# Create the registry entries only for roles with the word "Compute" in their name, e.g. ComputeOvsDpdk, ComputeSriov, etc.
+{% for role in roles %}
+{% if "Compute" in role.name %}
+ OS::TripleO::{{role.name}}::PreNetworkConfig: ../extraconfig/pre_network/{{role.name.lower()}}-host_config_and_reboot.yaml
+{% endif %}
+{% endfor %}
+
+parameter_defaults:
+ # Sample parameters for Compute and ComputeOvsDpdk roles
+ #ComputeKernelArgs: ""
+ #ComputeTunedProfileName: ""
+ #ComputeHostCpuList: ""
+ #ComputeOvsDpdkKernelArgs: ""
+ #ComputeOvsDpdkTunedProfileName: ""
+ #ComputeOvsDpdkHostCpuList: ""
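For an OVS-DPDK compute role these parameters typically carry IOMMU and hugepage kernel arguments, a tuned profile name such as cpu-partitioning, and the host CPUs to reserve; the values below are illustrative only and must be adapted to the actual hardware and role:

    parameter_defaults:
      ComputeOvsDpdkKernelArgs: "intel_iommu=on iommu=pt default_hugepagesz=1GB hugepagesz=1G hugepages=16"
      ComputeOvsDpdkTunedProfileName: "cpu-partitioning"
      ComputeOvsDpdkHostCpuList: "0,1,2,3"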
diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml
index cee4ae4a..77fa5a49 100644
--- a/environments/hyperconverged-ceph.yaml
+++ b/environments/hyperconverged-ceph.yaml
@@ -5,7 +5,24 @@ resource_registry:
parameter_defaults:
ComputeServices:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephClient
+ - OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::ComputeNeutronCorePlugin
+ - OS::TripleO::Services::ComputeNeutronOvsAgent
+ - OS::TripleO::Services::ComputeCeilometerAgent
+ - OS::TripleO::Services::ComputeNeutronL3Agent
+ - OS::TripleO::Services::ComputeNeutronMetadataAgent
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::NeutronSriovAgent
+ - OS::TripleO::Services::OpenDaylightOvs
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::CephOSD
-
-parameter_merge_strategies:
- ComputeServices: merge
\ No newline at end of file
diff --git a/environments/low-memory-usage.yaml b/environments/low-memory-usage.yaml
index ad428686..47b2003d 100644
--- a/environments/low-memory-usage.yaml
+++ b/environments/low-memory-usage.yaml
@@ -13,3 +13,6 @@ parameter_defaults:
ApacheMaxRequestWorkers: 32
ApacheServerLimit: 32
+
+ ControllerExtraConfig:
+ 'nova::network::neutron::neutron_url_timeout': '60'
diff --git a/environments/major-upgrade-all-in-one.yaml b/environments/major-upgrade-all-in-one.yaml
new file mode 100644
index 00000000..69d72edd
--- /dev/null
+++ b/environments/major-upgrade-all-in-one.yaml
@@ -0,0 +1,8 @@
+# We run the upgrade steps without disabling the OS::TripleO::PostDeploySteps;
+# this means you can do a major upgrade in one pass, which may be useful,
+# e.g. for all-in-one deployments, where we can upgrade the compute services
+# at the same time as the control plane.
+# Note that it will be necessary to pass a mapping of OS::Heat::None again for
+# any subsequent updates, or the upgrade steps will run again.
+resource_registry:
+ OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml
diff --git a/environments/major-upgrade-composable-steps.yaml b/environments/major-upgrade-composable-steps.yaml
new file mode 100644
index 00000000..7e10014b
--- /dev/null
+++ b/environments/major-upgrade-composable-steps.yaml
@@ -0,0 +1,3 @@
+resource_registry:
+ OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml
+ OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/manila-cephfsnative-config.yaml b/environments/manila-cephfsnative-config.yaml
index 825a5066..5632d8d6 100644
--- a/environments/manila-cephfsnative-config.yaml
+++ b/environments/manila-cephfsnative-config.yaml
@@ -1,11 +1,11 @@
# A Heat environment file which can be used to enable a
# Manila CephFS Native driver backend.
resource_registry:
- OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
- OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
# Only manila-share is pacemaker managed:
- OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
- OS::Tripleo::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml
+ OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml
parameter_defaults:
diff --git a/environments/manila-generic-config.yaml b/environments/manila-generic-config.yaml
index 9344bc6e..65884a94 100644
--- a/environments/manila-generic-config.yaml
+++ b/environments/manila-generic-config.yaml
@@ -1,10 +1,10 @@
# This environment file enables Manila with the Generic backend.
resource_registry:
- OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
- OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
# Only manila-share is pacemaker managed:
- OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
- OS::Tripleo::Services::ManilaBackendGeneric: ../puppet/services/manila-backend-generic.yaml
+ OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendGeneric: ../puppet/services/manila-backend-generic.yaml
parameter_defaults:
ManilaServiceInstanceUser: ''
diff --git a/environments/manila-netapp-config.yaml b/environments/manila-netapp-config.yaml
index 3dadfe5d..7eb14941 100644
--- a/environments/manila-netapp-config.yaml
+++ b/environments/manila-netapp-config.yaml
@@ -1,10 +1,10 @@
# This environment file enables Manila with the Netapp backend.
resource_registry:
- OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
- OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaApi: ../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
# Only manila-share is pacemaker managed:
- OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
- OS::Tripleo::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml
+ OS::TripleO::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml
parameter_defaults:
ManilaNetappBackendName: tripleo_netapp
diff --git a/environments/network-environment.yaml b/environments/network-environment.yaml
index d0fc9ec6..796eb806 100644
--- a/environments/network-environment.yaml
+++ b/environments/network-environment.yaml
@@ -43,14 +43,19 @@ parameter_defaults:
ExternalInterfaceDefaultRoute: 10.0.0.1
# Uncomment if using the Management Network (see network-management.yaml)
# ManagementNetCidr: 10.0.1.0/24
- # ManagementAllocationPools: [{'start': '10.0.1.10', 'end', '10.0.1.50'}]
+ # ManagementAllocationPools: [{'start': '10.0.1.10', 'end': '10.0.1.50'}]
# Use either this parameter or ControlPlaneDefaultRoute in the NIC templates
# ManagementInterfaceDefaultRoute: 10.0.1.1
# Define the DNS servers (maximum 2) for the overcloud nodes
DnsServers: ["8.8.8.8","8.8.4.4"]
# Set to empty string to enable multiple external networks or VLANs
NeutronExternalNetworkBridge: "''"
+ # List of Neutron network types for tenant networks (will be used in order)
+ NeutronNetworkType: 'vxlan,vlan'
# The tunnel type for the tenant network (vxlan or gre). Set to '' to disable tunneling.
NeutronTunnelTypes: 'vxlan'
+ # Neutron VLAN ranges per network, for example 'datacentre:1:499,tenant:500:1000':
+ NeutronNetworkVLANRanges: 'datacentre:1:1000'
# Customize bonding options, e.g. "mode=4 lacp_rate=1 updelay=1000 miimon=100"
+ # for Linux bonds w/LACP, or "bond_mode=active-backup" for OVS active/backup.
BondInterfaceOvsOptions: "bond_mode=active-backup"
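For reference, the tenant-network settings added above can be combined as follows; this is a minimal illustrative sketch and the values should be adapted per deployment:

    parameter_defaults:
      # Tenant networks are created as VXLAN first, then VLAN if tunnelling is unavailable
      NeutronNetworkType: 'vxlan,vlan'
      NeutronTunnelTypes: 'vxlan'
      # Physical network 'datacentre' may hand out VLAN IDs 1-1000
      NeutronNetworkVLANRanges: 'datacentre:1:1000'
      # OVS active/backup bonding; use "mode=4 lacp_rate=1 updelay=1000 miimon=100" for Linux bonds with LACP
      BondInterfaceOvsOptions: "bond_mode=active-backup"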
diff --git a/environments/network-isolation-no-tunneling.yaml b/environments/network-isolation-no-tunneling.yaml
index 5d2a915b..ff1d7887 100644
--- a/environments/network-isolation-no-tunneling.yaml
+++ b/environments/network-isolation-no-tunneling.yaml
@@ -8,30 +8,54 @@ resource_registry:
OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml
OS::TripleO::Network::Storage: ../network/storage.yaml
+ OS::TripleO::Network::Tenant: ../network/noop.yaml
+ # Management network is optional and disabled by default.
+ # To enable it, include environments/network-management.yaml
+ #OS::TripleO::Network::Management: ../network/management.yaml
+
+ # Port assignments for the VIPs
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
# Port assignments for the controller role
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::Controller::Ports::TenantPort: ../network/ports/noop.yaml
+ #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
# Port assignments for the compute role
+ OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
+ OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::TenantPort: ../network/ports/noop.yaml
+ #OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
# Port assignments for the ceph storage role
+ OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
+ #OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
# Port assignments for the swift storage role
+ OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
+ #OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
# Port assignments for the block storage role
+ OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-
- # Port assignments for service virtual IPs for the controller role
- OS::TripleO::Controller::Ports::RedisVipPort: ../network/ports/vip.yaml
+ OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
+ #OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
diff --git a/environments/network-isolation.yaml b/environments/network-isolation.yaml
index 737d7d36..a6b4b8ae 100644
--- a/environments/network-isolation.yaml
+++ b/environments/network-isolation.yaml
@@ -18,8 +18,6 @@ resource_registry:
OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
- # Port assignments for service virtual IPs for the controller role
- OS::TripleO::Controller::Ports::RedisVipPort: ../network/ports/vip.yaml
# Port assignments for the controller role
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml
@@ -59,4 +57,3 @@ resource_registry:
OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
#OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
diff --git a/environments/neutron-ml2-fujitsu-cfab.yaml b/environments/neutron-ml2-fujitsu-cfab.yaml
new file mode 100644
index 00000000..f14f7ee2
--- /dev/null
+++ b/environments/neutron-ml2-fujitsu-cfab.yaml
@@ -0,0 +1,21 @@
+# A Heat environment file which can be used to enable Fujitsu C-Fabric
+# plugin, configured via puppet
+resource_registry:
+ OS::TripleO::Services::NeutronML2FujitsuCfab: ../puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml
+
+parameter_defaults:
+ # Fixed
+ NeutronMechanismDrivers: ['openvswitch','fujitsu_cfab']
+ NeutronTypeDrivers: 'vlan'
+ NeutronNetworkType: 'vlan'
+
+ # Required
+ NeutronFujitsuCfabAddress: '192.168.0.1'
+ NeutronFujitsuCfabUserName: 'admin'
+ NeutronFujitsuCfabPassword:
+
+ # Optional
+ #NeutronFujitsuCfabPhysicalNetworks:
+ #NeutronFujitsuCfabSharePprofile:
+ #NeutronFujitsuCfabPprofilePrefix:
+ #NeutronFujitsuCfabSaveConfig:
diff --git a/environments/neutron-ml2-fujitsu-fossw.yaml b/environments/neutron-ml2-fujitsu-fossw.yaml
new file mode 100644
index 00000000..8db8da75
--- /dev/null
+++ b/environments/neutron-ml2-fujitsu-fossw.yaml
@@ -0,0 +1,22 @@
+# A Heat environment file which can be used to enable Fujitsu fossw
+# plugin, configured via puppet
+resource_registry:
+ OS::TripleO::Services::NeutronML2FujitsuFossw: ../puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml
+
+parameter_defaults:
+ # Fixed
+ NeutronMechanismDrivers: ['openvswitch','fujitsu_fossw']
+ NeutronTypeDrivers: ['vlan','vxlan']
+ NeutronNetworkType: ['vlan','vxlan']
+
+ # Required
+ NeutronFujitsuFosswIps: '192.168.0.1,192.168.0.2'
+ NeutronFujitsuFosswUserName:
+ NeutronFujitsuFosswPassword:
+
+ # Optional
+ #NeutronFujitsuFosswPort:
+ #NeutronFujitsuFosswTimeout:
+ #NeutronFujitsuFosswUdpDestPort:
+ #NeutronFujitsuFosswOvsdbVlanidRangeMin:
+ #NeutronFujitsuFosswOvsdbPort:
diff --git a/environments/neutron-ml2-ovn.yaml b/environments/neutron-ml2-ovn.yaml
index 821ad0c2..3da560c8 100644
--- a/environments/neutron-ml2-ovn.yaml
+++ b/environments/neutron-ml2-ovn.yaml
@@ -5,10 +5,13 @@ resource_registry:
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginML2OVN
OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-ovn.yaml
+# Disabling Neutron services that overlap with OVN
+ OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::OVNDBs: ../puppet/services/ovn-dbs.yaml
parameter_defaults:
NeutronMechanismDrivers: ovn
- OVNDbHost: '0.0.0.0'
OVNSouthboundServerPort: 6642
OVNNorthboundServerPort: 6641
OVNDbConnectionTimeout: 60
@@ -16,3 +19,4 @@ parameter_defaults:
OVNNeutronSyncMode: log
OVNQosDriver: ovn-qos
OVNTunnelEncapType: geneve
+ NeutronEnableDHCPAgent: false
diff --git a/environments/neutron-nuage-config.yaml b/environments/neutron-nuage-config.yaml
index e157ae35..74899246 100644
--- a/environments/neutron-nuage-config.yaml
+++ b/environments/neutron-nuage-config.yaml
@@ -19,7 +19,7 @@ parameter_defaults:
NeutronNuageBaseURIVersion: 'default_uri_version'
NeutronNuageCMSId: ''
UseForwardedFor: true
- NeutronCorePlugin: 'neutron.plugins.nuage.plugin.NuagePlugin'
+ NeutronCorePlugin: 'nuage_neutron.plugins.nuage.plugin.NuagePlugin'
NeutronEnableDHCPAgent: false
NeutronServicePlugins: []
NovaOVSBridge: 'alubr0'
diff --git a/environments/neutron-opendaylight-l3.yaml b/environments/neutron-opendaylight-l3.yaml
deleted file mode 100644
index 00be3048..00000000
--- a/environments/neutron-opendaylight-l3.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-# A Heat environment that can be used to deploy OpenDaylight with L3 DVR
-resource_registry:
- OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
- OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
- OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
- OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
- OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
- OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
-
-parameter_defaults:
- NeutronEnableForceMetadata: true
- NeutronMechanismDrivers: 'opendaylight'
- NeutronServicePlugins: "networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin"
- OpenDaylightEnableL3: "'yes'"
diff --git a/environments/neutron-opendaylight.yaml b/environments/neutron-opendaylight.yaml
index 35c90aab..e08b2b27 100644
--- a/environments/neutron-opendaylight.yaml
+++ b/environments/neutron-opendaylight.yaml
@@ -1,11 +1,13 @@
-# A Heat environment that can be used to deploy OpenDaylight
+# A Heat environment that can be used to deploy OpenDaylight with L3 DVR
resource_registry:
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
parameter_defaults:
NeutronEnableForceMetadata: true
- NeutronMechanismDrivers: 'opendaylight'
+ NeutronMechanismDrivers: 'opendaylight_v2'
+ NeutronServicePlugins: 'odl-router_v2'
diff --git a/environments/neutron-ovs-dvr.yaml b/environments/neutron-ovs-dvr.yaml
index b658d3a5..973cbe16 100644
--- a/environments/neutron-ovs-dvr.yaml
+++ b/environments/neutron-ovs-dvr.yaml
@@ -30,10 +30,15 @@ parameter_defaults:
# affect the agent on the controller node.
NeutronL3AgentMode: 'dvr_snat'
- # L3 HA isn't supported for DVR enabled routers. If upgrading from a system
- # where L3 HA is enabled and has neutron routers configured, it is
- # recommended setting this value to true until such time all routers can be
- # migrated to DVR routers. Once migration of the routers is complete,
- # NeutronL3HA can be returned to false. All new systems should be deployed
- # with NeutronL3HA set to false.
- NeutronL3HA: false
+ # Enabling DVR deploys additional services to the compute nodes that consume
+ # memory during normal operation. The amount required is roughly
+ # proportional to the number of Neutron routers that will be scheduled to
+ # that host. It is necessary to reserve memory on the compute nodes to avoid
+ # memory issues when creating instances that are connected to routed
+ # networks. The current expected consumption is 50 MB per router in addition
+ # to the base reserved amount. Deployers should refer to existing
+ # documentation, release notes, etc. for additional information on estimating
+ # an appropriate value. The provided value here is based on an estimate of 10
+ # routers and is an example value *only* and should be reviewed and modified
+ # if necessary before deploying.
+ NovaReservedHostMemory: 2560
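As a rough worked example of the estimate described in the comment above (the 2048 MB base reservation is an assumption, not a value taken from this file):

    parameter_defaults:
      # ~2048 MB base (assumed) + 10 routers * 50 MB per router ~= 2560 MB
      NovaReservedHostMemory: 2560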
diff --git a/environments/puppet-ceph-external.yaml b/environments/puppet-ceph-external.yaml
index 06e4f7aa..5f8b02ad 100644
--- a/environments/puppet-ceph-external.yaml
+++ b/environments/puppet-ceph-external.yaml
@@ -30,5 +30,8 @@ parameter_defaults:
# finally we disable the Cinder LVM backend
CinderEnableIscsiBackend: false
+ # Uncomment if connecting to a pre-Jewel or RHCS1.3 Ceph Cluster
+ # RbdDefaultFeatures: 1
+
# Backward compatibility setting, will be removed in the future
CephAdminKey: ''
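A hedged sketch of how this environment is usually combined with the external-cluster connection details; the parameter names (CephClusterFSID, CephClientKey, CephExternalMonHost) are assumed from the same file and all values below are placeholders:

    parameter_defaults:
      CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'    # placeholder FSID
      CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='  # placeholder keyring
      CephExternalMonHost: '172.16.1.7, 172.16.1.8'              # placeholder monitor IPs
      # Uncommented form of the new option above, for pre-Jewel / RHCS 1.3 clusters
      RbdDefaultFeatures: 1
      CinderEnableIscsiBackend: false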
diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml
index 8cfbab6d..da607a72 100644
--- a/environments/puppet-pacemaker.yaml
+++ b/environments/puppet-pacemaker.yaml
@@ -1,7 +1,6 @@
# An environment which enables configuration of an
# Overcloud controller with Pacemaker.
resource_registry:
- OS::TripleO::ControllerConfig: ../puppet/controller-config-pacemaker.yaml
OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
@@ -13,6 +12,10 @@ resource_registry:
OS::TripleO::Services::RabbitMQ: ../puppet/services/pacemaker/rabbitmq.yaml
OS::TripleO::Services::HAproxy: ../puppet/services/pacemaker/haproxy.yaml
OS::TripleO::Services::Pacemaker: ../puppet/services/pacemaker.yaml
+ OS::TripleO::Services::PacemakerRemote: ../puppet/services/pacemaker_remote.yaml
OS::TripleO::Services::Redis: ../puppet/services/pacemaker/database/redis.yaml
OS::TripleO::Services::MySQL: ../puppet/services/pacemaker/database/mysql.yaml
# Services that are disabled by default (use relevant environment files):
+
+ # Services that are disabled for HA deployments with pacemaker
+ OS::TripleO::Services::Keepalived: OS::Heat::None
diff --git a/environments/services/barbican.yaml b/environments/services/barbican.yaml
new file mode 100644
index 00000000..1735646a
--- /dev/null
+++ b/environments/services/barbican.yaml
@@ -0,0 +1,4 @@
+# A Heat environment file which can be used to enable
+# Barbican with the default secret store backend.
+resource_registry:
+ OS::TripleO::Services::BarbicanApi: ../../puppet/services/barbican-api.yaml
diff --git a/environments/services/ceph-mds.yaml b/environments/services/ceph-mds.yaml
new file mode 100644
index 00000000..2b51374c
--- /dev/null
+++ b/environments/services/ceph-mds.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::CephMds: ../../puppet/services/ceph-mds.yaml
\ No newline at end of file
diff --git a/environments/services/ceph-rbdmirror.yaml b/environments/services/ceph-rbdmirror.yaml
new file mode 100644
index 00000000..b350e4c5
--- /dev/null
+++ b/environments/services/ceph-rbdmirror.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::CephRbdMirror: ../../puppet/services/pacemaker/ceph-rbdmirror.yaml
diff --git a/environments/services/disable-ceilometer-api.yaml b/environments/services/disable-ceilometer-api.yaml
new file mode 100644
index 00000000..94cd8d5d
--- /dev/null
+++ b/environments/services/disable-ceilometer-api.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::CeilometerApi: OS::Heat::None
diff --git a/environments/services/ec2-api.yaml b/environments/services/ec2-api.yaml
new file mode 100644
index 00000000..d751ba23
--- /dev/null
+++ b/environments/services/ec2-api.yaml
@@ -0,0 +1,3 @@
+# A Heat environment file which can be used to enable EC2-API service.
+resource_registry:
+ OS::TripleO::Services::Ec2Api: ../../puppet/services/ec2-api.yaml
diff --git a/environments/services/etcd.yaml b/environments/services/etcd.yaml
new file mode 100644
index 00000000..08d54d58
--- /dev/null
+++ b/environments/services/etcd.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::Etcd: ../../puppet/services/etcd.yaml
diff --git a/environments/services/panko.yaml b/environments/services/panko.yaml
new file mode 100644
index 00000000..28bf99f6
--- /dev/null
+++ b/environments/services/panko.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml
diff --git a/environments/services/zaqar.yaml b/environments/services/zaqar.yaml
new file mode 100644
index 00000000..ee137925
--- /dev/null
+++ b/environments/services/zaqar.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::Zaqar: ../../puppet/services/zaqar.yaml
diff --git a/environments/sshd-banner.yaml b/environments/sshd-banner.yaml
new file mode 100644
index 00000000..041c0990
--- /dev/null
+++ b/environments/sshd-banner.yaml
@@ -0,0 +1,13 @@
+resource_registry:
+ OS::TripleO::Services::Sshd: ../puppet/services/sshd.yaml
+
+parameter_defaults:
+ BannerText: |
+ ******************************************************************
+ * This system is for the use of authorized users only. Usage of *
+ * this system may be monitored and recorded by system personnel. *
+ * Anyone using this system expressly consents to such monitoring *
+ * and is advised that if such monitoring reveals possible *
+ * evidence of criminal activity, system personnel may provide *
+ * the evidence from such monitoring to law enforcement officials.*
+ ******************************************************************
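Deployments that need site-specific wording can simply override the parameter; the text below is an example only:

    parameter_defaults:
      BannerText: |
        Authorized users only. Activity on this system may be monitored and reported.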
diff --git a/environments/storage-environment.yaml b/environments/storage-environment.yaml
index 8cf34622..8e02c300 100644
--- a/environments/storage-environment.yaml
+++ b/environments/storage-environment.yaml
@@ -34,18 +34,18 @@ parameter_defaults:
# CinderNfsServers: ''
- #### GLANCE FILE BACKEND PACEMAKER SETTINGS (used for mounting NFS) ####
+ #### GLANCE NFS SETTINGS ####
- ## Whether to make Glance 'file' backend a mount managed by Pacemaker
- # GlanceFilePcmkManage: false
- ## File system type of the mount
- # GlanceFilePcmkFstype: nfs
- ## Pacemaker mount point, e.g. '192.168.122.1:/export/glance' for NFS
- ## (If using IPv6, use both double- and single-quotes,
- ## e.g. "'[fdd0::1]:/export/glance'")
- # GlanceFilePcmkDevice: ''
- ## Options for the mount managed by Pacemaker
- # GlanceFilePcmkOptions: ''
+ ## Make sure to set `GlanceBackend: file` when enabling NFS
+ ##
+ ## Whether to make the Glance 'file' backend an NFS mount
+ # GlanceNfsEnabled: false
+ ## NFS share for image storage, e.g. '192.168.122.1:/export/glance'
+ ## (If using IPv6, use both double- and single-quotes,
+ ## e.g. "'[fdd0::1]:/export/glance'")
+ # GlanceNfsShare: ''
+ ## Mount options for the NFS image storage mount point
+ # GlanceNfsOptions: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
#### CEPH SETTINGS ####
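An illustrative, uncommented variant of the Glance NFS settings above (the share address is a placeholder):

    parameter_defaults:
      GlanceBackend: file
      GlanceNfsEnabled: true
      GlanceNfsShare: '192.168.122.1:/export/glance'
      GlanceNfsOptions: 'intr,context=system_u:object_r:glance_var_lib_t:s0'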
diff --git a/environments/tls-endpoints-public-dns.yaml b/environments/tls-endpoints-public-dns.yaml
index f94a7726..a02c479a 100644
--- a/environments/tls-endpoints-public-dns.yaml
+++ b/environments/tls-endpoints-public-dns.yaml
@@ -5,6 +5,9 @@ parameter_defaults:
AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
@@ -14,10 +17,12 @@ parameter_defaults:
CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
- GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
@@ -47,12 +52,27 @@ parameter_defaults:
NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
diff --git a/environments/tls-endpoints-public-ip.yaml b/environments/tls-endpoints-public-ip.yaml
index eb2a23b4..bf4d4f41 100644
--- a/environments/tls-endpoints-public-ip.yaml
+++ b/environments/tls-endpoints-public-ip.yaml
@@ -5,6 +5,9 @@ parameter_defaults:
AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
AodhPublic: {protocol: 'https', port: '13042', host: 'IP_ADDRESS'}
+ BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'IP_ADDRESS'}
CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'IP_ADDRESS'}
@@ -14,10 +17,12 @@ parameter_defaults:
CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderPublic: {protocol: 'https', port: '13776', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'IP_ADDRESS'}
GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlancePublic: {protocol: 'https', port: '13292', host: 'IP_ADDRESS'}
- GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
GnocchiPublic: {protocol: 'https', port: '13041', host: 'IP_ADDRESS'}
@@ -47,12 +52,27 @@ parameter_defaults:
NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaPublic: {protocol: 'https', port: '13774', host: 'IP_ADDRESS'}
+ NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'IP_ADDRESS'}
NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'IP_ADDRESS'}
+ OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
+ PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'}
SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'}
SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+ ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'IP_ADDRESS'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'IP_ADDRESS'}
diff --git a/environments/tls-everywhere-endpoints-dns.yaml b/environments/tls-everywhere-endpoints-dns.yaml
index c3fbaf49..6193dde5 100644
--- a/environments/tls-everywhere-endpoints-dns.yaml
+++ b/environments/tls-everywhere-endpoints-dns.yaml
@@ -5,6 +5,9 @@ parameter_defaults:
AodhAdmin: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
AodhInternal: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanInternal: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
CeilometerAdmin: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
CeilometerInternal: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
@@ -14,10 +17,12 @@ parameter_defaults:
CinderAdmin: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
CinderInternal: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ Ec2ApiAdmin: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+ Ec2ApiInternal: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
GlanceAdmin: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
GlanceInternal: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
- GlanceRegistryInternal: {protocol: 'https', port: '9191', host: 'CLOUDNAME'}
GnocchiAdmin: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
GnocchiInternal: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
@@ -47,12 +52,27 @@ parameter_defaults:
NovaAdmin: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
NovaInternal: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaPlacementAdmin: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+ NovaPlacementInternal: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
NovaVNCProxyAdmin: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
NovaVNCProxyInternal: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ OctaviaAdmin: {protocol: 'https', port: '9876', host: 'CLOUDNAME'}
+ OctaviaInternal: {protocol: 'https', port: '9876', host: 'CLOUDNAME'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+ PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
SwiftAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
SwiftInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ ZaqarAdmin: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+ ZaqarInternal: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketInternal: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
diff --git a/environments/undercloud.yaml b/environments/undercloud.yaml
new file mode 100644
index 00000000..0fd01920
--- /dev/null
+++ b/environments/undercloud.yaml
@@ -0,0 +1,18 @@
+resource_registry:
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/noop.yaml
+ OS::TripleO::Network::Ports::ControlPlaneVipPort: ../deployed-server/deployed-neutron-port.yaml
+ OS::TripleO::Undercloud::Net::SoftwareConfig: ../net-config-undercloud.yaml
+ OS::TripleO::NodeExtraConfigPost: ../extraconfig/post_deploy/undercloud_post.yaml
+
+parameter_defaults:
+ StackAction: CREATE
+ SoftwareConfigTransport: POLL_SERVER_HEAT
+ NeutronTunnelTypes: []
+ NeutronBridgeMappings: ctlplane:br-ctlplane
+ NeutronAgentExtensions: []
+ NeutronFlatNetworks: '*'
+ NovaSchedulerAvailableFilters: 'tripleo_common.filters.list.tripleo_filters'
+ NovaSchedulerDefaultFilters: ['RetryFilter', 'TripleOCapabilitiesFilter', 'ComputeCapabilitiesFilter', 'AvailabilityZoneFilter', 'RamFilter', 'DiskFilter', 'ComputeFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']
+ NeutronDhcpAgentsPerNetwork: 2
+ HeatConvergenceEngine: false
+ HeatMaxResourcesPerStack: -1
diff --git a/environments/updates/README.md b/environments/updates/README.md
index 426d7329..93714ed8 100644
--- a/environments/updates/README.md
+++ b/environments/updates/README.md
@@ -10,3 +10,6 @@ Contents
**update-from-publicvip-on-ctlplane.yaml**
To be used if the PublicVirtualIP resource was deployed as an additional VIP on the 'ctlplane'.
+
+**update-from-deployed-server-newton.yaml**
+ To be used when updating from the Newton deployed-server templates.
diff --git a/environments/updates/update-from-deployed-server-newton.yaml b/environments/updates/update-from-deployed-server-newton.yaml
new file mode 100644
index 00000000..6fe3a4cb
--- /dev/null
+++ b/environments/updates/update-from-deployed-server-newton.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::DeployedServer::ControlPlanePort: ../../deployed-server/ctlplane-port.yaml
diff --git a/environments/updates/update-from-keystone-admin-internal-api.yaml b/environments/updates/update-from-keystone-admin-internal-api.yaml
index a5075300..97687c6a 100644
--- a/environments/updates/update-from-keystone-admin-internal-api.yaml
+++ b/environments/updates/update-from-keystone-admin-internal-api.yaml
@@ -2,32 +2,5 @@
# Keystone Admin API service is running on the Internal API network
parameter_defaults:
- ServiceNetMapDefaults:
- NeutronTenantNetwork: tenant
- CeilometerApiNetwork: internal_api
- MongodbNetwork: internal_api
- CinderApiNetwork: internal_api
- CinderIscsiNetwork: storage
- GlanceApiNetwork: storage
- GlanceRegistryNetwork: internal_api
+ ServiceNetMap:
KeystoneAdminApiNetwork: internal_api
- KeystonePublicApiNetwork: internal_api
- NeutronApiNetwork: internal_api
- HeatApiNetwork: internal_api
- NovaApiNetwork: internal_api
- NovaMetadataNetwork: internal_api
- NovaVncProxyNetwork: internal_api
- SwiftMgmtNetwork: storage_mgmt
- SwiftProxyNetwork: storage
- HorizonNetwork: internal_api
- MemcachedNetwork: internal_api
- RabbitmqNetwork: internal_api
- RedisNetwork: internal_api
- MysqlNetwork: internal_api
- CephClusterNetwork: storage_mgmt
- CephPublicNetwork: storage
- ControllerHostnameResolveNetwork: internal_api
- ComputeHostnameResolveNetwork: internal_api
- BlockStorageHostnameResolveNetwork: internal_api
- ObjectStorageHostnameResolveNetwork: internal_api
- CephStorageHostnameResolveNetwork: storage
diff --git a/environments/use-dns-for-vips.yaml b/environments/use-dns-for-vips.yaml
index daf07bc7..b700312f 100644
--- a/environments/use-dns-for-vips.yaml
+++ b/environments/use-dns-for-vips.yaml
@@ -1,5 +1,5 @@
# A Heat environment file which can be used to disable the writing of the VIPs
# to the /etc/hosts file in the overcloud. Use this in case you have a working
# DNS server that you will provide for the overcloud.
-resource_registry:
- OS::TripleO::Services::VipHosts: OS::Heat::None
+parameter_defaults:
+ AddVipsToEtcHosts: False
diff --git a/extraconfig/all_nodes/mac_hostname.j2.yaml b/extraconfig/all_nodes/mac_hostname.j2.yaml
index 75ffc9e6..fcf022ae 100644
--- a/extraconfig/all_nodes/mac_hostname.j2.yaml
+++ b/extraconfig/all_nodes/mac_hostname.j2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Example extra config for cluster config
diff --git a/extraconfig/all_nodes/random_string.j2.yaml b/extraconfig/all_nodes/random_string.j2.yaml
index 9ce2ca8a..77d4b381 100644
--- a/extraconfig/all_nodes/random_string.j2.yaml
+++ b/extraconfig/all_nodes/random_string.j2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Example extra config for cluster config
diff --git a/extraconfig/all_nodes/swap-partition.j2.yaml b/extraconfig/all_nodes/swap-partition.j2.yaml
index 36076b0c..b6fef79f 100644
--- a/extraconfig/all_nodes/swap-partition.j2.yaml
+++ b/extraconfig/all_nodes/swap-partition.j2.yaml
@@ -1,11 +1,7 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
-description: >
- Extra config to add swap space to nodes.
+description: Template file to add a swap partition to a node.
-# Parameters passed from the parent template - note if you maintain
-# out-of-tree templates they may require additional parameters if the
-# in-tree templates add a new role.
parameters:
servers:
type: json
@@ -14,9 +10,7 @@ parameters:
description: Swap partition label
default: 'swap1'
-
resources:
-
SwapConfig:
type: OS::Heat::SoftwareConfig
properties:
@@ -25,8 +19,13 @@ resources:
#!/bin/bash
set -eux
swap_partition=$(realpath /dev/disk/by-label/$swap_partition_label)
- swapon $swap_partition
- echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab
+ if [ -f "$swap_partition" ]; then
+ swapon $swap_partition
+ echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab
+ else
+ echo "$swap_partition needs to be a valid path"
+ echo "Check that $swap_partition_label is a valid partition label"
+ fi
inputs:
- name: swap_partition_label
description: Swap partition label
diff --git a/extraconfig/all_nodes/swap.j2.yaml b/extraconfig/all_nodes/swap.j2.yaml
index ce65dacb..044f817c 100644
--- a/extraconfig/all_nodes/swap.j2.yaml
+++ b/extraconfig/all_nodes/swap.j2.yaml
@@ -1,11 +1,7 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
-description: >
- Extra config to add swap space to nodes.
+description: Template file to add a swap file to a node.
-# Parameters passed from the parent template - note if you maintain
-# out-of-tree templates they may require additional parameters if the
-# in-tree templates add a new role.
parameters:
servers:
type: json
@@ -18,9 +14,7 @@ parameters:
description: Full path to location of swap file
default: '/swap'
-
resources:
-
SwapConfig:
type: OS::Heat::SoftwareConfig
properties:
diff --git a/extraconfig/nova_metadata/krb-service-principals.yaml b/extraconfig/nova_metadata/krb-service-principals.yaml
new file mode 100644
index 00000000..c66e6460
--- /dev/null
+++ b/extraconfig/nova_metadata/krb-service-principals.yaml
@@ -0,0 +1,84 @@
+heat_template_version: ocata
+description: 'Generates the relevant service principals for a server'
+
+parameters:
+ RoleData:
+ type: json
+ description: the list containing the 'role_data' output for the ServiceChain
+
+ # Coming from parameter_defaults
+ CloudName:
+ default: overcloud.localdomain
+ description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
+ type: string
+ CloudNameInternal:
+ default: overcloud.internalapi.localdomain
+ description: >
+ The DNS name of this cloud's internal API endpoint. E.g.
+ 'ci-overcloud.internalapi.tripleo.org'.
+ type: string
+ CloudNameStorage:
+ default: overcloud.storage.localdomain
+ description: >
+ The DNS name of this cloud's storage endpoint. E.g.
+ 'ci-overcloud.storage.tripleo.org'.
+ type: string
+ CloudNameStorageManagement:
+ default: overcloud.storagemgmt.localdomain
+ description: >
+ The DNS name of this cloud's storage management endpoint. E.g.
+ 'ci-overcloud.storagemgmt.tripleo.org'.
+ type: string
+ CloudNameCtlplane:
+ default: overcloud.ctlplane.localdomain
+ description: >
+ The DNS name of this cloud's provisioning (ctlplane) endpoint. E.g.
+ 'ci-overcloud.ctlplane.tripleo.org'.
+ type: string
+
+resources:
+
+ IncomingMetadataSettings:
+ type: OS::Heat::Value
+ properties:
+ value:
+ yaql:
+ # Filter out null values and values that don't contain
+ # 'metadata_settings', get the values from that key and keep the
+ # unique ones.
+ expression: list($.data.where($ != null).where($.containsKey('metadata_settings')).metadata_settings.flatten().distinct())
+ data: {get_param: RoleData}
+
+ # Generates entries for nova metadata with the following format:
+ # 'managed_service_<id>' : <service>/<fqdn>
+ # where the FQDN is selected depending on the requested network.
+ IndividualServices:
+ type: OS::Heat::Value
+ properties:
+ value:
+ yaql:
+ expression: let(fqdns => $.data.fqdns) -> dict($.data.metadata.where($ != null and $.type = 'vip').select([concat('managed_service_', $.service, $.network), concat($.service, '/', $fqdns.get($.network))]))
+ data:
+ metadata: {get_attr: [IncomingMetadataSettings, value]}
+ fqdns:
+ external: {get_param: CloudName}
+ internal_api: {get_param: CloudNameInternal}
+ storage: {get_param: CloudNameStorage}
+ storage_mgmt: {get_param: CloudNameStorageManagement}
+ ctlplane: {get_param: CloudNameCtlplane}
+
+ CompactServices:
+ type: OS::Heat::Value
+ properties:
+ value:
+ yaql:
+ expression: dict($.data.where($ != null and $.type = 'node').select([$.service, $.network.replace('_', '')]).groupBy($[0], $[1]))
+ data: {get_attr: [IncomingMetadataSettings, value]}
+
+outputs:
+ metadata:
+ description: actual metadata entries that will be passed to the server.
+ value:
+ map_merge:
+ - {get_attr: [IndividualServices, value]}
+ - compact_services: {get_attr: [CompactServices, value]}
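A hedged sketch of the data flow through the two yaql expressions above, using a hypothetical metadata_settings input; the output shown is approximate and for illustration only:

    # Hypothetical entries collected from RoleData metadata_settings:
    #   - {service: haproxy, network: external, type: vip}
    #   - {service: mysql, network: internal_api, type: node}
    # Approximate nova metadata produced:
    #   managed_service_haproxyexternal: haproxy/overcloud.localdomain
    #   compact_services:
    #     mysql: [internalapi]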
diff --git a/extraconfig/post_deploy/default.yaml b/extraconfig/post_deploy/default.yaml
index ddfe0243..4da54ead 100644
--- a/extraconfig/post_deploy/default.yaml
+++ b/extraconfig/post_deploy/default.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Extra Post Deployment Config'
parameters:
servers:
diff --git a/extraconfig/post_deploy/example.yaml b/extraconfig/post_deploy/example.yaml
index f83dff76..8ac7eb73 100644
--- a/extraconfig/post_deploy/example.yaml
+++ b/extraconfig/post_deploy/example.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Example extra config for post-deployment
diff --git a/extraconfig/post_deploy/example_run_on_update.yaml b/extraconfig/post_deploy/example_run_on_update.yaml
index 234488af..738e263b 100644
--- a/extraconfig/post_deploy/example_run_on_update.yaml
+++ b/extraconfig/post_deploy/example_run_on_update.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Example extra config for post-deployment, this re-runs every update
diff --git a/extraconfig/post_deploy/undercloud_post.sh b/extraconfig/post_deploy/undercloud_post.sh
new file mode 100755
index 00000000..8bcae1d3
--- /dev/null
+++ b/extraconfig/post_deploy/undercloud_post.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+set -eux
+
+ln -sf /etc/puppet/hiera.yaml /etc/hiera.yaml
+
+
+# WRITE OUT STACKRC
+if [ ! -e /root/stackrc ]; then
+ touch /root/stackrc
+ chmod 0600 /root/stackrc
+
+cat >> /root/stackrc <<-EOF_CAT
+export OS_PASSWORD=$admin_password
+export OS_AUTH_URL=$auth_url
+export OS_USERNAME=admin
+export OS_TENANT_NAME=admin
+export COMPUTE_API_VERSION=1.1
+export NOVA_VERSION=1.1
+export OS_BAREMETAL_API_VERSION=1.15
+export OS_NO_CACHE=True
+export OS_CLOUDNAME=undercloud
+EOF_CAT
+
+ if [ -n "$ssl_certificate" ]; then
+cat >> /root/stackrc <<-EOF_CAT
+export PYTHONWARNINGS="ignore:Certificate has no, ignore:A true SSLContext object is not available"
+EOF_CAT
+ fi
+fi
+
+source /root/stackrc
+
+if [ ! -f /root/.ssh/authorized_keys ]; then
+ sudo mkdir -p /root/.ssh
+ sudo chmod 700 /root/.ssh/
+ sudo touch /root/.ssh/authorized_keys
+ sudo chmod 600 /root/.ssh/authorized_keys
+fi
+
+if [ ! -f /root/.ssh/id_rsa ]; then
+ ssh-keygen -b 1024 -N '' -f /root/.ssh/id_rsa
+fi
+
+if ! grep "$(cat /root/.ssh/id_rsa.pub)" /root/.ssh/authorized_keys; then
+ cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
+fi
+
+PHYSICAL_NETWORK=ctlplane
+
+ctlplane_id=$(openstack network list -f csv -c ID -c Name --quote none | tail -n +2 | grep ctlplane | cut -d, -f1)
+subnet_ids=$(openstack subnet list -f csv -c ID --quote none | tail -n +2)
+subnet_id=
+
+for subnet_id in $subnet_ids; do
+ network_id=$(openstack subnet show -f value -c network_id $subnet_id)
+ if [ "$network_id" = "$ctlplane_id" ]; then
+ break
+ fi
+done
+
+net_create=1
+if [ -n "$subnet_id" ]; then
+ cidr=$(openstack subnet show $subnet_id -f value -c cidr)
+ if [ "$cidr" = "$undercloud_network_cidr" ]; then
+ net_create=0
+ else
+ echo "New cidr $undercloud_network_cidr does not equal old cidr $cidr"
+ echo "Will attempt to delete and recreate subnet $subnet_id"
+ fi
+fi
+
+if [ "$net_create" -eq "1" ]; then
+ # Delete the subnet and network to make sure it doesn't already exist
+ if openstack subnet list | grep start; then
+ openstack subnet delete $(openstack subnet list | grep start | awk '{print $4}')
+ fi
+ if openstack network show ctlplane; then
+ openstack network delete ctlplane
+ fi
+
+
+ NETWORK_ID=$(openstack network create --provider-network-type=flat --provider-physical-network=ctlplane ctlplane | grep " id " | awk '{print $4}')
+
+ NAMESERVER_ARG=""
+ if [ -n "${undercloud_nameserver:-}" ]; then
+ NAMESERVER_ARG="--dns-nameserver $undercloud_nameserver"
+ fi
+
+ openstack subnet create --network=$NETWORK_ID \
+ --gateway=$undercloud_network_gateway \
+ --subnet-range=$undercloud_network_cidr \
+ --allocation-pool start=$undercloud_dhcp_start,end=$undercloud_dhcp_end \
+ --host-route destination=169.254.169.254/32,gateway=$local_ip \
+ $NAMESERVER_ARG ctlplane
+fi
+
+# Disable nova quotas
+openstack quota set --cores -1 --instances -1 --ram -1 $(openstack project show admin | awk '$2=="id" {print $4}')
+
+# MISTRAL WORKFLOW CONFIGURATION
+if [ "$(hiera mistral_api_enabled)" = "true" ]; then
+ # load workflows
+ for workbook in $(openstack workbook list | grep tripleo | cut -f 2 -d ' '); do
+ openstack workbook delete $workbook
+ done
+ for workflow in $(openstack workflow list | grep tripleo | cut -f 2 -d ' '); do
+ openstack workflow delete $workflow
+ done
+ for workbook in $(ls /usr/share/openstack-tripleo-common/workbooks/*); do
+ openstack workbook create $workbook
+ done
+
+ # Store the SNMP password in a mistral environment
+ if ! openstack workflow env show tripleo.undercloud-config &>/dev/null; then
+ TMP_MISTRAL_ENV=$(mktemp)
+ echo "{\"name\": \"tripleo.undercloud-config\", \"variables\": {\"undercloud_ceilometer_snmpd_password\": \"$snmp_readonly_user_password\"}}" > $TMP_MISTRAL_ENV
+ openstack workflow env create $TMP_MISTRAL_ENV
+ fi
+
+fi
+
+# IP forwarding is needed to allow the overcloud nodes access to the outside
+# internet in cases where they are on an isolated network.
+sysctl -w net.ipv4.ip_forward=1
+# Make it persistent
+echo "net.ipv4.ip_forward=1" > /etc/sysctl.d/ip-forward.conf
diff --git a/extraconfig/post_deploy/undercloud_post.yaml b/extraconfig/post_deploy/undercloud_post.yaml
new file mode 100644
index 00000000..38a9181e
--- /dev/null
+++ b/extraconfig/post_deploy/undercloud_post.yaml
@@ -0,0 +1,93 @@
+heat_template_version: ocata
+
+description: >
+ Post-deployment for the TripleO undercloud
+
+parameters:
+ servers:
+ type: json
+ DeployedServerPortMap:
+ default: {}
+ type: json
+ UndercloudDhcpRangeStart:
+ type: string
+ default: '192.168.24.5'
+ UndercloudDhcpRangeEnd:
+ type: string
+ default: '192.168.24.24'
+ UndercloudNetworkCidr:
+ type: string
+ default: '192.168.24.0/24'
+ UndercloudNetworkGateway:
+ type: string
+ default: '192.168.24.1'
+ UndercloudNameserver:
+ type: string
+ default: ''
+ AdminPassword: #supplied by tripleo-undercloud-passwords.yaml
+ type: string
+ description: The password for the keystone admin account, used for monitoring, querying neutron etc.
+ hidden: True
+ SSLCertificate:
+ description: >
+ The content of the SSL certificate (without Key) in PEM format.
+ type: string
+ default: ""
+ hidden: True
+ SnmpdReadonlyUserPassword:
+ description: The user password for SNMPd with readonly rights running on all Overcloud nodes
+ type: string
+ hidden: true
+
+conditions:
+
+ ssl_disabled: {equals : [{get_param: SSLCertificate}, ""]}
+
+resources:
+
+ UndercloudPostConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: deploy_identifier
+ - name: local_ip
+ - name: undercloud_dhcp_start
+ - name: undercloud_dhcp_end
+ - name: undercloud_network_cidr
+ - name: undercloud_network_gateway
+ - name: undercloud_nameserver
+ - name: admin_password
+ - name: auth_url
+ - name: snmp_readonly_user_password
+ config: {get_file: ./undercloud_post.sh}
+
+ UndercloudPostDeployment:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: UndercloudPostConfig}
+ input_values:
+ local_ip: {get_param: [DeployedServerPortMap, 'control_virtual_ip', fixed_ips, 0, ip_address]}
+ undercloud_dhcp_start: {get_param: UndercloudDhcpRangeStart}
+ undercloud_dhcp_end: {get_param: UndercloudDhcpRangeEnd}
+ undercloud_network_cidr: {get_param: UndercloudNetworkCidr}
+ undercloud_network_gateway: {get_param: UndercloudNetworkGateway}
+ undercloud_nameserver: {get_param: UndercloudNameserver}
+ ssl_certificate: {get_param: SSLCertificate}
+ admin_password: {get_param: AdminPassword}
+ snmp_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
+ # if SSL is enabled we use the public virtual ip as the stackrc endpoint
+ auth_url:
+ if:
+ - ssl_disabled
+ - list_join:
+ - ''
+ - - 'http://'
+ - {get_param: [DeployedServerPortMap, 'control_virtual_ip', fixed_ips, 0, ip_address]}
+ - ':5000/v2.0'
+ - list_join:
+ - ''
+ - - 'https://'
+ - {get_param: [DeployedServerPortMap, 'public_virtual_ip', fixed_ips, 0, ip_address]}
+ - ':13000/v2.0'
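For reference, a minimal hedged example of the DeployedServerPortMap structure that the local_ip and auth_url lookups above expect (addresses are placeholders):

    parameter_defaults:
      DeployedServerPortMap:
        control_virtual_ip:
          fixed_ips:
            - ip_address: 192.168.24.1
        public_virtual_ip:
          fixed_ips:
            - ip_address: 192.168.24.2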
diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
index 7c65bd8b..fdf2e957 100644
--- a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
RHEL Registration and unregistration software deployments.
diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
index 1c9acd2b..2650a967 100644
--- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
+++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
@@ -11,6 +11,7 @@ if [ -e $OK ] ; then
exit 0
fi
+retryCount=0
opts=
attach_opts=
sat5_opts=
@@ -96,12 +97,28 @@ if [ -n "${REG_TYPE:-}" ]; then
opts="$opts --type=$REG_TYPE"
fi
+function retry() {
+ if [[ $retryCount < 3 ]]; then
+ $@
+ if ! [[ $? == 0 ]]; then
+ retryCount=$(echo $retryCount + 1 | bc)
+ echo "WARN: Failed to connect when running '$@', retrying..."
+ retry $@
+ else
+ retryCount=0
+ fi
+ else
+ echo "ERROR: Failed to connect after 3 attempts when running '$@'"
+ exit 1
+ fi
+}
+
function detect_satellite_version {
ping_api=$REG_SAT_URL/katello/api/ping
- if curl -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
+ if curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
echo Satellite 6 detected at $REG_SAT_URL
satellite_version=6
- elif curl -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+ elif curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
echo Satellite 5 detected at $REG_SAT_URL
satellite_version=5
else
@@ -112,28 +129,29 @@ function detect_satellite_version {
case "${REG_METHOD:-}" in
portal)
- subscription-manager register $opts
+ retry subscription-manager register $opts
if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then
- subscription-manager attach $attach_opts
+ retry subscription-manager attach $attach_opts
fi
- subscription-manager $repos
+ retry subscription-manager repos --disable '*'
+ retry subscription-manager $repos
;;
satellite)
detect_satellite_version
if [ "$satellite_version" = "6" ]; then
repos="$repos --enable ${satellite_repo}"
- curl -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+ curl --retry 3 --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
- subscription-manager register $opts
- subscription-manager $repos
- yum install -y katello-agent || true # needed for errata reporting to satellite6
+ retry subscription-manager register $opts
+ retry subscription-manager $repos
+ retry yum install -y katello-agent || true # needed for errata reporting to satellite6
katello-package-upload
- subscription-manager repos --disable ${satellite_repo}
+ retry subscription-manager repos --disable ${satellite_repo}
else
pushd /usr/share/rhn/
- curl -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+ curl --retry 3 --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
popd
- rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
+ retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
fi
;;
disable)
diff --git a/extraconfig/pre_network/ansible_host_config.ansible b/extraconfig/pre_network/ansible_host_config.ansible
new file mode 100644
index 00000000..c126c1a1
--- /dev/null
+++ b/extraconfig/pre_network/ansible_host_config.ansible
@@ -0,0 +1,58 @@
+---
+- name: Configuration to be applied before rebooting the node
+ connection: local
+ hosts: localhost
+
+ tasks:
+ # Kernel Args Configuration
+ - block:
+ - name: Ensure the kernel args ( {{ _KERNEL_ARGS_ }} ) are present as TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS
+ lineinfile:
+ dest: /etc/default/grub
+ regexp: '^TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS.*'
+ insertafter: '^GRUB_CMDLINE_LINUX.*'
+ line: 'TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS=" {{ _KERNEL_ARGS_ }} "'
+ - name: Add TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS to the GRUB_CMDLINE_LINUX parameter
+ lineinfile:
+ dest: /etc/default/grub
+ line: 'GRUB_CMDLINE_LINUX="${GRUB_CMDLINE_LINUX:+$GRUB_CMDLINE_LINUX }${TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS}"'
+ insertafter: '^TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS.*'
+ - name: Generate grub config file
+ command: grub2-mkconfig -o /boot/grub2/grub.cfg
+ become: true
+ when: _KERNEL_ARGS_|default("") != ""
+
+ # Tune-d Configuration
+ - block:
+ - name: Tune-d Configuration
+ lineinfile:
+ dest: /etc/tuned/cpu-partitioning-variables.conf
+ regexp: '^isolated_cores=.*'
+ line: 'isolated_cores={{ _HOST_CPUS_LIST_ }}'
+ when: _HOST_CPUS_LIST_|default("") != ""
+
+ - name: Tune-d profile activation
+ shell: tuned-adm profile {{ _TUNED_PROFILE_NAME_ }}
+ become: true
+ when: _TUNED_PROFILE_NAME_|default("") != ""
+
+ # Provisioning Network workaround
+ # The script runs before os-net-config, so at that point only the Provisioning network has an IP
+ # BOOTPROTO in every interface config file (except the provisioning one) is set to "none" so the reboot does not fail while trying to acquire IPs on the other networks
+ - block:
+ - find:
+ paths: /etc/sysconfig/network-scripts/
+ patterns: ifcfg-*
+ register: ifcfg_files
+
+ - replace:
+ dest: "{{ item.path }}"
+ regexp: '^BOOTPROTO=.*'
+ replace: 'BOOTPROTO=none'
+ when:
+ - item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') != "lo"
+ # This condition will list all the interfaces except the one with valid IP (which is Provisioning network at this stage)
+ # Simpler Version - hostvars[inventory_hostname]['ansible_' + iface_name ]['ipv4']['address'] is undefined
+ - hostvars[inventory_hostname]['ansible_' + item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') ]['ipv4']['address'] is undefined
+ with_items:
+ - "{{ ifcfg_files.files }}"
diff --git a/extraconfig/pre_network/config_then_reboot.yaml b/extraconfig/pre_network/config_then_reboot.yaml
new file mode 100644
index 00000000..bb0b9511
--- /dev/null
+++ b/extraconfig/pre_network/config_then_reboot.yaml
@@ -0,0 +1,48 @@
+heat_template_version: ocata
+
+description: >
+ Do some configuration, then reboot - sometimes needed for early-boot
+ changes such as modifying kernel configuration
+
+parameters:
+ server:
+ type: string
+
+resources:
+
+ SomeConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ echo "did some config before reboot" > /root/pre-reboot-config
+
+ SomeDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ name: SomeDeployment
+ server: {get_param: server}
+ config: {get_resource: SomeConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+
+ RebootConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ # Stop os-collect-config to avoid any race collecting another
+ # deployment before reboot happens
+ systemctl stop os-collect-config.service
+ /sbin/reboot
+
+ RebootDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: SomeDeployment
+ properties:
+ name: RebootDeployment
+ server: {get_param: server}
+ config: {get_resource: RebootConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+ signal_transport: NO_SIGNAL
diff --git a/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml b/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml
new file mode 100644
index 00000000..4ad53cb8
--- /dev/null
+++ b/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml
@@ -0,0 +1,100 @@
+heat_template_version: ocata
+
+description: >
+ Do some configuration, then reboot - sometimes needed for early-boot
+ changes such as modifying kernel configuration
+
+parameters:
+ server:
+ type: string
+ {{role}}KernelArgs:
+ type: string
+ default: ""
+ {{role}}TunedProfileName:
+ type: string
+ default: ""
+ {{role}}HostCpusList:
+ type: string
+ default: ""
+
+conditions:
+ param_exists:
+ or:
+ - not:
+ equals:
+ - get_param: {{role}}KernelArgs
+ - ""
+ - not:
+ equals:
+ - get_param: {{role}}TunedProfileName
+ - ""
+
+resources:
+
+ HostParametersConfig:
+ type: OS::Heat::SoftwareConfig
+ condition: param_exists
+ properties:
+ group: ansible
+ inputs:
+ - name: _KERNEL_ARGS_
+ - name: _TUNED_PROFILE_NAME_
+ - name: _HOST_CPUS_LIST_
+ outputs:
+ - name: result
+ config:
+ get_file: ansible_host_config.ansible
+
+ HostParametersDeployment:
+ type: OS::Heat::SoftwareDeployment
+ condition: param_exists
+ properties:
+ name: HostParametersDeployment
+ server: {get_param: server}
+ config: {get_resource: HostParametersConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+ input_values:
+ _KERNEL_ARGS_: {get_param: {{role}}KernelArgs}
+ _TUNED_PROFILE_NAME_: {get_param: {{role}}TunedProfileName}
+ _HOST_CPUS_LIST_: {get_param: {{role}}HostCpusList}
+
+ RebootConfig:
+ type: OS::Heat::SoftwareConfig
+ condition: param_exists
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ # Stop os-collect-config to avoid any race collecting another
+ # deployment before reboot happens
+ systemctl stop os-collect-config.service
+ /sbin/reboot
+
+ RebootDeployment:
+ type: OS::Heat::SoftwareDeployment
+ condition: param_exists
+ depends_on: HostParametersDeployment
+ properties:
+ name: RebootDeployment
+ server: {get_param: server}
+ config: {get_resource: RebootConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+ signal_transport: NO_SIGNAL
+
+outputs:
+ result:
+ value:
+ get_attr: [HostParametersDeployment, result]
+ condition: param_exists
+ stdout:
+ value:
+ get_attr: [HostParametersDeployment, deploy_stdout]
+ condition: param_exists
+ stderr:
+ value:
+ get_attr: [HostParametersDeployment, deploy_stderr]
+ condition: param_exists
+ status_code:
+ value:
+ get_attr: [HostParametersDeployment, deploy_status_code]
+ condition: param_exists
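Note on the param_exists condition above: it is true only when {{role}}KernelArgs or {{role}}TunedProfileName is non-empty, so supplying {{role}}HostCpusList by itself does not trigger HostParametersDeployment or the reboot. A hypothetical environment snippet (illustrative names and values, assuming the role is Compute) that would activate it:

    parameter_defaults:
      ComputeKernelArgs: "intel_iommu=on iommu=pt"
      ComputeTunedProfileName: "cpu-partitioning"
      ComputeHostCpusList: "2-23"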
diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh
index f161c049..64c4457e 100644
--- a/extraconfig/tasks/major_upgrade_block_storage.sh
+++ b/extraconfig/tasks/major_upgrade_block_storage.sh
@@ -5,18 +5,7 @@
set -eu
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun"
- rpm -U --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+special_case_ovs_upgrade_if_needed
yum -y install python-zaqarclient # needed for os-collect-config
yum -y -q update
diff --git a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
index c87e6824..cf5d7a84 100644
--- a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
+++ b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Software-config for ceilometer configuration under httpd during upgrades
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
index e690a383..a745e723 100644
--- a/extraconfig/tasks/major_upgrade_ceph_storage.sh
+++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh
@@ -8,7 +8,9 @@ set -o pipefail
UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
-cat > $UPGRADE_SCRIPT << 'ENDOFCAT'
+declare -f special_case_ovs_upgrade_if_needed > $UPGRADE_SCRIPT
+# use >> here so we don't lose the declaration we added above
+cat >> $UPGRADE_SCRIPT << 'ENDOFCAT'
#!/bin/bash
### DO NOT MODIFY THIS FILE
### This file is automatically delivered to the ceph-storage nodes as part of the
@@ -49,19 +51,7 @@ timeout 60 bash -c "while kill -0 ${OSD_PIDS} 2> /dev/null; do
sleep 2;
done"
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun"
- rpm -U --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+special_case_ovs_upgrade_if_needed
# Update (Ceph to Jewel)
yum -y install python-zaqarclient # needed for os-collect-config
@@ -86,7 +76,7 @@ elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
# If on ext4, we need to enforce lower values for name and namespace len
# or ceph-osd will refuse to start, see: http://tracker.ceph.com/issues/16187
for OSD_ID in $OSD_IDS; do
- OSD_FS=$(findmnt -n -o FSTYPE -T /var/lib/ceph/osd/ceph-${OSD_ID})
+ OSD_FS=$(df -l --output=fstype /var/lib/ceph/osd/ceph-${OSD_ID} | tail -n +2)
if [ ${OSD_FS} = ext4 ]; then
crudini --set /etc/ceph/ceph.conf global osd_max_object_name_len 256
crudini --set /etc/ceph/ceph.conf global osd_max_object_namespace_len 64
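The declare -f pattern above serializes the body of special_case_ovs_upgrade_if_needed (defined in pacemaker_common_functions.sh, which the Heat template now prepends to this script) into the generated upgrade script, so the delivered file is self-contained. A minimal sketch of the mechanism with a hypothetical helper function:

    greet() { echo "hello $1"; }
    declare -f greet > /tmp/generated.sh     # writes the full function definition
    echo 'greet world' >> /tmp/generated.sh  # append a call, as the heredoc above does
    bash /tmp/generated.sh                   # prints: hello world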
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
index b65f6915..8bdff5e7 100755
--- a/extraconfig/tasks/major_upgrade_check.sh
+++ b/extraconfig/tasks/major_upgrade_check.sh
@@ -18,14 +18,8 @@ check_pcsd()
fi
}
-check_disk_for_mysql_dump()
+mysql_need_update()
{
- # Where to backup current database if mysql need to be upgraded
- MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
- MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
- # Spare disk ratio for extra safety
- MYSQL_BACKUP_SIZE_RATIO=1.2
-
# Shall we upgrade mysql data directory during the stack upgrade?
if [ "$mariadb_do_major_upgrade" = "auto" ]; then
ret=$(is_mysql_upgrade_needed)
@@ -40,6 +34,17 @@ check_disk_for_mysql_dump()
else
DO_MYSQL_UPGRADE=1
fi
+}
+
+check_disk_for_mysql_dump()
+{
+ # Where to backup current database if mysql need to be upgraded
+ MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
+ MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
+ # Spare disk ratio for extra safety
+ MYSQL_BACKUP_SIZE_RATIO=1.2
+
+ mysql_need_update
if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh
index 950fe8d5..7a3e1073 100644
--- a/extraconfig/tasks/major_upgrade_compute.sh
+++ b/extraconfig/tasks/major_upgrade_compute.sh
@@ -18,24 +18,16 @@ set -eu
crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun"
- rpm -U --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+$(declare -f special_case_ovs_upgrade_if_needed)
+special_case_ovs_upgrade_if_needed
yum -y install python-zaqarclient # needed for os-collect-config
yum -y update
+# Due to bug #1640177 we need to restart the compute agent
+echo "Restarting openstack ceilometer agent compute"
+systemctl restart openstack-ceilometer-compute
+
ENDOFCAT
# ensure the permissions are OK
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
index 4a10fa7e..080831ab 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
@@ -2,8 +2,6 @@
set -eu
-cluster_sync_timeout=1800
-
check_cluster
check_pcsd
if [[ -n $(is_bootstrap_node) ]]; then
@@ -19,6 +17,11 @@ check_disk_for_mysql_dump
# at the end of this script
if [[ -n $(is_bootstrap_node) ]]; then
STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
+ # We create this empty file if stonith was set to true so we can re-enable stonith in step 2
+ rm -f /var/tmp/stonith-true
+ if [ $STONITH_STATE == "true" ]; then
+ touch /var/tmp/stonith-true
+ fi
pcs property set stonith-enabled=false
fi
@@ -28,182 +31,6 @@ fi
# services will be restart as there are no other constraints
if [[ -n $(is_bootstrap_node) ]]; then
migrate_full_to_ng_ha
- rabbitmq_mitaka_newton_upgrade
-fi
-
-# After migrating the cluster to HA-NG the services not under pacemaker's control
-# are still up and running. We need to stop them explicitely otherwise during the yum
-# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
-# is going to take a long time because rabbit is down. By having the service stopped
-# systemctl try-restart is a noop
-
-for service in $(services_to_migrate); do
- manage_systemd_service stop "${service%%-clone}"
- # So the reason for not reusing check_resource_systemd is that
- # I have observed systemctl is-active returning unknown with at least
- # one service that was stopped (See LP 1627254)
- timeout=600
- tstart=$(date +%s)
- tend=$(( $tstart + $timeout ))
- check_interval=3
- while (( $(date +%s) < $tend )); do
- if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
- echo "$service still active, sleeping $check_interval seconds."
- sleep $check_interval
- else
- # we do not care if it is inactive, unknown or failed as long as it is
- # not running
- break
- fi
-
- done
-done
-
-# In case the mysql package is updated, the database on disk must be
-# upgraded as well. This typically needs to happen during major
-# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
-#
-# Because in-place upgrades are not supported across 2+ major versions
-# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
-# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
-#
-# The default is to determine automatically if upgrade is needed based
-# on mysql package versionning, but this can be overriden manually
-# to support specific upgrade scenario
-
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
- cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
- fi
-
- pcs resource disable redis
- check_resource redis stopped 600
- pcs resource disable rabbitmq
- check_resource rabbitmq stopped 600
- pcs resource disable galera
- check_resource galera stopped 600
- pcs resource disable openstack-cinder-volume
- check_resource openstack-cinder-volume stopped 600
- # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
- pcs resource disable $vip
- check_resource $vip stopped 60
- done
- pcs cluster stop --all
-fi
-
-
-# Swift isn't controlled by pacemaker
-systemctl_swift stop
-
-tstart=$(date +%s)
-while systemctl is-active pacemaker; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_sync_timeout )) ; then
- echo_error "ERROR: cluster shutdown timed out"
- exit 1
- fi
-done
-
-# The reason we do an sql dump *and* we move the old dir out of
-# the way is because it gives us an extra level of safety in case
-# something goes wrong during the upgrade. Once the restore is
-# successful we go ahead and remove it. If the directory exists
-# we bail out as it means the upgrade process had issues in the last
-# run.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
- echo_error "ERROR: mysql backup dir already exist"
- exit 1
- fi
- mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
+ rabbitmq_newton_ocata_upgrade
fi
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun"
- rpm -U --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y -q update
-
-# We need to ensure at least those two configuration settings, otherwise
-# mariadb 10.1+ won't activate galera replication.
-# wsrep_cluster_address must only be set though, its value does not
-# matter because it's overriden by the galera resource agent.
-cat >> /etc/my.cnf.d/galera.cnf <<EOF
-[mysqld]
-wsrep_on = ON
-wsrep_cluster_address = gcomm://localhost
-EOF
-
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- # Scripts run via heat have no HOME variable set and this confuses
- # mysqladmin
- export HOME=/root
-
- mkdir /var/lib/mysql || /bin/true
- chown mysql:mysql /var/lib/mysql
- chmod 0755 /var/lib/mysql
- restorecon -R /var/lib/mysql/
- mysql_install_db --datadir=/var/lib/mysql --user=mysql
- chown -R mysql:mysql /var/lib/mysql/
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- mysqld_safe --wsrep-new-cluster &
- # We have a populated /root/.my.cnf with root/password here so
- # we need to temporarily rename it because the newly created
- # db is empty and no root password is set
- mv /root/.my.cnf /root/.my.cnf.temporary
- timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
- mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
- mv /root/.my.cnf.temporary /root/.my.cnf
- mysqladmin -u root shutdown
- # The import was successful so we may remove the folder
- rm -r "$MYSQL_BACKUP_DIR"
- fi
-fi
-
-# If we reached here without error we can safely blow away the origin
-# mysql dir from every controller
-
-# TODO: What if the upgrade fails on the bootstrap node, but not on
-# this controller. Data may be lost.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Let's reset the stonith back to true if it was true, before starting the cluster
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ $STONITH_STATE == "true" ]; then
- pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
- fi
-fi
-
-# Pin messages sent to compute nodes to kilo, these will be upgraded later
-crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
-# LP: 1615035, required only for M/N upgrade.
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
-# LP: 1627450, required only for M/N upgrade
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
-
-crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
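The /var/tmp/stonith-true marker introduced above replaces the STONITH_STATE variable check that used to live at the end of this script: now that stonith is re-enabled in a later step (major_upgrade_controller_pacemaker_2.sh), a shell variable cannot carry that state across the separate script runs, so an empty marker file is used instead.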
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
index b3a0098c..6bfe1239 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
@@ -2,68 +2,175 @@
set -eu
-cluster_form_timeout=600
-cluster_settle_timeout=1800
-galera_sync_timeout=600
+cluster_sync_timeout=1800
-if [[ -n $(is_bootstrap_node) ]]; then
- pcs cluster start --all
+# After migrating the cluster to HA-NG the services not under pacemaker's control
+# are still up and running. We need to stop them explicitly, otherwise during the yum
+# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
+# is going to take a long time because rabbit is down. By having the services stopped,
+# systemctl try-restart is a no-op.
+for service in $(services_to_migrate); do
+ manage_systemd_service stop "${service%%-clone}"
+ # So the reason for not reusing check_resource_systemd is that
+ # I have observed systemctl is-active returning unknown with at least
+ # one service that was stopped (See LP 1627254)
+ timeout=600
tstart=$(date +%s)
- while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_form_timeout )) ; then
- echo_error "ERROR: timed out forming the cluster"
- exit 1
- fi
+ tend=$(( $tstart + $timeout ))
+ check_interval=3
+ while (( $(date +%s) < $tend )); do
+ if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
+ echo "$service still active, sleeping $check_interval seconds."
+ sleep $check_interval
+ else
+ # we do not care if it is inactive, unknown or failed as long as it is
+ # not running
+ break
+ fi
+
done
+done
- if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
- echo_error "ERROR: timed out waiting for cluster to finish transition"
- exit 1
+# In case the mysql package is updated, the database on disk must be
+# upgraded as well. This typically needs to happen during major
+# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
+#
+# Because in-place upgrades are not supported across 2+ major versions
+# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
+# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
+#
+# The default is to determine automatically if upgrade is needed based
+# on mysql package versioning, but this can be overridden manually
+# to support specific upgrade scenarios
+
+# Calling this function will set the DO_MYSQL_UPGRADE variable which is used
+# later
+mysql_need_update
+
+if [[ -n $(is_bootstrap_node) ]]; then
+ if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
+ cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
fi
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
- pcs resource enable $vip
- check_resource_pacemaker $vip started 60
+ pcs resource disable redis
+ check_resource redis stopped 600
+ pcs resource disable rabbitmq
+ check_resource rabbitmq stopped 600
+ pcs resource disable galera
+ check_resource galera stopped 600
+ pcs resource disable openstack-cinder-volume
+ check_resource openstack-cinder-volume stopped 600
+ # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
+ for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
+ pcs resource disable $vip
+ check_resource $vip stopped 60
done
+ pcs cluster stop --all
fi
-start_or_enable_service galera
-check_resource galera started 600
-start_or_enable_service redis
-check_resource redis started 600
-# We need mongod which is now a systemd service up and running before calling
-# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
-# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
-# we should be good.
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm
-systemctl start mongod
-check_resource mongod started 600
-if [[ -n $(is_bootstrap_node) ]]; then
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo_error "ERROR galera sync timed out"
- exit 1
- fi
- done
+# Swift isn't controlled by pacemaker
+systemctl_swift stop
+
+tstart=$(date +%s)
+while systemctl is-active pacemaker; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > cluster_sync_timeout )) ; then
+ echo_error "ERROR: cluster shutdown timed out"
+ exit 1
+ fi
+done
+
+# The reason we do an SQL dump *and* we move the old dir out of
+# the way is because it gives us an extra level of safety in case
+# something goes wrong during the upgrade. Once the restore is
+# successful we go ahead and remove it. If the directory exists
+# we bail out as it means the upgrade process had issues in the last
+# run.
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
+ echo_error "ERROR: mysql backup dir already exist"
+ exit 1
+ fi
+ mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
+fi
- # Run all the db syncs
- # TODO: check if this can be triggered in puppet and removed from here
- ceilometer-dbsync --config-file=/etc/ceilometer/ceilometer.conf
- cinder-manage db sync
- glance-manage --config-file=/etc/glance/glance-registry.conf db_sync
- heat-manage --config-file /etc/heat/heat.conf db_sync
- keystone-manage db_sync
- neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
- nova-manage db sync
- nova-manage api_db sync
- nova-manage db online_data_migrations
- gnocchi-upgrade
- sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+special_case_ovs_upgrade_if_needed
+
+yum -y install python-zaqarclient # needed for os-collect-config
+yum -y -q update
+
+# We need to ensure at least those two configuration settings, otherwise
+# mariadb 10.1+ won't activate galera replication.
+# wsrep_cluster_address only needs to be set; its value does not
+# matter because it's overridden by the galera resource agent.
+cat >> /etc/my.cnf.d/galera.cnf <<EOF
+[mysqld]
+wsrep_on = ON
+wsrep_cluster_address = gcomm://localhost
+EOF
+
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ # Scripts run via heat have no HOME variable set and this confuses
+ # mysqladmin
+ export HOME=/root
+
+ mkdir /var/lib/mysql || /bin/true
+ chown mysql:mysql /var/lib/mysql
+ chmod 0755 /var/lib/mysql
+ restorecon -R /var/lib/mysql/
+ mysql_install_db --datadir=/var/lib/mysql --user=mysql
+ chown -R mysql:mysql /var/lib/mysql/
+
+ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ mysqld_safe --wsrep-new-cluster &
+ # We have a populated /root/.my.cnf with root/password here so
+ # we need to temporarily rename it because the newly created
+ # db is empty and no root password is set
+ mv /root/.my.cnf /root/.my.cnf.temporary
+ timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
+ mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
+ mv /root/.my.cnf.temporary /root/.my.cnf
+ mysqladmin -u root shutdown
+ # The import was successful so we may remove the folder
+ rm -r "$MYSQL_BACKUP_DIR"
+ fi
+fi
+
+# If we reached here without error we can safely blow away the original
+# mysql dir from every controller
+
+# TODO: What if the upgrade fails on the bootstrap node, but not on
+# this controller. Data may be lost.
+if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+ rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
+fi
+
+# Let's reset the stonith back to true if it was true, before starting the cluster
+if [[ -n $(is_bootstrap_node) ]]; then
+ if [ -f /var/tmp/stonith-true ]; then
+ pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
+ fi
+ rm -f /var/tmp/stonith-true
fi
+
+# Pin messages sent to compute nodes to kilo, these will be upgraded later
+crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
+# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
+# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
+crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
+# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
+# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
+crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
+# LP: 1615035, required only for M/N upgrade.
+crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
+# LP: 1627450, required only for M/N upgrade
+crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
+
+crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
+
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
index b653c7c7..a3cbd945 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
@@ -2,25 +2,67 @@
set -eu
-start_or_enable_service rabbitmq
-check_resource rabbitmq started 600
+cluster_form_timeout=600
+cluster_settle_timeout=1800
+galera_sync_timeout=600
+
+if [[ -n $(is_bootstrap_node) ]]; then
+ pcs cluster start --all
+
+ tstart=$(date +%s)
+ while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > cluster_form_timeout )) ; then
+ echo_error "ERROR: timed out forming the cluster"
+ exit 1
+ fi
+ done
+
+ if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
+ echo_error "ERROR: timed out waiting for cluster to finish transition"
+ exit 1
+ fi
+
+ for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
+ pcs resource enable $vip
+ check_resource_pacemaker $vip started 60
+ done
+fi
+
+start_or_enable_service galera
+check_resource galera started 600
start_or_enable_service redis
check_resource redis started 600
-start_or_enable_service openstack-cinder-volume
-check_resource openstack-cinder-volume started 600
-
+# We need mongod, which is now a systemd service, to be up and running before calling
+# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
+# yet, so ceilometer-dbsync may fail a couple of times before it is. As it retries indefinitely
+# we should be good.
+# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 we use systemctl directly for now
+systemctl start mongod
+check_resource mongod started 600
-# Swift isn't controled by pacemaker
-systemctl_swift start
+if [[ -n $(is_bootstrap_node) ]]; then
+ tstart=$(date +%s)
+ while ! clustercheck; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > galera_sync_timeout )) ; then
+ echo_error "ERROR galera sync timed out"
+ exit 1
+ fi
+ done
-# We need to start the systemd services we explicitely stopped at step _1.sh
-# FIXME: Should we let puppet during the convergence step do the service enabling or
-# should we add it here?
-services=$(services_to_migrate)
-if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
- services=${services%%openstack-sahara*}
+ # Run all the db syncs
+ # TODO: check if this can be triggered in puppet and removed from here
+ ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types
+ cinder-manage db sync
+ glance-manage db_sync
+ heat-manage --config-file /etc/heat/heat.conf db_sync
+ keystone-manage db_sync
+ neutron-db-manage upgrade heads
+ nova-manage db sync
+ nova-manage api_db sync
+ nova-manage db online_data_migrations
+ sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
fi
-for service in $services; do
- manage_systemd_service start "${service%%-clone}"
- check_resource_systemd "${service%%-clone}" started 600
-done
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
new file mode 100755
index 00000000..d2cb9553
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -eu
+
+start_or_enable_service rabbitmq
+check_resource rabbitmq started 600
+start_or_enable_service redis
+check_resource redis started 600
+start_or_enable_service openstack-cinder-volume
+check_resource openstack-cinder-volume started 600
+
+# start httpd so keystone is available for gnocchi
+# upgrade to run.
+systemctl start httpd
+
+# Swift isn't controlled by pacemaker
+systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
new file mode 100755
index 00000000..fa95f1f8
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+set -eu
+
+if [[ -n $(is_bootstrap_node) ]]; then
+ # run gnocchi upgrade
+ gnocchi-upgrade
+fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
new file mode 100755
index 00000000..d569084d
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -eu
+
+# We need to start the systemd services we explicitly stopped in step _2.sh
+# FIXME: Should we let puppet during the convergence step do the service enabling or
+# should we add it here?
+services=$(services_to_migrate)
+if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
+ services=${services%%openstack-sahara*}
+fi
+for service in $services; do
+ manage_systemd_service start "${service%%-clone}"
+ check_resource_systemd "${service%%-clone}" started 600
+done
diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh
index 750ad82c..d9d1b4d5 100644
--- a/extraconfig/tasks/major_upgrade_object_storage.sh
+++ b/extraconfig/tasks/major_upgrade_object_storage.sh
@@ -23,19 +23,8 @@ function systemctl_swift {
done
}
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun"
- rpm -U --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+$(declare -f special_case_ovs_upgrade_if_needed)
+special_case_ovs_upgrade_if_needed
systemctl_swift stop
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index 7c78d5ad..b63aafbd 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'Upgrade for Pacemaker deployments'
parameters:
@@ -97,7 +97,11 @@ resources:
depends_on: ControllerPacemakerUpgradeDeployment_Step1
properties:
group: script
- config: {get_file: major_upgrade_block_storage.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_block_storage.sh
BlockStorageUpgradeDeployment:
type: OS::Heat::SoftwareDeploymentGroup
@@ -113,7 +117,20 @@ resources:
config:
list_join:
- ''
- - - get_file: pacemaker_common_functions.sh
+ - - str_replace:
+ template: |
+ #!/bin/bash
+ upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
+ params:
+ UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
+ - str_replace:
+ template: |
+ #!/bin/bash
+ mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
+ params:
+ MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
+ - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_check.sh
- get_file: major_upgrade_pacemaker_migrations.sh
- get_file: major_upgrade_controller_pacemaker_2.sh
@@ -132,6 +149,63 @@ resources:
config:
list_join:
- ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_pacemaker_migrations.sh
+ - get_file: major_upgrade_controller_pacemaker_3.sh
+
+ ControllerPacemakerUpgradeDeployment_Step3:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: ControllerPacemakerUpgradeDeployment_Step2
+ properties:
+ servers: {get_param: [servers, Controller]}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
+ input_values: {get_param: input_values}
+
+ ControllerPacemakerUpgradeConfig_Step4:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_pacemaker_migrations.sh
+ - get_file: major_upgrade_controller_pacemaker_4.sh
+
+ ControllerPacemakerUpgradeDeployment_Step4:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: ControllerPacemakerUpgradeDeployment_Step3
+ properties:
+ servers: {get_param: [servers, Controller]}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step4}
+ input_values: {get_param: input_values}
+
+ ControllerPacemakerUpgradeConfig_Step5:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_pacemaker_migrations.sh
+ - get_file: major_upgrade_controller_pacemaker_5.sh
+
+ ControllerPacemakerUpgradeDeployment_Step5:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: ControllerPacemakerUpgradeDeployment_Step4
+ properties:
+ servers: {get_param: [servers, Controller]}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
+ input_values: {get_param: input_values}
+
+ ControllerPacemakerUpgradeConfig_Step6:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
- - str_replace:
template: |
#!/bin/bash
@@ -140,13 +214,12 @@ resources:
KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
- get_file: pacemaker_common_functions.sh
- get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_3.sh
+ - get_file: major_upgrade_controller_pacemaker_6.sh
- ControllerPacemakerUpgradeDeployment_Step3:
+ ControllerPacemakerUpgradeDeployment_Step6:
type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step2
+ depends_on: ControllerPacemakerUpgradeDeployment_Step5
properties:
servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
+ config: {get_resource: ControllerPacemakerUpgradeConfig_Step6}
input_values: {get_param: input_values}
-
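For orientation, each ControllerPacemakerUpgradeConfig_StepN above delivers one script per controller: list_join with an empty separator simply concatenates the referenced files (plus, for some steps, a str_replace preamble that injects Heat parameters as shell variables). Conceptually, the Step4 config is equivalent to:

    cat pacemaker_common_functions.sh \
        major_upgrade_pacemaker_migrations.sh \
        major_upgrade_controller_pacemaker_4.sh > step4_delivered.sh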
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
index f6aa3066..c308720b 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Upgrade for Pacemaker deployments'
parameters:
@@ -54,19 +54,28 @@ resources:
upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
params:
UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
+ - get_file: pacemaker_common_functions.sh
- get_file: major_upgrade_compute.sh
ObjectStorageDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: {get_file: major_upgrade_object_storage.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_object_storage.sh
CephStorageDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: {get_file: major_upgrade_ceph_storage.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_ceph_storage.sh
{% for role in roles %}
UpgradeInit{{role.name}}Deployment:
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
index 7c9083a4..ae22a1e7 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
@@ -83,7 +83,6 @@ function services_to_migrate {
openstack-cinder-api-clone
openstack-cinder-scheduler-clone
openstack-glance-api-clone
- openstack-glance-registry-clone
openstack-gnocchi-metricd-clone
openstack-gnocchi-statsd-clone
openstack-heat-api-cfn-clone
@@ -179,3 +178,23 @@ function disable_standalone_ceilometer_api {
fi
fi
}
+
+
+# This function will make sure that the rabbitmq ha policies are converted from newton to ocata
+# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}"
+# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}"
+# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances)
+# Note that changing an attribute like this makes the rabbitmq resource restart
+function rabbitmq_newton_ocata_upgrade {
+ if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then
+ # Number of controller is obtained by counting how many hostnames we
+ # have in controller_node_names hiera key
+ nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
+ nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
+ if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
+ echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues"
+ exit 1
+ fi
+ pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
+ fi
+}
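A quick worked example of the queue-count arithmetic above: integer division plus the remainder implements CEIL(N/2), and N itself comes from counting the commas in the controller_node_names hiera list and adding one.

    # nr_controllers -> nr_queues: 1->1, 2->1, 3->2, 4->2, 5->3
    for nr_controllers in 1 2 3 4 5; do
        echo "$nr_controllers -> $(( nr_controllers / 2 + (nr_controllers % 2) ))"
    done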
diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
index b9a87d33..45933fb7 100644
--- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
+++ b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Software-config for performing aodh data migration
diff --git a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
index 1c376285..a8d43663 100644
--- a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
+++ b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
@@ -48,7 +48,13 @@ $mongodb_replset = hiera('mongodb::server::replset')
$mongo_node_string = join($mongo_node_ips_with_port, ',')
$database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
-include ::ceilometer
+$rabbit_hosts = hiera('rabbitmq_node_ips', undef)
+$rabbit_port = hiera('ceilometer::rabbit_port', 5672)
+$rabbit_endpoints = suffix(any2array(normalize_ip_for_uri($rabbit_hosts)), ":${rabbit_port}")
+
+class { '::ceilometer' :
+ rabbit_hosts => $rabbit_endpoints,
+}
class {'::ceilometer::db':
database_connection => $database_connection,
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index 4f17b69a..aae4a2de 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -284,7 +284,7 @@ function systemctl_swift {
services=$(systemctl | grep openstack-swift- | grep running | awk '{print $1}')
;;
start)
- enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml 'enable_swift_storage')
+ enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml tripleo::profile::base::swift::storage::enable_swift_storage)
if [[ $enable_swift_storage != "true" ]]; then
services=( openstack-swift-proxy )
fi
@@ -297,3 +297,27 @@ function systemctl_swift {
manage_systemd_service $action $service
done
}
+
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+function special_case_ovs_upgrade_if_needed {
+ if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+ echo "Manual upgrade of openvswitch - restart in postun detected"
+ rm -rf OVS_UPGRADE
+ mkdir OVS_UPGRADE && pushd OVS_UPGRADE
+ echo "Attempting to downloading latest openvswitch with yumdownloader"
+ yumdownloader --resolve openvswitch
+ for pkg in $(ls -1 *.rpm); do
+ if rpm -U --test $pkg 2>&1 | grep "already installed" ; then
+ echo "Looks like newer version of $pkg is already installed, skipping"
+ else
+ echo "Updating $pkg with nopostun option"
+ rpm -U --replacepkgs --nopostun $pkg
+ fi
+ done
+ popd
+ else
+ echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
+ fi
+
+}
+
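Compared with the inline blocks it replaces throughout this diff, the shared function is also safe to re-run: it recreates OVS_UPGRADE from scratch and skips any package that rpm -U --test reports as already installed instead of failing on a second pass.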
diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh
index 3da7efec..49d39bc8 100755
--- a/extraconfig/tasks/pacemaker_resource_restart.sh
+++ b/extraconfig/tasks/pacemaker_resource_restart.sh
@@ -4,11 +4,14 @@ set -eux
# Run if pacemaker is running, we're the bootstrap node,
# and we're updating the deployment (not creating).
-if [[ -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
+
+RESTART_FOLDER="/var/lib/tripleo/pacemaker-restarts"
+
+if [[ -d "$RESTART_FOLDER" && -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
TIMEOUT=600
- SERVICES_TO_RESTART="$(ls /var/lib/tripleo/pacemaker-restarts)"
PCS_STATUS_OUTPUT="$(pcs status)"
+ SERVICES_TO_RESTART="$(ls $RESTART_FOLDER)"
for service in $SERVICES_TO_RESTART; do
if ! echo "$PCS_STATUS_OUTPUT" | grep $service; then
@@ -20,6 +23,11 @@ if [[ -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
for service in $SERVICES_TO_RESTART; do
echo "Restarting $service..."
pcs resource restart --wait=$TIMEOUT $service
- rm -f /var/lib/tripleo/pacemaker-restarts/$service
+ rm -f "$RESTART_FOLDER"/$service
done
+
+fi
+
+if [ $(systemctl is-active haproxy) = "active" ]; then
+ systemctl reload haproxy
fi
diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.yaml
index b62502f8..a63868c9 100644
--- a/extraconfig/tasks/post_puppet_pacemaker.yaml
+++ b/extraconfig/tasks/post_puppet_pacemaker.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Post-Puppet Config for Pacemaker deployments'
parameters:
diff --git a/extraconfig/tasks/post_puppet_pacemaker_restart.yaml b/extraconfig/tasks/post_puppet_pacemaker_restart.yaml
index 52760c87..475a6688 100644
--- a/extraconfig/tasks/post_puppet_pacemaker_restart.yaml
+++ b/extraconfig/tasks/post_puppet_pacemaker_restart.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Post-Puppet restart config for Pacemaker deployments'
parameters:
diff --git a/extraconfig/tasks/pre_puppet_pacemaker.yaml b/extraconfig/tasks/pre_puppet_pacemaker.yaml
index 82546588..aa7514f9 100644
--- a/extraconfig/tasks/pre_puppet_pacemaker.yaml
+++ b/extraconfig/tasks/pre_puppet_pacemaker.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Pre-Puppet Config for Pacemaker deployments'
parameters:
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index 4ca0b140..edcc9e8e 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -42,7 +42,7 @@ if [[ "$list_updates" == "" ]]; then
exit 0
fi
-pacemaker_status=$(systemctl is-active pacemaker)
+pacemaker_status=$(systemctl is-active pacemaker || :)
# Fix the redis/rabbit resource start/stop timeouts. See https://bugs.launchpad.net/tripleo/+bug/1633455
# and https://bugs.launchpad.net/tripleo/+bug/1634851
@@ -62,6 +62,8 @@ if [[ "$pacemaker_status" == "active" && \
fi
fi
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+special_case_ovs_upgrade_if_needed
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Pacemaker running, stopping cluster node and doing full package update"
@@ -73,28 +75,14 @@ if [[ "$pacemaker_status" == "active" ]] ; then
pcs cluster stop
fi
else
- echo "Upgrading openstack-puppet-modules"
+ echo "Upgrading openstack-puppet-modules and its dependencies"
yum -q -y update openstack-puppet-modules
+ yum deplist openstack-puppet-modules | awk '/dependency/{print $2}' | xargs yum -q -y update
echo "Upgrading other packages is handled by config management tooling"
echo -n "true" > $heat_outputs_path.update_managed_packages
exit 0
fi
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun"
- rpm -U --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
-
command=${command:-update}
full_command="yum -q -y $command $command_arguments"
echo "Running: $full_command"
@@ -104,6 +92,17 @@ return_code=$?
echo "$result"
echo "yum return code: $return_code"
+# Writes any changes caused by alterations to os-net-config and bounces the
+# interfaces *before* restarting the cluster.
+os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
+RETVAL=$?
+if [[ $RETVAL == 2 ]]; then
+ echo "os-net-config: interface configuration files updated successfully"
+elif [[ $RETVAL != 0 ]]; then
+ echo "ERROR: os-net-config configuration failed"
+ exit $RETVAL
+fi
+
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Starting cluster node"
pcs cluster start
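For reference, the --detailed-exit-codes handling above distinguishes three outcomes:

    # 0                  -> no interface changes were required
    # 2                  -> configuration files were updated (and the interfaces bounced)
    # any other non-zero -> error; abort before restarting the cluster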
diff --git a/extraconfig/tasks/yum_update.yaml b/extraconfig/tasks/yum_update.yaml
index d313ca9f..8cff838e 100644
--- a/extraconfig/tasks/yum_update.yaml
+++ b/extraconfig/tasks/yum_update.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Software-config for performing package updates using yum
@@ -9,7 +9,12 @@ resources:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: {get_file: yum_update.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: yum_update.sh
+
inputs:
- name: update_identifier
description: yum will only run for previously unused values of update_identifier
diff --git a/extraconfig/tasks/yum_update_noop.yaml b/extraconfig/tasks/yum_update_noop.yaml
index b759d9c5..9400c1d2 100644
--- a/extraconfig/tasks/yum_update_noop.yaml
+++ b/extraconfig/tasks/yum_update_noop.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'No-op yum update task'
resources:
diff --git a/firstboot/os-net-config-mappings.yaml b/firstboot/os-net-config-mappings.yaml
index 833c3bc2..d7e0c524 100644
--- a/firstboot/os-net-config-mappings.yaml
+++ b/firstboot/os-net-config-mappings.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Configure os-net-config mappings for specific nodes
@@ -38,7 +38,7 @@ resources:
str_replace:
template: |
#!/bin/sh
- eth_addr=$(/sbin/ifconfig eth0 | grep ether | awk '{print $2}')
+ eth_addr=$(cat /sys/class/net/*/address | tr '\n' ',')
mkdir -p /etc/os-net-config
# Create an os-net-config mapping file, note this defaults to
@@ -51,7 +51,7 @@ resources:
input = sys.stdin.readline() or '{}'
data = json.loads(input)
for node in data:
- if '${eth_addr}' in data[node].values():
+ if any(x in '$eth_addr'.split(',') for x in data[node].values()):
interface_mapping = {'interface_mapping': data[node]}
with open('/etc/os-net-config/mapping.yaml', 'w') as f:
yaml.safe_dump(interface_mapping, f, default_flow_style=False)
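To illustrate the mapping change above (MAC values are hypothetical): the script now collects every local MAC address instead of only eth0's, so a node matches when any of its interfaces appears in the supplied mapping data.

    # $ cat /sys/class/net/*/address | tr '\n' ','
    # 52:54:00:aa:bb:01,52:54:00:aa:bb:02,00:00:00:00:00:00,
    # The trailing comma only adds an empty element after split(','), which can
    # never equal a real MAC in the mapping, so it is harmless.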
diff --git a/firstboot/userdata_default.yaml b/firstboot/userdata_default.yaml
index 140d2bf8..bc379f4c 100644
--- a/firstboot/userdata_default.yaml
+++ b/firstboot/userdata_default.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
This is a default no-op template which provides empty user-data
diff --git a/firstboot/userdata_dev_rsync.yaml b/firstboot/userdata_dev_rsync.yaml
index 7dc7bd4d..d412b93a 100644
--- a/firstboot/userdata_dev_rsync.yaml
+++ b/firstboot/userdata_dev_rsync.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
This is first boot configuration for development purposes. It allows
diff --git a/firstboot/userdata_example.yaml b/firstboot/userdata_example.yaml
index a0d8c7ac..a352093f 100644
--- a/firstboot/userdata_example.yaml
+++ b/firstboot/userdata_example.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
# NOTE: You don't need to pass the parameter explicitly from the
# parent template, it can be specified via the parameter_defaults
diff --git a/firstboot/userdata_heat_admin.yaml b/firstboot/userdata_heat_admin.yaml
index f8891b29..ed8302dc 100644
--- a/firstboot/userdata_heat_admin.yaml
+++ b/firstboot/userdata_heat_admin.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
parameters:
# Can be overridden via parameter_defaults in the environment
@@ -6,6 +6,10 @@ parameters:
type: string
default: heat-admin
+ node_admin_extra_ssh_keys:
+ type: comma_delimited_list
+ default: []
+
description: >
Uses cloud-init to create an additional user with a known name, in addition
to the distro-default user created by the cloud-init default.
@@ -23,6 +27,8 @@ resources:
properties:
cloud_config:
user: {get_param: node_admin_username}
+ ssh_authorized_keys: {get_param: node_admin_extra_ssh_keys}
+
outputs:
OS::stack_id:
diff --git a/firstboot/userdata_root_password.yaml b/firstboot/userdata_root_password.yaml
new file mode 100644
index 00000000..63dd5a9c
--- /dev/null
+++ b/firstboot/userdata_root_password.yaml
@@ -0,0 +1,38 @@
+heat_template_version: ocata
+
+description: >
+ Uses cloud-init to enable root logins and set the root password.
+ Note this is less secure than the default configuration and may not be
+ appropriate for production environments; it's intended for illustration
+ and development/debugging only.
+
+parameters:
+ NodeRootPassword:
+ description: Root password for the nodes
+ hidden: true
+ type: string
+
+resources:
+ userdata:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: {get_resource: root_config}
+
+ root_config:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ ssh_pwauth: true
+ disable_root: false
+ chpasswd:
+ list:
+ str_replace:
+ template: "root:PASSWORD"
+ params:
+ PASSWORD: {get_param: NodeRootPassword}
+ expire: False
+
+outputs:
+ OS::stack_id:
+ value: {get_resource: userdata}
diff --git a/hosts-config.yaml b/hosts-config.yaml
index df0addfd..5a211716 100644
--- a/hosts-config.yaml
+++ b/hosts-config.yaml
@@ -1,21 +1,25 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'All Hosts Config'
parameters:
hosts:
- type: comma_delimited_list
+ type: string
resources:
hostsConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
- config:
- hosts:
- list_join:
- - "\n"
- - {get_param: hosts}
+ group: script
+ inputs:
+ - name: hosts
+ default:
+ list_join:
+ - ' '
+ - str_split:
+ - '\n'
+ - {get_param: hosts}
+ config: {get_file: scripts/hosts-config.sh}
outputs:
config_id:
@@ -28,3 +32,6 @@ outputs:
hostname-based access to the deployed nodes (useful for testing without
setting up a DNS).
value: {get_attr: [hostsConfigImpl, config, hosts]}
+ OS::stack_id:
+ description: The ID of the hostsConfigImpl resource.
+ value: {get_resource: hostsConfigImpl}
diff --git a/net-config-bond.yaml b/net-config-bond.yaml
index ec881bdc..3ae09c98 100644
--- a/net-config-bond.yaml
+++ b/net-config-bond.yaml
@@ -1,20 +1,22 @@
-heat_template_version: 2016-10-14
-
+heat_template_version: ocata
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge.
-
parameters:
BondInterfaceOvsOptions:
default: ''
- description: |
- The ovs_options string for the bond interface. Set things like
+ description: 'The ovs_options string for the bond interface. Set things like
+
lacp=active and/or bond_mode=balance-slb using this option.
+
+ '
type: string
constraints:
- - allowed_pattern: "^((?!balance.tcp).)*$"
- description: |
- The balance-tcp bond mode is known to cause packet loss and
- should not be used in BondInterfaceOvsOptions.
+ - allowed_pattern: ^((?!balance.tcp).)*$
+ description: 'The balance-tcp bond mode is known to cause packet loss and
+
+ should not be used in BondInterfaceOvsOptions.
+
+ '
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
@@ -43,43 +45,35 @@ parameters:
default: ''
description: IP address/subnet on the management network
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- use_dhcp: true
- # Can't do this yet: https://bugs.launchpad.net/heat/+bug/1344284
- #ovs_extra:
- # - list_join:
- # - ' '
- # - - br-set-external-id
- # - {get_input: bridge_name}
- # - bridge-id
- # - {get_input: bridge_name}
- members:
- -
- type: ovs_bond
+ str_replace:
+ template:
+ get_file: network/scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: bridge_name
+ use_dhcp: true
+ members:
+ - type: ovs_bond
name: bond1
use_dhcp: true
- ovs_options: {get_param: BondInterfaceOvsOptions}
+ ovs_options:
+ get_param: BondInterfaceOvsOptions
members:
- # os-net-config translates nic1 => em1 (for example)
- -
- type: interface
- name: nic1
- -
- type: interface
- name: nic2
-
+ - type: interface
+ name: nic1
+ - type: interface
+ name: nic2
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
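The same conversion pattern recurs in the net-config templates that follow: the os-apply-config StructuredConfig is replaced by a script SoftwareConfig whose body is network/scripts/run-os-net-config.sh, with str_replace substituting the serialized network_config mapping for the literal $network_config token; names such as bridge_name are left as plain strings for that script to resolve from its deployment inputs (assuming run-os-net-config.sh performs that substitution, as its use here implies).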
diff --git a/net-config-bridge.yaml b/net-config-bridge.yaml
index 4f7a19dc..10d53880 100644
--- a/net-config-bridge.yaml
+++ b/net-config-bridge.yaml
@@ -1,8 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
Software Config to drive os-net-config for a simple bridge.
-
parameters:
ControlPlaneIp:
default: ''
@@ -32,35 +30,29 @@ parameters:
default: ''
description: IP address/subnet on the management network
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- use_dhcp: true
- # Can't do this yet: https://bugs.launchpad.net/heat/+bug/1344284
- #ovs_extra:
- # - list_join:
- # - ' '
- # - - br-set-external-id
- # - {get_input: bridge_name}
- # - bridge-id
- # - {get_input: bridge_name}
- members:
- -
- type: interface
- name: {get_input: interface_name}
+ str_replace:
+ template:
+ get_file: network/scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: bridge_name
+ use_dhcp: true
+ members:
+ - type: interface
+ name: interface_name
# force the MAC address of the bridge to this interface
primary: true
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/net-config-linux-bridge.yaml b/net-config-linux-bridge.yaml
index 0980803e..04664818 100644
--- a/net-config-linux-bridge.yaml
+++ b/net-config-linux-bridge.yaml
@@ -1,8 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
Software Config to drive os-net-config for a simple bridge.
-
parameters:
ControlPlaneIp:
default: ''
@@ -35,43 +33,45 @@ parameters:
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
- default: '192.0.2.1'
+ default: 192.0.2.1
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
- default: '169.254.169.254/32'
-
-
+ default: 169.254.169.254/32
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: linux_bridge
- name: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: ControlPlaneIp}
- members:
- -
- type: interface
- name: {get_input: interface_name}
+ str_replace:
+ template:
+ get_file: network/scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: linux_bridge
+ name: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: ControlPlaneIp
+ members:
+ - type: interface
+ name: interface_name
# force the MAC address of the bridge to this interface
primary: true
- routes:
- -
- ip_netmask: 0.0.0.0/0
- next_hop: {get_param: ControlPlaneDefaultRoute}
+ routes:
+ - ip_netmask: 0.0.0.0/0
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
default: true
- -
- ip_netmask: {get_param: EC2MetadataIp}
- next_hop: {get_param: ControlPlaneDefaultRoute}
-
+ - ip_netmask:
+ get_param: EC2MetadataIp
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/net-config-noop.yaml b/net-config-noop.yaml
index 94c492c6..be05cc11 100644
--- a/net-config-noop.yaml
+++ b/net-config-noop.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Software Config to no-op for os-net-config. Using this will allow you
@@ -38,8 +38,8 @@ resources:
OsNetConfigImpl:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
- config:
+ group: apply-config
+ config: {}
outputs:
OS::stack_id:
diff --git a/net-config-static-bridge-with-external-dhcp.yaml b/net-config-static-bridge-with-external-dhcp.yaml
index 6dbe5982..12374a28 100644
--- a/net-config-static-bridge-with-external-dhcp.yaml
+++ b/net-config-static-bridge-with-external-dhcp.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config for a simple bridge configured
- with a static IP address for the ctlplane network.
-
+ Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
parameters:
ControlPlaneIp:
default: ''
@@ -47,53 +44,44 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- use_dhcp: true
- members:
- -
- type: interface
- name: {get_input: interface_name}
+ str_replace:
+ template:
+ get_file: network/scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: bridge_name
+ use_dhcp: true
+ members:
+ - type: interface
+ name: interface_name
# force the MAC address of the bridge to this interface
primary: true
- -
- type: interface
- # would like to do the following, but can't b/c of:
- # https://bugs.launchpad.net/heat/+bug/1344284
- # name:
- # list_join:
- # - '/'
- # - - {get_input: bridge_name}
- # - ':0'
- # So, just hardcode to br-ex:0 for now, br-ex is hardcoded in
- # controller.yaml anyway.
- name: br-ex:0
- addresses:
- -
- ip_netmask:
+ - type: interface
+ name: br-ex:0
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
-
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/net-config-static-bridge.yaml b/net-config-static-bridge.yaml
index a3d6d8b5..50e541be 100644
--- a/net-config-static-bridge.yaml
+++ b/net-config-static-bridge.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config for a simple bridge configured
- with a static IP address for the ctlplane network.
-
+ Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
parameters:
ControlPlaneIp:
default: ''
@@ -47,42 +44,44 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: network/scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: bridge_name
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
- name: {get_input: interface_name}
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
+ name: interface_name
# force the MAC address of the bridge to this interface
primary: true
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/net-config-static.yaml b/net-config-static.yaml
index 9de16cd8..a52e22ba 100644
--- a/net-config-static.yaml
+++ b/net-config-static.yaml
@@ -1,8 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
Software Config to drive os-net-config for a simple bridge.
-
parameters:
ControlPlaneIp:
default: ''
@@ -46,37 +44,39 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: {get_input: interface_name}
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: network/scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: interface_name
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
-
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/net-config-undercloud.yaml b/net-config-undercloud.yaml
new file mode 100644
index 00000000..9be51c0f
--- /dev/null
+++ b/net-config-undercloud.yaml
@@ -0,0 +1,77 @@
+heat_template_version: ocata
+description: >
+ Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet:
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: disable_configure_safe_defaults
+ default: true
+ config:
+ str_replace:
+ template:
+ get_file: network/scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: br-ctlplane
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
+ list_join:
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ members:
+ - type: interface
+ name: eth1
+ # force the MAC address of the bridge to this interface
+ primary: true
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value:
+ get_resource: OsNetConfigImpl
+
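[editor's note] net-config-undercloud.yaml is new in this change and follows the same script-based pattern, adding a disable_configure_safe_defaults input so the wrapper can skip its fallback interface configuration (assumed behaviour of run-os-net-config.sh, which is not shown here). Once str_replace has run, the script roughly receives a blob like the following, with parameter values filled in at deploy time; the addresses below are examples only:

    # Illustrative rendered $network_config for the undercloud template.
    network_config:
      - type: ovs_bridge
        name: br-ctlplane
        use_dhcp: false
        dns_servers: [192.0.2.254]
        addresses:
          # ControlPlaneIp and ControlPlaneSubnetCidr joined with '/'
          - ip_netmask: 192.0.2.1/24
        members:
          - type: interface
            name: eth1
            primary: true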
diff --git a/network/config/bond-with-vlans/ceph-storage.yaml b/network/config/bond-with-vlans/ceph-storage.yaml
index 9f537c02..703fea08 100644
--- a/network/config/bond-with-vlans/ceph-storage.yaml
+++ b/network/config/bond-with-vlans/ceph-storage.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config with 2 bonded nics on a bridge
- with VLANs attached for the ceph storage role.
-
+ Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the ceph storage role.
parameters:
ControlPlaneIp:
default: ''
@@ -35,14 +32,16 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like
- lacp=active and/or bond_mode=balance-slb using this option.
+ description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
+ this option.
type: string
constraints:
- - allowed_pattern: "^((?!balance.tcp).)*$"
- description: |
- The balance-tcp bond mode is known to cause packet loss and
- should not be used in BondInterfaceOvsOptions.
+ - allowed_pattern: ^((?!balance.tcp).)*$
+ description: 'The balance-tcp bond mode is known to cause packet loss and
+
+ should not be used in BondInterfaceOvsOptions.
+
+ '
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
@@ -75,7 +74,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -89,64 +88,63 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- -
- type: ovs_bridge
- name: br-bond
- members:
- -
- type: ovs_bond
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: ovs_bridge
+ name: br-bond
+ members:
+ - type: ovs_bond
name: bond1
- ovs_options: {get_param: BondInterfaceOvsOptions}
+ ovs_options:
+ get_param: BondInterfaceOvsOptions
members:
- -
- type: interface
- name: nic2
- primary: true
- -
- type: interface
- name: nic3
- -
- type: vlan
+ - type: interface
+ name: nic2
+ primary: true
+ - type: interface
+ name: nic3
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageNetworkVlanID}
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -161,8 +159,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
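[editor's note] The bond-with-vlans templates keep the allowed_pattern constraint on BondInterfaceOvsOptions; only its YAML style changes (block scalar to a quoted multi-line string). An illustrative override that satisfies the pattern, since any value containing "balance-tcp" is still rejected:

    # Example parameter_defaults; the value is an assumption, not part of this patch.
    parameter_defaults:
      BondInterfaceOvsOptions: "bond_mode=balance-slb"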
diff --git a/network/config/bond-with-vlans/cinder-storage.yaml b/network/config/bond-with-vlans/cinder-storage.yaml
index b4d71fa3..df15cd63 100644
--- a/network/config/bond-with-vlans/cinder-storage.yaml
+++ b/network/config/bond-with-vlans/cinder-storage.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config with 2 bonded nics on a bridge
- with VLANs attached for the cinder storage role.
-
+ Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the cinder storage role.
parameters:
ControlPlaneIp:
default: ''
@@ -35,14 +32,16 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like
- lacp=active and/or bond_mode=balance-slb using this option.
+ description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
+ this option.
type: string
constraints:
- - allowed_pattern: "^((?!balance.tcp).)*$"
- description: |
- The balance-tcp bond mode is known to cause packet loss and
- should not be used in BondInterfaceOvsOptions.
+ - allowed_pattern: ^((?!balance.tcp).)*$
+ description: 'The balance-tcp bond mode is known to cause packet loss and
+
+ should not be used in BondInterfaceOvsOptions.
+
+ '
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
@@ -75,7 +74,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -89,71 +88,70 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- -
- type: ovs_bridge
- name: br-bond
- members:
- -
- type: ovs_bond
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: ovs_bridge
+ name: br-bond
+ members:
+ - type: ovs_bond
name: bond1
- ovs_options: {get_param: BondInterfaceOvsOptions}
+ ovs_options:
+ get_param: BondInterfaceOvsOptions
members:
- -
- type: interface
- name: nic2
- primary: true
- -
- type: interface
- name: nic3
- -
- type: vlan
+ - type: interface
+ name: nic2
+ primary: true
+ - type: interface
+ name: nic3
+ - type: vlan
device: bond1
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageNetworkVlanID}
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -168,8 +166,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/bond-with-vlans/compute-dpdk.yaml b/network/config/bond-with-vlans/compute-dpdk.yaml
index 3fc764be..4677241b 100644
--- a/network/config/bond-with-vlans/compute-dpdk.yaml
+++ b/network/config/bond-with-vlans/compute-dpdk.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config with 2 bonded nics on a bridge
- with VLANs attached for the compute role.
-
+ Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the compute role.
parameters:
ControlPlaneIp:
default: ''
@@ -35,8 +32,8 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like
- lacp=active and/or bond_mode=balance-slb using this option.
+ description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
+ this option.
type: string
ExternalNetworkVlanID:
default: 10
@@ -70,7 +67,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -84,71 +81,70 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- members:
- -
- type: ovs_bond
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: ovs_bridge
+ name: bridge_name
+ members:
+ - type: ovs_bond
name: bond1
- ovs_options: {get_param: BondInterfaceOvsOptions}
+ ovs_options:
+ get_param: BondInterfaceOvsOptions
members:
- -
- type: interface
- name: nic2
- primary: true
- -
- type: interface
- name: nic3
- -
- type: vlan
+ - type: interface
+ name: nic2
+ primary: true
+ - type: interface
+ name: nic3
+ - type: vlan
device: bond1
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageNetworkVlanID}
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: TenantNetworkVlanID}
+ vlan_id:
+ get_param: TenantNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
+ - ip_netmask:
+ get_param: TenantIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -163,30 +159,25 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
- -
- type: ovs_user_bridge
- name: br-link
- members:
- -
- type: ovs_dpdk_bond
+ - type: ovs_user_bridge
+ name: br-link
+ members:
+ - type: ovs_dpdk_bond
name: dpdkbond0
members:
- -
- type: ovs_dpdk_port
- name: dpdk0
- members:
- -
- type: interface
- name: nic4
- -
- type: ovs_dpdk_port
- name: dpdk1
- members:
- -
- type: interface
- name: nic5
-
+ - type: ovs_dpdk_port
+ name: dpdk0
+ members:
+ - type: interface
+ name: nic4
+ - type: ovs_dpdk_port
+ name: dpdk1
+ members:
+ - type: interface
+ name: nic5
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/bond-with-vlans/compute.yaml b/network/config/bond-with-vlans/compute.yaml
index b2cfb0a2..f9c926d3 100644
--- a/network/config/bond-with-vlans/compute.yaml
+++ b/network/config/bond-with-vlans/compute.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config with 2 bonded nics on a bridge
- with VLANs attached for the compute role.
-
+ Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the compute role.
parameters:
ControlPlaneIp:
default: ''
@@ -35,14 +32,16 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like
- lacp=active and/or bond_mode=balance-slb using this option.
+ description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
+ this option.
type: string
constraints:
- - allowed_pattern: "^((?!balance.tcp).)*$"
- description: |
- The balance-tcp bond mode is known to cause packet loss and
- should not be used in BondInterfaceOvsOptions.
+ - allowed_pattern: ^((?!balance.tcp).)*$
+ description: 'The balance-tcp bond mode is known to cause packet loss and
+
+ should not be used in BondInterfaceOvsOptions.
+
+ '
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
@@ -75,7 +74,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -89,71 +88,70 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- members:
- -
- type: ovs_bond
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: ovs_bridge
+ name: bridge_name
+ members:
+ - type: ovs_bond
name: bond1
- ovs_options: {get_param: BondInterfaceOvsOptions}
+ ovs_options:
+ get_param: BondInterfaceOvsOptions
members:
- -
- type: interface
- name: nic2
- primary: true
- -
- type: interface
- name: nic3
- -
- type: vlan
+ - type: interface
+ name: nic2
+ primary: true
+ - type: interface
+ name: nic3
+ - type: vlan
device: bond1
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageNetworkVlanID}
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: TenantNetworkVlanID}
+ vlan_id:
+ get_param: TenantNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
+ - ip_netmask:
+ get_param: TenantIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -168,8 +166,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/bond-with-vlans/controller-no-external.yaml b/network/config/bond-with-vlans/controller-no-external.yaml
index 4c3e59fa..ce1e8654 100644
--- a/network/config/bond-with-vlans/controller-no-external.yaml
+++ b/network/config/bond-with-vlans/controller-no-external.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config with 2 bonded nics on a bridge
- with VLANs attached for the controller role.
-
+ Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role.
parameters:
ControlPlaneIp:
default: ''
@@ -35,14 +32,16 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like
- lacp=active and/or bond_mode=balance-slb using this option.
+ description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
+ this option.
type: string
constraints:
- - allowed_pattern: "^((?!balance.tcp).)*$"
- description: |
- The balance-tcp bond mode is known to cause packet loss and
- should not be used in BondInterfaceOvsOptions.
+ - allowed_pattern: ^((?!balance.tcp).)*$
+ description: 'The balance-tcp bond mode is known to cause packet loss and
+
+ should not be used in BondInterfaceOvsOptions.
+
+ '
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
@@ -71,7 +70,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute:
- default: '10.0.0.1'
+ default: 10.0.0.1
description: default route for the external network
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -89,79 +88,76 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- use_dhcp: true
- members:
- -
- type: ovs_bond
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: ovs_bridge
+ name: bridge_name
+ use_dhcp: true
+ members:
+ - type: ovs_bond
name: bond1
- ovs_options: {get_param: BondInterfaceOvsOptions}
+ ovs_options:
+ get_param: BondInterfaceOvsOptions
members:
- -
- type: interface
- name: nic2
- primary: true
- -
- type: interface
- name: nic3
- -
- type: vlan
+ - type: interface
+ name: nic2
+ primary: true
+ - type: interface
+ name: nic3
+ - type: vlan
device: bond1
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageNetworkVlanID}
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: TenantNetworkVlanID}
+ vlan_id:
+ get_param: TenantNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
+ - ip_netmask:
+ get_param: TenantIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -176,8 +172,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/bond-with-vlans/controller-v6.yaml b/network/config/bond-with-vlans/controller-v6.yaml
index 1361d969..bb4ac274 100644
--- a/network/config/bond-with-vlans/controller-v6.yaml
+++ b/network/config/bond-with-vlans/controller-v6.yaml
@@ -1,11 +1,8 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config with 2 bonded nics on a bridge
- with VLANs attached for the controller role with IPv6 on the External
- network. The IPv6 default route is on the External network, and the
- IPv4 default route is on the Control Plane.
-
+ Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role with IPv6
+ on the External network. The IPv6 default route is on the External network, and the IPv4 default route is on the Control
+ Plane.
parameters:
ControlPlaneIp:
default: ''
@@ -36,15 +33,17 @@ parameters:
description: IP address/subnet on the management network
type: string
BondInterfaceOvsOptions:
- default: 'bond_mode=active-backup'
- description: The ovs_options string for the bond interface. Set things like
- lacp=active and/or bond_mode=balance-slb using this option.
+ default: bond_mode=active-backup
+ description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
+ this option.
type: string
constraints:
- - allowed_pattern: "^((?!balance.tcp).)*$"
- description: |
- The balance-tcp bond mode is known to cause packet loss and
- should not be used in BondInterfaceOvsOptions.
+ - allowed_pattern: ^((?!balance.tcp).)*$
+ description: 'The balance-tcp bond mode is known to cause packet loss and
+
+ should not be used in BondInterfaceOvsOptions.
+
+ '
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
@@ -77,7 +76,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute:
- default: '10.0.0.1'
+ default: 10.0.0.1
description: default route for the external network
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -91,91 +90,88 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- # IPv4 Default Route
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- dns_servers: {get_param: DnsServers}
- members:
- -
- type: ovs_bond
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: ovs_bridge
+ name: bridge_name
+ dns_servers:
+ get_param: DnsServers
+ members:
+ - type: ovs_bond
name: bond1
- ovs_options: {get_param: BondInterfaceOvsOptions}
+ ovs_options:
+ get_param: BondInterfaceOvsOptions
members:
- -
- type: interface
- name: nic2
- primary: true
- -
- type: interface
- name: nic3
- -
- type: vlan
+ - type: interface
+ name: nic2
+ primary: true
+ - type: interface
+ name: nic3
+ - type: vlan
device: bond1
- vlan_id: {get_param: ExternalNetworkVlanID}
+ vlan_id:
+ get_param: ExternalNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: ExternalIpSubnet}
+ - ip_netmask:
+ get_param: ExternalIpSubnet
routes:
- # IPv6 Default Route
- -
- default: true
- next_hop: {get_param: ExternalInterfaceDefaultRoute}
- -
- type: vlan
+ - default: true
+ next_hop:
+ get_param: ExternalInterfaceDefaultRoute
+ - type: vlan
device: bond1
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageNetworkVlanID}
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: TenantNetworkVlanID}
+ vlan_id:
+ get_param: TenantNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
+ - ip_netmask:
+ get_param: TenantIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the External interface. This will
@@ -191,8 +187,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
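[editor's note] For the IPv6 controller variant, the rewritten description still holds: the default IPv6 route rides on the External VLAN while IPv4 keeps its default on the control plane. A hedged example of the matching overrides; the addresses are illustrative only:

    # Example parameter_defaults for the IPv6-external layout described above.
    parameter_defaults:
      ControlPlaneDefaultRoute: 192.0.2.1            # IPv4 default via ctlplane
      ExternalInterfaceDefaultRoute: 2001:db8::1     # IPv6 default via the External VLAN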
diff --git a/network/config/bond-with-vlans/controller.yaml b/network/config/bond-with-vlans/controller.yaml
index 677c90c5..91515385 100644
--- a/network/config/bond-with-vlans/controller.yaml
+++ b/network/config/bond-with-vlans/controller.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config with 2 bonded nics on a bridge
- with VLANs attached for the controller role.
-
+ Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role.
parameters:
ControlPlaneIp:
default: ''
@@ -34,15 +31,17 @@ parameters:
description: IP address/subnet on the management network
type: string
BondInterfaceOvsOptions:
- default: 'bond_mode=active-backup'
- description: The ovs_options string for the bond interface. Set things like
- lacp=active and/or bond_mode=balance-slb using this option.
+ default: bond_mode=active-backup
+ description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
+ this option.
type: string
constraints:
- - allowed_pattern: "^((?!balance.tcp).)*$"
- description: |
- The balance-tcp bond mode is known to cause packet loss and
- should not be used in BondInterfaceOvsOptions.
+ - allowed_pattern: ^((?!balance.tcp).)*$
+ description: 'The balance-tcp bond mode is known to cause packet loss and
+
+ should not be used in BondInterfaceOvsOptions.
+
+ '
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
@@ -71,7 +70,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute:
- default: '10.0.0.1'
+ default: 10.0.0.1
description: default route for the external network
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -89,86 +88,85 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- dns_servers: {get_param: DnsServers}
- members:
- -
- type: ovs_bond
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - type: ovs_bridge
+ name: bridge_name
+ dns_servers:
+ get_param: DnsServers
+ members:
+ - type: ovs_bond
name: bond1
- ovs_options: {get_param: BondInterfaceOvsOptions}
+ ovs_options:
+ get_param: BondInterfaceOvsOptions
members:
- -
- type: interface
- name: nic2
- primary: true
- -
- type: interface
- name: nic3
- -
- type: vlan
+ - type: interface
+ name: nic2
+ primary: true
+ - type: interface
+ name: nic3
+ - type: vlan
device: bond1
- vlan_id: {get_param: ExternalNetworkVlanID}
+ vlan_id:
+ get_param: ExternalNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: ExternalIpSubnet}
+ - ip_netmask:
+ get_param: ExternalIpSubnet
routes:
- -
- default: true
- next_hop: {get_param: ExternalInterfaceDefaultRoute}
- -
- type: vlan
+ - default: true
+ next_hop:
+ get_param: ExternalInterfaceDefaultRoute
+ - type: vlan
device: bond1
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageNetworkVlanID}
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: TenantNetworkVlanID}
+ vlan_id:
+ get_param: TenantNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
+ - ip_netmask:
+ get_param: TenantIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the External interface. This will
@@ -184,8 +182,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/bond-with-vlans/swift-storage.yaml b/network/config/bond-with-vlans/swift-storage.yaml
index e16d6b6e..6d4e3681 100644
--- a/network/config/bond-with-vlans/swift-storage.yaml
+++ b/network/config/bond-with-vlans/swift-storage.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config with 2 bonded nics on a bridge
- with VLANs attached for the swift storage role.
-
+ Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the swift storage role.
parameters:
ControlPlaneIp:
default: ''
@@ -35,14 +32,16 @@ parameters:
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like
- lacp=active and/or bond_mode=balance-slb using this option.
+ description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
+ this option.
type: string
constraints:
- - allowed_pattern: "^((?!balance.tcp).)*$"
- description: |
- The balance-tcp bond mode is known to cause packet loss and
- should not be used in BondInterfaceOvsOptions.
+ - allowed_pattern: ^((?!balance.tcp).)*$
+ description: 'The balance-tcp bond mode is known to cause packet loss and
+
+ should not be used in BondInterfaceOvsOptions.
+
+ '
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
@@ -75,7 +74,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -89,71 +88,70 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- -
- type: ovs_bridge
- name: br-bond
- members:
- -
- type: ovs_bond
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: ovs_bridge
+ name: br-bond
+ members:
+ - type: ovs_bond
name: bond1
- ovs_options: {get_param: BondInterfaceOvsOptions}
+ ovs_options:
+ get_param: BondInterfaceOvsOptions
members:
- -
- type: interface
- name: nic2
- primary: true
- -
- type: interface
- name: nic3
- -
- type: vlan
+ - type: interface
+ name: nic2
+ primary: true
+ - type: interface
+ name: nic3
+ - type: vlan
device: bond1
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageNetworkVlanID}
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
device: bond1
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -168,8 +166,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/multiple-nics/ceph-storage.yaml b/network/config/multiple-nics/ceph-storage.yaml
index c31c6e65..6a788063 100644
--- a/network/config/multiple-nics/ceph-storage.yaml
+++ b/network/config/multiple-nics/ceph-storage.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure multiple interfaces
- for the ceph storage role.
-
+ Software Config to drive os-net-config to configure multiple interfaces for the ceph storage role.
parameters:
ControlPlaneIp:
default: ''
@@ -65,7 +62,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -79,48 +76,48 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- -
- type: interface
- name: nic2
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: interface
- name: nic3
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: interface
+ name: nic2
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: interface
+ name: nic3
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -135,8 +132,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/multiple-nics/cinder-storage.yaml b/network/config/multiple-nics/cinder-storage.yaml
index 4f8b7f64..d2384445 100644
--- a/network/config/multiple-nics/cinder-storage.yaml
+++ b/network/config/multiple-nics/cinder-storage.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure multiple interfaces
- for the cinder storage role.
-
+ Software Config to drive os-net-config to configure multiple interfaces for the cinder storage role.
parameters:
ControlPlaneIp:
default: ''
@@ -65,7 +62,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -79,55 +76,54 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- -
- type: interface
- name: nic2
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: interface
- name: nic3
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- -
- type: interface
- name: nic4
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: interface
+ name: nic2
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: interface
+ name: nic3
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+ - type: interface
+ name: nic4
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -142,8 +138,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/multiple-nics/compute-dvr.yaml b/network/config/multiple-nics/compute-dvr.yaml
new file mode 100644
index 00000000..abfd323f
--- /dev/null
+++ b/network/config/multiple-nics/compute-dvr.yaml
@@ -0,0 +1,162 @@
+heat_template_version: ocata
+description: >
+ Software Config to drive os-net-config to configure multiple interfaces for the
+ compute role with external bridge for DVR.
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ExternalNetworkVlanID:
+ default: 10
+ description: Vlan ID for the external network traffic.
+ type: number
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ TenantNetworkVlanID:
+ default: 50
+ description: Vlan ID for the tenant network traffic.
+ type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ ExternalInterfaceDefaultRoute: # Not used by default in this template
+ default: 10.0.0.1
+ description: The default route of the external network.
+ type: string
+ ManagementInterfaceDefaultRoute: # Commented out by default in this template
+ default: unset
+ description: The default route of the management network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
+ list_join:
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: interface
+ name: nic2
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: interface
+ name: nic4
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: ovs_bridge
+ name: br-tenant
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: TenantIpSubnet
+ members:
+ - type: interface
+ name: nic5
+ use_dhcp: false
+ primary: true
+ # External bridge for DVR (no IP address required)
+ - type: ovs_bridge
+ name: bridge_name
+ dns_servers:
+ get_param: DnsServers
+ use_dhcp: false
+ members:
+ - type: interface
+ name: nic6
+ primary: true
+ # Uncomment when including environments/network-management.yaml
+ # If setting default route on the Management interface, comment
+ # out the default route on the Control Plane.
+ #-
+ # type: interface
+ # name: nic7
+ # use_dhcp: false
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
+ # routes:
+ # -
+ # default: true
+ # next_hop: {get_param: ManagementInterfaceDefaultRoute}
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value:
+ get_resource: OsNetConfigImpl
+
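compute-dvr.yaml is a new variant of the compute NIC config that adds an external OVS bridge with no IP address, which DVR-enabled compute nodes need so the local L3 agent can handle floating IP traffic directly. To use it, the compute role's NIC config mapping is pointed at this template from an environment file; a sketch following the pattern of the existing net-multiple-nics environments (registry key and relative path are the usual convention, adjust to your layout):

    resource_registry:
      OS::TripleO::Compute::Net::SoftwareConfig:
        ../network/config/multiple-nics/compute-dvr.yaml

The bridge is declared with the literal name bridge_name, the same token that replaces {get_input: bridge_name} in the converted templates; the deployment presumably resolves it to the actual bridge name (normally br-ex) when run-os-net-config.sh runs.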
diff --git a/network/config/multiple-nics/compute.yaml b/network/config/multiple-nics/compute.yaml
index 77514745..101a08d3 100644
--- a/network/config/multiple-nics/compute.yaml
+++ b/network/config/multiple-nics/compute.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure multiple interfaces
- for the compute role.
-
+ Software Config to drive os-net-config to configure multiple interfaces for the compute role.
parameters:
ControlPlaneIp:
default: ''
@@ -65,7 +62,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -79,62 +76,58 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- -
- type: interface
- name: nic2
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: interface
- name: nic4
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- # Create a bridge which can also be used for VLAN-mode bridge mapping
- type: ovs_bridge
- name: br-tenant
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
- members:
- -
- type: interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: interface
+ name: nic2
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: interface
+ name: nic4
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: ovs_bridge
+ name: br-tenant
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: TenantIpSubnet
+ members:
+ - type: interface
name: nic5
use_dhcp: false
- # force the MAC address of the bridge to this interface
primary: true
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
@@ -150,8 +143,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
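Throughout these templates the control plane address is assembled from two parameters rather than passed as a single CIDR: list_join concatenates ControlPlaneIp and ControlPlaneSubnetCidr with a / separator. A worked example with placeholder values (ControlPlaneIp is supplied per node by the overcloud stack; ControlPlaneSubnetCidr defaults to '24'):

    # example inputs:  ControlPlaneIp: 192.0.2.10,  ControlPlaneSubnetCidr: '24'
    addresses:
      - ip_netmask:
          list_join:
            - /
            - - get_param: ControlPlaneIp
              - get_param: ControlPlaneSubnetCidr
    # renders as:
    # addresses:
    #   - ip_netmask: 192.0.2.10/24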
diff --git a/network/config/multiple-nics/controller-v6.yaml b/network/config/multiple-nics/controller-v6.yaml
index da1f95f1..4fae435a 100644
--- a/network/config/multiple-nics/controller-v6.yaml
+++ b/network/config/multiple-nics/controller-v6.yaml
@@ -1,11 +1,7 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure multiple interfaces
- for the controller role with IPv6 on the External network. The IPv6
- default route is on the External network, and the IPv4 default route
- is on the Control Plane.
-
+ Software Config to drive os-net-config to configure multiple interfaces for the controller role with IPv6 on the External
+ network. The IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
parameters:
ControlPlaneIp:
default: ''
@@ -67,7 +63,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute:
- default: '10.0.0.1'
+ default: 10.0.0.1
description: default route for the external network
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -81,89 +77,81 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
# IPv4 Default Route
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- -
- type: interface
- name: nic2
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: interface
- name: nic3
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- -
- type: interface
- name: nic4
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- # Create a bridge which can also be used for VLAN-mode bridge mapping
- type: ovs_bridge
- name: br-tenant
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
- members:
- -
- type: interface
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: interface
+ name: nic2
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: interface
+ name: nic3
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+ - type: interface
+ name: nic4
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: ovs_bridge
+ name: br-tenant
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: TenantIpSubnet
+ members:
+ - type: interface
name: nic5
use_dhcp: false
- # force the MAC address of the bridge to this interface
primary: true
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- dns_servers: {get_param: DnsServers}
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: ExternalIpSubnet}
- routes:
- # IPv6 Default Route
- -
- default: true
- next_hop: {get_param: ExternalInterfaceDefaultRoute}
- members:
- -
- type: interface
+ - type: ovs_bridge
+ name: bridge_name
+ dns_servers:
+ get_param: DnsServers
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: ExternalIpSubnet
+ routes:
+ - default: true
+ next_hop:
+ get_param: ExternalInterfaceDefaultRoute
+ members:
+ - type: interface
name: nic6
- # force the MAC address of the bridge to this interface
primary: true
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
@@ -180,8 +168,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
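In the IPv6 controller template the split default routes survive the conversion even though the inline "# IPv4/IPv6 Default Route" comments were dropped: the IPv4 default stays on the control plane interface and the IPv6 default rides on the external bridge. With illustrative values, the external bridge entry renders roughly as follows (addresses use documentation prefixes and are placeholders; br-ex stands in for the resolved bridge_name token):

    - type: ovs_bridge
      name: br-ex                      # substituted for the bridge_name token
      use_dhcp: false
      dns_servers: ['2001:db8::53']    # example DnsServers value
      addresses:
        - ip_netmask: 2001:db8::10/64  # example ExternalIpSubnet
      routes:
        - default: true
          next_hop: 2001:db8::1        # example ExternalInterfaceDefaultRoute
      members:
        - type: interface
          name: nic6
          primary: true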
diff --git a/network/config/multiple-nics/controller.yaml b/network/config/multiple-nics/controller.yaml
index 7a1f9e5f..ba9f8fd4 100644
--- a/network/config/multiple-nics/controller.yaml
+++ b/network/config/multiple-nics/controller.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure multiple interfaces
- for the controller role.
-
+ Software Config to drive os-net-config to configure multiple interfaces for the controller role.
parameters:
ControlPlaneIp:
default: ''
@@ -65,7 +62,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute:
- default: '10.0.0.1'
+ default: 10.0.0.1
description: default route for the external network
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -79,84 +76,77 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- type: interface
- name: nic2
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: interface
- name: nic3
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- -
- type: interface
- name: nic4
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- # Create a bridge which can also be used for VLAN-mode bridge mapping
- type: ovs_bridge
- name: br-tenant
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
- members:
- -
- type: interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - type: interface
+ name: nic2
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: interface
+ name: nic3
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+ - type: interface
+ name: nic4
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: ovs_bridge
+ name: br-tenant
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: TenantIpSubnet
+ members:
+ - type: interface
name: nic5
use_dhcp: false
- # force the MAC address of the bridge to this interface
primary: true
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- dns_servers: {get_param: DnsServers}
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: ExternalIpSubnet}
- routes:
- -
- default: true
- next_hop: {get_param: ExternalInterfaceDefaultRoute}
- members:
- -
- type: interface
+ - type: ovs_bridge
+ name: bridge_name
+ dns_servers:
+ get_param: DnsServers
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: ExternalIpSubnet
+ routes:
+ - default: true
+ next_hop:
+ get_param: ExternalInterfaceDefaultRoute
+ members:
+ - type: interface
name: nic6
- # force the MAC address of the bridge to this interface
primary: true
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
@@ -173,8 +163,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/multiple-nics/swift-storage.yaml b/network/config/multiple-nics/swift-storage.yaml
index 05083105..4019012a 100644
--- a/network/config/multiple-nics/swift-storage.yaml
+++ b/network/config/multiple-nics/swift-storage.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure multiple interfaces
- for the swift storage role.
-
+ Software Config to drive os-net-config to configure multiple interfaces for the swift storage role.
parameters:
ControlPlaneIp:
default: ''
@@ -65,7 +62,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -79,55 +76,54 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: interface
- name: nic1
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- -
- type: interface
- name: nic2
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: interface
- name: nic3
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- -
- type: interface
- name: nic4
- use_dhcp: false
- addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: interface
+ name: nic2
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: interface
+ name: nic3
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+ - type: interface
+ name: nic4
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -142,8 +138,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
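Several parameters in these templates are only meaningful when overridden, as their inline comments note (ControlPlaneDefaultRoute, ControlPlaneSubnetCidr, EC2MetadataIp, DnsServers). A small environment-file sketch with placeholder values shows the expected parameter_defaults mechanism:

    parameter_defaults:
      ControlPlaneDefaultRoute: 192.0.2.1
      ControlPlaneSubnetCidr: '24'
      EC2MetadataIp: 192.0.2.1
      DnsServers: ['192.0.2.53', '192.0.2.54']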
diff --git a/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml b/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
index fc8e8b6f..448df69c 100644
--- a/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
+++ b/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- ceph storage role.
-
+ Software Config to drive os-net-config to configure VLANs for the ceph storage role.
parameters:
ControlPlaneIp:
default: ''
@@ -65,7 +62,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -79,54 +76,55 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: linux_bridge
- name: br-storage
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: linux_bridge
+ name: br-storage
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
name: nic1
# force the MAC address of the bridge to this interface
primary: true
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
- device: br-storage
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
- device: br-storage
- addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
+ device: br-storage
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
+ device: br-storage
+ addresses:
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -141,8 +139,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml b/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml
index 6fb247ed..465555d3 100644
--- a/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml
+++ b/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- cinder storage role.
-
+ Software Config to drive os-net-config to configure VLANs for the cinder storage role.
parameters:
ControlPlaneIp:
default: ''
@@ -65,7 +62,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -79,61 +76,62 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: linux_bridge
- name: br-storage
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: linux_bridge
+ name: br-storage
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
name: nic1
# force the MAC address of the bridge to this interface
primary: true
- -
- type: vlan
- vlan_id: {get_param: InternalApiNetworkVlanID}
- device: br-storage
- addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
- device: br-storage
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
- device: br-storage
- addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
+ - type: vlan
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
+ device: br-storage
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
+ device: br-storage
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
+ device: br-storage
+ addresses:
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -148,8 +146,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/single-nic-linux-bridge-vlans/compute.yaml b/network/config/single-nic-linux-bridge-vlans/compute.yaml
index e31720d8..a21bc8f9 100644
--- a/network/config/single-nic-linux-bridge-vlans/compute.yaml
+++ b/network/config/single-nic-linux-bridge-vlans/compute.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- compute role.
-
+ Software Config to drive os-net-config to configure VLANs for the compute role.
parameters:
ControlPlaneIp:
default: ''
@@ -65,7 +62,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -79,68 +76,69 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: linux_bridge
- name: {get_input: bridge_name}
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: linux_bridge
+ name: bridge_name
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
- name: {get_input: interface_name}
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
+ name: interface_name
# force the MAC address of the bridge to this interface
primary: true
- -
- type: vlan
- vlan_id: {get_param: InternalApiNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: TenantNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
+ - type: vlan
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: TenantNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: TenantIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
#-
# type: vlan
# vlan_id: {get_param: ManagementNetworkVlanID}
- # device: {get_input: bridge_name}
+ # device: bridge_name
# addresses:
# -
# ip_netmask: {get_param: ManagementIpSubnet}
@@ -148,8 +146,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/single-nic-linux-bridge-vlans/controller-v6.yaml b/network/config/single-nic-linux-bridge-vlans/controller-v6.yaml
index 80125149..bb8bb9c2 100644
--- a/network/config/single-nic-linux-bridge-vlans/controller-v6.yaml
+++ b/network/config/single-nic-linux-bridge-vlans/controller-v6.yaml
@@ -1,11 +1,7 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- controller role with IPv6 on the External network. The IPv6 default
- route is on the External network, and the IPv4 default route is on
- the Control Plane.
-
+ Software Config to drive os-net-config to configure VLANs for the controller role with IPv6 on the External network. The
+ IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
parameters:
ControlPlaneIp:
default: ''
@@ -63,7 +59,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute:
- default: '10.0.0.1'
+ default: 10.0.0.1
description: default route for the external network
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -81,81 +77,79 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: linux_bridge
- name: {get_input: bridge_name}
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: linux_bridge
+ name: bridge_name
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- # IPv4 Default Route
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
- name: {get_input: interface_name}
- # force the MAC address of the bridge to this interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
+ name: interface_name
primary: true
- -
- type: vlan
- vlan_id: {get_param: ExternalNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: ExternalIpSubnet}
- routes:
- # IPv6 Default Route
- -
- default: true
- next_hop: {get_param: ExternalInterfaceDefaultRoute}
- -
- type: vlan
- vlan_id: {get_param: InternalApiNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: TenantNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
+ - type: vlan
+ vlan_id:
+ get_param: ExternalNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: ExternalIpSubnet
+ routes:
+ - default: true
+ next_hop:
+ get_param: ExternalInterfaceDefaultRoute
+ - type: vlan
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: TenantNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: TenantIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the External interface. This will
@@ -163,7 +157,7 @@ resources:
#-
# type: vlan
# vlan_id: {get_param: ManagementNetworkVlanID}
- # device: {get_input: bridge_name}
+ # device: bridge_name
# addresses:
# -
# ip_netmask: {get_param: ManagementIpSubnet}
@@ -171,8 +165,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
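Each template carries a commented-out block for the Management network. Enabling it means uncommenting those lines and, if the Management network should carry the default route, commenting out the default route above it, as the comments describe. For the bridged-VLAN controller templates, the uncommented block would look roughly like this (ManagementIpSubnet and ManagementInterfaceDefaultRoute still come from parameters; bridge_name is resolved at deploy time):

    - type: vlan
      vlan_id:
        get_param: ManagementNetworkVlanID
      device: bridge_name
      addresses:
        - ip_netmask:
            get_param: ManagementIpSubnet
      routes:
        - default: true
          next_hop:
            get_param: ManagementInterfaceDefaultRoute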
diff --git a/network/config/single-nic-linux-bridge-vlans/controller.yaml b/network/config/single-nic-linux-bridge-vlans/controller.yaml
index aef5d4e3..a9689ce9 100644
--- a/network/config/single-nic-linux-bridge-vlans/controller.yaml
+++ b/network/config/single-nic-linux-bridge-vlans/controller.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- controller role.
-
+ Software Config to drive os-net-config to configure VLANs for the controller role.
parameters:
ControlPlaneIp:
default: ''
@@ -61,7 +58,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute:
- default: '10.0.0.1'
+ default: 10.0.0.1
description: default route for the external network
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -79,81 +76,79 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: linux_bridge
- name: {get_input: bridge_name}
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: linux_bridge
+ name: bridge_name
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- # IPv4 Default Route
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
- name: {get_input: interface_name}
- # force the MAC address of the bridge to this interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
+ name: interface_name
primary: true
- -
- type: vlan
- vlan_id: {get_param: ExternalNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: ExternalIpSubnet}
- routes:
- # IPv6 Default Route
- -
- default: true
- next_hop: {get_param: ExternalInterfaceDefaultRoute}
- -
- type: vlan
- vlan_id: {get_param: InternalApiNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: TenantNetworkVlanID}
- device: {get_input: bridge_name}
- addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
+ - type: vlan
+ vlan_id:
+ get_param: ExternalNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: ExternalIpSubnet
+ routes:
+ - default: true
+ next_hop:
+ get_param: ExternalInterfaceDefaultRoute
+ - type: vlan
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: TenantNetworkVlanID
+ device: bridge_name
+ addresses:
+ - ip_netmask:
+ get_param: TenantIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the External interface. This will
@@ -161,7 +156,7 @@ resources:
#-
# type: vlan
# vlan_id: {get_param: ManagementNetworkVlanID}
- # device: {get_input: bridge_name}
+ # device: bridge_name
# addresses:
# -
# ip_netmask: {get_param: ManagementIpSubnet}
@@ -169,8 +164,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml b/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
index a5d2f966..c8e4db29 100644
--- a/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
+++ b/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- swift storage role.
-
+ Software Config to drive os-net-config to configure VLANs for the swift storage role.
parameters:
ControlPlaneIp:
default: ''
@@ -65,7 +62,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -79,61 +76,62 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: linux_bridge
- name: br-storage
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: linux_bridge
+ name: br-storage
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
name: nic1
# force the MAC address of the bridge to this interface
primary: true
- -
- type: vlan
- vlan_id: {get_param: InternalApiNetworkVlanID}
- device: br-storage
- addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
- device: br-storage
- addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
- device: br-storage
- addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
+ - type: vlan
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
+ device: br-storage
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
+ device: br-storage
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
+ device: br-storage
+ addresses:
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -148,8 +146,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
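The single-nic-linux-bridge-vlans templates above and the single-nic-vlans templates that follow differ mainly in the bridge type: the former declare type: linux_bridge and attach each VLAN to the bridge explicitly via device:, while the latter use type: ovs_bridge and omit the device: key on the VLAN entries. The address and VLAN layout is otherwise the same; only the bridge declaration changes:

    # single-nic-linux-bridge-vlans
    - type: linux_bridge
      name: br-storage
    # single-nic-vlans
    - type: ovs_bridge
      name: br-storage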
diff --git a/network/config/single-nic-vlans/ceph-storage.yaml b/network/config/single-nic-vlans/ceph-storage.yaml
index 6fa288af..0b5eb0c9 100644
--- a/network/config/single-nic-vlans/ceph-storage.yaml
+++ b/network/config/single-nic-vlans/ceph-storage.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- ceph storage role.
-
+ Software Config to drive os-net-config to configure VLANs for the ceph storage role.
parameters:
ControlPlaneIp:
default: ''
@@ -53,7 +50,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -67,52 +64,53 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: ovs_bridge
- name: br-storage
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: br-storage
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
name: nic1
# force the MAC address of the bridge to this interface
primary: true
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -126,8 +124,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/single-nic-vlans/cinder-storage.yaml b/network/config/single-nic-vlans/cinder-storage.yaml
index d1135776..882d6ebc 100644
--- a/network/config/single-nic-vlans/cinder-storage.yaml
+++ b/network/config/single-nic-vlans/cinder-storage.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- cinder storage role.
-
+ Software Config to drive os-net-config to configure VLANs for the cinder storage role.
parameters:
ControlPlaneIp:
default: ''
@@ -57,7 +54,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -71,58 +68,59 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: ovs_bridge
- name: br-storage
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: br-storage
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
name: nic1
# force the MAC address of the bridge to this interface
primary: true
- -
- type: vlan
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ - type: vlan
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -136,8 +134,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/single-nic-vlans/compute.yaml b/network/config/single-nic-vlans/compute.yaml
index bd3cef34..42cfd781 100644
--- a/network/config/single-nic-vlans/compute.yaml
+++ b/network/config/single-nic-vlans/compute.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- compute role.
-
+ Software Config to drive os-net-config to configure VLANs for the compute role.
parameters:
ControlPlaneIp:
default: ''
@@ -57,7 +54,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -71,58 +68,59 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: bridge_name
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
name: nic1
# force the MAC address of the bridge to this interface
primary: true
- -
- type: vlan
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ - type: vlan
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: TenantNetworkVlanID}
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: TenantNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
+ - ip_netmask:
+ get_param: TenantIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -136,8 +134,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/single-nic-vlans/controller-no-external.yaml b/network/config/single-nic-vlans/controller-no-external.yaml
index 8e8b0f5d..9e0680ea 100644
--- a/network/config/single-nic-vlans/controller-no-external.yaml
+++ b/network/config/single-nic-vlans/controller-no-external.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- controller role. No external IP is configured.
-
+ Software Config to drive os-net-config to configure VLANs for the controller role. No external IP is configured.
parameters:
ControlPlaneIp:
default: ''
@@ -61,7 +58,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute:
- default: '10.0.0.1'
+ default: 10.0.0.1
description: default route for the external network
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -79,64 +76,65 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: bridge_name
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
name: nic1
# force the MAC address of the bridge to this interface
primary: true
- -
- type: vlan
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ - type: vlan
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: TenantNetworkVlanID}
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: TenantNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
+ - ip_netmask:
+ get_param: TenantIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -150,8 +148,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/single-nic-vlans/controller-v6.yaml b/network/config/single-nic-vlans/controller-v6.yaml
index ecbf2efb..1f9a67d6 100644
--- a/network/config/single-nic-vlans/controller-v6.yaml
+++ b/network/config/single-nic-vlans/controller-v6.yaml
@@ -1,11 +1,7 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- controller role with IPv6 on the External network. The IPv6 default
- route is on the External network, and the IPv4 default route is on
- the Control Plane.
-
+ Software Config to drive os-net-config to configure VLANs for the controller role with IPv6 on the External network. The
+ IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
parameters:
ControlPlaneIp:
default: ''
@@ -67,7 +63,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute:
- default: '10.0.0.1'
+ default: 10.0.0.1
description: default route for the external network
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -81,76 +77,74 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: bridge_name
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- # IPv4 Default Route
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
name: nic1
- # force the MAC address of the bridge to this interface
primary: true
- -
- type: vlan
- vlan_id: {get_param: ExternalNetworkVlanID}
+ - type: vlan
+ vlan_id:
+ get_param: ExternalNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: ExternalIpSubnet}
+ - ip_netmask:
+ get_param: ExternalIpSubnet
routes:
- # IPv6 Default Route
- -
- default: true
- next_hop: {get_param: ExternalInterfaceDefaultRoute}
- -
- type: vlan
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ - default: true
+ next_hop:
+ get_param: ExternalInterfaceDefaultRoute
+ - type: vlan
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: TenantNetworkVlanID}
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: TenantNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
+ - ip_netmask:
+ get_param: TenantIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the External interface. This will
@@ -165,8 +159,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/single-nic-vlans/controller.yaml b/network/config/single-nic-vlans/controller.yaml
index c5979a89..4ac18315 100644
--- a/network/config/single-nic-vlans/controller.yaml
+++ b/network/config/single-nic-vlans/controller.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- controller role.
-
+ Software Config to drive os-net-config to configure VLANs for the controller role.
parameters:
ControlPlaneIp:
default: ''
@@ -61,7 +58,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute:
- default: '10.0.0.1'
+ default: 10.0.0.1
description: default route for the external network
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -79,71 +76,72 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: ovs_bridge
- name: {get_input: bridge_name}
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: bridge_name
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- members:
- -
- type: interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ members:
+ - type: interface
name: nic1
# force the MAC address of the bridge to this interface
primary: true
- -
- type: vlan
- vlan_id: {get_param: ExternalNetworkVlanID}
+ - type: vlan
+ vlan_id:
+ get_param: ExternalNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: ExternalIpSubnet}
+ - ip_netmask:
+ get_param: ExternalIpSubnet
routes:
- -
- default: true
- next_hop: {get_param: ExternalInterfaceDefaultRoute}
- -
- type: vlan
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ - default: true
+ next_hop:
+ get_param: ExternalInterfaceDefaultRoute
+ - type: vlan
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: TenantNetworkVlanID}
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: TenantNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: TenantIpSubnet}
+ - ip_netmask:
+ get_param: TenantIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the External interface. This will
@@ -158,8 +156,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
diff --git a/network/config/single-nic-vlans/swift-storage.yaml b/network/config/single-nic-vlans/swift-storage.yaml
index 7b06580c..605b8ee4 100644
--- a/network/config/single-nic-vlans/swift-storage.yaml
+++ b/network/config/single-nic-vlans/swift-storage.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: ocata
description: >
- Software Config to drive os-net-config to configure VLANs for the
- swift storage role.
-
+ Software Config to drive os-net-config to configure VLANs for the swift storage role.
parameters:
ControlPlaneIp:
default: ''
@@ -57,7 +54,7 @@ parameters:
description: The default route of the control plane network.
type: string
ExternalInterfaceDefaultRoute: # Not used by default in this template
- default: '10.0.0.1'
+ default: 10.0.0.1
description: The default route of the external network.
type: string
ManagementInterfaceDefaultRoute: # Commented out by default in this template
@@ -71,58 +68,59 @@ parameters:
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-
resources:
OsNetConfigImpl:
- type: OS::Heat::StructuredConfig
+ type: OS::Heat::SoftwareConfig
properties:
- group: os-apply-config
+ group: script
config:
- os_net_config:
- network_config:
- -
- type: ovs_bridge
- name: br-storage
- use_dhcp: false
- dns_servers: {get_param: DnsServers}
- addresses:
- -
- ip_netmask:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: br-storage
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
list_join:
- - '/'
- - - {get_param: ControlPlaneIp}
- - {get_param: ControlPlaneSubnetCidr}
- routes:
- -
- ip_netmask: 169.254.169.254/32
- next_hop: {get_param: EC2MetadataIp}
- -
- default: true
- next_hop: {get_param: ControlPlaneDefaultRoute}
- members:
- -
- type: interface
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ members:
+ - type: interface
name: nic1
# force the MAC address of the bridge to this interface
primary: true
- -
- type: vlan
- vlan_id: {get_param: InternalApiNetworkVlanID}
+ - type: vlan
+ vlan_id:
+ get_param: InternalApiNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: InternalApiIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageNetworkVlanID}
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageIpSubnet}
- -
- type: vlan
- vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
addresses:
- -
- ip_netmask: {get_param: StorageMgmtIpSubnet}
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
# Uncomment when including environments/network-management.yaml
# If setting default route on the Management interface, comment
# out the default route on the Control Plane.
@@ -136,8 +134,9 @@ resources:
# -
# default: true
# next_hop: {get_param: ManagementInterfaceDefaultRoute}
-
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
- value: {get_resource: OsNetConfigImpl}
+ value:
+ get_resource: OsNetConfigImpl
+
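By the time run-os-net-config.sh executes, Heat has already resolved the get_param calls and str_replace has flattened the mapping into the script text, so os-net-config works on concrete values. Roughly, for the swift-storage role above, the applied configuration looks like the sketch below; every address, route and VLAN ID is a hypothetical placeholder standing in for the ControlPlane*, InternalApi* and Storage* parameters, not a value from this change:

network_config:
  - type: ovs_bridge
    name: br-storage
    use_dhcp: false
    dns_servers: [192.0.2.1]            # hypothetical DnsServers
    addresses:
      - ip_netmask: 192.0.2.10/24       # ControlPlaneIp/ControlPlaneSubnetCidr
    routes:
      - ip_netmask: 169.254.169.254/32
        next_hop: 192.0.2.1             # EC2MetadataIp (hypothetical)
      - default: true
        next_hop: 192.0.2.254           # ControlPlaneDefaultRoute (hypothetical)
    members:
      - type: interface
        name: nic1
        primary: true
      - type: vlan
        vlan_id: 20                     # InternalApiNetworkVlanID (hypothetical)
        addresses:
          - ip_netmask: 172.16.2.10/24  # InternalApiIpSubnet (hypothetical)
      - type: vlan
        vlan_id: 30                     # StorageNetworkVlanID (hypothetical)
        addresses:
          - ip_netmask: 172.16.1.10/24  # StorageIpSubnet (hypothetical)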
diff --git a/network/endpoints/build_endpoint_map.py b/network/endpoints/build_endpoint_map.py
index 964f58f7..990cbabc 100755
--- a/network/endpoints/build_endpoint_map.py
+++ b/network/endpoints/build_endpoint_map.py
@@ -191,7 +191,7 @@ def template_endpoint_items(config):
def generate_endpoint_map_template(config):
return collections.OrderedDict([
- ('heat_template_version', '2015-04-30'),
+ ('heat_template_version', 'ocata'),
('description', 'A map of OpenStack endpoints. Since the endpoints '
'are URLs, we need to have brackets around IPv6 IP addresses. The '
'inputs to these parameters come from net_ip_uri_map, which will '
@@ -280,8 +280,9 @@ def main():
try:
if options.check:
if not check_up_to_date(options.output_file, options.input_file):
- print('EndpointMap template does not match input data',
- file=sys.stderr)
+ print('EndpointMap template does not match input data. Please '
+ 'run the build_endpoint_map.py tool to update the '
+ 'template.', file=sys.stderr)
sys.exit(2)
else:
build_endpoint_map(options.output_file, options.input_file)
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
index 011dea7d..1b2f842c 100644
--- a/network/endpoints/endpoint_data.yaml
+++ b/network/endpoints/endpoint_data.yaml
@@ -10,6 +10,15 @@ Aodh:
net_param: AodhApi
port: 8042
+Barbican:
+ Internal:
+ net_param: BarbicanApi
+ Public:
+ net_param: Public
+ Admin:
+ net_param: BarbicanApi
+ port: 9311
+
Ceilometer:
Internal:
net_param: CeilometerApi
@@ -19,6 +28,15 @@ Ceilometer:
net_param: CeilometerApi
port: 8777
+Ec2Api:
+ Internal:
+ net_param: Ec2Api
+ Public:
+ net_param: Public
+ Admin:
+ net_param: Ec2Api
+ port: 8788
+
Gnocchi:
Internal:
net_param: GnocchiApi
@@ -28,6 +46,15 @@ Gnocchi:
net_param: GnocchiApi
port: 8041
+Panko:
+ Internal:
+ net_param: PankoApi
+ Public:
+ net_param: Public
+ Admin:
+ net_param: PankoApi
+ port: 8779
+
Cinder:
Internal:
net_param: CinderApi
@@ -58,11 +85,6 @@ Glance:
net_param: GlanceApi
port: 9292
-GlanceRegistry:
- Internal:
- net_param: GlanceRegistry
- port: 9191
-
Mysql:
Internal:
net_param: Mysql
@@ -187,6 +209,21 @@ Nova:
'': /v2.1
port: 8774
+NovaPlacement:
+ Internal:
+ net_param: NovaPlacement
+ uri_suffixes:
+ '': /placement
+ Public:
+ net_param: Public
+ uri_suffixes:
+ '': /placement
+ Admin:
+ net_param: NovaPlacement
+ uri_suffixes:
+ '': /placement
+ port: 8778
+
NovaVNCProxy:
Internal:
net_param: NovaApi
@@ -258,3 +295,31 @@ Ironic:
uri_suffixes:
'': /v1
port: 6385
+
+Zaqar:
+ Internal:
+ net_param: ZaqarApi
+ Public:
+ net_param: Public
+ Admin:
+ net_param: ZaqarApi
+ port: 8888
+
+ZaqarWebSocket:
+ Internal:
+ net_param: ZaqarApi
+ Public:
+ net_param: Public
+ Admin:
+ net_param: ZaqarApi
+ port: 9000
+ protocol: ws
+
+Octavia:
+ Internal:
+ net_param: OctaviaApi
+ Public:
+ net_param: Public
+ Admin:
+ net_param: OctaviaApi
+ port: 9876
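The endpoint_map.yaml diff that follows is the generated counterpart of these data changes: build_endpoint_map.py expands each service entry into per-interface EndpointMap defaults plus full host/port/protocol/uri/uri_no_suffix output blocks. As a sketch of that mapping, the Barbican entry added above corresponds to:

# endpoint_data.yaml (input, as added above)
Barbican:
  Internal:
    net_param: BarbicanApi
  Public:
    net_param: Public
  Admin:
    net_param: BarbicanApi
  port: 9311

# endpoint_map.yaml (generated): three new EndpointMap defaults ...
BarbicanAdmin: {protocol: http, port: '9311', host: IP_ADDRESS}
BarbicanInternal: {protocol: http, port: '9311', host: IP_ADDRESS}
BarbicanPublic: {protocol: http, port: '9311', host: IP_ADDRESS}
# ... and, for each of the three, an output block that resolves CLOUDNAME or
# IP_ADDRESS through ServiceNetMap (BarbicanApiNetwork for Admin/Internal,
# PublicNetwork for Public), exactly as in the Barbican hunks below.

Services that need a URI suffix declare uri_suffixes in the data file (NovaPlacement appends /placement), and a non-HTTP protocol is declared per entry (ZaqarWebSocket uses ws); both carry through verbatim into the generated template.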
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index ac519a5f..2d60df8b 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -2,7 +2,7 @@
### This file is automatically generated from endpoint_data.yaml
### by the script build_endpoint_map.py
-heat_template_version: '2015-04-30'
+heat_template_version: ocata
description: A map of OpenStack endpoints. Since the endpoints are URLs,
we need to have brackets around IPv6 IP addresses. The inputs to these
parameters come from net_ip_uri_map, which will include these brackets
@@ -22,6 +22,9 @@ parameters:
AodhAdmin: {protocol: http, port: '8042', host: IP_ADDRESS}
AodhInternal: {protocol: http, port: '8042', host: IP_ADDRESS}
AodhPublic: {protocol: http, port: '8042', host: IP_ADDRESS}
+ BarbicanAdmin: {protocol: http, port: '9311', host: IP_ADDRESS}
+ BarbicanInternal: {protocol: http, port: '9311', host: IP_ADDRESS}
+ BarbicanPublic: {protocol: http, port: '9311', host: IP_ADDRESS}
CeilometerAdmin: {protocol: http, port: '8777', host: IP_ADDRESS}
CeilometerInternal: {protocol: http, port: '8777', host: IP_ADDRESS}
CeilometerPublic: {protocol: http, port: '8777', host: IP_ADDRESS}
@@ -31,10 +34,12 @@ parameters:
CinderAdmin: {protocol: http, port: '8776', host: IP_ADDRESS}
CinderInternal: {protocol: http, port: '8776', host: IP_ADDRESS}
CinderPublic: {protocol: http, port: '8776', host: IP_ADDRESS}
+ Ec2ApiAdmin: {protocol: http, port: '8788', host: IP_ADDRESS}
+ Ec2ApiInternal: {protocol: http, port: '8788', host: IP_ADDRESS}
+ Ec2ApiPublic: {protocol: http, port: '8788', host: IP_ADDRESS}
GlanceAdmin: {protocol: http, port: '9292', host: IP_ADDRESS}
GlanceInternal: {protocol: http, port: '9292', host: IP_ADDRESS}
GlancePublic: {protocol: http, port: '9292', host: IP_ADDRESS}
- GlanceRegistryInternal: {protocol: http, port: '9191', host: IP_ADDRESS}
GnocchiAdmin: {protocol: http, port: '8041', host: IP_ADDRESS}
GnocchiInternal: {protocol: http, port: '8041', host: IP_ADDRESS}
GnocchiPublic: {protocol: http, port: '8041', host: IP_ADDRESS}
@@ -64,15 +69,30 @@ parameters:
NovaAdmin: {protocol: http, port: '8774', host: IP_ADDRESS}
NovaInternal: {protocol: http, port: '8774', host: IP_ADDRESS}
NovaPublic: {protocol: http, port: '8774', host: IP_ADDRESS}
+ NovaPlacementAdmin: {protocol: http, port: '8778', host: IP_ADDRESS}
+ NovaPlacementInternal: {protocol: http, port: '8778', host: IP_ADDRESS}
+ NovaPlacementPublic: {protocol: http, port: '8778', host: IP_ADDRESS}
NovaVNCProxyAdmin: {protocol: http, port: '6080', host: IP_ADDRESS}
NovaVNCProxyInternal: {protocol: http, port: '6080', host: IP_ADDRESS}
NovaVNCProxyPublic: {protocol: http, port: '6080', host: IP_ADDRESS}
+ OctaviaAdmin: {protocol: http, port: '9876', host: IP_ADDRESS}
+ OctaviaInternal: {protocol: http, port: '9876', host: IP_ADDRESS}
+ OctaviaPublic: {protocol: http, port: '9876', host: IP_ADDRESS}
+ PankoAdmin: {protocol: http, port: '8779', host: IP_ADDRESS}
+ PankoInternal: {protocol: http, port: '8779', host: IP_ADDRESS}
+ PankoPublic: {protocol: http, port: '8779', host: IP_ADDRESS}
SaharaAdmin: {protocol: http, port: '8386', host: IP_ADDRESS}
SaharaInternal: {protocol: http, port: '8386', host: IP_ADDRESS}
SaharaPublic: {protocol: http, port: '8386', host: IP_ADDRESS}
SwiftAdmin: {protocol: http, port: '8080', host: IP_ADDRESS}
SwiftInternal: {protocol: http, port: '8080', host: IP_ADDRESS}
SwiftPublic: {protocol: http, port: '8080', host: IP_ADDRESS}
+ ZaqarAdmin: {protocol: http, port: '8888', host: IP_ADDRESS}
+ ZaqarInternal: {protocol: http, port: '8888', host: IP_ADDRESS}
+ ZaqarPublic: {protocol: http, port: '8888', host: IP_ADDRESS}
+ ZaqarWebSocketAdmin: {protocol: ws, port: '9000', host: IP_ADDRESS}
+ ZaqarWebSocketInternal: {protocol: ws, port: '9000', host: IP_ADDRESS}
+ ZaqarWebSocketPublic: {protocol: ws, port: '9000', host: IP_ADDRESS}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
CloudEndpoints:
@@ -326,6 +346,249 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, AodhPublic, port]
+ BarbicanAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, BarbicanApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ port:
+ get_param: [EndpointMap, BarbicanAdmin, port]
+ protocol:
+ get_param: [EndpointMap, BarbicanAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, BarbicanAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, BarbicanApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, BarbicanAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, BarbicanAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, BarbicanApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, BarbicanAdmin, port]
+ BarbicanInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, BarbicanApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ port:
+ get_param: [EndpointMap, BarbicanInternal, port]
+ protocol:
+ get_param: [EndpointMap, BarbicanInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, BarbicanInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, BarbicanApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, BarbicanInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, BarbicanInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, BarbicanApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, BarbicanApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, BarbicanInternal, port]
+ BarbicanPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, BarbicanPublic, port]
+ protocol:
+ get_param: [EndpointMap, BarbicanPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, BarbicanPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, BarbicanPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, BarbicanPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, BarbicanPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, BarbicanPublic, port]
CeilometerAdmin:
host:
str_replace:
@@ -1553,173 +1816,173 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, CinderPublic, port]
- GlanceAdmin:
+ Ec2ApiAdmin:
host:
str_replace:
template:
- get_param: [EndpointMap, GlanceAdmin, host]
+ get_param: [EndpointMap, Ec2ApiAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceApiNetwork]
+ get_param: [ServiceNetMap, Ec2ApiNetwork]
template: NETWORK_uri
host_nobrackets:
str_replace:
template:
- get_param: [EndpointMap, GlanceAdmin, host]
+ get_param: [EndpointMap, Ec2ApiAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
port:
- get_param: [EndpointMap, GlanceAdmin, port]
+ get_param: [EndpointMap, Ec2ApiAdmin, port]
protocol:
- get_param: [EndpointMap, GlanceAdmin, protocol]
+ get_param: [EndpointMap, Ec2ApiAdmin, protocol]
uri:
list_join:
- ''
- - - get_param: [EndpointMap, GlanceAdmin, protocol]
+ - - get_param: [EndpointMap, Ec2ApiAdmin, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlanceAdmin, host]
+ get_param: [EndpointMap, Ec2ApiAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceApiNetwork]
+ get_param: [ServiceNetMap, Ec2ApiNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlanceAdmin, port]
+ - get_param: [EndpointMap, Ec2ApiAdmin, port]
uri_no_suffix:
list_join:
- ''
- - - get_param: [EndpointMap, GlanceAdmin, protocol]
+ - - get_param: [EndpointMap, Ec2ApiAdmin, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlanceAdmin, host]
+ get_param: [EndpointMap, Ec2ApiAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceApiNetwork]
+ get_param: [ServiceNetMap, Ec2ApiNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlanceAdmin, port]
- GlanceInternal:
+ - get_param: [EndpointMap, Ec2ApiAdmin, port]
+ Ec2ApiInternal:
host:
str_replace:
template:
- get_param: [EndpointMap, GlanceInternal, host]
+ get_param: [EndpointMap, Ec2ApiInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceApiNetwork]
+ get_param: [ServiceNetMap, Ec2ApiNetwork]
template: NETWORK_uri
host_nobrackets:
str_replace:
template:
- get_param: [EndpointMap, GlanceInternal, host]
+ get_param: [EndpointMap, Ec2ApiInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
port:
- get_param: [EndpointMap, GlanceInternal, port]
+ get_param: [EndpointMap, Ec2ApiInternal, port]
protocol:
- get_param: [EndpointMap, GlanceInternal, protocol]
+ get_param: [EndpointMap, Ec2ApiInternal, protocol]
uri:
list_join:
- ''
- - - get_param: [EndpointMap, GlanceInternal, protocol]
+ - - get_param: [EndpointMap, Ec2ApiInternal, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlanceInternal, host]
+ get_param: [EndpointMap, Ec2ApiInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceApiNetwork]
+ get_param: [ServiceNetMap, Ec2ApiNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlanceInternal, port]
+ - get_param: [EndpointMap, Ec2ApiInternal, port]
uri_no_suffix:
list_join:
- ''
- - - get_param: [EndpointMap, GlanceInternal, protocol]
+ - - get_param: [EndpointMap, Ec2ApiInternal, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlanceInternal, host]
+ get_param: [EndpointMap, Ec2ApiInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceApiNetwork]
+ get_param: [ServiceNetMap, Ec2ApiNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlanceInternal, port]
- GlancePublic:
+ - get_param: [EndpointMap, Ec2ApiInternal, port]
+ Ec2ApiPublic:
host:
str_replace:
template:
- get_param: [EndpointMap, GlancePublic, host]
+ get_param: [EndpointMap, Ec2ApiPublic, host]
params:
CLOUDNAME:
get_param:
@@ -1736,7 +1999,7 @@ outputs:
host_nobrackets:
str_replace:
template:
- get_param: [EndpointMap, GlancePublic, host]
+ get_param: [EndpointMap, Ec2ApiPublic, host]
params:
CLOUDNAME:
get_param:
@@ -1747,17 +2010,17 @@ outputs:
- NetIpMap
- get_param: [ServiceNetMap, PublicNetwork]
port:
- get_param: [EndpointMap, GlancePublic, port]
+ get_param: [EndpointMap, Ec2ApiPublic, port]
protocol:
- get_param: [EndpointMap, GlancePublic, protocol]
+ get_param: [EndpointMap, Ec2ApiPublic, protocol]
uri:
list_join:
- ''
- - - get_param: [EndpointMap, GlancePublic, protocol]
+ - - get_param: [EndpointMap, Ec2ApiPublic, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlancePublic, host]
+ get_param: [EndpointMap, Ec2ApiPublic, host]
params:
CLOUDNAME:
get_param:
@@ -1772,15 +2035,15 @@ outputs:
get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlancePublic, port]
+ - get_param: [EndpointMap, Ec2ApiPublic, port]
uri_no_suffix:
list_join:
- ''
- - - get_param: [EndpointMap, GlancePublic, protocol]
+ - - get_param: [EndpointMap, Ec2ApiPublic, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlancePublic, host]
+ get_param: [EndpointMap, Ec2ApiPublic, host]
params:
CLOUDNAME:
get_param:
@@ -1795,88 +2058,250 @@ outputs:
get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlancePublic, port]
- GlanceRegistryInternal:
+ - get_param: [EndpointMap, Ec2ApiPublic, port]
+ GlanceAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GlanceAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, GlanceApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GlanceAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ port:
+ get_param: [EndpointMap, GlanceAdmin, port]
+ protocol:
+ get_param: [EndpointMap, GlanceAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlanceAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlanceAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, GlanceApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, GlanceAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlanceAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlanceAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, GlanceApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, GlanceAdmin, port]
+ GlanceInternal:
host:
str_replace:
template:
- get_param: [EndpointMap, GlanceRegistryInternal, host]
+ get_param: [EndpointMap, GlanceInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ get_param: [ServiceNetMap, GlanceApiNetwork]
template: NETWORK_uri
host_nobrackets:
str_replace:
template:
- get_param: [EndpointMap, GlanceRegistryInternal, host]
+ get_param: [EndpointMap, GlanceInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- - get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
port:
- get_param: [EndpointMap, GlanceRegistryInternal, port]
+ get_param: [EndpointMap, GlanceInternal, port]
protocol:
- get_param: [EndpointMap, GlanceRegistryInternal, protocol]
+ get_param: [EndpointMap, GlanceInternal, protocol]
uri:
list_join:
- ''
- - - get_param: [EndpointMap, GlanceRegistryInternal, protocol]
+ - - get_param: [EndpointMap, GlanceInternal, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlanceRegistryInternal, host]
+ get_param: [EndpointMap, GlanceInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ get_param: [ServiceNetMap, GlanceApiNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlanceRegistryInternal, port]
+ - get_param: [EndpointMap, GlanceInternal, port]
uri_no_suffix:
list_join:
- ''
- - - get_param: [EndpointMap, GlanceRegistryInternal, protocol]
+ - - get_param: [EndpointMap, GlanceInternal, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlanceRegistryInternal, host]
+ get_param: [EndpointMap, GlanceInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ get_param: [ServiceNetMap, GlanceApiNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlanceRegistryInternal, port]
+ - get_param: [EndpointMap, GlanceInternal, port]
+ GlancePublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GlancePublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GlancePublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, GlancePublic, port]
+ protocol:
+ get_param: [EndpointMap, GlancePublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlancePublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlancePublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, GlancePublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlancePublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlancePublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, GlancePublic, port]
GnocchiAdmin:
host:
str_replace:
@@ -4822,6 +5247,252 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, NovaPublic, port]
+ NovaPlacementAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, NovaPlacementNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ port:
+ get_param: [EndpointMap, NovaPlacementAdmin, port]
+ protocol:
+ get_param: [EndpointMap, NovaPlacementAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPlacementAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, NovaPlacementNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, NovaPlacementAdmin, port]
+ - /placement
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPlacementAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, NovaPlacementNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, NovaPlacementAdmin, port]
+ NovaPlacementInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, NovaPlacementNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ port:
+ get_param: [EndpointMap, NovaPlacementInternal, port]
+ protocol:
+ get_param: [EndpointMap, NovaPlacementInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPlacementInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, NovaPlacementNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, NovaPlacementInternal, port]
+ - /placement
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPlacementInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, NovaPlacementNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, NovaPlacementInternal, port]
+ NovaPlacementPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, NovaPlacementPublic, port]
+ protocol:
+ get_param: [EndpointMap, NovaPlacementPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPlacementPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, NovaPlacementPublic, port]
+ - /placement
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPlacementPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, NovaPlacementPublic, port]
NovaVNCProxyAdmin:
host:
str_replace:
@@ -5065,6 +5736,492 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, NovaVNCProxyPublic, port]
+ OctaviaAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, OctaviaApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ port:
+ get_param: [EndpointMap, OctaviaAdmin, port]
+ protocol:
+ get_param: [EndpointMap, OctaviaAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, OctaviaAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, OctaviaApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, OctaviaAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, OctaviaAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, OctaviaApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, OctaviaAdmin, port]
+ OctaviaInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, OctaviaApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ port:
+ get_param: [EndpointMap, OctaviaInternal, port]
+ protocol:
+ get_param: [EndpointMap, OctaviaInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, OctaviaInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, OctaviaApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, OctaviaInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, OctaviaInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, OctaviaApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, OctaviaInternal, port]
+ OctaviaPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, OctaviaPublic, port]
+ protocol:
+ get_param: [EndpointMap, OctaviaPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, OctaviaPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, OctaviaPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, OctaviaPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, OctaviaPublic, port]
+ PankoAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, PankoAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PankoApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PankoApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, PankoAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PankoApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PankoApiNetwork]
+ port:
+ get_param: [EndpointMap, PankoAdmin, port]
+ protocol:
+ get_param: [EndpointMap, PankoAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, PankoAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, PankoAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PankoApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PankoApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, PankoAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, PankoAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, PankoAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PankoApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PankoApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, PankoAdmin, port]
+ PankoInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, PankoInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PankoApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PankoApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, PankoInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PankoApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PankoApiNetwork]
+ port:
+ get_param: [EndpointMap, PankoInternal, port]
+ protocol:
+ get_param: [EndpointMap, PankoInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, PankoInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, PankoInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PankoApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PankoApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, PankoInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, PankoInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, PankoInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PankoApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PankoApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, PankoInternal, port]
+ PankoPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, PankoPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, PankoPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, PankoPublic, port]
+ protocol:
+ get_param: [EndpointMap, PankoPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, PankoPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, PankoPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, PankoPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, PankoPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, PankoPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, PankoPublic, port]
SaharaAdmin:
host:
str_replace:
@@ -5799,3 +6956,489 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, SwiftPublic, port]
+ ZaqarAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ZaqarApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ port:
+ get_param: [EndpointMap, ZaqarAdmin, port]
+ protocol:
+ get_param: [EndpointMap, ZaqarAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ZaqarAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ZaqarApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ZaqarAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ZaqarAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ZaqarApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ZaqarAdmin, port]
+ ZaqarInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ZaqarApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ port:
+ get_param: [EndpointMap, ZaqarInternal, port]
+ protocol:
+ get_param: [EndpointMap, ZaqarInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ZaqarInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ZaqarApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ZaqarInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ZaqarInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ZaqarApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ZaqarInternal, port]
+ ZaqarPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, ZaqarPublic, port]
+ protocol:
+ get_param: [EndpointMap, ZaqarPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ZaqarPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ZaqarPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ZaqarPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ZaqarPublic, port]
+ ZaqarWebSocketAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarWebSocketAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ZaqarApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarWebSocketAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ port:
+ get_param: [EndpointMap, ZaqarWebSocketAdmin, port]
+ protocol:
+ get_param: [EndpointMap, ZaqarWebSocketAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ZaqarWebSocketAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarWebSocketAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ZaqarApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ZaqarWebSocketAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ZaqarWebSocketAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarWebSocketAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ZaqarApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ZaqarWebSocketAdmin, port]
+ ZaqarWebSocketInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarWebSocketInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ZaqarApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarWebSocketInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ port:
+ get_param: [EndpointMap, ZaqarWebSocketInternal, port]
+ protocol:
+ get_param: [EndpointMap, ZaqarWebSocketInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ZaqarWebSocketInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarWebSocketInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ZaqarApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ZaqarWebSocketInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ZaqarWebSocketInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarWebSocketInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ZaqarApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ZaqarApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ZaqarWebSocketInternal, port]
+ ZaqarWebSocketPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarWebSocketPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarWebSocketPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, ZaqarWebSocketPublic, port]
+ protocol:
+ get_param: [EndpointMap, ZaqarWebSocketPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ZaqarWebSocketPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarWebSocketPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ZaqarWebSocketPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ZaqarWebSocketPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ZaqarWebSocketPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ZaqarWebSocketPublic, port]
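For readers skimming the repetitive generated endpoint entries above: each block only composes a protocol, a host template, and a port from EndpointMap, filling the CLOUDNAME/IP_ADDRESS tokens from CloudEndpoints and NetIpMap for the network the service is bound to. A minimal Python sketch of that substitution, using hypothetical values rather than the real Heat parameters:

def resolve_host(host_template, cloud_endpoints, net_ip_map, network):
    # CLOUDNAME comes from CloudEndpoints[<network>]; IP_ADDRESS comes from
    # NetIpMap[<network>_uri], the bracketed form that is safe inside URIs for
    # IPv6 (host_nobrackets uses NetIpMap[<network>] instead).
    return (host_template
            .replace("CLOUDNAME", cloud_endpoints[network])
            .replace("IP_ADDRESS", net_ip_map[network + "_uri"]))

# Hypothetical inputs; ZaqarApiNetwork maps to internal_api by default.
endpoint = {"protocol": "http", "port": "8888", "host": "IP_ADDRESS"}
host = resolve_host(endpoint["host"],
                    {"internal_api": "overcloud.internalapi.example.com"},
                    {"internal_api_uri": "[fd00:fd00:fd00:2000::10]"},
                    "internal_api")
print(endpoint["protocol"] + "://" + host + ":" + endpoint["port"])
# -> http://[fd00:fd00:fd00:2000::10]:8888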
diff --git a/network/external.yaml b/network/external.yaml
index 4dfbc77e..21260d3f 100644
--- a/network/external.yaml
+++ b/network/external.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc.
diff --git a/network/external_v6.yaml b/network/external_v6.yaml
index e0736ab7..51000bb7 100644
--- a/network/external_v6.yaml
+++ b/network/external_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc.
diff --git a/network/internal_api.yaml b/network/internal_api.yaml
index 090e38f7..793535c6 100644
--- a/network/internal_api.yaml
+++ b/network/internal_api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Internal API network. Used for most APIs, Database, RPC.
diff --git a/network/internal_api_v6.yaml b/network/internal_api_v6.yaml
index 19d64b0a..53950656 100644
--- a/network/internal_api_v6.yaml
+++ b/network/internal_api_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Internal API network. Used for most APIs, Database, RPC.
diff --git a/network/management.yaml b/network/management.yaml
index 6798e11e..77fcd4ea 100644
--- a/network/management.yaml
+++ b/network/management.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Management network. System administration, SSH, DNS, NTP, etc. This network
diff --git a/network/management_v6.yaml b/network/management_v6.yaml
index a5e70667..e1391ad2 100644
--- a/network/management_v6.yaml
+++ b/network/management_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Management network. System administration, SSH, DNS, NTP, etc. This network
diff --git a/network/networks.yaml b/network/networks.yaml
index d3ae482b..26033ee2 100644
--- a/network/networks.yaml
+++ b/network/networks.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Create networks to split out Overcloud traffic
diff --git a/network/ports/ctlplane_vip.yaml b/network/ports/ctlplane_vip.yaml
index 5ac7d344..0f21e3e8 100644
--- a/network/ports/ctlplane_vip.yaml
+++ b/network/ports/ctlplane_vip.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port for a VIP on the undercloud ctlplane network.
diff --git a/network/ports/external.yaml b/network/ports/external.yaml
index c4f815fb..c33643e7 100644
--- a/network/ports/external.yaml
+++ b/network/ports/external.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the external network. The IP address will be chosen
diff --git a/network/ports/external_from_pool.yaml b/network/ports/external_from_pool.yaml
index 867176e3..893b26d9 100644
--- a/network/ports/external_from_pool.yaml
+++ b/network/ports/external_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/external_from_pool_v6.yaml b/network/ports/external_from_pool_v6.yaml
index e541049d..c67789af 100644
--- a/network/ports/external_from_pool_v6.yaml
+++ b/network/ports/external_from_pool_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
diff --git a/network/ports/external_v6.yaml b/network/ports/external_v6.yaml
index bfe2686f..905974f5 100644
--- a/network/ports/external_v6.yaml
+++ b/network/ports/external_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the external network. The IP address will be chosen
diff --git a/network/ports/from_service.yaml b/network/ports/from_service.yaml
index 782b6b07..69a887ea 100644
--- a/network/ports/from_service.yaml
+++ b/network/ports/from_service.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Returns an IP from a service mapped list of IPs
diff --git a/network/ports/from_service_v6.yaml b/network/ports/from_service_v6.yaml
index 80060b57..c9673dd7 100644
--- a/network/ports/from_service_v6.yaml
+++ b/network/ports/from_service_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Returns an IP from a service mapped list of IPv6 IPs
diff --git a/network/ports/internal_api.yaml b/network/ports/internal_api.yaml
index 1d521a8d..1f96e3f2 100644
--- a/network/ports/internal_api.yaml
+++ b/network/ports/internal_api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the internal_api network.
diff --git a/network/ports/internal_api_from_pool.yaml b/network/ports/internal_api_from_pool.yaml
index d7b67e26..3f16f30c 100644
--- a/network/ports/internal_api_from_pool.yaml
+++ b/network/ports/internal_api_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/internal_api_from_pool_v6.yaml b/network/ports/internal_api_from_pool_v6.yaml
index afb144ba..b36ef235 100644
--- a/network/ports/internal_api_from_pool_v6.yaml
+++ b/network/ports/internal_api_from_pool_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
diff --git a/network/ports/internal_api_v6.yaml b/network/ports/internal_api_v6.yaml
index 14738b33..e236156d 100644
--- a/network/ports/internal_api_v6.yaml
+++ b/network/ports/internal_api_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the internal_api network.
diff --git a/network/ports/management.yaml b/network/ports/management.yaml
index 967b66e1..b626bc20 100644
--- a/network/ports/management.yaml
+++ b/network/ports/management.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the management network. The IP address will be chosen
diff --git a/network/ports/management_from_pool.yaml b/network/ports/management_from_pool.yaml
index 451677b2..05fedb90 100644
--- a/network/ports/management_from_pool.yaml
+++ b/network/ports/management_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/management_from_pool_v6.yaml b/network/ports/management_from_pool_v6.yaml
index 4c1cc216..64758bf9 100644
--- a/network/ports/management_from_pool_v6.yaml
+++ b/network/ports/management_from_pool_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
diff --git a/network/ports/management_v6.yaml b/network/ports/management_v6.yaml
index a94ebc7b..9e6a35b8 100644
--- a/network/ports/management_v6.yaml
+++ b/network/ports/management_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the management network. The IP address will be chosen
diff --git a/network/ports/net_ip_list_map.yaml b/network/ports/net_ip_list_map.yaml
index d7863e02..5782bbe9 100644
--- a/network/ports/net_ip_list_map.yaml
+++ b/network/ports/net_ip_list_map.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
parameters:
ControlPlaneIpList:
@@ -138,3 +138,20 @@ outputs:
SERVICE_short_node_names: {get_param: ServiceHostnameList}
for_each:
SERVICE: {get_attr: [EnabledServicesValue, value]}
+ short_service_bootstrap_hostnames:
+ description: >
+ Map of enabled services to a list of hostnames where they're running, regardless of the network.
+ Used for bootstrap purposes.
+ value:
+ yaql:
+ # If ServiceHostnameList is empty the role is deployed with zero nodes
+ # therefore we don't want to add any *_node_names to the map
+ expression: dict($.data.map.items().where(len($[1]) > 0))
+ data:
+ map:
+ map_merge:
+ repeat:
+ template:
+ SERVICE_short_bootstrap_node_name: {get_param: ServiceHostnameList}
+ for_each:
+ SERVICE: {get_attr: [EnabledServicesValue, value]}
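The new short_service_bootstrap_hostnames output repeats one SERVICE_short_bootstrap_node_name entry per enabled service and then drops roles deployed with zero nodes; the yaql dict($.data.map.items().where(len($[1]) > 0)) is roughly the following Python, with hypothetical data:

enabled_services = ["nova_api", "glance_api"]
service_hostnames = ["overcloud-controller-0"]   # empty when the role has 0 nodes

generated = {svc + "_short_bootstrap_node_name": service_hostnames
             for svc in enabled_services}
filtered = {k: v for k, v in generated.items() if len(v) > 0}
print(filtered)
# {'nova_api_short_bootstrap_node_name': ['overcloud-controller-0'],
#  'glance_api_short_bootstrap_node_name': ['overcloud-controller-0']}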
diff --git a/network/ports/net_ip_map.yaml b/network/ports/net_ip_map.yaml
index fcf2eeee..c8cf733f 100644
--- a/network/ports/net_ip_map.yaml
+++ b/network/ports/net_ip_map.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
parameters:
ControlPlaneIp:
diff --git a/network/ports/net_vip_map_external.yaml b/network/ports/net_vip_map_external.yaml
index 71e6e811..58f96e65 100644
--- a/network/ports/net_vip_map_external.yaml
+++ b/network/ports/net_vip_map_external.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
parameters:
# Set these via parameter defaults to configure external VIPs
diff --git a/network/ports/net_vip_map_external_v6.yaml b/network/ports/net_vip_map_external_v6.yaml
index 8d054349..12db8d2d 100644
--- a/network/ports/net_vip_map_external_v6.yaml
+++ b/network/ports/net_vip_map_external_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
parameters:
# Set these via parameter defaults to configure external VIPs
diff --git a/network/ports/noop.yaml b/network/ports/noop.yaml
index 96c461e0..e2004cb0 100644
--- a/network/ports/noop.yaml
+++ b/network/ports/noop.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Returns the control plane port (provisioning network) as the ip_address.
diff --git a/network/ports/storage.yaml b/network/ports/storage.yaml
index 1ed5cca1..80400412 100644
--- a/network/ports/storage.yaml
+++ b/network/ports/storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the storage network.
diff --git a/network/ports/storage_from_pool.yaml b/network/ports/storage_from_pool.yaml
index 0a3d394c..dfab49ae 100644
--- a/network/ports/storage_from_pool.yaml
+++ b/network/ports/storage_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/storage_from_pool_v6.yaml b/network/ports/storage_from_pool_v6.yaml
index 18faf1bd..a6cde5fc 100644
--- a/network/ports/storage_from_pool_v6.yaml
+++ b/network/ports/storage_from_pool_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
diff --git a/network/ports/storage_mgmt.yaml b/network/ports/storage_mgmt.yaml
index 548d226a..b96fbd0e 100644
--- a/network/ports/storage_mgmt.yaml
+++ b/network/ports/storage_mgmt.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the storage_mgmt API network.
diff --git a/network/ports/storage_mgmt_from_pool.yaml b/network/ports/storage_mgmt_from_pool.yaml
index c3f0f4e2..6ec3dbae 100644
--- a/network/ports/storage_mgmt_from_pool.yaml
+++ b/network/ports/storage_mgmt_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/storage_mgmt_from_pool_v6.yaml b/network/ports/storage_mgmt_from_pool_v6.yaml
index e1145a31..2f3ea196 100644
--- a/network/ports/storage_mgmt_from_pool_v6.yaml
+++ b/network/ports/storage_mgmt_from_pool_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs This version is for IPv6
diff --git a/network/ports/storage_mgmt_v6.yaml b/network/ports/storage_mgmt_v6.yaml
index 9db66964..01e4c31a 100644
--- a/network/ports/storage_mgmt_v6.yaml
+++ b/network/ports/storage_mgmt_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the storage_mgmt API network.
diff --git a/network/ports/storage_v6.yaml b/network/ports/storage_v6.yaml
index adf3595a..1dd76199 100644
--- a/network/ports/storage_v6.yaml
+++ b/network/ports/storage_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the storage network.
diff --git a/network/ports/tenant.yaml b/network/ports/tenant.yaml
index d8f78c49..f6929b81 100644
--- a/network/ports/tenant.yaml
+++ b/network/ports/tenant.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the tenant network.
diff --git a/network/ports/tenant_from_pool.yaml b/network/ports/tenant_from_pool.yaml
index d5fd7080..c72b2278 100644
--- a/network/ports/tenant_from_pool.yaml
+++ b/network/ports/tenant_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/tenant_from_pool_v6.yaml b/network/ports/tenant_from_pool_v6.yaml
index d4f0d29c..bc056fa6 100644
--- a/network/ports/tenant_from_pool_v6.yaml
+++ b/network/ports/tenant_from_pool_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/tenant_v6.yaml b/network/ports/tenant_v6.yaml
index 21ba1efa..84101828 100644
--- a/network/ports/tenant_v6.yaml
+++ b/network/ports/tenant_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the tenant network.
diff --git a/network/ports/vip.yaml b/network/ports/vip.yaml
index 38322907..d996d03d 100644
--- a/network/ports/vip.yaml
+++ b/network/ports/vip.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port for a VIP on the isolated network NetworkName.
diff --git a/network/ports/vip_v6.yaml b/network/ports/vip_v6.yaml
index 498e5d69..7a45756c 100644
--- a/network/ports/vip_v6.yaml
+++ b/network/ports/vip_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port for a VIP on the isolated network NetworkName.
diff --git a/network/scripts/run-os-net-config.sh b/network/scripts/run-os-net-config.sh
new file mode 100755
index 00000000..8fe2d270
--- /dev/null
+++ b/network/scripts/run-os-net-config.sh
@@ -0,0 +1,148 @@
+#!/bin/bash
+# The following environment variables may be set to substitute in a
+# custom bridge or interface name. Normally these are provided by the calling
+# SoftwareConfig resource, but they may also be set manually for testing.
+# $bridge_name : The bridge device name to apply
+# $interface_name : The interface name to apply
+#
+# Also, this token is replaced via str_replace in the SoftwareConfig running
+# the script; in the future we may extend this to also work with a variable,
+# e.g. a deployment input via input_values
+# $network_config : the json serialized os-net-config config to apply
+#
+set -eux
+
+function get_metadata_ip() {
+
+ local METADATA_IP
+
+ # Look for a variety of Heat transports
+ # FIXME: Heat should provide a way to obtain this in a single place
+ for URL in os-collect-config.cfn.metadata_url os-collect-config.heat.auth_url os-collect-config.request.metadata_url os-collect-config.zaqar.auth_url; do
+ METADATA_IP=$(os-apply-config --key $URL --key-default '' --type raw 2>/dev/null | sed -e 's|http.*://\([^:]*\).*|\1|')
+ [ -n "$METADATA_IP" ] && break
+ done
+
+ echo $METADATA_IP
+
+}
+
+function is_local_ip() {
+ local IP_TO_CHECK=$1
+ if ip -o a | grep "inet6\? $IP_TO_CHECK/" &>/dev/null; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+function ping_metadata_ip() {
+ local METADATA_IP=$(get_metadata_ip)
+
+ if [ -n "$METADATA_IP" ] && ! is_local_ip $METADATA_IP; then
+
+ echo -n "Trying to ping metadata IP ${METADATA_IP}..."
+
+ local COUNT=0
+ until ping -c 1 $METADATA_IP &> /dev/null; do
+ COUNT=$(( $COUNT + 1 ))
+ if [ $COUNT -eq 10 ]; then
+ echo "FAILURE"
+ echo "$METADATA_IP is not pingable." >&2
+ exit 1
+ fi
+ done
+ echo "SUCCESS"
+
+ else
+ echo "No metadata IP found. Skipping."
+ fi
+}
+
+function configure_safe_defaults() {
+
+[[ $? == 0 ]] && return 0  # invoked from the EXIT trap; skip the fallback when the script exited cleanly
+
+cat > /etc/os-net-config/dhcp_all_interfaces.yaml <<EOF_CAT
+# This file is an autogenerated safe defaults file for os-net-config
+# which runs DHCP on all discovered interfaces to ensure connectivity
+# back to the undercloud for updates
+network_config:
+EOF_CAT
+
+ for iface in $(ls /sys/class/net | grep -v ^lo$); do
+ local mac_addr_type="$(cat /sys/class/net/${iface}/addr_assign_type)"
+ if [ "$mac_addr_type" != "0" ]; then
+ echo "Device has generated MAC, skipping."
+ else
+ ip link set dev $iface up &>/dev/null
+ HAS_LINK="$(cat /sys/class/net/${iface}/carrier)"
+
+ TRIES=10
+ while [ "$HAS_LINK" == "0" -a $TRIES -gt 0 ]; do
+ HAS_LINK="$(cat /sys/class/net/${iface}/carrier)"
+ if [ "$HAS_LINK" == "1" ]; then
+ break
+ else
+ sleep 1
+ fi
+ TRIES=$(( TRIES - 1 ))
+ done
+ if [ "$HAS_LINK" == "1" ] ; then
+cat >> /etc/os-net-config/dhcp_all_interfaces.yaml <<EOF_CAT
+ -
+ type: interface
+ name: $iface
+ use_dhcp: true
+EOF_CAT
+ fi
+ fi
+ done
+ set +e
+ os-net-config -c /etc/os-net-config/dhcp_all_interfaces.yaml -v --detailed-exit-codes --cleanup
+ RETVAL=$?
+ set -e
+ if [[ $RETVAL == 2 ]]; then
+ ping_metadata_ip
+ elif [[ $RETVAL != 0 ]]; then
+ echo "ERROR: configuration of safe defaults failed."
+ fi
+}
+
+if [ -n '$network_config' ]; then
+ if [ -z "${disable_configure_safe_defaults:-''}" ]; then
+ trap configure_safe_defaults EXIT
+ fi
+
+ mkdir -p /etc/os-net-config
+ # Note these variables come from the calling heat SoftwareConfig
+ echo '$network_config' > /etc/os-net-config/config.json
+
+ if [ "$(type -t network_config_hook)" = "function" ]; then
+ network_config_hook
+ fi
+
+ sed -i "s/bridge_name/${bridge_name:-''}/" /etc/os-net-config/config.json
+ sed -i "s/interface_name/${interface_name:-''}/" /etc/os-net-config/config.json
+
+ set +e
+ os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
+ RETVAL=$?
+ set -e
+
+ if [[ $RETVAL == 2 ]]; then
+ ping_metadata_ip
+
+ # NOTE(dprince): this udev rule can apparently leak DHCP processes?
+ # https://bugs.launchpad.net/tripleo/+bug/1538259
+ # until we discover the root cause we can simply disable the
+ # rule because networking has already been configured at this point
+ if [ -f /etc/udev/rules.d/99-dhcp-all-interfaces.rules ]; then
+ rm /etc/udev/rules.d/99-dhcp-all-interfaces.rules
+ fi
+
+ elif [[ $RETVAL != 0 ]]; then
+ echo "ERROR: os-net-config configuration failed." >&2
+ exit 1
+ fi
+fi
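The safe-defaults fallback above is the part of the new script worth understanding: if applying $network_config fails, the EXIT trap writes a config that runs DHCP on every physical, cabled interface so the node stays reachable from the undercloud. A rough Python sketch of just the interface selection, for illustration only (the script remains the source of truth, and it additionally brings links up with `ip link set ... up` before checking carrier):

import os

def dhcp_fallback_config(sysfs="/sys/class/net"):
    entries = []
    for iface in sorted(os.listdir(sysfs)):
        if iface == "lo":
            continue
        try:
            with open(sysfs + "/" + iface + "/addr_assign_type") as f:
                permanent_mac = f.read().strip() == "0"   # skip NICs with generated MACs
            # carrier is only readable once the link is up; the script does that first
            with open(sysfs + "/" + iface + "/carrier") as f:
                has_link = f.read().strip() == "1"
        except OSError:
            continue
        if permanent_mac and has_link:
            entries.append({"type": "interface", "name": iface, "use_dhcp": True})
    return {"network_config": entries}

print(dhcp_fallback_config())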
diff --git a/network/service_net_map.j2.yaml b/network/service_net_map.j2.yaml
index ac05fc73..390b18b4 100644
--- a/network/service_net_map.j2.yaml
+++ b/network/service_net_map.j2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Mapping of service_name_network -> network name
@@ -21,28 +21,36 @@ parameters:
# snake_case - the names must still match when converted
ServiceNetMapDefaults:
default:
+ # Note the values in this map are replaced by the *NetName
+ # parameters, to allow for sane defaults when the network names
+ # are overridden.
ApacheNetwork: internal_api
NeutronTenantNetwork: tenant
CeilometerApiNetwork: internal_api
AodhApiNetwork: internal_api
+ PankoApiNetwork: internal_api
+ BarbicanApiNetwork: internal_api
GnocchiApiNetwork: internal_api
MongodbNetwork: internal_api
CinderApiNetwork: internal_api
CinderIscsiNetwork: storage
GlanceApiNetwork: storage
- GlanceRegistryNetwork: internal_api
IronicApiNetwork: ctlplane
IronicNetwork: ctlplane
KeystoneAdminApiNetwork: ctlplane # allows undercloud to config endpoints
KeystonePublicApiNetwork: internal_api
ManilaApiNetwork: internal_api
NeutronApiNetwork: internal_api
+ OctaviaApiNetwork: internal_api
HeatApiNetwork: internal_api
HeatApiCfnNetwork: internal_api
HeatApiCloudwatchNetwork: internal_api
NovaApiNetwork: internal_api
+ NovaPlacementNetwork: internal_api
NovaMetadataNetwork: internal_api
NovaVncProxyNetwork: internal_api
+ Ec2ApiNetwork: internal_api
+ Ec2ApiMetadataNetwork: internal_api
SwiftStorageNetwork: storage_mgmt
SwiftProxyNetwork: storage
SaharaApiNetwork: internal_api
@@ -56,10 +64,14 @@ parameters:
CephRgwNetwork: storage
PublicNetwork: external
OpendaylightApiNetwork: internal_api
+ OvnDbsNetwork: internal_api
MistralApiNetwork: internal_api
+ ZaqarApiNetwork: internal_api
+ PacemakerRemoteNetwork: internal_api
# We special-case the default ResolveNetwork for the CephStorage role
# for backwards compatibility, all other roles default to internal_api
CephStorageHostnameResolveNetwork: storage
+ EtcdNetwork: internal_api
{% for role in roles if role.name != 'CephStorage' %}
{{role.name}}HostnameResolveNetwork: internal_api
{% endfor %}
@@ -79,20 +91,62 @@ parameters:
internal use only, this will be removed in future.
type: json
+ InternalApiNetName:
+ default: internal_api
+ description: The name of the internal API network.
+ type: string
+ ExternalNetName:
+ default: external
+ description: The name of the external network.
+ type: string
+ ManagementNetName:
+ default: management
+ description: The name of the management network.
+ type: string
+ StorageNetName:
+ default: storage
+ description: The name of the storage network.
+ type: string
+ StorageMgmtNetName:
+ default: storage_mgmt
+ description: The name of the storage management network.
+ type: string
+ TenantNetName:
+ default: tenant
+ description: The name of the tenant network.
+ type: string
+
+
parameter_groups:
- label: deprecated
description: Do not use deprecated params, they will be removed.
parameters:
- ServiceNetMapDeprecatedMapping
+resources:
+ ServiceNetMapValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_merge:
+ - map_replace:
+ - {get_param: ServiceNetMapDefaults}
+ - values:
+ external: {get_param: ExternalNetName}
+ internal_api: {get_param: InternalApiNetName}
+ storage: {get_param: StorageNetName}
+ storage_mgmt: {get_param: StorageMgmtNetName}
+ tenant: {get_param: TenantNetName}
+ management: {get_param: ManagementNetName}
+ - map_replace:
+ - {get_param: ServiceNetMap}
+ - keys: {get_param: ServiceNetMapDeprecatedMapping}
+
+
outputs:
service_net_map:
- value:
- map_merge:
- - {get_param: ServiceNetMapDefaults}
- - map_replace:
- - {get_param: ServiceNetMap}
- - keys: {get_param: ServiceNetMapDeprecatedMapping}
+ value: {get_attr: [ServiceNetMapValue, value]}
service_net_map_lower:
value:
@@ -102,9 +156,4 @@ outputs:
yaql:
expression: dict($.data.map.items().select([ regex(`([a-z0-9])([A-Z])`).replace($[0], '\\1_\\2').toLower(), $[1]]))
data:
- map:
- map_merge:
- - {get_param: ServiceNetMapDefaults}
- - map_replace:
- - {get_param: ServiceNetMap}
- - keys: {get_param: ServiceNetMapDeprecatedMapping}
+ map: {get_attr: [ServiceNetMapValue, value]}
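The new ServiceNetMapValue resource replaces the old inline map_merge so the *NetName parameters can rename networks without touching every service entry: the default map's values are swapped via map_replace, operator overrides are merged on top (after remapping deprecated keys), and service_net_map_lower snake_cases the keys. Roughly, in Python with hypothetical data:

import re

defaults = {"ZaqarApiNetwork": "internal_api", "PublicNetwork": "external"}
net_names = {"internal_api": "internal_api", "external": "public_net"}  # e.g. ExternalNetName: public_net
user_map = {"ZaqarApiNetwork": "ctlplane"}      # operator-supplied ServiceNetMap
deprecated = {}                                 # ServiceNetMapDeprecatedMapping

resolved = {k: net_names.get(v, v) for k, v in defaults.items()}           # map_replace (values)
resolved.update({deprecated.get(k, k): v for k, v in user_map.items()})    # map_replace (keys) + map_merge

lower = {re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", k).lower(): v for k, v in resolved.items()}
print(resolved)  # {'ZaqarApiNetwork': 'ctlplane', 'PublicNetwork': 'public_net'}
print(lower)     # {'zaqar_api_network': 'ctlplane', 'public_network': 'public_net'}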
diff --git a/network/storage.yaml b/network/storage.yaml
index 35dae17a..0a704ea3 100644
--- a/network/storage.yaml
+++ b/network/storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Storage network.
diff --git a/network/storage_mgmt.yaml b/network/storage_mgmt.yaml
index 03cfd139..c7117165 100644
--- a/network/storage_mgmt.yaml
+++ b/network/storage_mgmt.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Storage management network. Storage replication, etc.
diff --git a/network/storage_mgmt_v6.yaml b/network/storage_mgmt_v6.yaml
index 39c456db..2b065195 100644
--- a/network/storage_mgmt_v6.yaml
+++ b/network/storage_mgmt_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Storage management network. Storage replication, etc.
diff --git a/network/storage_v6.yaml b/network/storage_v6.yaml
index 5c8af9e5..777e6167 100644
--- a/network/storage_v6.yaml
+++ b/network/storage_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Storage network.
diff --git a/network/tenant.yaml b/network/tenant.yaml
index 1045b81b..33055fe8 100644
--- a/network/tenant.yaml
+++ b/network/tenant.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Tenant network.
diff --git a/network/tenant_v6.yaml b/network/tenant_v6.yaml
index bf758a50..0bf5d2f0 100644
--- a/network/tenant_v6.yaml
+++ b/network/tenant_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Tenant IPv6 network.
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 9b9cd581..1b9646fe 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -11,6 +11,8 @@ resource_registry:
OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
{% for role in roles %}
+ OS::TripleO::{{role.name}}::PreNetworkConfig: OS::Heat::None
+ OS::TripleO::{{role.name}}PostDeploySteps: puppet/post.yaml
OS::TripleO::{{role.name}}: puppet/{{role.name.lower()}}-role.yaml
OS::TripleO::{{role.name}}Config: puppet/{{role.name.lower()}}-config.yaml
OS::TripleO::Tasks::{{role.name}}PreConfig: OS::Heat::None
@@ -40,6 +42,8 @@ resource_registry:
# in the jinja loop
OS::TripleO::Controller::Net::SoftwareConfig: net-config-bridge.yaml
+ OS::TripleO::ServiceServerMetadataHook: OS::Heat::None
+
OS::TripleO::Server: OS::Nova::Server
# This creates the "heat-admin" user for all OS images by default
@@ -48,15 +52,22 @@ resource_registry:
# Hooks for operator extra config
# NodeUserData == Cloud-init additional user-data, e.g cloud-config
+ # role::NodeUserData == Role specific cloud-init additional user-data
# ControllerExtraConfigPre == Controller configuration pre service deployment
# NodeExtraConfig == All nodes configuration pre service deployment
# NodeExtraConfigPost == All nodes configuration post service deployment
OS::TripleO::NodeUserData: firstboot/userdata_default.yaml
+{% for role in roles %}
+ OS::TripleO::{{role.name}}::NodeUserData: firstboot/userdata_default.yaml
+{% endfor %}
OS::TripleO::NodeTLSCAData: OS::Heat::None
OS::TripleO::NodeTLSData: OS::Heat::None
OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml
OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
+ OS::TripleO::Tasks::ControllerPrePuppet: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPostPuppet: OS::Heat::None
+
# "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
# phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
# configuration with knowledge of all nodes in the cluster is required vs single
@@ -86,6 +97,7 @@ resource_registry:
OS::TripleO::Network::Ports::StorageVipPort: network/ports/noop.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: network/ports/noop.yaml
OS::TripleO::Network::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
+ OS::TripleO::Network::Ports::ControlPlaneVipPort: OS::Neutron::Port
# Service to network Mappings
OS::TripleO::ServiceNetMap: network/service_net_map.yaml
@@ -96,12 +108,18 @@ resource_registry:
# validation resources
OS::TripleO::AllNodes::Validation: all-nodes-validation.yaml
+ # Upgrade resources
+ OS::TripleO::UpgradeConfig: puppet/upgrade_config.yaml
+ OS::TripleO::UpgradeSteps: OS::Heat::None
+
# services
OS::TripleO::Services: puppet/services/services.yaml
OS::TripleO::Services::Apache: puppet/services/apache.yaml
OS::TripleO::Services::ApacheTLS: OS::Heat::None
OS::TripleO::Services::CACerts: puppet/services/ca-certs.yaml
+ OS::TripleO::Services::CephMds: OS::Heat::None
OS::TripleO::Services::CephMon: OS::Heat::None
+ OS::TripleO::Services::CephRbdMirror: OS::Heat::None
OS::TripleO::Services::CephRgw: OS::Heat::None
OS::TripleO::Services::CephOSD: OS::Heat::None
OS::TripleO::Services::CephClient: OS::Heat::None
@@ -111,16 +129,16 @@ resource_registry:
OS::TripleO::Services::CinderScheduler: puppet/services/cinder-scheduler.yaml
OS::TripleO::Services::CinderVolume: puppet/services/cinder-volume.yaml
OS::TripleO::Services::BlockStorageCinderVolume: puppet/services/cinder-volume.yaml
- OS::TripleO::Services::Core: OS::Heat::None
OS::TripleO::Services::Keystone: puppet/services/keystone.yaml
OS::TripleO::Services::GlanceApi: puppet/services/glance-api.yaml
- OS::TripleO::Services::GlanceRegistry: puppet/services/glance-registry.yaml
+ OS::TripleO::Services::GlanceRegistry: puppet/services/disabled/glance-registry.yaml
OS::TripleO::Services::HeatApi: puppet/services/heat-api.yaml
OS::TripleO::Services::HeatApiCfn: puppet/services/heat-api-cfn.yaml
OS::TripleO::Services::HeatApiCloudwatch: puppet/services/heat-api-cloudwatch.yaml
OS::TripleO::Services::HeatEngine: puppet/services/heat-engine.yaml
OS::TripleO::Services::Kernel: puppet/services/kernel.yaml
OS::TripleO::Services::MySQL: puppet/services/database/mysql.yaml
+ OS::TripleO::Services::MySQLTLS: OS::Heat::None
OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml
OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml
@@ -137,10 +155,13 @@ resource_registry:
OS::TripleO::Services::NeutronCorePluginPlumgrid: puppet/services/neutron-plugin-plumgrid.yaml
OS::TripleO::Services::NeutronCorePluginNuage: puppet/services/neutron-plugin-nuage.yaml
OS::TripleO::Services::NeutronCorePluginOpencontrail: puppet/services/neutron-plugin-opencontrail.yaml
+ OS::TripleO::Services::OVNDBs: OS::Heat::None
+
OS::TripleO::Services::NeutronCorePluginMidonet: puppet/services/neutron-midonet.yaml
OS::TripleO::Services::NeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml
OS::TripleO::Services::ComputeNeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml
OS::TripleO::Services::Pacemaker: OS::Heat::None
+ OS::TripleO::Services::PacemakerRemote: OS::Heat::None
OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None
OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml
OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml
@@ -150,10 +171,12 @@ resource_registry:
OS::TripleO::Services::Memcached: puppet/services/memcached.yaml
OS::TripleO::Services::SaharaApi: OS::Heat::None
OS::TripleO::Services::SaharaEngine: OS::Heat::None
+ OS::TripleO::Services::Sshd: OS::Heat::None
OS::TripleO::Services::Redis: puppet/services/database/redis.yaml
OS::TripleO::Services::NovaConductor: puppet/services/nova-conductor.yaml
OS::TripleO::Services::MongoDb: puppet/services/database/mongodb.yaml
OS::TripleO::Services::NovaApi: puppet/services/nova-api.yaml
+ OS::TripleO::Services::NovaPlacement: puppet/services/nova-placement.yaml
OS::TripleO::Services::NovaMetadata: puppet/services/nova-metadata.yaml
OS::TripleO::Services::NovaScheduler: puppet/services/nova-scheduler.yaml
OS::TripleO::Services::NovaConsoleauth: puppet/services/nova-consoleauth.yaml
@@ -177,22 +200,23 @@ resource_registry:
OS::TripleO::Services::GnocchiApi: puppet/services/gnocchi-api.yaml
OS::TripleO::Services::GnocchiMetricd: puppet/services/gnocchi-metricd.yaml
OS::TripleO::Services::GnocchiStatsd: puppet/services/gnocchi-statsd.yaml
- OS::TripleO::Services::VipHosts: puppet/services/vip-hosts.yaml
# Services that are disabled by default (use relevant environment files):
OS::TripleO::Services::FluentdClient: OS::Heat::None
OS::TripleO::LoggingConfiguration: puppet/services/logging/fluentd-config.yaml
- OS::Tripleo::Services::ManilaApi: OS::Heat::None
- OS::Tripleo::Services::ManilaScheduler: OS::Heat::None
- OS::Tripleo::Services::ManilaShare: OS::Heat::None
- OS::Tripleo::Services::ManilaBackendGeneric: OS::Heat::None
- OS::Tripleo::Services::ManilaBackendNetapp: OS::Heat::None
- OS::Tripleo::Services::ManilaBackendCephFs: OS::Heat::None
+ OS::TripleO::Services::ManilaApi: OS::Heat::None
+ OS::TripleO::Services::ManilaScheduler: OS::Heat::None
+ OS::TripleO::Services::ManilaShare: OS::Heat::None
+ OS::TripleO::Services::ManilaBackendGeneric: OS::Heat::None
+ OS::TripleO::Services::ManilaBackendNetapp: OS::Heat::None
+ OS::TripleO::Services::ManilaBackendCephFs: OS::Heat::None
OS::TripleO::Services::ComputeNeutronL3Agent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronMetadataAgent: OS::Heat::None
+ OS::TripleO::Services::BarbicanApi: OS::Heat::None
OS::TripleO::Services::AodhApi: puppet/services/aodh-api.yaml
OS::TripleO::Services::AodhEvaluator: puppet/services/aodh-evaluator.yaml
OS::TripleO::Services::AodhNotifier: puppet/services/aodh-notifier.yaml
OS::TripleO::Services::AodhListener: puppet/services/aodh-listener.yaml
+ OS::TripleO::Services::PankoApi: OS::Heat::None
OS::TripleO::Services::MistralEngine: OS::Heat::None
OS::TripleO::Services::MistralApi: OS::Heat::None
OS::TripleO::Services::MistralExecutor: OS::Heat::None
@@ -209,7 +233,20 @@ resource_registry:
OS::TripleO::Services::ContrailControl: puppet/services/network/contrail-control.yaml
OS::TripleO::Services::ContrailDatabase: puppet/services/network/contrail-database.yaml
OS::TripleO::Services::ContrailWebui: puppet/services/network/contrail-webui.yaml
+ OS::TripleO::Services::TLSProxyBase: OS::Heat::None
+ OS::TripleO::Services::Zaqar: OS::Heat::None
+ OS::TripleO::Services::NeutronML2FujitsuCfab: OS::Heat::None
+ OS::TripleO::Services::NeutronML2FujitsuFossw: OS::Heat::None
+ OS::TripleO::Services::CinderHPELeftHandISCSI: OS::Heat::None
+ OS::TripleO::Services::Etcd: OS::Heat::None
+ OS::TripleO::Services::Ec2Api: OS::Heat::None
+ OS::TripleO::Services::AuditD: OS::Heat::None
parameter_defaults:
EnablePackageInstall: false
SoftwareConfigTransport: POLL_TEMP_URL
+
+{% for role in roles %}
+ # Parameters generated for {{role.name}} Role
+ {{role.name}}Services: {{role.ServicesDefault|default([])}}
+{% endfor %}
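The jinja loop added at the end of the registry moves the per-role service list defaults out of overcloud.j2.yaml (the default: on {{role.name}}Services is dropped there, as shown in the next file) and into generated parameter_defaults. Assuming the jinja2 Python library and a trimmed-down roles list, the expansion looks like this:

from jinja2 import Template

roles = [
    {"name": "Controller", "ServicesDefault": ["OS::TripleO::Services::Keystone"]},
    {"name": "Compute"},   # no ServicesDefault -> falls back to []
]
tmpl = Template(
    "{% for role in roles %}"
    "  # Parameters generated for {{ role.name }} Role\n"
    "  {{ role.name }}Services: {{ role.ServicesDefault | default([]) }}\n"
    "{% endfor %}"
)
print(tmpl.render(roles=roles))
# Prints:
#   # Parameters generated for Controller Role
#   ControllerServices: ['OS::TripleO::Services::Keystone']
#   # Parameters generated for Compute Role
#   ComputeServices: []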
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index d8d38c2a..f93c19a3 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -1,4 +1,5 @@
-heat_template_version: 2016-10-14
+{% set primary_role_name = roles[0].name -%}
+heat_template_version: ocata
description: >
Deploy an OpenStack environment, consisting of several node types (roles),
@@ -121,7 +122,6 @@ parameters:
resource_registry) which represent nested stacks
for each service that should get installed on the {{role.name}} role.
type: comma_delimited_list
- default: {{role.ServicesDefault|default([])}}
{{role.name}}Count:
description: Number of {{role.name}} nodes to deploy
@@ -171,9 +171,50 @@ parameters:
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
+ AddVipsToEtcHosts:
+ default: True
+ type: boolean
+ description: >
+ Set to true to append per-network VIPs to /etc/hosts on each node.
+
+conditions:
+ add_vips_to_etc_hosts: {equals : [{get_param: AddVipsToEtcHosts}, True]}
resources:
+ VipHosts:
+ type: OS::Heat::Value
+ properties:
+ type: string
+ value:
+ list_join:
+ - "\n"
+ - - str_replace:
+ template: IP HOST
+ params:
+ IP: {get_attr: [VipMap, net_ip_map, external]}
+ HOST: {get_param: CloudName}
+ - str_replace:
+ template: IP HOST
+ params:
+ IP: {get_attr: [VipMap, net_ip_map, ctlplane]}
+ HOST: {get_param: CloudNameCtlplane}
+ - str_replace:
+ template: IP HOST
+ params:
+ IP: {get_attr: [VipMap, net_ip_map, internal_api]}
+ HOST: {get_param: CloudNameInternal}
+ - str_replace:
+ template: IP HOST
+ params:
+ IP: {get_attr: [VipMap, net_ip_map, storage]}
+ HOST: {get_param: CloudNameStorage}
+ - str_replace:
+ template: IP HOST
+ params:
+ IP: {get_attr: [VipMap, net_ip_map, storage_mgmt]}
+ HOST: {get_param: CloudNameStorageManagement}
+
HeatAuthEncryptionKey:
type: OS::Heat::RandomString
@@ -232,8 +273,19 @@ resources:
config: {get_attr: [allNodesConfig, config_id]}
servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
input_values:
- bootstrap_nodeid: {get_attr: [{{role.name}}, resource.0.hostname]}
- bootstrap_nodeid_ip: {get_attr: [{{role.name}}, resource.0.ip_address]}
+ # Note we have to use yaql to look up the first hostname/ip in the
+ # list because heat path based attributes operate on the attribute
+ # inside the ResourceGroup, not the exposed list; see the discussion in
+ # https://bugs.launchpad.net/heat/+bug/1640488
+ # The coalesce is needed because $.data is None during heat validation
+ bootstrap_nodeid:
+ yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{role.name}}, hostname]}
+ bootstrap_nodeid_ip:
+ yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{role.name}}, ip_address]}
{{role.name}}AllNodesValidationDeployment:
type: OS::Heat::StructuredDeployments
@@ -300,7 +352,7 @@ resources:
# - The outer one filters the map based on the services enabled for the role
# then merges the result into one map.
- yaql:
- expression: let(root => $) -> $.data.map.items().where($[0] in $root.data.services).select($[1]).reduce($1.mergeWith($2), {})
+ expression: let(root => $) -> $.data.map.items().where($[0] in coalesce($root.data.services, [])).select($[1]).reduce($1.mergeWith($2), {})
data:
map:
yaql:
@@ -312,15 +364,23 @@ resources:
services: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
ServiceNames: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChain, role_data, monitoring_subscriptions]}
+ ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChain, role_data, service_metadata_settings]}
{% endfor %}
hostsConfig:
type: OS::TripleO::Hosts::SoftwareConfig
properties:
hosts:
+ list_join:
+ - "\n"
+ - - if:
+ - add_vips_to_etc_hosts
+ - {get_attr: [VipHosts, value]}
+ - ''
+ -
{% for role in roles %}
- - list_join:
- - '\n'
+ - list_join:
+ - "\n"
- {get_attr: [{{role.name}}, hosts_entry]}
{% endfor %}
@@ -356,8 +416,8 @@ resources:
{% for role in roles %}
- {get_attr: [{{role.name}}ServiceChain, role_data, logging_sources]}
{% endfor %}
- controller_ips: {get_attr: [Controller, ip_address]}
- controller_names: {get_attr: [Controller, hostname]}
+ controller_ips: {get_attr: [{{primary_role_name}}, ip_address]}
+ controller_names: {get_attr: [{{primary_role_name}}, hostname]}
service_ips:
# Note (shardy) this somewhat complex yaql may be replaced
# with a map_deep_merge function in ocata. It merges the
@@ -386,8 +446,16 @@ resources:
{% for role in roles %}
- {get_attr: [{{role.name}}IpListMap, short_service_hostnames]}
{% endfor %}
+ short_service_bootstrap_node:
+ yaql:
+ expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten().first()]))
+ data:
+ l:
+{% for role in roles %}
+ - {get_attr: [{{role.name}}IpListMap, short_service_bootstrap_hostnames]}
+{% endfor %}
# FIXME(shardy): These require further work to move into service_ips
- memcache_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]}
+ memcache_node_ips: {get_attr: [{{primary_role_name}}IpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]}
NetVipMap: {get_attr: [VipMap, net_ip_map]}
RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
@@ -419,7 +487,7 @@ resources:
type: OS::TripleO::Network
ControlVirtualIP:
- type: OS::Neutron::Port
+ type: OS::TripleO::Network::Ports::ControlPlaneVipPort
depends_on: Networks
properties:
name: control_virtual_ip
@@ -493,12 +561,12 @@ resources:
PingTestIps:
list_join:
- ' '
- - - {get_attr: [Controller, resource.0.external_ip_address]}
- - {get_attr: [Controller, resource.0.internal_api_ip_address]}
- - {get_attr: [Controller, resource.0.storage_ip_address]}
- - {get_attr: [Controller, resource.0.storage_mgmt_ip_address]}
- - {get_attr: [Controller, resource.0.tenant_ip_address]}
- - {get_attr: [Controller, resource.0.management_ip_address]}
+ - - {get_attr: [{{primary_role_name}}, resource.0.external_ip_address]}
+ - {get_attr: [{{primary_role_name}}, resource.0.internal_api_ip_address]}
+ - {get_attr: [{{primary_role_name}}, resource.0.storage_ip_address]}
+ - {get_attr: [{{primary_role_name}}, resource.0.storage_mgmt_ip_address]}
+ - {get_attr: [{{primary_role_name}}, resource.0.tenant_ip_address]}
+ - {get_attr: [{{primary_role_name}}, resource.0.management_ip_address]}
UpdateWorkflow:
type: OS::TripleO::Tasks::UpdateWorkflow
@@ -525,15 +593,16 @@ resources:
- {{role.name}}AllNodesValidationDeployment
{% endfor %}
properties:
+ servers:
{% for role in roles %}
- servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}
- # Post deployment steps for all roles
- AllNodesDeploySteps:
- type: OS::TripleO::PostDeploySteps
-{% for role in roles %}
+ # Upgrade steps for all roles
+ AllNodesUpgradeSteps:
+ type: OS::TripleO::UpgradeSteps
depends_on:
+{% for role in roles %}
- {{role.name}}AllNodesDeployment
{% endfor %}
properties:
@@ -546,6 +615,20 @@ resources:
{{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
{% endfor %}
+ # Post deployment steps for all roles
+ AllNodesDeploySteps:
+ type: OS::TripleO::PostDeploySteps
+ depends_on: AllNodesUpgradeSteps
+ properties:
+ servers:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+{% endfor %}
+ role_data:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
+{% endfor %}
+
outputs:
ManagedEndpoints:
description: Asserts that the keystone endpoints have been provisioned.
@@ -556,57 +639,6 @@ outputs:
KeystoneAdminVip:
description: Keystone Admin VIP endpoint
value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
- PublicVip:
- description: Controller VIP for public API endpoints
- value: {get_attr: [VipMap, net_ip_map, external]}
- AodhInternalVip:
- description: VIP for Aodh API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, AodhApiNetwork]}]}
- CeilometerInternalVip:
- description: VIP for Ceilometer API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CeilometerApiNetwork]}]}
- CephRgwInternalVip:
- description: VIP for Ceph RGW internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CephRgwNetwork]}]}
- CinderInternalVip:
- description: VIP for Cinder API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CinderApiNetwork]}]}
- GlanceInternalVip:
- description: VIP for Glance API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GlanceApiNetwork]}]}
- GnocchiInternalVip:
- description: VIP for Gnocchi API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, GnocchiApiNetwork]}]}
- MistralInternalVip:
- description: VIP for Mistral API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MistralApiNetwork]}]}
- HeatInternalVip:
- description: VIP for Heat API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, HeatApiNetwork]}]}
- IronicInternalVip:
- description: VIP for Ironic API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, IronicApiNetwork]}]}
- KeystoneInternalVip:
- description: VIP for Keystone API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystonePublicApiNetwork]}]}
- ManilaInternalVip:
- description: VIP for Manila API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, ManilaApiNetwork]}]}
- NeutronInternalVip:
- description: VIP for Neutron API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NeutronApiNetwork]}]}
- NovaInternalVip:
- description: VIP for Nova API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, NovaApiNetwork]}]}
- OpenDaylightInternalVip:
- description: VIP for OpenDaylight API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, OpenDaylightApiNetwork]}]}
- SaharaInternalVip:
- description: VIP for Sahara API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SaharaApiNetwork]}]}
- SwiftInternalVip:
- description: VIP for Swift Proxy internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, SwiftProxyNetwork]}]}
EndpointMap:
description: |
Mapping of the resources with the needed info for their endpoints.
@@ -622,35 +654,16 @@ outputs:
list_join:
- "\n"
- - {get_attr: [hostsConfig, hosts_entries]}
- -
- - str_replace:
- template: IP HOST
- params:
- IP: {get_attr: [VipMap, net_ip_map, external]}
- HOST: {get_param: CloudName}
- - str_replace:
- template: IP HOST
- params:
- IP: {get_attr: [VipMap, net_ip_map, ctlplane]}
- HOST: {get_param: CloudNameCtlplane}
- - str_replace:
- template: IP HOST
- params:
- IP: {get_attr: [VipMap, net_ip_map, internal_api]}
- HOST: {get_param: CloudNameInternal}
- - str_replace:
- template: IP HOST
- params:
- IP: {get_attr: [VipMap, net_ip_map, storage]}
- HOST: {get_param: CloudNameStorage}
- - str_replace:
- template: IP HOST
- params:
- IP: {get_attr: [VipMap, net_ip_map, storage_mgmt]}
- HOST: {get_param: CloudNameStorageManagement}
+ - - {get_attr: [VipHosts, value]}
EnabledServices:
description: The services enabled on each role
value:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
{% endfor %}
+ RoleData:
+ description: The configuration data associated with each role
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
+{% endfor %}
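Two of the overcloud.j2.yaml changes above lean on yaql in ways worth spelling out: bootstrap_nodeid now takes the first element of the ResourceGroup's hostname/ip_address list (path attributes like resource.0.hostname do not resolve across the group output, per bug 1640488), and coalesce() keeps stack validation from tripping over a None list. A tiny Python equivalent of coalesce($.data, []).first(null), with made-up hostnames:

def first_or_none(data):
    # coalesce($.data, []) -> data or []; .first(null) -> first element, or None if empty
    return (data or [None])[0]

print(first_or_none(["overcloud-controller-0", "overcloud-controller-1"]))
# overcloud-controller-0
print(first_or_none(None))   # during stack validation the attribute is None
# None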
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index cc5e4eac..ee43c3a5 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'All Nodes Config for Puppet'
parameters:
@@ -28,6 +28,8 @@ parameters:
type: json
short_service_node_names:
type: json
+ short_service_bootstrap_node:
+ type: json
controller_names:
type: comma_delimited_list
memcache_node_ips:
@@ -71,140 +73,133 @@ resources:
allNodesConfigImpl:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- datafiles:
- bootstrap_node:
- mapped_data:
- bootstrap_nodeid: {get_input: bootstrap_nodeid}
- bootstrap_nodeid_ip: {get_input: bootstrap_nodeid_ip}
- all_nodes:
- mapped_data:
- map_merge:
- - tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: logging_sources}
- - tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: logging_groups}
- - enabled_services: {get_param: enabled_services}
- # This writes out a mapping of service_name_enabled: 'true'
- # For any services not enabled, hiera foo_enabled will
- # return nil, as it's undefined
- - map_merge:
- repeat:
- template:
- # Note this must be string 'true' due to
- # https://bugs.launchpad.net/heat/+bug/1617203
- SERVICE_enabled: 'true'
- for_each:
- SERVICE:
- str_split: [',', {get_param: enabled_services}]
- # Dynamically generate per-service network data
- # This works as follows (outer->inner functions)
- # yaql - filters services where no mapping exists in ServiceNetMap
- # map_replace: substitute e.g heat_api_network with network name from ServiceNetMap
- # map_merge/repeat: generate a per-service mapping
- - yaql:
- # This filters any entries where the value hasn't been substituted for
- # a list, e.g it's still $service_network. This happens when there is
- # no network defined for the service in the ServiceNetMap, which is OK
- # as not all services have to be bound to a network, so we filter them
- expression: dict($.data.map.items().where(isString($[1]) and not $[1].endsWith("_network")))
- data:
- map:
- map_replace:
+ datafiles:
+ bootstrap_node:
+ bootstrap_nodeid: {get_input: bootstrap_nodeid}
+ bootstrap_nodeid_ip: {get_input: bootstrap_nodeid_ip}
+ all_nodes:
+ map_merge:
+ - tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: logging_sources}
+ - tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: logging_groups}
+ - enabled_services: {get_param: enabled_services}
+ # This writes out a mapping of service_name_enabled: 'true'
+ # For any services not enabled, hiera foo_enabled will
+ # return nil, as it's undefined
+ - map_merge:
+ repeat:
+ template:
+ # Note this must be string 'true' due to
+ # https://bugs.launchpad.net/heat/+bug/1617203
+ SERVICE_enabled: 'true'
+ for_each:
+ SERVICE:
+ str_split: [',', {get_param: enabled_services}]
+ # Dynamically generate per-service network data
+ # This works as follows (outer->inner functions)
+ # yaql - filters services where no mapping exists in ServiceNetMap
+ # map_replace: substitute e.g heat_api_network with network name from ServiceNetMap
+ # map_merge/repeat: generate a per-service mapping
+ - yaql:
+ # This filters any entries where the value hasn't been substituted for
+ # a list, e.g it's still $service_network. This happens when there is
+ # no network defined for the service in the ServiceNetMap, which is OK
+ # as not all services have to be bound to a network, so we filter them
+ expression: dict($.data.map.items().where(isString($[1]) and not $[1].endsWith("_network")))
+ data:
+ map:
+ map_replace:
+ - map_merge:
+ repeat:
+ template:
+ SERVICE_network: SERVICE_network
+ for_each:
+ SERVICE:
+ str_split: [',', {get_param: enabled_services}]
+ - values: {get_param: ServiceNetMap}
+ # Keystone doesn't provide separate entries for the public
+ # and admin endpoints, so we need to add them here manually
+ # like we do in the vip-config below
+ - keystone_admin_api_network: {get_param: [ServiceNetMap, keystone_admin_api_network]}
+ keystone_public_api_network: {get_param: [ServiceNetMap, keystone_public_api_network]}
+ # provides a mapping of service_name_ips to a list of IPs
+ - {get_param: service_ips}
+ - {get_param: service_node_names}
+ - {get_param: short_service_node_names}
+ - {get_param: short_service_bootstrap_node}
+ - controller_node_ips:
+ list_join:
+ - ','
+ - {get_param: controller_ips}
+ controller_node_names:
+ list_join:
+ - ','
+ - {get_param: controller_names}
+ memcached_node_ips_v6:
+ repeat:
+ template: "inet6:[NAME]"
+ for_each:
+ NAME: {get_param: memcache_node_ips}
+ deploy_identifier: {get_param: DeployIdentifier}
+ update_identifier: {get_param: UpdateIdentifier}
+ stack_action: {get_param: StackAction}
+ vip_data:
+ map_merge:
+ # Dynamically generate per-service VIP data based on enabled_services
+ # This works as follows (outer->inner functions)
+ # yaql - filters services where no mapping exists in ServiceNetMap
+ # map_replace: substitute e.g internal_api with the IP from NetVipMap
+ # map_replace: substitute e.g heat_api_network with network name from ServiceNetMap
+ # map_merge/repeat: generate a per-service mapping
+ - yaql:
+ # This filters any entries where the value hasn't been substituted for
+ # a list, e.g it's still $service_network. This happens when there is
+ # no network defined for the service in the ServiceNetMap, which is OK
+ # as not all services have to be bound to a network, so we filter them
+ expression: dict($.data.map.items().where(isString($[1]) and not $[1].endsWith("_network")))
+ data:
+ map:
+ map_replace:
+ - map_replace:
- map_merge:
repeat:
template:
- SERVICE_network: SERVICE_network
+ SERVICE_vip: SERVICE_network
for_each:
SERVICE:
str_split: [',', {get_param: enabled_services}]
- values: {get_param: ServiceNetMap}
- # Keystone doesn't provide separate entries for the public
- # and admin endpoints, so we need to add them here manually
- # like we do in the vip-config below
- - keystone_admin_api_network: {get_param: [ServiceNetMap, keystone_admin_api_network]}
- keystone_public_api_network: {get_param: [ServiceNetMap, keystone_public_api_network]}
- # provides a mapping of service_name_ips to a list of IPs
- - {get_param: service_ips}
- - {get_param: service_node_names}
- - {get_param: short_service_node_names}
- - controller_node_ips:
- list_join:
- - ','
- - {get_param: controller_ips}
- controller_node_names:
- list_join:
- - ','
- - {get_param: controller_names}
- memcached_node_ips_v6:
- str_replace:
- template: "['inet6:[SERVERS_LIST]']"
- params:
- SERVERS_LIST:
- list_join:
- - "]','inet6:["
- - {get_param: memcache_node_ips}
-
- deploy_identifier: {get_param: DeployIdentifier}
- update_identifier: {get_param: UpdateIdentifier}
- stack_action: {get_param: StackAction}
- vip_data:
- mapped_data:
- map_merge:
- # Dynamically generate per-service VIP data based on enabled_services
- # This works as follows (outer->inner functions)
- # yaql - filters services where no mapping exists in ServiceNetMap
- # map_replace: substitute e.g internal_api with the IP from NetVipMap
- # map_replace: substitute e.g heat_api_network with network name from ServiceNetMap
- # map_merge/repeat: generate a per-service mapping
- - yaql:
- # This filters any entries where the value hasn't been substituted for
- # a list, e.g it's still $service_network. This happens when there is
- # no network defined for the service in the ServiceNetMap, which is OK
- # as not all services have to be bound to a network, so we filter them
- expression: dict($.data.map.items().where(isString($[1]) and not $[1].endsWith("_network")))
- data:
- map:
- map_replace:
- - map_replace:
- - map_merge:
- repeat:
- template:
- SERVICE_vip: SERVICE_network
- for_each:
- SERVICE:
- str_split: [',', {get_param: enabled_services}]
- - values: {get_param: ServiceNetMap}
- - values: {get_param: NetVipMap}
- - keystone_admin_api_vip:
- get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_admin_api_network]}]
- keystone_public_api_vip:
- get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_public_api_network]}]
- public_virtual_ip: {get_param: [NetVipMap, external]}
- controller_virtual_ip: {get_param: [NetVipMap, ctlplane]}
- internal_api_virtual_ip: {get_param: [NetVipMap, internal_api]}
- storage_virtual_ip: {get_param: [NetVipMap, storage]}
- storage_mgmt_virtual_ip: {get_param: [NetVipMap, storage_mgmt]}
- redis_vip: {get_param: RedisVirtualIP}
- # public_virtual_ip and controller_virtual_ip are needed in
- # both HAproxy & keepalived.
- tripleo::haproxy::public_virtual_ip: {get_param: [NetVipMap, external]}
- tripleo::haproxy::controller_virtual_ip: {get_param: [NetVipMap, ctlplane]}
- tripleo::keepalived::public_virtual_ip: {get_param: [NetVipMap, external]}
- tripleo::keepalived::controller_virtual_ip: {get_param: [NetVipMap, ctlplane]}
- tripleo::keepalived::internal_api_virtual_ip: {get_param: [NetVipMap, internal_api]}
- tripleo::keepalived::storage_virtual_ip: {get_param: [NetVipMap, storage]}
- tripleo::keepalived::storage_mgmt_virtual_ip: {get_param: [NetVipMap, storage_mgmt]}
- tripleo::keepalived::redis_virtual_ip: {get_param: RedisVirtualIP}
- tripleo::redis_notification::haproxy_monitor_ip: {get_param: [NetVipMap, ctlplane]}
- cloud_name_external: {get_param: cloud_name_external}
- cloud_name_internal_api: {get_param: cloud_name_internal_api}
- cloud_name_storage: {get_param: cloud_name_storage}
- cloud_name_storage_mgmt: {get_param: cloud_name_storage_mgmt}
- cloud_name_ctlplane: {get_param: cloud_name_ctlplane}
- # TLS parameters
- certmonger_ca: {get_param: CertmongerCA}
- enable_internal_tls: {get_param: EnableInternalTLS}
+ - values: {get_param: NetVipMap}
+ - keystone_admin_api_vip:
+ get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_admin_api_network]}]
+ keystone_public_api_vip:
+ get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_public_api_network]}]
+ public_virtual_ip: {get_param: [NetVipMap, external]}
+ controller_virtual_ip: {get_param: [NetVipMap, ctlplane]}
+ internal_api_virtual_ip: {get_param: [NetVipMap, internal_api]}
+ storage_virtual_ip: {get_param: [NetVipMap, storage]}
+ storage_mgmt_virtual_ip: {get_param: [NetVipMap, storage_mgmt]}
+ redis_vip: {get_param: RedisVirtualIP}
+ # public_virtual_ip and controller_virtual_ip are needed in
+ # both HAproxy & keepalived.
+ tripleo::haproxy::public_virtual_ip: {get_param: [NetVipMap, external]}
+ tripleo::haproxy::controller_virtual_ip: {get_param: [NetVipMap, ctlplane]}
+ tripleo::keepalived::public_virtual_ip: {get_param: [NetVipMap, external]}
+ tripleo::keepalived::controller_virtual_ip: {get_param: [NetVipMap, ctlplane]}
+ tripleo::keepalived::internal_api_virtual_ip: {get_param: [NetVipMap, internal_api]}
+ tripleo::keepalived::storage_virtual_ip: {get_param: [NetVipMap, storage]}
+ tripleo::keepalived::storage_mgmt_virtual_ip: {get_param: [NetVipMap, storage_mgmt]}
+ tripleo::keepalived::redis_virtual_ip: {get_param: RedisVirtualIP}
+ tripleo::redis_notification::haproxy_monitor_ip: {get_param: [NetVipMap, ctlplane]}
+ cloud_name_external: {get_param: cloud_name_external}
+ cloud_name_internal_api: {get_param: cloud_name_internal_api}
+ cloud_name_storage: {get_param: cloud_name_storage}
+ cloud_name_storage_mgmt: {get_param: cloud_name_storage_mgmt}
+ cloud_name_ctlplane: {get_param: cloud_name_ctlplane}
+ # TLS parameters
+ certmonger_ca: {get_param: CertmongerCA}
+ enable_internal_tls: {get_param: EnableInternalTLS}
outputs:
config_id:
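
The nested intrinsic functions in the all_nodes map above are easier to follow with concrete values. Below is a minimal stand-alone sketch (not part of the patch) of the enabled_services-to-network mapping trick; the two service names and the one-entry ServiceNetMap are invented purely for illustration:

  heat_template_version: ocata
  # Sketch: trace of the repeat/map_merge -> map_replace -> yaql chain
  resources:
    example_service_net_data:
      type: OS::Heat::Value
      properties:
        value:
          yaql:
            # drop entries whose value was never substituted, i.e. services
            # with no ServiceNetMap entry (their value still ends in "_network")
            expression: dict($.data.map.items().where(isString($[1]) and not $[1].endsWith("_network")))
            data:
              map:
                map_replace:
                  # step 1: placeholders generated from the service list:
                  #   {heat_api_network: heat_api_network,
                  #    ceph_mon_network: ceph_mon_network}
                  - map_merge:
                      repeat:
                        template:
                          SERVICE_network: SERVICE_network
                        for_each:
                          SERVICE:
                            str_split: [',', 'heat_api,ceph_mon']
                  # step 2: only heat_api_network is known here, so only its
                  # value is replaced; step 3 (the yaql filter) then leaves
                  #   {heat_api_network: internal_api}
                  - values:
                      heat_api_network: internal_api
  outputs:
    result:
      value: {get_attr: [example_service_net_data, value]}

The vip_data block in the same file is the same pattern with one extra map_replace, so the substituted network name is in turn replaced by the matching VIP from NetVipMap; the new memcached_node_ips_v6 entry is a simpler use of repeat that wraps each address as inet6:[ADDR].
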
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index 8b695fff..e92de45f 100644
--- a/puppet/blockstorage-role.yaml
+++ b/puppet/blockstorage-role.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'OpenStack cinder storage configured by Puppet'
parameters:
BlockStorageImage:
@@ -66,15 +66,25 @@ parameters:
constraints:
- allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
CloudDomain:
+ default: 'localdomain'
type: string
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ BlockStorageServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
BlockStorageSchedulerHints:
type: json
@@ -92,10 +102,19 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
BlockStorage:
@@ -117,7 +136,11 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: BlockStorageServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: BlockStorageSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -129,6 +152,8 @@ resources:
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
+ - config: {get_resource: RoleUserData}
+ type: multipart
# Creates the "heat-admin" user if configured via the environment
# Should return a OS::Heat::MultipartMime reference via OS::stack_id
@@ -140,6 +165,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ # For optional operator role-specific userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ RoleUserData:
+ type: OS::TripleO::BlockStorage::NodeUserData
+
ExternalPort:
type: OS::TripleO::BlockStorage::Ports::ExternalPort
properties:
@@ -216,17 +246,134 @@ resources:
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+ NetHostMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ external:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - external
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - external
+ internal_api:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - internalapi
+ storage:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - storage
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - storage
+ storage_mgmt:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - storagemgmt
+ tenant:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - tenant
+ management:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - management
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - management
+ ctlplane:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - ctlplane
+
+ PreNetworkConfig:
+ type: OS::TripleO::BlockStorage::PreNetworkConfig
+ properties:
+ server: {get_resource: BlockStorage}
+
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: BlockStorage}
actions: {get_param: NetworkDeploymentActions}
+ BlockStorageUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ BlockStorageUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: BlockStorageUpgradeInitDeployment
+ server: {get_resource: BlockStorage}
+ config: {get_resource: BlockStorageUpgradeInitConfig}
+
BlockStorageDeployment:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: BlockStorageUpgradeInitDeployment
properties:
name: BlockStorageDeployment
server: {get_resource: BlockStorage}
@@ -238,39 +385,39 @@ resources:
BlockStorageConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- hierarchy:
- - '"%{::uuid}"'
- - heat_config_%{::deploy_config_name}
- - volume_extraconfig
- - extraconfig
- - service_names
- - service_configs
- - volume
- - bootstrap_node # provided by allNodesConfig
- - all_nodes # provided by allNodesConfig
- - vip_data # provided by allNodesConfig
- - '"%{::osfamily}"'
- merge_behavior: deeper
- datafiles:
- service_names:
- mapped_data:
- service_names: {get_param: ServiceNames}
- sensu::subscriptions: {get_param: MonitoringSubscriptions}
- service_configs:
- mapped_data:
- map_replace:
- - {get_param: ServiceConfigSettings}
- - values: {get_attr: [NetIpMap, net_ip_map]}
- volume_extraconfig:
- mapped_data: {get_param: BlockStorageExtraConfig}
- extraconfig:
- mapped_data: {get_param: ExtraConfig}
- volume:
- mapped_data:
- tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
+ hierarchy:
+ - '"%{::uuid}"'
+ - heat_config_%{::deploy_config_name}
+ - volume_extraconfig
+ - extraconfig
+ - service_names
+ - service_configs
+ - volume
+ - bootstrap_node # provided by allNodesConfig
+ - all_nodes # provided by allNodesConfig
+ - vip_data # provided by allNodesConfig
+ - '"%{::osfamily}"'
+ merge_behavior: deeper
+ datafiles:
+ service_names:
+ service_names: {get_param: ServiceNames}
+ sensu::subscriptions: {get_param: MonitoringSubscriptions}
+ service_configs:
+ map_replace:
+ - {get_param: ServiceConfigSettings}
+ - values: {get_attr: [NetIpMap, net_ip_map]}
+ volume_extraconfig: {get_param: BlockStorageExtraConfig}
+ extraconfig: {get_param: ExtraConfig}
+ volume:
+ tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
+ fqdn_internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ fqdn_storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ fqdn_storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
+ fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -310,48 +457,13 @@ outputs:
hostname_map:
description: Mapping of network names to hostnames
value:
- external:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - external
- - {get_param: CloudDomain}
- internal_api:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - internalapi
- - {get_param: CloudDomain}
- storage:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - storage
- - {get_param: CloudDomain}
- storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- tenant:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - tenant
- - {get_param: CloudDomain}
- management:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - management
- - {get_param: CloudDomain}
- ctlplane:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ external: {get_attr: [NetHostMap, value, external, fqdn]}
+ internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ management: {get_attr: [NetHostMap, value, management, fqdn]}
+ ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
hosts_entry:
value:
str_replace:
@@ -369,47 +481,19 @@ outputs:
DOMAIN: {get_param: CloudDomain}
PRIMARYHOST: {get_attr: [BlockStorage, name]}
EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
- EXTERNALHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - external
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
- INTERNAL_APIHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - internalapi
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
STORAGEIP: {get_attr: [StoragePort, ip_address]}
- STORAGEHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - storage
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
- STORAGE_MGMTHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - storagemgmt
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
TENANTIP: {get_attr: [TenantPort, ip_address]}
- TENANTHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - tenant
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
- MANAGEMENTHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - management
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
- CTLPLANEHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - ctlplane
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
nova_server_resource:
description: Heat resource handle for the block storage server
value:
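
The NetHostMap value resource added to this role simply precomputes every per-network host name once, so the fqdn_* hieradata keys and the hostname_map/hosts_entry outputs can read them with get_attr instead of repeating list_join. As a rough illustration, for a hypothetical node named overcloud-blockstorage-0 with the default CloudDomain of localdomain it resolves to:

  # illustration only; the node name is invented
  external:
    fqdn: overcloud-blockstorage-0.external.localdomain
    short: overcloud-blockstorage-0.external
  internal_api:
    fqdn: overcloud-blockstorage-0.internalapi.localdomain
    short: overcloud-blockstorage-0.internalapi
  storage:
    fqdn: overcloud-blockstorage-0.storage.localdomain
    short: overcloud-blockstorage-0.storage
  # storage_mgmt, tenant, management and ctlplane follow the same pattern

Individual entries are then read as, for example, {get_attr: [NetHostMap, value, internal_api, fqdn]}.
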
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index 55b26336..892f91ef 100644
--- a/puppet/cephstorage-role.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'OpenStack ceph storage node configured by Puppet'
parameters:
OvercloudCephStorageFlavor:
@@ -72,15 +72,25 @@ parameters:
constraints:
- allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
CloudDomain:
+ default: 'localdomain'
type: string
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ CephStorageServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
CephStorageSchedulerHints:
type: json
@@ -98,10 +108,19 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
CephStorage:
@@ -123,7 +142,11 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: CephStorageServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: CephStorageSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -135,6 +158,8 @@ resources:
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
+ - config: {get_resource: RoleUserData}
+ type: multipart
# Creates the "heat-admin" user if configured via the environment
# Should return a OS::Heat::MultipartMime reference via OS::stack_id
@@ -146,6 +171,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ # For optional operator role-specific userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ RoleUserData:
+ type: OS::TripleO::CephStorage::NodeUserData
+
ExternalPort:
type: OS::TripleO::CephStorage::Ports::ExternalPort
properties:
@@ -222,17 +252,134 @@ resources:
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+ NetHostMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ external:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - external
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - external
+ internal_api:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - internalapi
+ storage:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - storage
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - storage
+ storage_mgmt:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - storagemgmt
+ tenant:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - tenant
+ management:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - management
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - management
+ ctlplane:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - ctlplane
+
+ PreNetworkConfig:
+ type: OS::TripleO::CephStorage::PreNetworkConfig
+ properties:
+ server: {get_resource: CephStorage}
+
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: CephStorage}
actions: {get_param: NetworkDeploymentActions}
+ CephStorageUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ CephStorageUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: CephStorageUpgradeInitDeployment
+ server: {get_resource: CephStorage}
+ config: {get_resource: CephStorageUpgradeInitConfig}
+
CephStorageDeployment:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: CephStorageUpgradeInitDeployment
properties:
name: CephStorageDeployment
config: {get_resource: CephStorageConfig}
@@ -243,38 +390,39 @@ resources:
CephStorageConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- hierarchy:
- - '"%{::uuid}"'
- - heat_config_%{::deploy_config_name}
- - ceph_extraconfig
- - extraconfig
- - service_names
- - service_configs
- - bootstrap_node # provided by allNodesConfig
- - all_nodes # provided by allNodesConfig
- - vip_data # provided by allNodesConfig
- - '"%{::osfamily}"'
- merge_behavior: deeper
- datafiles:
- service_names:
- mapped_data:
- service_names: {get_param: ServiceNames}
- sensu::subscriptions: {get_param: MonitoringSubscriptions}
- service_configs:
- mapped_data:
- map_replace:
- - {get_param: ServiceConfigSettings}
- - values: {get_attr: [NetIpMap, net_ip_map]}
- ceph_extraconfig:
- mapped_data: {get_param: CephStorageExtraConfig}
- extraconfig:
- mapped_data: {get_param: ExtraConfig}
- ceph:
- mapped_data:
- tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
+ hierarchy:
+ - '"%{::uuid}"'
+ - heat_config_%{::deploy_config_name}
+ - ceph_extraconfig
+ - extraconfig
+ - service_names
+ - service_configs
+ - ceph
+ - bootstrap_node # provided by allNodesConfig
+ - all_nodes # provided by allNodesConfig
+ - vip_data # provided by allNodesConfig
+ - '"%{::osfamily}"'
+ merge_behavior: deeper
+ datafiles:
+ service_names:
+ service_names: {get_param: ServiceNames}
+ sensu::subscriptions: {get_param: MonitoringSubscriptions}
+ service_configs:
+ map_replace:
+ - {get_param: ServiceConfigSettings}
+ - values: {get_attr: [NetIpMap, net_ip_map]}
+ ceph_extraconfig: {get_param: CephStorageExtraConfig}
+ extraconfig: {get_param: ExtraConfig}
+ ceph:
+ tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
+ fqdn_internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ fqdn_storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ fqdn_storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
+ fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -320,48 +468,13 @@ outputs:
hostname_map:
description: Mapping of network names to hostnames
value:
- external:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - external
- - {get_param: CloudDomain}
- internal_api:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - internalapi
- - {get_param: CloudDomain}
- storage:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - storage
- - {get_param: CloudDomain}
- storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- tenant:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - tenant
- - {get_param: CloudDomain}
- management:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - management
- - {get_param: CloudDomain}
- ctlplane:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ external: {get_attr: [NetHostMap, value, external, fqdn]}
+ internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ management: {get_attr: [NetHostMap, value, management, fqdn]}
+ ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
hosts_entry:
value:
str_replace:
@@ -379,47 +492,19 @@ outputs:
DOMAIN: {get_param: CloudDomain}
PRIMARYHOST: {get_attr: [CephStorage, name]}
EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
- EXTERNALHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - external
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
- INTERNAL_APIHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - internalapi
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
STORAGEIP: {get_attr: [StoragePort, ip_address]}
- STORAGEHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - storage
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
- STORAGE_MGMTHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - storagemgmt
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
TENANTIP: {get_attr: [TenantPort, ip_address]}
- TENANTHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - tenant
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
- MANAGEMENTHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - management
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
- CTLPLANEHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - ctlplane
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
nova_server_resource:
description: Heat resource handle for the ceph storage server
value:
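
The *UpgradeInitConfig/*UpgradeInitDeployment pair added to each role (here CephStorage) splices an operator-supplied snippet into a shell script that runs after the network is configured but before the role's main deployment; that main deployment now depends on the upgrade-init deployment instead of on NetworkDeployment directly. A stand-alone sketch with a hypothetical literal in place of {get_param: UpgradeInitCommand}:

  ExampleUpgradeInitConfig:
    type: OS::Heat::SoftwareConfig
    properties:
      group: script
      config:
        list_join:
          - ''
          - - "#!/bin/bash\n\n"
            # same housekeeping line the patch adds for every role
            - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
            # hypothetical operator-provided snippet, e.g. a repository switch;
            # in the templates this comes from {get_param: UpgradeInitCommand}
            - "yum-config-manager --enable example-ocata-repo\n"

With the default empty UpgradeInitCommand the script still runs but only performs the resolv.conf.save cleanup; as the comments above note, making it conditional is deferred until the referenced Heat bug is fixed.
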
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index 4d77d6d3..62adcd33 100644
--- a/puppet/compute-role.yaml
+++ b/puppet/compute-role.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack hypervisor node configured via Puppet.
@@ -87,15 +87,25 @@ parameters:
constraints:
- allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
CloudDomain:
+ default: 'localdomain'
type: string
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ NovaComputeServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
NovaComputeSchedulerHints:
type: json
@@ -110,10 +120,19 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
@@ -137,7 +156,11 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: NovaComputeServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: NovaComputeSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -149,6 +172,8 @@ resources:
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
+ - config: {get_resource: RoleUserData}
+ type: multipart
# Creates the "heat-admin" user if configured via the environment
# Should return a OS::Heat::MultipartMime reference via OS::stack_id
@@ -160,6 +185,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ # For optional operator role-specific userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ RoleUserData:
+ type: OS::TripleO::Compute::NodeUserData
+
ExternalPort:
type: OS::TripleO::Compute::Ports::ExternalPort
properties:
@@ -225,6 +255,101 @@ resources:
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+ NetHostMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ external:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - external
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - external
+ internal_api:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - internalapi
+ storage:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - storage
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - storage
+ storage_mgmt:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - storagemgmt
+ tenant:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - tenant
+ management:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - management
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - management
+ ctlplane:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - ctlplane
+
+ PreNetworkConfig:
+ type: OS::TripleO::Compute::PreNetworkConfig
+ properties:
+ server: {get_resource: NovaCompute}
+
NetworkConfig:
type: OS::TripleO::Compute::Net::SoftwareConfig
properties:
@@ -238,6 +363,7 @@ resources:
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
@@ -247,52 +373,73 @@ resources:
bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
+ NovaComputeUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ NovaComputeUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: NovaComputeUpgradeInitDeployment
+ server: {get_resource: NovaCompute}
+ config: {get_resource: NovaComputeUpgradeInitConfig}
+
NovaComputeConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- hierarchy:
- - '"%{::uuid}"'
- - heat_config_%{::deploy_config_name}
- - compute_extraconfig
- - extraconfig
- - service_names
- - service_configs
- - compute
- - bootstrap_node # provided by allNodesConfig
- - all_nodes # provided by allNodesConfig
- - vip_data # provided by allNodesConfig
- - '"%{::osfamily}"'
- - neutron_bigswitch_data # Optionally provided by ComputeExtraConfigPre
- - cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre
- - nova_nuage_data # Optionally provided by ComputeExtraConfigPre
- - midonet_data # Optionally provided by AllNodesExtraConfig
- - neutron_opencontrail_data # Optionally provided by ComputeExtraConfigPre
- - cisco_aci_data # Optionally provided by ComputeExtraConfigPre
- merge_behavior: deeper
- datafiles:
- service_names:
- mapped_data:
- service_names: {get_param: ServiceNames}
- sensu::subscriptions: {get_param: MonitoringSubscriptions}
- service_configs:
- mapped_data:
- map_replace:
- - {get_param: ServiceConfigSettings}
- - values: {get_attr: [NetIpMap, net_ip_map]}
- compute_extraconfig:
- mapped_data: {get_param: NovaComputeExtraConfig}
- extraconfig:
- mapped_data: {get_param: ExtraConfig}
- compute:
- mapped_data:
- tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
+ hierarchy:
+ - '"%{::uuid}"'
+ - heat_config_%{::deploy_config_name}
+ - compute_extraconfig
+ - extraconfig
+ - service_names
+ - service_configs
+ - compute
+ - bootstrap_node # provided by allNodesConfig
+ - all_nodes # provided by allNodesConfig
+ - vip_data # provided by allNodesConfig
+ - '"%{::osfamily}"'
+ - neutron_bigswitch_data # Optionally provided by ComputeExtraConfigPre
+ - cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre
+ - nova_nuage_data # Optionally provided by ComputeExtraConfigPre
+ - midonet_data # Optionally provided by AllNodesExtraConfig
+ - neutron_opencontrail_data # Optionally provided by ComputeExtraConfigPre
+ - cisco_aci_data # Optionally provided by ComputeExtraConfigPre
+ merge_behavior: deeper
+ datafiles:
+ service_names:
+ service_names: {get_param: ServiceNames}
+ sensu::subscriptions: {get_param: MonitoringSubscriptions}
+ service_configs:
+ map_replace:
+ - {get_param: ServiceConfigSettings}
+ - values: {get_attr: [NetIpMap, net_ip_map]}
+ compute_extraconfig: {get_param: NovaComputeExtraConfig}
+ extraconfig: {get_param: ExtraConfig}
+ compute:
+ tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
+ fqdn_internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ fqdn_storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ fqdn_storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
+ fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
NovaComputeDeployment:
type: OS::TripleO::SoftwareDeployment
- depends_on: NetworkDeployment
+ depends_on: NovaComputeUpgradeInitDeployment
properties:
name: NovaComputeDeployment
config: {get_resource: NovaComputeConfig}
@@ -363,48 +510,13 @@ outputs:
hostname_map:
description: Mapping of network names to hostnames
value:
- external:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - external
- - {get_param: CloudDomain}
- internal_api:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - internalapi
- - {get_param: CloudDomain}
- storage:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - storage
- - {get_param: CloudDomain}
- storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- tenant:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - tenant
- - {get_param: CloudDomain}
- management:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - management
- - {get_param: CloudDomain}
- ctlplane:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ external: {get_attr: [NetHostMap, value, external, fqdn]}
+ internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ management: {get_attr: [NetHostMap, value, management, fqdn]}
+ ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
hosts_entry:
description: >
Server's IP address and hostname in the /etc/hosts format
@@ -424,47 +536,19 @@ outputs:
DOMAIN: {get_param: CloudDomain}
PRIMARYHOST: {get_attr: [NovaCompute, name]}
EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
- EXTERNALHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - external
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
- INTERNAL_APIHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - internalapi
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
STORAGEIP: {get_attr: [StoragePort, ip_address]}
- STORAGEHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - storage
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
- STORAGE_MGMTHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - storagemgmt
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
TENANTIP: {get_attr: [TenantPort, ip_address]}
- TENANTHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - tenant
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
- MANAGEMENTHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - management
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
- CTLPLANEHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - ctlplane
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
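
Every role's *Config resource in this series (NovaComputeConfig above, plus the BlockStorage, CephStorage and Controller equivalents) moves from the os-apply-config element, where the data sat under a nested hiera: map and each datafile needed a mapped_data: wrapper, to the hiera hook: hierarchy, merge_behavior and datafiles now live directly under config and the wrapper is gone. A minimal resource in the new format, with an invented hieradata value:

  ExampleHieraConfig:
    type: OS::Heat::StructuredConfig
    properties:
      group: hiera          # was: group: os-apply-config
      config:
        hierarchy:
          - extraconfig
        merge_behavior: deeper
        datafiles:
          extraconfig:
            # keys sit directly under the datafile; previously this level
            # required an intermediate mapped_data: key
            tripleo::packages::enable_upgrade: false

The hieradata content itself is unchanged apart from the new fqdn_* entries derived from NetHostMap.
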
diff --git a/puppet/config.role.j2.yaml b/puppet/config.role.j2.yaml
index e59a0216..7337d062 100644
--- a/puppet/config.role.j2.yaml
+++ b/puppet/config.role.j2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
A software config which runs puppet on the {{role}} role
@@ -12,6 +12,14 @@ parameters:
type: string
description: Config manifests that will be used to step through the deployment.
default: ''
+ PuppetTags:
+ type: string
+ description: List of comma-separated tags to limit puppet catalog to.
+ default: ''
+
+conditions:
+
+ puppet_tags_empty: {equals : [{get_param: PuppetTags}, '']}
resources:
@@ -24,6 +32,13 @@ resources:
enable_hiera: True
enable_facter: False
modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
+ tags:
+ if:
+ - puppet_tags_empty
+ - ''
+ - list_join:
+ - ','
+ - ['file,concat,file_line', {get_param: PuppetTags}]
outputs:
- name: result
inputs:
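
The new PuppetTags parameter lets a deployment restrict the puppet catalog to specific resource types; when it is non-empty, the hook always receives file, concat and file_line in addition to the requested tags. A stand-alone sketch with an invented value (in the template the value comes from get_param and the condition guards the default empty string, in which case an empty tags string is passed instead):

  heat_template_version: ocata
  parameters:
    PuppetTags:
      type: string
      default: 'package,service'   # hypothetical value
  conditions:
    puppet_tags_empty: {equals: [{get_param: PuppetTags}, '']}
  resources:
    example_tags:
      type: OS::Heat::Value
      properties:
        value:
          if:
            - puppet_tags_empty
            - ''
            - list_join: [',', ['file,concat,file_line', {get_param: PuppetTags}]]
  outputs:
    tags:
      # resolves to "file,concat,file_line,package,service"
      value: {get_attr: [example_tags, value]}
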
diff --git a/puppet/controller-config-pacemaker.yaml b/puppet/controller-config-pacemaker.yaml
deleted file mode 100644
index 24f31dc8..00000000
--- a/puppet/controller-config-pacemaker.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
- A software config which runs manifests/overcloud_controller_pacemaker.pp
-
-parameters:
- ConfigDebug:
- default: false
- description: Whether to run config management (e.g. Puppet) in debug mode.
- type: boolean
- StepConfig:
- type: string
- description: Config manifests that will be used to step through the deployment.
- default: ''
-
-resources:
-
- ControllerPuppetConfigImpl:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- options:
- enable_debug: {get_param: ConfigDebug}
- enable_hiera: True
- enable_facter: False
- modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
- outputs:
- - name: result
- inputs:
- - name: step
- type: Number
- config:
- list_join:
- - ''
- - - get_file: manifests/overcloud_controller_pacemaker.pp
- - {get_param: StepConfig}
-
-outputs:
- OS::stack_id:
- description: The software config which runs overcloud_controller_pacemaker.pp
- value: {get_resource: ControllerPuppetConfigImpl}
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index b1433b04..9e35af5f 100644
--- a/puppet/controller-role.yaml
+++ b/puppet/controller-role.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack controller node configured by Puppet.
@@ -101,15 +101,25 @@ parameters:
constraints:
- allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
CloudDomain:
+ default: 'localdomain'
type: string
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ ControllerServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
ControllerSchedulerHints:
type: json
@@ -124,10 +134,19 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
parameter_groups:
- label: deprecated
@@ -156,7 +175,11 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: ControllerServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ControllerSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -168,6 +191,8 @@ resources:
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
+ - config: {get_resource: RoleUserData}
+ type: multipart
# Creates the "heat-admin" user if configured via the environment
# Should return a OS::Heat::MultipartMime reference via OS::stack_id
@@ -179,6 +204,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ # For optional operator role-specific userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ RoleUserData:
+ type: OS::TripleO::Controller::NodeUserData
+
ExternalPort:
type: OS::TripleO::Controller::Ports::ExternalPort
properties:
@@ -244,6 +274,101 @@ resources:
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+ NetHostMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ external:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - external
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - external
+ internal_api:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - internalapi
+ storage:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - storage
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - storage
+ storage_mgmt:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - storagemgmt
+ tenant:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - tenant
+ management:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - management
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - management
+ ctlplane:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - ctlplane
+
+ PreNetworkConfig:
+ type: OS::TripleO::Controller::PreNetworkConfig
+ properties:
+ server: {get_resource: Controller}
+
NetworkConfig:
type: OS::TripleO::Controller::Net::SoftwareConfig
properties:
@@ -257,6 +382,7 @@ resources:
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
@@ -281,10 +407,30 @@ resources:
server: {get_resource: Controller}
NodeIndex: {get_param: NodeIndex}
+ ControllerUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ ControllerUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: ControllerUpgradeInitDeployment
+ server: {get_resource: Controller}
+ config: {get_resource: ControllerUpgradeInitConfig}
ControllerDeployment:
type: OS::TripleO::SoftwareDeployment
- depends_on: NetworkDeployment
+ depends_on: ControllerUpgradeInitDeployment
properties:
name: ControllerDeployment
config: {get_resource: ControllerConfig}
@@ -298,57 +444,57 @@ resources:
ControllerConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- hierarchy:
- - '"%{::uuid}"'
- - heat_config_%{::deploy_config_name}
- - controller_extraconfig
- - extraconfig
- - service_configs
- - service_names
- - controller
- - bootstrap_node # provided by BootstrapNodeConfig
- - all_nodes # provided by allNodesConfig
- - vip_data # provided by allNodesConfig
- - '"%{::osfamily}"'
- - cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre
- - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
- - cinder_eqlx_data # Optionally provided by ControllerExtraConfigPre
- - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
- - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
- - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
- - midonet_data #Optionally provided by AllNodesExtraConfig
- - cisco_aci_data # Optionally provided by ControllerExtraConfigPre
- merge_behavior: deeper
- datafiles:
- service_names:
- mapped_data:
- service_names: {get_param: ServiceNames}
- sensu::subscriptions: {get_param: MonitoringSubscriptions}
- service_configs:
- mapped_data:
- map_replace:
- - {get_param: ServiceConfigSettings}
- - values: {get_attr: [NetIpMap, net_ip_map]}
- controller_extraconfig:
- mapped_data:
- map_merge:
- - {get_param: controllerExtraConfig}
- - {get_param: ControllerExtraConfig}
- extraconfig:
- mapped_data: {get_param: ExtraConfig}
- controller:
- mapped_data: # data supplied directly to this deployment configuration, etc
- bootstack_nodeid: {get_input: bootstack_nodeid}
-
- # Pacemaker
- enable_load_balancer: {get_input: enable_load_balancer}
-
- # Misc
- tripleo::haproxy::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]}
- tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
+ hierarchy:
+ - '"%{::uuid}"'
+ - heat_config_%{::deploy_config_name}
+ - controller_extraconfig
+ - extraconfig
+ - service_configs
+ - service_names
+ - controller
+ - bootstrap_node # provided by BootstrapNodeConfig
+ - all_nodes # provided by allNodesConfig
+ - vip_data # provided by allNodesConfig
+ - '"%{::osfamily}"'
+ - cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre
+ - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
+ - cinder_eqlx_data # Optionally provided by ControllerExtraConfigPre
+ - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
+ - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
+ - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
+ - midonet_data #Optionally provided by AllNodesExtraConfig
+ - cisco_aci_data # Optionally provided by ControllerExtraConfigPre
+ merge_behavior: deeper
+ datafiles:
+ service_names:
+ service_names: {get_param: ServiceNames}
+ sensu::subscriptions: {get_param: MonitoringSubscriptions}
+ service_configs:
+ map_replace:
+ - {get_param: ServiceConfigSettings}
+ - values: {get_attr: [NetIpMap, net_ip_map]}
+ controller_extraconfig:
+ map_merge:
+ - {get_param: controllerExtraConfig}
+ - {get_param: ControllerExtraConfig}
+ extraconfig: {get_param: ExtraConfig}
+ controller:
+ # data supplied directly to this deployment configuration, etc
+ bootstack_nodeid: {get_input: bootstack_nodeid}
+ # Pacemaker
+ enable_load_balancer: {get_input: enable_load_balancer}
+
+ # Misc
+ tripleo::haproxy::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]}
+ tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
+ fqdn_internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ fqdn_storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ fqdn_storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
+ fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
# Hook for site-specific additional pre-deployment config, e.g extra hieradata
ControllerExtraConfigPre:
@@ -406,48 +552,13 @@ outputs:
hostname_map:
description: Mapping of network names to hostnames
value:
- external:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - external
- - {get_param: CloudDomain}
- internal_api:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - internalapi
- - {get_param: CloudDomain}
- storage:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - storage
- - {get_param: CloudDomain}
- storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- tenant:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - tenant
- - {get_param: CloudDomain}
- management:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - management
- - {get_param: CloudDomain}
- ctlplane:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ external: {get_attr: [NetHostMap, value, external, fqdn]}
+ internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ management: {get_attr: [NetHostMap, value, management, fqdn]}
+ ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
hosts_entry:
description: >
Server's IP address and hostname in the /etc/hosts format
@@ -467,47 +578,19 @@ outputs:
DOMAIN: {get_param: CloudDomain}
PRIMARYHOST: {get_attr: [Controller, name]}
EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
- EXTERNALHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - external
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
- INTERNAL_APIHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - internalapi
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
STORAGEIP: {get_attr: [StoragePort, ip_address]}
- STORAGEHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - storage
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
- STORAGE_MGMTHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - storagemgmt
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
TENANTIP: {get_attr: [TenantPort, ip_address]}
- TENANTHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - tenant
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
- MANAGEMENTHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - management
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
- CTLPLANEHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - ctlplane
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
diff --git a/puppet/deploy-artifacts.sh b/puppet/deploy-artifacts.sh
index 22fde9a7..4e1ad89f 100644
--- a/puppet/deploy-artifacts.sh
+++ b/puppet/deploy-artifacts.sh
@@ -8,7 +8,7 @@ trap cleanup EXIT
if [ -n "$artifact_urls" ]; then
for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
- curl -o $TMP_DATA/file_data "$artifact_urls"
+ curl --globoff -o $TMP_DATA/file_data "$URL"
if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
yum install -y $TMP_DATA/file_data
elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
diff --git a/puppet/deploy-artifacts.yaml b/puppet/deploy-artifacts.yaml
index 17f84163..5e89405b 100644
--- a/puppet/deploy-artifacts.yaml
+++ b/puppet/deploy-artifacts.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Software Config to install deployment artifacts (tarball's and/or
diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
index 6a2ea4d5..3daf3fd3 100644
--- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: Configure hieradata for all MidoNet nodes
diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
index 4ca0fdcb..9b900bc4 100644
--- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata for Network Cisco configuration
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
index 49c77190..7fe2a842 100644
--- a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata for Big Switch agents on compute node
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
index f5b1f0e6..66252f1f 100644
--- a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Compute node hieradata for Neutron OpenContrail configuration
diff --git a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
index 5561c74a..47c782c7 100644
--- a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata for Nuage configuration on the Compute
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
index 9423208e..7d639883 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: Configure hieradata for Cinder Dell Storage Center configuration
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
index c7af6f22..30509044 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: Configure hieradata for Cinder Eqlx configuration
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
index 48446e5a..763ae39a 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata for Cinder Netapp configuration
diff --git a/puppet/extraconfig/pre_deploy/controller/multiple.yaml b/puppet/extraconfig/pre_deploy/controller/multiple.yaml
index f949a397..d3d546dd 100644
--- a/puppet/extraconfig/pre_deploy/controller/multiple.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/multiple.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Extra Pre-Deployment Config, multiple'
parameters:
server:
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
index 467f57cc..0f4806db 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata for Neutron Big Switch configuration
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
index cec885cd..6eae812f 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata for Cisco N1KV configuration
diff --git a/puppet/extraconfig/pre_deploy/default.yaml b/puppet/extraconfig/pre_deploy/default.yaml
index dcbc6811..5da07f87 100644
--- a/puppet/extraconfig/pre_deploy/default.yaml
+++ b/puppet/extraconfig/pre_deploy/default.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Noop Extra Pre-Deployment Config'
parameters:
server:
diff --git a/puppet/extraconfig/pre_deploy/per_node.yaml b/puppet/extraconfig/pre_deploy/per_node.yaml
index e236e336..65113f6a 100644
--- a/puppet/extraconfig/pre_deploy/per_node.yaml
+++ b/puppet/extraconfig/pre_deploy/per_node.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata overrides for specific nodes
diff --git a/puppet/extraconfig/tls/ca-inject.yaml b/puppet/extraconfig/tls/ca-inject.yaml
index f955034d..04b5ccf6 100644
--- a/puppet/extraconfig/tls/ca-inject.yaml
+++ b/puppet/extraconfig/tls/ca-inject.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
This is a template which will inject the trusted anchor.
diff --git a/puppet/extraconfig/tls/freeipa-enroll.yaml b/puppet/extraconfig/tls/freeipa-enroll.yaml
new file mode 100644
index 00000000..7ce15069
--- /dev/null
+++ b/puppet/extraconfig/tls/freeipa-enroll.yaml
@@ -0,0 +1,83 @@
+heat_template_version: ocata
+
+description: Enroll nodes to FreeIPA
+
+parameters:
+ server:
+ description: ID of the controller node to apply this config to
+ type: string
+
+ CloudDomain:
+ description: >
+ The configured cloud domain; this will also be used as the kerberos realm
+ type: string
+
+ FreeIPAOTP:
+ default: ''
+ description: 'OTP that will be used for FreeIPA enrollment'
+ type: string
+ hidden: true
+ FreeIPAServer:
+ default: ''
+ description: 'FreeIPA server DNS name'
+ type: string
+ FreeIPAIPAddress:
+ default: ''
+ description: 'FreeIPA server IP Address'
+ type: string
+
+resources:
+ FreeIPAEnrollmentConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: otp
+ - name: ipa_server
+ - name: ipa_domain
+ - name: ipa_ip
+ config: |
+ #!/bin/sh
+ # If no IPA server was given as a parameter, it will be assumed from
+ # DNS.
+ if [ -n "${ipa_server}" ]; then
+ sed -i "/${ipa_server}/d" /etc/hosts
+ # Optionally add the FreeIPA server IP to /etc/hosts
+ if [ -n "${ipa_ip}" ]; then
+ echo "${ipa_ip} ${ipa_server}" >> /etc/hosts
+ fi
+ fi
+ # Set the node's domain if needed
+ if [ ! $(hostname -f | grep "${ipa_domain}$") ]; then
+ hostnamectl set-hostname "$(hostname).${ipa_domain}"
+ fi
+ yum install -y ipa-client
+ # Enroll. If there is already a keytab, we have already done this. If
+ # this node hasn't enrolled and the OTP is missing, fail.
+ if [ ! -f /etc/krb5.keytab ]; then
+ if [ -z "${otp}" ]; then
+ echo "OTP is missing"
+ exit 1
+ fi
+ ipa-client-install --server ${ipa_server} -w ${otp} \
+ --domain=${ipa_domain} -U
+ fi
+ # Get a TGT
+ kinit -k -t /etc/krb5.keytab
+
+ FreeIPAControllerEnrollmentDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ name: FreeIPAEnrollmentDeployment
+ config: {get_resource: FreeIPAEnrollmentConfig}
+ server: {get_param: server}
+ input_values:
+ otp: {get_param: FreeIPAOTP}
+ ipa_server: {get_param: FreeIPAServer}
+ ipa_domain: {get_param: CloudDomain}
+ ipa_ip: {get_param: FreeIPAIPAddress}
+
+outputs:
+ deploy_stdout:
+ description: Output of the FreeIPA enrollment deployment
+ value: {get_attr: [FreeIPAControllerEnrollmentDeployment, deploy_stdout]}
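
The enrollment template above is typically pulled in through the resource registry and fed its parameters from an environment file. A minimal sketch, assuming it is mapped onto the OS::TripleO::ControllerExtraConfigPre hook referenced elsewhere in this patch; the domain, server names and OTP value below are purely illustrative:

    resource_registry:
      # Hypothetical mapping; choose the pre-deployment hook that matches the role.
      OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/tls/freeipa-enroll.yaml

    parameter_defaults:
      CloudDomain: example.com           # also used as the Kerberos realm
      FreeIPAServer: ipa.example.com     # if left empty, the server is discovered from DNS
      FreeIPAIPAddress: 192.0.2.10       # optional; written to /etc/hosts when set
      FreeIPAOTP: secret-enrollment-otp  # one-time password generated on the IPA server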
diff --git a/puppet/extraconfig/tls/tls-cert-inject.yaml b/puppet/extraconfig/tls/tls-cert-inject.yaml
index 49d84574..2a61afc0 100644
--- a/puppet/extraconfig/tls/tls-cert-inject.yaml
+++ b/puppet/extraconfig/tls/tls-cert-inject.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
This is a template which will build the TLS Certificates necessary
diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml
new file mode 100644
index 00000000..eae85991
--- /dev/null
+++ b/puppet/major_upgrade_steps.j2.yaml
@@ -0,0 +1,147 @@
+{% set upgrade_steps_max = 8 -%}
+heat_template_version: ocata
+description: 'Upgrade steps for all roles'
+
+parameters:
+ servers:
+ type: json
+
+ role_data:
+ type: json
+ description: Mapping of Role name e.g Controller to the per-role data
+
+ UpdateIdentifier:
+ type: string
+ description: >
+ Setting to a previously unused value during stack-update will trigger
+ the Upgrade resources to re-run on all roles.
+
+conditions:
+ # Conditions to disable any steps where the task list is empty
+{% for step in range(0, upgrade_steps_max) %}
+ {% for role in roles %}
+ UpgradeBatchConfig_Step{{step}}Enabled:
+ not:
+ equals:
+ - {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
+ - []
+ UpgradeConfig_Step{{step}}Enabled:
+ not:
+ equals:
+ - {get_param: [role_data, {{role.name}}, upgrade_tasks]}
+ - []
+ {% endfor %}
+{% endfor %}
+
+resources:
+
+# Upgrade Steps for all roles, batched updates
+# FIXME(shardy): would be nice to make the number of steps configurable
+{% for step in range(0, upgrade_steps_max) %}
+ {% for role in roles %}
+ # Step {{step}} resources
+ {{role.name}}UpgradeBatchConfig_Step{{step}}:
+ type: OS::TripleO::UpgradeConfig
+ condition: UpgradeBatchConfig_Step{{step}}Enabled
+ # The UpgradeConfig resources could actually be created without
+ # serialization, but the event output is easier to follow if we
+ # do, and there should be minimal performance hit (creating the
+ # config is cheap compared to the time to apply the deployment).
+ {% if step > 0 %}
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}UpgradeBatch_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
+ properties:
+ UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
+ step: {{step}}
+
+ {{role.name}}UpgradeBatch_Step{{step}}:
+ type: OS::Heat::StructuredDeploymentGroup
+ condition: UpgradeBatchConfig_Step{{step}}Enabled
+ {% if step > 0 %}
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}UpgradeBatch_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
+ update_policy:
+ batch_create:
+ max_batch_size: {{role.upgrade_batch_size|default(1)}}
+ rolling_update:
+ max_batch_size: {{role.upgrade_batch_size|default(1)}}
+ properties:
+ name: {{role.name}}UpgradeBatch_Step{{step}}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}UpgradeBatchConfig_Step{{step}}}
+ input_values:
+ role: {{role.name}}
+ update_identifier: {get_param: UpdateIdentifier}
+ {% endfor %}
+{% endfor %}
+
+# Upgrade Steps for all roles
+# FIXME(shardy): would be nice to make the number of steps configurable
+{% for step in range(0, upgrade_steps_max) %}
+ {% for role in roles %}
+ # Step {{step}} resources
+ {{role.name}}UpgradeConfig_Step{{step}}:
+ type: OS::TripleO::UpgradeConfig
+ condition: UpgradeConfig_Step{{step}}Enabled
+ # The UpgradeConfig resources could actually be created without
+ # serialization, but the event output is easier to follow if we
+ # do, and there should be minimal performance hit (creating the
+ # config is cheap compared to the time to apply the deployment).
+ depends_on:
+ {% if step > 0 %}
+ {% for dep in roles %}
+ {% if not dep.disable_upgrade_deployment|default(false) %}
+ - {{dep.name}}Upgrade_Step{{step -1}}
+ {% endif %}
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
+ - {{dep.name}}UpgradeBatch_Step{{upgrade_steps_max -1}}
+ {% endfor %}
+ {% endif %}
+ properties:
+ UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]}
+ step: {{step}}
+ {% if not role.disable_upgrade_deployment|default(false) %}
+ {{role.name}}Upgrade_Step{{step}}:
+ type: OS::Heat::StructuredDeploymentGroup
+ condition: UpgradeConfig_Step{{step}}Enabled
+ depends_on:
+ {% if step > 0 %}
+ {% for dep in roles %}
+ {% if not dep.disable_upgrade_deployment|default(false) %}
+ - {{dep.name}}Upgrade_Step{{step -1}}
+ {% endif %}
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
+ - {{dep.name}}UpgradeBatch_Step{{upgrade_steps_max -1}}
+ {% endfor %}
+ {% endif %}
+ properties:
+ name: {{role.name}}Upgrade_Step{{step}}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}UpgradeConfig_Step{{step}}}
+ input_values:
+ role: {{role.name}}
+ update_identifier: {get_param: UpdateIdentifier}
+ {% endif %}
+ {% endfor %}
+{% endfor %}
+
+outputs:
+ # Output the config for each role, just use Step1 as the config should be
+ # the same for all steps (only the tag provided differs)
+ upgrade_configs:
+ description: The per-role upgrade configuration used
+ value:
+{% for role in roles %}
+ {{role.name.lower()}}: {get_attr: [{{role.name}}UpgradeConfig_Step1, upgrade_config]}
+{% endfor %}
+
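The UpgradeConfig resources above consume the per-service upgrade_batch_tasks and upgrade_tasks lists, which are plain ansible tasks tagged with the step they belong to (the same pattern the aodh services use later in this patch). A minimal sketch, with a hypothetical service name:

    upgrade_tasks:
      - name: "PreUpgrade step0,validation: check example-service is running"
        shell: /usr/bin/systemctl show 'example-service' --property ActiveState | grep '\bactive\b'
        tags: step0,validation
      - name: Stop example-service before packages are updated
        tags: step2
        service: name=example-service state=stopped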
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
deleted file mode 100644
index d329d5fc..00000000
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-if hiera('step') >= 4 {
- hiera_include('controller_classes', [])
-}
-
-$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
-package_manifest{$package_manifest_name: ensure => present}
diff --git a/puppet/manifests/overcloud_role.pp b/puppet/manifests/overcloud_role.pp
index 1a59620c..e2bf5146 100644
--- a/puppet/manifests/overcloud_role.pp
+++ b/puppet/manifests/overcloud_role.pp
@@ -24,3 +24,7 @@ if hiera('step') >= 4 {
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud___ROLE__', hiera('step')])
package_manifest{$package_manifest_name: ensure => present}
+
+# NOTE(gfidente): ensure deprecated package manifest is absent, can be removed after Pike
+$absent_package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
+package_manifest{$absent_package_manifest_name: ensure => absent}
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index d7681d10..1633134d 100644
--- a/puppet/objectstorage-role.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'OpenStack swift storage node configured by Puppet'
parameters:
OvercloudSwiftStorageFlavor:
@@ -66,15 +66,25 @@ parameters:
constraints:
- allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
CloudDomain:
+ default: 'localdomain'
type: string
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ SwiftStorageServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
ObjectStorageSchedulerHints:
type: json
@@ -92,10 +102,19 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
@@ -117,7 +136,11 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: SwiftStorageServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ObjectStorageSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -129,6 +152,8 @@ resources:
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
+ - config: {get_resource: RoleUserData}
+ type: multipart
# Creates the "heat-admin" user if configured via the environment
# Should return a OS::Heat::MultipartMime reference via OS::stack_id
@@ -140,6 +165,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ # For optional operator role-specific userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ RoleUserData:
+ type: OS::TripleO::ObjectStorage::NodeUserData
+
ExternalPort:
type: OS::TripleO::SwiftStorage::Ports::ExternalPort
properties:
@@ -216,55 +246,171 @@ resources:
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+ NetHostMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ external:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - external
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - external
+ internal_api:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - internalapi
+ storage:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - storage
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - storage
+ storage_mgmt:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - storagemgmt
+ tenant:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - tenant
+ management:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - management
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - management
+ ctlplane:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - ctlplane
+
+ PreNetworkConfig:
+ type: OS::TripleO::ObjectStorage::PreNetworkConfig
+ properties:
+ server: {get_resource: SwiftStorage}
+
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: SwiftStorage}
actions: {get_param: NetworkDeploymentActions}
+ SwiftStorageUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ SwiftStorageUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: SwiftStorageUpgradeInitDeployment
+ server: {get_resource: SwiftStorage}
+ config: {get_resource: SwiftStorageUpgradeInitConfig}
+
SwiftStorageHieraConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- hierarchy:
- - '"%{::uuid}"'
- - heat_config_%{::deploy_config_name}
- - object_extraconfig
- - extraconfig
- - service_names
- - service_configs
- - object
- - bootstrap_node # provided by allNodesConfig
- - all_nodes # provided by allNodesConfig
- - vip_data # provided by allNodesConfig
- - '"%{::osfamily}"'
- merge_behavior: deeper
- datafiles:
- service_names:
- mapped_data:
- service_names: {get_param: ServiceNames}
- sensu::subscriptions: {get_param: MonitoringSubscriptions}
- service_configs:
- mapped_data:
- map_replace:
- - {get_param: ServiceConfigSettings}
- - values: {get_attr: [NetIpMap, net_ip_map]}
- object_extraconfig:
- mapped_data: {get_param: ObjectStorageExtraConfig}
- extraconfig:
- mapped_data: {get_param: ExtraConfig}
- object:
- mapped_data: # data supplied directly to this deployment configuration, etc
- tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
-
+ hierarchy:
+ - '"%{::uuid}"'
+ - heat_config_%{::deploy_config_name}
+ - object_extraconfig
+ - extraconfig
+ - service_names
+ - service_configs
+ - object
+ - bootstrap_node # provided by allNodesConfig
+ - all_nodes # provided by allNodesConfig
+ - vip_data # provided by allNodesConfig
+ - '"%{::osfamily}"'
+ merge_behavior: deeper
+ datafiles:
+ service_names:
+ service_names: {get_param: ServiceNames}
+ sensu::subscriptions: {get_param: MonitoringSubscriptions}
+ service_configs:
+ map_replace:
+ - {get_param: ServiceConfigSettings}
+ - values: {get_attr: [NetIpMap, net_ip_map]}
+ object_extraconfig: {get_param: ObjectStorageExtraConfig}
+ extraconfig: {get_param: ExtraConfig}
+ object:
+ tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
+ fqdn_internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ fqdn_storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ fqdn_storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
+ fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
SwiftStorageHieraDeploy:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: SwiftStorageUpgradeInitDeployment
properties:
name: SwiftStorageHieraDeploy
server: {get_resource: SwiftStorage}
@@ -309,48 +455,13 @@ outputs:
hostname_map:
description: Mapping of network names to hostnames
value:
- external:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - external
- - {get_param: CloudDomain}
- internal_api:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - internalapi
- - {get_param: CloudDomain}
- storage:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - storage
- - {get_param: CloudDomain}
- storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- tenant:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - tenant
- - {get_param: CloudDomain}
- management:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - management
- - {get_param: CloudDomain}
- ctlplane:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ external: {get_attr: [NetHostMap, value, external, fqdn]}
+ internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ management: {get_attr: [NetHostMap, value, management, fqdn]}
+ ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
hosts_entry:
value:
str_replace:
@@ -368,47 +479,19 @@ outputs:
DOMAIN: {get_param: CloudDomain}
PRIMARYHOST: {get_attr: [SwiftStorage, name]}
EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
- EXTERNALHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - external
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
- INTERNAL_APIHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - internalapi
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
STORAGEIP: {get_attr: [StoragePort, ip_address]}
- STORAGEHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - storage
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
- STORAGE_MGMTHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - storagemgmt
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
TENANTIP: {get_attr: [TenantPort, ip_address]}
- TENANTHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - tenant
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
- MANAGEMENTHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - management
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
- CTLPLANEHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - ctlplane
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
nova_server_resource:
description: Heat resource handle for the swift storage server
value:
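
The map_merge introduced above lets operators combine global and role-specific Nova metadata; a small illustrative environment snippet (keys and values are examples only), noting that ServiceMetadataSettings is intended to be supplied by the service chain rather than set by hand:

    parameter_defaults:
      ServerMetadata:                # applied to every role
        owner: cloud-team
      SwiftStorageServerMetadata:    # merged on top for Object Storage nodes only
        rack: swift-rack-2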
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
index 65c96ac2..83c32868 100644
--- a/puppet/post.j2.yaml
+++ b/puppet/post.j2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Post-deploy configuration steps via puppet for all roles,
@@ -21,11 +21,10 @@ parameters:
perform configuration on a Heat stack-update.
resources:
-
-{% for role in roles %}
# Post deployment steps for all roles
# A single config is re-applied with an incrementing step number
- # {{role.name}} Role steps
+{% for role in roles %}
+ # {{role.name}} Role post deploy steps
{{role.name}}ArtifactsConfig:
type: deploy-artifacts.yaml
@@ -47,73 +46,35 @@ resources:
properties:
StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
- # Step through a series of configuration steps
- {{role.name}}Deployment_Step1:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ {% if role.name == 'Controller' %}
+ ControllerPrePuppet:
+ type: OS::TripleO::Tasks::ControllerPrePuppet
properties:
- name: {{role.name}}Deployment_Step1
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
+ servers: {get_param: [servers, Controller]}
input_values:
- step: 1
update_identifier: {get_param: DeployIdentifier}
+ {% endif %}
- {{role.name}}Deployment_Step2:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step1
- {% endfor %}
- properties:
- name: {{role.name}}Deployment_Step2
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}Deployment_Step3:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step2
- {% endfor %}
- properties:
- name: {{role.name}}Deployment_Step3
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: 3
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}Deployment_Step4:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step3
- {% endfor %}
- properties:
- name: {{role.name}}Deployment_Step4
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: 4
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}Deployment_Step5:
+ # Step through a series of configuration steps
+{% for step in range(1, 6) %}
+ {{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
+ {% if step == 1 %}
+ depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ {% else %}
depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step4
- {% endfor %}
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
properties:
- name: {{role.name}}Deployment_Step5
+ name: {{role.name}}Deployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: {{role.name}}Config}
input_values:
- step: 5
+ step: {{step}}
update_identifier: {get_param: DeployIdentifier}
+{% endfor %}
{{role.name}}PostConfig:
type: OS::TripleO::Tasks::{{role.name}}PostConfig
@@ -136,4 +97,16 @@ resources:
type: OS::TripleO::NodeExtraConfigPost
properties:
servers: {get_param: [servers, {{role.name}}]}
+
+ {% if role.name == 'Controller' %}
+ ControllerPostPuppet:
+ depends_on:
+ - ControllerExtraConfigPost
+ type: OS::TripleO::Tasks::ControllerPostPuppet
+ properties:
+ servers: {get_param: [servers, Controller]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+ {% endif %}
+
{% endfor %}
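
For reference, with a hypothetical two-role layout (Controller and Compute), the step loop above renders each role's second deployment roughly as:

    ControllerDeployment_Step2:
      type: OS::Heat::StructuredDeploymentGroup
      depends_on:
        - ControllerDeployment_Step1
        - ComputeDeployment_Step1
      properties:
        name: ControllerDeployment_Step2
        servers: {get_param: [servers, Controller]}
        config: {get_resource: ControllerConfig}
        input_values:
          step: 2
          update_identifier: {get_param: DeployIdentifier}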
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index e4307001..2f070da2 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -1,17 +1,21 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'OpenStack {{role}} node configured by Puppet'
parameters:
Overcloud{{role}}Flavor:
description: Flavor for the {{role}} node.
default: baremetal
type: string
+{% if disable_constraints is not defined %}
constraints:
- custom_constraint: nova.flavor
+{% endif %}
{{role}}Image:
type: string
default: overcloud-full
+{% if disable_constraints is not defined %}
constraints:
- custom_constraint: glance.image
+{% endif %}
ImageUpdatePolicy:
default: 'REBUILD_PRESERVE_EPHEMERAL'
description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
@@ -20,8 +24,14 @@ parameters:
description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
default: default
+{% if disable_constraints is not defined %}
constraints:
- custom_constraint: nova.keypair
+{% endif %}
+ NeutronPublicInterface:
+ default: nic1
+ description: What interface to bridge onto br-ex for network nodes.
+ type: string
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -72,15 +82,25 @@ parameters:
constraints:
- allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
CloudDomain:
+ default: 'localdomain'
type: string
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ {{role}}ServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
{{role}}SchedulerHints:
type: json
@@ -98,6 +118,9 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
@@ -108,6 +131,13 @@ parameters:
LoggingGroups:
type: comma_delimited_list
default: []
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
+
resources:
{{role}}:
@@ -129,7 +159,11 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: {{role}}ServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: {{role}}SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -141,6 +175,8 @@ resources:
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
+ - config: {get_resource: RoleUserData}
+ type: multipart
# Creates the "heat-admin" user if configured via the environment
# Should return a OS::Heat::MultipartMime reference via OS::stack_id
@@ -152,6 +188,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ # For optional operator role-specific userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ RoleUserData:
+ type: OS::TripleO::{{role}}::NodeUserData
+
ExternalPort:
type: OS::TripleO::{{role}}::Ports::ExternalPort
properties:
@@ -228,17 +269,137 @@ resources:
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+ NetHostMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ external:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - external
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - external
+ internal_api:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - internalapi
+ storage:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - storage
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - storage
+ storage_mgmt:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - storagemgmt
+ tenant:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - tenant
+ management:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - management
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - management
+ ctlplane:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - ctlplane
+
+ PreNetworkConfig:
+ type: OS::TripleO::{{role}}::PreNetworkConfig
+ properties:
+ server: {get_resource: {{role}}}
+
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: {{role}}}
actions: {get_param: NetworkDeploymentActions}
+ input_values:
+ bridge_name: br-ex
+ interface_name: {get_param: NeutronPublicInterface}
+
+ {{role}}UpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ {{role}}UpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: {{role}}UpgradeInitDeployment
+ server: {get_resource: {{role}}}
+ config: {get_resource: {{role}}UpgradeInitConfig}
{{role}}Deployment:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: {{role}}UpgradeInitDeployment
properties:
name: {{role}}Deployment
config: {get_resource: {{role}}Config}
@@ -249,40 +410,41 @@ resources:
{{role}}Config:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: hiera
config:
- hiera:
- hierarchy:
- - '"%{::uuid}"'
- - heat_config_%{::deploy_config_name}
- - {{role.lower()}}_extraconfig
- - extraconfig
- - service_names
- - service_configs
- - bootstrap_node # provided by allNodesConfig
- - all_nodes # provided by allNodesConfig
- - vip_data # provided by allNodesConfig
- - '"%{::osfamily}"'
- merge_behavior: deeper
- datafiles:
- service_names:
- mapped_data:
- service_names: {get_param: ServiceNames}
- sensu::subscriptions: {get_param: MonitoringSubscriptions}
- service_configs:
- mapped_data:
- map_replace:
- - {get_param: ServiceConfigSettings}
- - values: {get_attr: [NetIpMap, net_ip_map]}
- {{role.lower()}}_extraconfig:
- mapped_data: {get_param: {{role}}ExtraConfig}
- extraconfig:
- mapped_data: {get_param: ExtraConfig}
- {{role.lower()}}:
- mapped_data:
- tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
- tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: LoggingSources}
- tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: LoggingGroups}
+ hierarchy:
+ - '"%{::uuid}"'
+ - heat_config_%{::deploy_config_name}
+ - {{role.lower()}}_extraconfig
+ - extraconfig
+ - service_names
+ - service_configs
+ - {{role.lower()}}
+ - bootstrap_node # provided by allNodesConfig
+ - all_nodes # provided by allNodesConfig
+ - vip_data # provided by allNodesConfig
+ - '"%{::osfamily}"'
+ merge_behavior: deeper
+ datafiles:
+ service_names:
+ service_names: {get_param: ServiceNames}
+ sensu::subscriptions: {get_param: MonitoringSubscriptions}
+ service_configs:
+ map_replace:
+ - {get_param: ServiceConfigSettings}
+ - values: {get_attr: [NetIpMap, net_ip_map]}
+ {{role.lower()}}_extraconfig: {get_param: {{role}}ExtraConfig}
+ extraconfig: {get_param: ExtraConfig}
+ {{role.lower()}}:
+ tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
+ tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: LoggingSources}
+ tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: LoggingGroups}
+ fqdn_internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ fqdn_storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ fqdn_storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
+ fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -328,48 +490,13 @@ outputs:
hostname_map:
description: Mapping of network names to hostnames
value:
- external:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - external
- - {get_param: CloudDomain}
- internal_api:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - internalapi
- - {get_param: CloudDomain}
- storage:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - storage
- - {get_param: CloudDomain}
- storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- tenant:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - tenant
- - {get_param: CloudDomain}
- management:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - management
- - {get_param: CloudDomain}
- ctlplane:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ external: {get_attr: [NetHostMap, value, external, fqdn]}
+ internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ management: {get_attr: [NetHostMap, value, management, fqdn]}
+ ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
hosts_entry:
value:
str_replace:
@@ -387,47 +514,19 @@ outputs:
DOMAIN: {get_param: CloudDomain}
PRIMARYHOST: {get_attr: [{{role}}, name]}
EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
- EXTERNALHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - external
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
- INTERNAL_APIHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - internalapi
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
STORAGEIP: {get_attr: [StoragePort, ip_address]}
- STORAGEHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - storage
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
- STORAGE_MGMTHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - storagemgmt
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
TENANTIP: {get_attr: [TenantPort, ip_address]}
- TENANTHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - tenant
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
- MANAGEMENTHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - management
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
- CTLPLANEHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - ctlplane
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
nova_server_resource:
description: Heat resource handle for {{role}} server
value:
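
The NetHostMap value and the new hiera group above publish per-network fqdn_* keys in each node's hieradata, which the service templates later in this patch consume via "%{hiera('fqdn_$NETWORK')}". With an illustrative node name and domain, the rendered keys look like:

    # Illustrative hieradata for one node of the role
    fqdn_internal_api: overcloud-controller-0.internalapi.example.com
    fqdn_storage: overcloud-controller-0.storage.example.com
    fqdn_storage_mgmt: overcloud-controller-0.storagemgmt.example.com
    fqdn_ctlplane: overcloud-controller-0.ctlplane.example.com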
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index 8fe51fa3..34cb350b 100644
--- a/puppet/services/README.rst
+++ b/puppet/services/README.rst
@@ -22,8 +22,8 @@ Config Settings
Each service may define a config_settings output variable which returns
Hiera settings to be configured.
-Steps
------
+Deployment Steps
+----------------
Each service may define an output variable which returns a puppet manifest
snippet that will run at each of the following steps. Earlier manifests
@@ -49,8 +49,64 @@ are re-asserted when applying latter ones.
5) Service activation (Pacemaker)
- 6) Fencing (Pacemaker)
+Batch Upgrade Steps
+-------------------
+
+Each service template may optionally define an `upgrade_batch_tasks` key, which
+is a list of ansible tasks to be performed during the upgrade process.
+
+Similar to the step_config, we allow a series of steps for the per-service
+upgrade sequence, defined as ansible tasks with a tag e.g "step1" for the first
+step, "step2" for the second, etc. Note that each step is performed in batches,
+then we move on to the next step, which is also performed in batches (we don't
+perform all steps on one node before moving on to the next), which means you
+can sequence rolling upgrades of dependent services via the step value.
+
+The tasks performed at each step are service specific, but note that all batch
+upgrade steps are performed before the `upgrade_tasks` described below. This
+means that all services that support rolling upgrades can be upgraded without
+downtime during `upgrade_batch_tasks`, then any remaining services are stopped
+and upgraded during `upgrade_tasks`.
+
+The default batch size is 1, but this can be overridden for each role via the
+`upgrade_batch_size` option in roles_data.yaml.
+
+Upgrade Steps
+-------------
+
+Each service template may optionally define an `upgrade_tasks` key, which is a
+list of ansible tasks to be performed during the upgrade process.
+
+Similar to the step_config, we allow a series of steps for the per-service
+upgrade sequence, defined as ansible tasks with a tag e.g "step1" for the first
+step, "step2" for the second, etc.
+
+ Steps/tags correlate to the following:
+
+ 1) Quiesce the control-plane, e.g disable LoadBalancer, stop pacemaker cluster
+
+ 2) Stop all control-plane services, ready for upgrade
+
+ 3) Perform a package update, (either specific packages or the whole system)
+
+ 4) Start services needed for migration tasks (e.g DB)
+
+ 5) Perform any migration tasks, e.g DB sync commands
+
+ 6) Start control-plane services
+
+ 7) Any additional online migration tasks (e.g data migrations)
+
+Nova Server Metadata Settings
+-----------------------------
+
+One can use the hook of type `OS::TripleO::ServiceServerMetadataHook` to pass
+entries to the nova instances' metadata. It is, however, disabled by default.
+In order to override it one needs to define it in the resource registry. An
+implementation of this hook needs to conform to the following:
-Note: Not all roles currently support all steps:
+* It needs to define an input called `RoleData` of json type. This gets as
+ input the contents of the `role_data` for each role's ServiceChain.
- * ObjectStorage role only supports steps 2, 3 and 4
+* This needs to define an output called `metadata` which will be given to the
+ Nova Server resource as the instance's metadata.
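
A sketch of what a conforming OS::TripleO::ServiceServerMetadataHook implementation could look like, given only the interface stated above; the metadata key exposed here is illustrative:

    heat_template_version: ocata

    description: >
      Hypothetical ServiceServerMetadataHook implementation: it accepts the
      role_data of the role's ServiceChain and returns a metadata map for the
      Nova server resource.

    parameters:
      RoleData:
        type: json

    outputs:
      metadata:
        description: Metadata entries handed to the Nova server resource
        value:
          # Illustrative only: tag each instance with its role's service names
          managed_services:
            list_join:
              - ','
              - {get_param: [RoleData, service_names]}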
diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml
index da043c80..2401d764 100644
--- a/puppet/services/aodh-api.yaml
+++ b/puppet/services/aodh-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Aodh API service configured with Puppet
@@ -21,11 +21,9 @@ parameters:
MonitoringSubscriptionAodhApi:
default: 'overcloud-ceilometer-aodh-api'
type: string
- EnableCombinationAlarms:
- default: false
- description: Combination alarms are deprecated in Newton, hence disabled
- by default. To enable, set this parameter to true.
+ EnableInternalTLS:
type: boolean
+ default: false
resources:
AodhBase:
@@ -41,6 +39,7 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
role_data:
@@ -52,13 +51,14 @@ outputs:
map_merge:
- get_attr: [AodhBase, role_data, config_settings]
- get_attr: [ApacheServiceBase, role_data, config_settings]
- - aodh::wsgi::apache::ssl: false
+ - aodh::wsgi::apache::ssl: {get_param: EnableInternalTLS}
aodh::wsgi::apache::servername:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, AodhApiNetwork]}
+ aodh::wsgi::apache::wsgi_process_display_name: 'aodh_wsgi'
aodh::api::service_name: 'httpd'
aodh::api::enable_proxy_headers_parsing: true
tripleo.aodh_api.firewall_rules:
@@ -66,16 +66,32 @@ outputs:
dport:
- 8042
- 13042
+ aodh::api::host:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, AodhApiNetwork]}
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
# (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- aodh::api::host: {get_param: [ServiceNetMap, AodhApiNetwork]}
aodh::wsgi::apache::bind_host: {get_param: [ServiceNetMap, AodhApiNetwork]}
- tripleo::profile::base::aodh::api::enable_combination_alarms: {get_param: EnableCombinationAlarms}
service_config_settings:
get_attr: [AodhBase, role_data, service_config_settings]
step_config: |
include tripleo::profile::base::aodh::api
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-aodh-api is running"
+ shell: /usr/bin/systemctl show 'openstack-aodh-api' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop aodh_api service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped
+ - name: Run aodh dbsync
+ tags: step5
+ command: aodh-dbsync
diff --git a/puppet/services/aodh-base.yaml b/puppet/services/aodh-base.yaml
index 0e2410f7..8648a971 100644
--- a/puppet/services/aodh-base.yaml
+++ b/puppet/services/aodh-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Aodh service configured with Puppet
@@ -69,6 +69,8 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/aodh'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
aodh::debug: {get_param: Debug}
aodh::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::rabbit_userid: {get_param: RabbitUserName}
diff --git a/puppet/services/aodh-evaluator.yaml b/puppet/services/aodh-evaluator.yaml
index 405c500e..56dbb558 100644
--- a/puppet/services/aodh-evaluator.yaml
+++ b/puppet/services/aodh-evaluator.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Aodh Evaluator service configured with Puppet
@@ -40,3 +40,10 @@ outputs:
get_attr: [AodhBase, role_data, config_settings]
step_config: |
include tripleo::profile::base::aodh::evaluator
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-aodh-evaluator is running"
+ shell: /usr/bin/systemctl show 'openstack-aodh-evaluator' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop aodh_evaluator service
+ tags: step2
+ service: name=openstack-aodh-evaluator state=stopped
diff --git a/puppet/services/aodh-listener.yaml b/puppet/services/aodh-listener.yaml
index fc4e8b39..76db0ca8 100644
--- a/puppet/services/aodh-listener.yaml
+++ b/puppet/services/aodh-listener.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Aodh Listener service configured with Puppet
@@ -40,3 +40,10 @@ outputs:
get_attr: [AodhBase, role_data, config_settings]
step_config: |
include tripleo::profile::base::aodh::listener
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-aodh-listener is running"
+ shell: /usr/bin/systemctl show 'openstack-aodh-listener' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop aodh_listener service
+ tags: step2
+ service: name=openstack-aodh-listener state=stopped
diff --git a/puppet/services/aodh-notifier.yaml b/puppet/services/aodh-notifier.yaml
index 2e51c639..30c67635 100644
--- a/puppet/services/aodh-notifier.yaml
+++ b/puppet/services/aodh-notifier.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Aodh Notifier service configured with Puppet
@@ -40,3 +40,10 @@ outputs:
get_attr: [AodhBase, role_data, config_settings]
step_config: |
include tripleo::profile::base::aodh::notifier
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-aodh-notifier is running"
+ shell: /usr/bin/systemctl show 'openstack-aodh-notifier' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop aodh_notifier service
+ tags: step2
+ service: name=openstack-aodh-notifier state=stopped
diff --git a/puppet/services/apache-internal-tls-certmonger.yaml b/puppet/services/apache-internal-tls-certmonger.yaml
index 87e53f13..4c21e02a 100644
--- a/puppet/services/apache-internal-tls-certmonger.yaml
+++ b/puppet/services/apache-internal-tls-certmonger.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Apache service TLS configurations.
@@ -21,6 +21,22 @@ parameters:
via parameter_defaults in the resource registry.
type: json
+resources:
+
+ ApacheNetworks:
+ type: OS::Heat::Value
+ properties:
+ value:
+ # NOTE(jaosorior) Get unique network names to create
+ # certificates for those. We skip the tenant network since
+ # we don't need a certificate for that, and the external
+ # network will be handled in another template.
+ yaql:
+ expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
+ data:
+ map:
+ get_param: ServiceNetMap
+
outputs:
role_data:
description: Role data for the Apache role.
@@ -35,16 +51,19 @@ outputs:
httpd-NETWORK:
service_certificate: '/etc/pki/tls/certs/httpd-NETWORK.crt'
service_key: '/etc/pki/tls/private/httpd-NETWORK.key'
- hostname: "%{::fqdn_NETWORK}"
- principal: "HTTP/%{::fqdn_NETWORK}"
+ hostname: "%{hiera('fqdn_NETWORK')}"
+ principal: "HTTP/%{hiera('fqdn_NETWORK')}"
for_each:
- NETWORK:
- # NOTE(jaosorior) Get unique network names to create
- # certificates for those. We skip the tenant network since
- # we don't need a certificate for that, and the external
- # network will be handled in another template.
- yaql:
- expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
- data:
- map:
- get_param: ServiceNetMap
+ NETWORK: {get_attr: [ApacheNetworks, value]}
+ metadata_settings:
+ repeat:
+ template:
+ - service: HTTP
+ network: $NETWORK
+ type: node
+ for_each:
+ $NETWORK: {get_attr: [ApacheNetworks, value]}
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service httpd is running"
+ shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
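
The repeat block above expands to one node-type entry per non-external, non-tenant network in ServiceNetMap; for a map that resolves to internal_api and storage (network names illustrative), metadata_settings evaluates to:

    metadata_settings:
      - service: HTTP
        network: internal_api
        type: node
      - service: HTTP
        network: storage
        type: node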
diff --git a/puppet/services/apache.yaml b/puppet/services/apache.yaml
index 382e0ff9..74ddbde8 100644
--- a/puppet/services/apache.yaml
+++ b/puppet/services/apache.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Apache service configured with Puppet. Note this is typically included
@@ -64,3 +64,9 @@ outputs:
apache::mod::prefork::serverlimit: { get_param: ApacheServerLimit }
apache::mod::remoteip::proxy_ips:
- "%{hiera('apache_remote_proxy_ips_network')}"
+ metadata_settings:
+ get_attr: [ApacheTLS, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service httpd is running"
+ shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
diff --git a/puppet/services/auditd.yaml b/puppet/services/auditd.yaml
new file mode 100644
index 00000000..639631e1
--- /dev/null
+++ b/puppet/services/auditd.yaml
@@ -0,0 +1,34 @@
+heat_template_version: ocata
+
+description: >
+ AuditD configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ AuditdRules:
+ description: Mapping of auditd rules
+ type: json
+ default: {}
+
+outputs:
+ role_data:
+ description: Role data for the auditd service
+ value:
+ service_name: auditd
+ config_settings:
+ auditd::rules: {get_param: AuditdRules}
+ step_config: |
+ include ::tripleo::profile::base::auditd
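The new auditd service exposes a single AuditdRules hash that is handed straight to the Puppet profile. A hedged sketch of an environment file that feeds it rules; the content/order layout and the registry key name are assumptions, not taken from this patch:

    resource_registry:
      # registry key name assumed for illustration
      OS::TripleO::Services::AuditD: ../puppet/services/auditd.yaml

    parameter_defaults:
      AuditdRules:
        'Record attempts to alter time through adjtimex':
          content: '-a always,exit -F arch=b64 -S adjtimex -k audit_time_rules'
          order: 1
        'Record events that modify user/group information':
          content: '-w /etc/group -p wa -k audit_rules_usergroup_modification'
          order: 2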
diff --git a/puppet/services/barbican-api.yaml b/puppet/services/barbican-api.yaml
new file mode 100644
index 00000000..186af1cc
--- /dev/null
+++ b/puppet/services/barbican-api.yaml
@@ -0,0 +1,152 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Barbican API service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ BarbicanPassword:
+ description: The password for the barbican service account.
+ type: string
+ hidden: true
+ BarbicanWorkers:
+ description: Set the number of workers for barbican::wsgi::apache
+ default: '%{::processorcount}'
+ type: string
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Barbican API role.
+ value:
+ service_name: barbican_api
+ config_settings:
+ map_merge:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - barbican::keystone::authtoken::password: {get_param: BarbicanPassword}
+ barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ barbican::keystone::authtoken::project_name: 'service'
+ barbican::api::host_href: {get_param: [EndpointMap, BarbicanPublic, uri]}
+ barbican::api::db_auto_create: false
+ barbican::api::enabled_certificate_plugins: ['simple_certificate']
+ barbican::api::logging::debug: {get_param: Debug}
+ barbican::api::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ barbican::api::rabbit_userid: {get_param: RabbitUserName}
+ barbican::api::rabbit_password: {get_param: RabbitPassword}
+ barbican::api::rabbit_port: {get_param: RabbitClientPort}
+ barbican::api::rabbit_heartbeat_timeout_threshold: 60
+ barbican::api::service_name: 'httpd'
+ barbican::wsgi::apache::bind_host: {get_param: [ServiceNetMap, BarbicanApiNetwork]}
+ barbican::wsgi::apache::ssl: {get_param: EnableInternalTLS}
+ barbican::wsgi::apache::workers: {get_param: BarbicanWorkers}
+ barbican::wsgi::apache::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, BarbicanApiNetwork]}
+ barbican::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://barbican:'
+ - {get_param: BarbicanPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/barbican'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ tripleo.barbican_api.firewall_rules:
+ '117 barbican':
+ dport:
+ - 9311
+ - 13311
+ step_config: |
+ include ::tripleo::profile::base::barbican::api
+ service_config_settings:
+ mysql:
+ barbican::db::mysql::password: {get_param: BarbicanPassword}
+ barbican::db::mysql::user: barbican
+ barbican::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ barbican::db::mysql::dbname: barbican
+ barbican::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+ keystone:
+ barbican::keystone::auth::public_url: {get_param: [EndpointMap, BarbicanPublic, uri]}
+ barbican::keystone::auth::internal_url: {get_param: [EndpointMap, BarbicanInternal, uri]}
+ barbican::keystone::auth::admin_url: {get_param: [EndpointMap, BarbicanAdmin, uri]}
+ barbican::keystone::auth::password: {get_param: BarbicanPassword}
+ barbican::keystone::auth::region: {get_param: KeystoneRegion}
+ barbican::keystone::auth::tenant: 'service'
+ nova_compute:
+ nova::compute::keymgr_api_class: >
+ castellan.key_manager.barbican_key_manager.BarbicanKeyManager
+ nova::compute::barbican_endpoint:
+ get_param: [EndpointMap, BarbicanInternal, uri]
+ nova::compute::barbican_auth_endpoint:
+ get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix]
+ cinder_api:
+ cinder::api::keymgr_api_class: >
+ castellan.key_manager.barbican_key_manager.BarbicanKeyManager
+ cinder::api::keymgr_encryption_api_url:
+ get_param: [EndpointMap, BarbicanInternal, uri]
+ cinder::api::keymgr_encryption_auth_url:
+ get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix]
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running"
+ shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
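The list_join for barbican::db::database_connection, and the matching '?bind_address=' suffix this series adds to the other services, just assembles a SQLAlchemy URL whose bind_address is left for hiera to resolve at Puppet run time. With hypothetical endpoint values, and assuming the usual mysql+pymysql protocol from EndpointMap, it renders roughly as:

    # illustrative values only
    barbican::db::database_connection: >-
      mysql+pymysql://barbican:SECRET@192.0.2.10/barbican?bind_address=%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}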
diff --git a/puppet/services/ca-certs.yaml b/puppet/services/ca-certs.yaml
index 1a534156..735e6dde 100644
--- a/puppet/services/ca-certs.yaml
+++ b/puppet/services/ca-certs.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
HAproxy service configured with Puppet
diff --git a/puppet/services/ceilometer-agent-central.yaml b/puppet/services/ceilometer-agent-central.yaml
index c4abc307..cf8a8a8e 100644
--- a/puppet/services/ceilometer-agent-central.yaml
+++ b/puppet/services/ceilometer-agent-central.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer Central Agent service configured with Puppet
@@ -54,3 +54,10 @@ outputs:
- ceilometer_redis_password: {get_param: RedisPassword}
step_config: |
include ::tripleo::profile::base::ceilometer::agent::central
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-central is running"
+ shell: /usr/bin/systemctl show 'openstack-ceilometer-central' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop ceilometer_agent_central service
+ tags: step2
+ service: name=openstack-ceilometer-central state=stopped
diff --git a/puppet/services/ceilometer-agent-compute.yaml b/puppet/services/ceilometer-agent-compute.yaml
index 5457539c..00042914 100644
--- a/puppet/services/ceilometer-agent-compute.yaml
+++ b/puppet/services/ceilometer-agent-compute.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer Compute Agent service configured with Puppet
@@ -21,6 +21,12 @@ parameters:
MonitoringSubscriptionCeilometerCompute:
default: 'overcloud-ceilometer-agent-compute'
type: string
+ InstanceDiscoveryMethod:
+ default: 'libvirt_metadata'
+ description: Method used to discover instances running on the compute node
+ type: string
+ constraints:
+ - allowed_values: ['naive', 'libvirt_metadata', 'workload_partitioning']
resources:
CeilometerServiceBase:
@@ -37,6 +43,15 @@ outputs:
service_name: ceilometer_agent_compute
monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCompute}
config_settings:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
+ map_merge:
+ - get_attr: [CeilometerServiceBase, role_data, config_settings]
+ - ceilometer::agent::compute::instance_discovery_method: {get_param: InstanceDiscoveryMethod}
step_config: |
include ::tripleo::profile::base::ceilometer::agent::compute
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-compute is running"
+ shell: /usr/bin/systemctl show 'openstack-ceilometer-compute' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop ceilometer_agent_compute service
+ tags: step2
+ service: name=openstack-ceilometer-compute state=stopped
diff --git a/puppet/services/ceilometer-agent-notification.yaml b/puppet/services/ceilometer-agent-notification.yaml
index ea403aa1..760acd65 100644
--- a/puppet/services/ceilometer-agent-notification.yaml
+++ b/puppet/services/ceilometer-agent-notification.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer Notification Agent service configured with Puppet
@@ -49,3 +49,10 @@ outputs:
get_attr: [CeilometerServiceBase, role_data, config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::agent::notification
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-notification is running"
+ shell: /usr/bin/systemctl show 'openstack-ceilometer-notification' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop ceilometer_agent_notification service
+ tags: step2
+ service: name=openstack-ceilometer-notification state=stopped
diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml
index 27c32bfd..9ee07592 100644
--- a/puppet/services/ceilometer-api.yaml
+++ b/puppet/services/ceilometer-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer API service configured with Puppet
@@ -26,7 +26,9 @@ parameters:
default:
tag: openstack.ceilometer.api
path: /var/log/ceilometer/api.log
-
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
CeilometerServiceBase:
@@ -42,6 +44,7 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
role_data:
@@ -69,16 +72,30 @@ outputs:
# internal_api_subnet - > IP/CIDR
- ceilometer::api::service_name: 'httpd'
ceilometer::api::enable_proxy_headers_parsing: true
- ceilometer::api::host: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
+ ceilometer::api::host:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
ceilometer::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
- ceilometer::wsgi::apache::ssl: false
+ ceilometer::wsgi::apache::ssl: {get_param: EnableInternalTLS}
ceilometer::wsgi::apache::servername:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
service_config_settings:
get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::api
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-api is running"
+ shell: /usr/bin/systemctl show 'openstack-ceilometer-api' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop ceilometer_api service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped
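The servername/host changes in this file, repeated across the series, swap the facter-style "%{::fqdn_NETWORK}" interpolation for an explicit hiera lookup. A small sketch, assuming CeilometerApiNetwork maps to internal_api, of what the str_replace leaves in the generated hieradata:

    ceilometer::wsgi::apache::servername:
      str_replace:
        template:
          "%{hiera('fqdn_$NETWORK')}"
        params:
          $NETWORK: internal_api
    # Heat substitutes the parameter, producing the literal string
    #   %{hiera('fqdn_internal_api')}
    # which Puppet later interpolates to the node's FQDN on that network.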
diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml
index 4ace7526..a86a0cdf 100644
--- a/puppet/services/ceilometer-base.yaml
+++ b/puppet/services/ceilometer-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer service configured with Puppet
@@ -31,19 +31,25 @@ parameters:
type: string
hidden: true
CeilometerMeterDispatcher:
- default: 'gnocchi'
- description: Dispatcher to process meter data
- type: string
+ default: ['gnocchi']
+ description: Comma-separated list of dispatchers to process meter data
+ type: comma_delimited_list
constraints:
- allowed_values: ['gnocchi', 'database']
+ CeilometerEventDispatcher:
+ default: ['gnocchi']
+ description: Comma-separated list of dispatchers to process event data
+ type: comma_delimited_list
+ constraints:
+ - allowed_values: ['panko', 'gnocchi', 'database']
CeilometerWorkers:
default: 0
description: Number of workers for Ceilometer service.
type: number
- CeilometerStoreEvents:
- default: false
- description: Whether to store events in ceilometer.
- type: boolean
+ EventPipelinePublishers:
+ default: ['notifier://?topic=alarm.all']
+ description: A list of publishers to put in event_pipeline.yaml.
+ type: comma_delimited_list
Debug:
default: ''
description: Set to True to enable debugging on all services.
@@ -87,6 +93,8 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ceilometer'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
ceilometer_backend: {get_param: CeilometerBackend}
ceilometer::metering_secret: {get_param: CeilometerMeteringSecret}
# we include db_sync class in puppet-tripleo
@@ -97,11 +105,12 @@ outputs:
ceilometer::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
- ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents}
+ ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers}
ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion}
ceilometer::agent::auth::auth_tenant_name: 'service'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
+ ceilometer::collector::event_dispatcher: {get_param: CeilometerEventDispatcher}
ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]}
ceilometer::dispatcher::gnocchi::filter_project: 'service'
ceilometer::dispatcher::gnocchi::archive_policy: 'low'
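With the dispatcher parameters now typed as comma_delimited_list, several backends can be enabled at once. A sketch of the corresponding parameter_defaults; any value must still come from the allowed_values lists above:

    parameter_defaults:
      CeilometerMeterDispatcher: ['gnocchi', 'database']
      CeilometerEventDispatcher: ['panko', 'gnocchi']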
diff --git a/puppet/services/ceilometer-collector.yaml b/puppet/services/ceilometer-collector.yaml
index e3f1ef4e..a219f9eb 100644
--- a/puppet/services/ceilometer-collector.yaml
+++ b/puppet/services/ceilometer-collector.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer Collector service configured with Puppet
@@ -59,3 +59,10 @@ outputs:
get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::collector
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-collector is running"
+ shell: /usr/bin/systemctl show 'openstack-ceilometer-collector' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop ceilometer_collector service
+ tags: step2
+ service: name=openstack-ceilometer-collector state=stopped
diff --git a/puppet/services/ceilometer-expirer.yaml b/puppet/services/ceilometer-expirer.yaml
index 3b811c4d..5341cfee 100644
--- a/puppet/services/ceilometer-expirer.yaml
+++ b/puppet/services/ceilometer-expirer.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer Expirer service configured with Puppet
@@ -40,3 +40,7 @@ outputs:
get_attr: [CeilometerServiceBase, role_data, config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::expirer
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-expirer is running"
+ shell: /usr/bin/systemctl show 'openstack-ceilometer-expirer' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
diff --git a/puppet/services/ceph-base.yaml b/puppet/services/ceph-base.yaml
index 786e9ddd..033d3f77 100644
--- a/puppet/services/ceph-base.yaml
+++ b/puppet/services/ceph-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Ceph base service. Shared by all Ceph services.
@@ -119,36 +119,33 @@ outputs:
NETWORK: {get_param: [ServiceNetMap, CephMonNetwork]}
ceph::profile::params::public_addr: {get_param: [ServiceNetMap, CephMonNetwork]}
ceph::profile::params::client_keys:
- str_replace:
- template: "{
- client.admin: {
- secret: 'ADMIN_KEY',
- mode: '0600',
- cap_mon: 'allow *',
- cap_osd: 'allow *',
+ map_replace:
+ - client.admin:
+ secret: {get_param: CephAdminKey}
+ mode: '0600'
+ cap_mon: 'allow *'
+ cap_osd: 'allow *'
cap_mds: 'allow *'
- },
- client.bootstrap-osd: {
- secret: 'ADMIN_KEY',
- keyring_path: '/var/lib/ceph/bootstrap-osd/ceph.keyring',
+ client.bootstrap-osd:
+ secret: {get_param: CephAdminKey}
+ keyring_path: '/var/lib/ceph/bootstrap-osd/ceph.keyring'
cap_mon: 'allow profile bootstrap-osd'
- },
- client.CLIENT_USER: {
- secret: 'CLIENT_KEY',
- mode: '0644',
- cap_mon: 'allow r',
- cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
- }
- }"
- params:
- CLIENT_USER: {get_param: CephClientUserName}
- CLIENT_KEY: {get_param: CephClientKey}
- ADMIN_KEY: {get_param: CephAdminKey}
- NOVA_POOL: {get_param: NovaRbdPoolName}
- CINDER_POOL: {get_param: CinderRbdPoolName}
- CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
- GLANCE_POOL: {get_param: GlanceRbdPoolName}
- GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
+ CEPH_CLIENT_KEY:
+ secret: {get_param: CephClientKey}
+ mode: '0644'
+ cap_mon: 'allow r'
+ cap_osd:
+ str_replace:
+ template: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
+ params:
+ NOVA_POOL: {get_param: NovaRbdPoolName}
+ CINDER_POOL: {get_param: CinderRbdPoolName}
+ CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
+ GLANCE_POOL: {get_param: GlanceRbdPoolName}
+ GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
+ - keys:
+ CEPH_CLIENT_KEY:
+ list_join: ['.', ['client', {get_param: CephClientUserName}]]
service_config_settings:
glance_api:
glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
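The switch from a quoted str_replace blob to map_replace makes ceph::profile::params::client_keys real structured data; map_replace only renames the placeholder CEPH_CLIENT_KEY key to client.<CephClientUserName>. A reduced sketch, assuming a client user of openstack and placeholder key material:

    map_replace:
      - CEPH_CLIENT_KEY:
          secret: 'AQhypotheticalkey=='
          mode: '0644'
          cap_mon: 'allow r'
      - keys:
          CEPH_CLIENT_KEY: client.openstack
    # result:
    #   client.openstack:
    #     secret: 'AQhypotheticalkey=='
    #     mode: '0644'
    #     cap_mon: 'allow r'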
diff --git a/puppet/services/ceph-client.yaml b/puppet/services/ceph-client.yaml
index b482dd2e..f972e21b 100644
--- a/puppet/services/ceph-client.yaml
+++ b/puppet/services/ceph-client.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Ceph Client service.
diff --git a/puppet/services/ceph-external.yaml b/puppet/services/ceph-external.yaml
index 7d75074c..134f47c4 100644
--- a/puppet/services/ceph-external.yaml
+++ b/puppet/services/ceph-external.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Ceph External service.
@@ -27,9 +27,20 @@ parameters:
GlanceRbdPoolName:
default: images
type: string
+ GlanceBackend:
+ default: swift
+ description: The short name of the Glance backend to use. Should be one
+ of swift, rbd, or file
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd']
GnocchiRbdPoolName:
default: metrics
type: string
+ NovaEnableRbdBackend:
+ default: false
+ description: Whether or not to enable the Rbd backend for Nova
+ type: boolean
NovaRbdPoolName:
default: vms
type: string
@@ -50,6 +61,22 @@ parameters:
MonitoringSubscriptionCephExternal:
default: 'overcloud-ceph-external'
type: string
+ RbdDefaultFeatures:
+ default: ''
+ description: The default features enabled when creating a block device
+ image. Only applies to format 2 images. Set to '1' for Jewel
+ clients using older Ceph servers.
+ type: string
+
+conditions:
+ glance_multiple_locations:
+ and:
+ - equals:
+ - get_param: GlanceBackend
+ - rbd
+ - equals:
+ - get_param: NovaEnableRbdBackend
+ - true
outputs:
role_data:
@@ -60,25 +87,35 @@ outputs:
config_settings:
tripleo::profile::base::ceph::ceph_mon_host: {get_param: CephExternalMonHost}
ceph::profile::params::fsid: {get_param: CephClusterFSID}
+ ceph::profile::params::rbd_default_features: {get_param: RbdDefaultFeatures}
ceph::profile::params::client_keys:
- str_replace:
- template: "{
- client.CLIENT_USER: {
- secret: 'CLIENT_KEY',
- mode: '0644',
- cap_mon: 'allow r',
- cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
- }
- }"
- params:
- CLIENT_USER: {get_param: CephClientUserName}
- CLIENT_KEY: {get_param: CephClientKey}
- NOVA_POOL: {get_param: NovaRbdPoolName}
- CINDER_POOL: {get_param: CinderRbdPoolName}
- CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
- GLANCE_POOL: {get_param: GlanceRbdPoolName}
- GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
+ map_replace:
+ - CEPH_CLIENT_KEY:
+ secret: {get_param: CephClientKey}
+ mode: '0644'
+ cap_mon: 'allow r'
+ cap_osd:
+ str_replace:
+ template: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
+ params:
+ NOVA_POOL: {get_param: NovaRbdPoolName}
+ CINDER_POOL: {get_param: CinderRbdPoolName}
+ CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
+ GLANCE_POOL: {get_param: GlanceRbdPoolName}
+ GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
+ - keys:
+ CEPH_CLIENT_KEY:
+ list_join: ['.', ['client', {get_param: CephClientUserName}]]
+ ceph::profile::params::manage_repo: false
+ # FIXME(gfidente): we should not have to list the packages explicitly in
+ # the templates, but this should stay until the following is fixed:
+ # https://bugs.launchpad.net/puppet-ceph/+bug/1629933
+ ceph::params::packages:
+ - ceph-base
+ - ceph-mon
+ - ceph-osd
service_config_settings:
- get_attr: [CephBase, role_data, service_config_settings]
+ glance_api:
+ glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
step_config: |
include ::tripleo::profile::base::ceph::client
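Two of the new knobs are operator-facing: RbdDefaultFeatures for mixed-version clusters, and the pair of parameters driving the glance_multiple_locations condition. A hedged environment sketch for an external cluster older than the Jewel clients (values illustrative):

    parameter_defaults:
      GlanceBackend: rbd
      NovaEnableRbdBackend: true
      # only needed when Jewel clients talk to an older external cluster
      RbdDefaultFeatures: '1'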
diff --git a/puppet/services/pacemaker/ceilometer-api.yaml b/puppet/services/ceph-mds.yaml
index 4b6c18f6..b68567fb 100644
--- a/puppet/services/pacemaker/ceilometer-api.yaml
+++ b/puppet/services/ceph-mds.yaml
@@ -1,7 +1,7 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
- OpenStack Ceilometer API service with Pacemaker configured with Puppet
+ Ceph MDS service.
parameters:
ServiceNetMap:
@@ -18,13 +18,15 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- MonitoringSubscriptionCeilometerApi:
- default: 'overcloud-ceilometer-api'
+ CephMdsKey:
+ description: The cephx key for the MDS service. Can be created
+ with ceph-authtool --gen-print-key.
type: string
+ hidden: true
resources:
- CeilometerServiceBase:
- type: ../ceilometer-api.yaml
+ CephBase:
+ type: ./ceph-base.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
@@ -32,14 +34,16 @@ resources:
outputs:
role_data:
- description: Role data for the Ceilometer API pacemaker role.
+ description: Role data for the Ceph MDS service.
value:
- service_name: ceilometer_api
- monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerApi}
+ service_name: ceph_mds
config_settings:
map_merge:
- - get_attr: [CeilometerServiceBase, role_data, config_settings]
- - ceilometer::api::manage_service: false
- ceilometer::api::enabled: false
+ - get_attr: [CephBase, role_data, config_settings]
+ - ceph::profile::params::mds_key: {get_param: CephMdsKey}
+ tripleo.ceph_mds.firewall_rules:
+ '112 ceph_mds':
+ dport:
+ - '6800-7300'
step_config: |
- include ::tripleo::profile::pacemaker::ceilometer::api
+ include ::tripleo::profile::base::ceph::mds
diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml
index 3471f16c..1ce58335 100644
--- a/puppet/services/ceph-mon.yaml
+++ b/puppet/services/ceph-mon.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Ceph Monitor service.
@@ -28,6 +28,12 @@ parameters:
CinderRbdPoolName:
default: volumes
type: string
+ ManilaCephFSDataPoolName:
+ default: manila_data
+ type: string
+ ManilaCephFSMetadataPoolName:
+ default: manila_metadata
+ type: string
CinderBackupRbdPoolName:
default: backups
type: string
@@ -87,6 +93,8 @@ outputs:
for_each:
<%pool%>:
- {get_param: CinderRbdPoolName}
+ - {get_param: ManilaCephFSDataPoolName}
+ - {get_param: ManilaCephFSMetadataPoolName}
- {get_param: CinderBackupRbdPoolName}
- {get_param: NovaRbdPoolName}
- {get_param: GlanceRbdPoolName}
@@ -105,3 +113,27 @@ outputs:
get_attr: [CephBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceph::mon
+ upgrade_batch_tasks:
+ # Note we perform these tasks in list order, but they are all step0 so
+ # we can perform a rolling upgrade of all mon nodes in step0, then a
+ # rolling upgrade of all osd nodes in step1
+ - name: Check status
+ tags: step0,validation
+ shell: ceph health | grep -qv HEALTH_ERR
+ # FIXME(shardy) I suspect we can use heat or ansible facts here instead?
+ - name: Get hostname
+ tags: step0
+ shell: hostname -s
+ register: mon_id
+ - name: Stop Ceph Mon
+ tags: step0
+ service: name=ceph-mon@{{mon_id.stdout}} pattern=ceph-mon state=stopped
+ - name: Update ceph packages
+ tags: step0
+ yum: name=ceph-mon state=latest
+ - name: Start ceph-mon service
+ tags: step0
+ service: name=ceph-mon@{{mon_id.stdout}} state=started
+ - name: ceph osd crush tunables default
+ tags: step0
+ shell: ceph osd crush tunables default
diff --git a/puppet/services/ceph-osd.yaml b/puppet/services/ceph-osd.yaml
index f6378720..98f83d08 100644
--- a/puppet/services/ceph-osd.yaml
+++ b/puppet/services/ceph-osd.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Ceph OSD service.
@@ -45,3 +45,47 @@ outputs:
- '6800-7300'
step_config: |
include ::tripleo::profile::base::ceph::osd
+ upgrade_batch_tasks:
+ - name: Check status
+ tags: step1,validation
+ shell: ceph health | grep -qv HEALTH_ERR
+ - name: Get OSD IDs
+ tags: step1
+ shell: ls /var/lib/ceph/osd | awk 'BEGIN { FS = "-" } ; { print $2 }'
+ register: osd_ids
+ # "so that mirrors aren't rebalanced as if the OSD died" - gfidente / leseb
+ - name: ceph osd set noout
+ tags: step1
+ command: ceph osd set noout
+ - name: ceph osd set norebalance
+ tags: step1
+ command: ceph osd set norebalance
+ - name: ceph osd set nodeep-scrub
+ tags: step1
+ command: ceph osd set nodeep-scrub
+ - name: ceph osd set noscrub
+ tags: step1
+ command: ceph osd set noscrub
+ - name: Stop Ceph OSD
+ tags: step1
+ service: name=ceph-osd@{{ item }} state=stopped
+ with_items: "{{osd_ids.stdout.strip().split()}}"
+ - name: Update ceph OSD packages
+ tags: step1
+ yum: name=ceph-osd state=latest
+ - name: Start ceph-osd service
+ tags: step1
+ service: name=ceph-osd@{{ item }} state=started
+ with_items: "{{osd_ids.stdout.strip().split()}}"
+ - name: ceph osd unset noout
+ tags: step1
+ command: ceph osd unset noout
+ - name: ceph osd unset norebalance
+ tags: step1
+ command: ceph osd unset norebalance
+ - name: ceph osd unset nodeep-scrub
+ tags: step1
+ command: ceph osd unset nodeep-scrub
+ - name: ceph osd unset noscrub
+ tags: step1
+ command: ceph osd unset noscrub
diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml
index 18a4b780..83339f2b 100644
--- a/puppet/services/ceph-rgw.yaml
+++ b/puppet/services/ceph-rgw.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Ceph RadosGW service.
@@ -55,15 +55,13 @@ outputs:
- tripleo::profile::base::ceph::rgw::rgw_key: {get_param: CephRgwKey}
tripleo::profile::base::ceph::rgw::keystone_admin_token: {get_param: AdminToken}
tripleo::profile::base::ceph::rgw::keystone_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- ceph::profile::params::frontend_type: 'civetweb'
- ceph_rgw_civetweb_bind_address: {get_param: [ServiceNetMap, CephRgwNetwork]}
- ceph::profile::params::rgw_frontends:
- list_join:
- - ''
- - - 'civetweb port='
- - '%{hiera("ceph_rgw_civetweb_bind_address")}'
- - ':'
- - {get_param: [EndpointMap, CephRgwInternal, port]}
+ tripleo::profile::base::ceph::rgw::civetweb_bind_ip: {get_param: [ServiceNetMap, CephRgwNetwork]}
+ tripleo::profile::base::ceph::rgw::civetweb_bind_port: {get_param: [EndpointMap, CephRgwInternal, port]}
+ tripleo::profile::base::ceph::rgw::rgw_keystone_version: v3
+ ceph::profile::params::rgw_keystone_admin_domain: default
+ ceph::profile::params::rgw_keystone_admin_project: service
+ ceph::profile::params::rgw_keystone_admin_user: swift
+ ceph::profile::params::rgw_keystone_admin_password: {get_param: SwiftPassword}
tripleo.ceph_rgw.firewall_rules:
'122 ceph rgw':
dport: {get_param: [EndpointMap, CephRgwInternal, port]}
@@ -74,6 +72,19 @@ outputs:
ceph::rgw::keystone::auth::public_url: {get_param: [EndpointMap, CephRgwPublic, uri]}
ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]}
ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]}
- ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion}
- ceph::rgw::keystone::auth::tenant: 'service'
+ ceph::rgw::keystone::auth::roles: [ 'admin', 'member', '_member_' ]
+ ceph::rgw::keystone::auth::tenant: service
+ ceph::rgw::keystone::auth::user: swift
+ ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
+ upgrade_tasks:
+ - name: Gather RGW instance ID
+ tags: step0
+ shell: hiera -c /etc/puppet/hiera.yaml ceph::profile::params::rgw_name radosgw.gateway
+ register: rgw_id
+ - name: Check status
+ shell: /usr/bin/systemctl show ceph-radosgw@{{rgw_id.stdout}} --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop RGW instance
+ tags: step1
+ service: name=ceph-radosgw@{{rgw_id.stdout}} state=stopped
diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml
index 3c624e3a..bc5f080d 100644
--- a/puppet/services/cinder-api.yaml
+++ b/puppet/services/cinder-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Cinder API service configured with Puppet
@@ -42,7 +42,10 @@ parameters:
CinderWorkers:
type: string
description: Set the number of workers for cinder::wsgi::apache
- default: '"%{::os_workers}"'
+ default: '%{::os_workers}'
+ EnableInternalTLS:
+ type: boolean
+ default: false
conditions:
cinder_workers_zero: {equals : [{get_param: CinderWorkers}, 0]}
@@ -55,6 +58,7 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
CinderBase:
type: ./cinder-base.yaml
@@ -87,27 +91,32 @@ outputs:
cinder::config:
DEFAULT/swift_catalog_info:
value: 'object-store:swift:internalURL'
- cinder::glance::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
tripleo::profile::base::cinder::cinder_enable_db_purge: {get_param: CinderEnableDBPurge}
tripleo.cinder_api.firewall_rules:
'119 cinder':
dport:
- 8776
- 13776
+ cinder::api::bind_host:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, CinderApiNetwork]}
+ cinder::wsgi::apache::ssl: {get_param: EnableInternalTLS}
+ cinder::api::service_name: 'httpd'
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- cinder::api::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]}
- cinder::wsgi::apache::ssl: false
cinder::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]}
cinder::wsgi::apache::servername:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
- $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ $NETWORK: {get_param: [ServiceNetMap, CinderApiNetwork]}
-
if:
- cinder_workers_zero
@@ -137,3 +146,21 @@ outputs:
cinder::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running"
+ shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: check for cinder running under apache (post upgrade)
+ tags: step2
+ shell: "apachectl -t -D DUMP_VHOSTS | grep -q cinder"
+ register: cinder_apache
+ ignore_errors: true
+ - name: Stop cinder_api service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped
+ when: "cinder_apache.rc == 0"
+ - name: Stop and disable cinder_api service (pre-upgrade not under httpd)
+ tags: step2
+ service: name=openstack-cinder-api state=stopped enabled=no
diff --git a/puppet/services/cinder-backup.yaml b/puppet/services/cinder-backup.yaml
index 80795457..14be07af 100644
--- a/puppet/services/cinder-backup.yaml
+++ b/puppet/services/cinder-backup.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Cinder Backup service configured with Puppet
diff --git a/puppet/services/cinder-base.yaml b/puppet/services/cinder-base.yaml
index 59c9b844..a5d7fcf1 100644
--- a/puppet/services/cinder-base.yaml
+++ b/puppet/services/cinder-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Cinder base service. Shared by all Cinder services.
@@ -44,6 +44,46 @@ parameters:
default: guest
description: The username for RabbitMQ
type: string
+ CinderCronDbPurgeMinute:
+ type: string
+ description: >
+ Cron to purge deleted rows from the Cinder database - Minute
+ default: '1'
+ CinderCronDbPurgeHour:
+ type: string
+ description: >
+ Cron to purge deleted rows from the Cinder database - Hour
+ default: '0'
+ CinderCronDbPurgeMonthday:
+ type: string
+ description: >
+ Cron to purge deleted rows from the Cinder database - Month Day
+ default: '*'
+ CinderCronDbPurgeMonth:
+ type: string
+ description: >
+ Cron to purge deleted rows from the Cinder database - Month
+ default: '*'
+ CinderCronDbPurgeWeekday:
+ type: string
+ description: >
+ Cron to purge deleted rows from the Cinder database - Week Day
+ default: '*'
+ CinderCronDbPurgeUser:
+ type: string
+ description: >
+ Cron to purge deleted rows from the Cinder database - User
+ default: 'keystone'
+ CinderCronDbPurgeAge:
+ type: string
+ description: >
+ Cron to purge deleted rows from the Cinder database - Age
+ default: '0'
+ CinderCronDbPurgeDestination:
+ type: string
+ description: >
+ Cron to purge deleted rows from the Cinder database - Log destination
+ default: '/var/log/cinder/cinder-rowsflush.log'
outputs:
role_data:
@@ -60,6 +100,8 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/cinder'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
cinder::debug: {get_param: Debug}
cinder::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
cinder::rabbit_userid: {get_param: RabbitUserName}
@@ -69,3 +111,12 @@ outputs:
cinder::cron::db_purge::destination: '/dev/null'
cinder::db::database_db_max_retries: -1
cinder::db::database_max_retries: -1
+ cinder::cron::db_purge::minute: {get_param: CinderCronDbPurgeMinute}
+ cinder::cron::db_purge::hour: {get_param: CinderCronDbPurgeHour}
+ cinder::cron::db_purge::monthday: {get_param: CinderCronDbPurgeMonthday}
+ cinder::cron::db_purge::month: {get_param: CinderCronDbPurgeMonth}
+ cinder::cron::db_purge::weekday: {get_param: CinderCronDbPurgeWeekday}
+ cinder::cron::db_purge::user: {get_param: CinderCronDbPurgeUser}
+ cinder::cron::db_purge::age: {get_param: CinderCronDbPurgeAge}
+ cinder::cron::db_purge::destination: {get_param: CinderCronDbPurgeDestination}
+ cinder::glance::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
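The new CinderCronDbPurge* parameters expose the whole crontab entry behind Cinder's database purge. A sketch of overriding the schedule; values are illustrative and the age is in days:

    parameter_defaults:
      CinderCronDbPurgeAge: '30'       # purge rows soft-deleted more than 30 days ago
      CinderCronDbPurgeMinute: '15'
      CinderCronDbPurgeHour: '3'
      CinderCronDbPurgeDestination: '/var/log/cinder/cinder-rowsflush.log'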
diff --git a/puppet/services/cinder-hpelefthand-iscsi.yaml b/puppet/services/cinder-hpelefthand-iscsi.yaml
new file mode 100644
index 00000000..ca7d2838
--- /dev/null
+++ b/puppet/services/cinder-hpelefthand-iscsi.yaml
@@ -0,0 +1,56 @@
+heat_template_version: ocata
+
+description: >
+ Configure Cinder HPELeftHandISCSIDriver
+
+parameters:
+ # Config specific parameters, to be provided via parameter_defaults
+ CinderHPELeftHandISCSIApiUrl:
+ type: string
+ CinderHPELeftHandISCSIUserName:
+ type: string
+ CinderHPELeftHandISCSIPassword:
+ type: string
+ hidden: true
+ CinderHPELeftHandISCSIBackendName:
+ type: string
+ default: 'tripleo_hpelefthand'
+ CinderHPELeftHandISCSIChapEnabled:
+ type: boolean
+ default: false
+ CinderHPELeftHandClusterName:
+ type: string
+ CinderHPELeftHandDebug:
+ type: boolean
+ default: false
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+
+outputs:
+ role_data:
+ description: Role data for Cinder HPELeftHandISCSIDriver
+ value:
+ service_name: cinder_hpelefthand_iscsi
+ config_settings:
+ tripleo::profile::base::cinder::volume::cinder_enable_hpelefthand_backend: true
+ cinder::backend::hpelefthand_iscsi::hpelefthand_api_url: {get_param: CinderHPELeftHandISCSIApiUrl}
+ cinder::backend::hpelefthand_iscsi::hpelefthand_username: {get_param: CinderHPELeftHandISCSIUserName}
+ cinder::backend::hpelefthand_iscsi::hpelefthand_password: {get_param: CinderHPELeftHandISCSIPassword}
+ cinder::backend::hpelefthand_iscsi::volume_backend_name: {get_param: CinderHPELeftHandISCSIBackendName}
+ cinder::backend::hpelefthand_iscsi::hpelefthand_iscsi_chap_enabled: {get_param: CinderHPELeftHandISCSIChapEnabled}
+ cinder::backend::hpelefthand_iscsi::hpelefthand_clustername: {get_param: CinderHPELeftHandClusterName}
+ cinder::backend::hpelefthand_iscsi::hpelefthand_debug: {get_param: CinderHPELeftHandDebug}
+ step_config: |
+ include ::tripleo::profile::base::cinder::volume
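To actually use the new HPE LeftHand backend, the service template has to be mapped into the deployment and the parameters above that have no default must be supplied. A hedged sketch; the registry key name and all values are hypothetical:

    resource_registry:
      # registry key name assumed for illustration
      OS::TripleO::Services::CinderHPELeftHandISCSI: ../puppet/services/cinder-hpelefthand-iscsi.yaml

    parameter_defaults:
      CinderHPELeftHandISCSIApiUrl: 'https://lefthand.example.com:8081/lhos'
      CinderHPELeftHandISCSIUserName: 'lhuser'
      CinderHPELeftHandISCSIPassword: 'SECRET'
      CinderHPELeftHandClusterName: 'lh-cluster-1'
      CinderHPELeftHandISCSIChapEnabled: true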
diff --git a/puppet/services/cinder-scheduler.yaml b/puppet/services/cinder-scheduler.yaml
index 94c263ea..f102810e 100644
--- a/puppet/services/cinder-scheduler.yaml
+++ b/puppet/services/cinder-scheduler.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Cinder Scheduler service configured with Puppet
@@ -51,3 +51,10 @@ outputs:
- cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler
step_config: |
include ::tripleo::profile::base::cinder::scheduler
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-cinder-scheduler is running"
+ shell: /usr/bin/systemctl show 'openstack-cinder-scheduler' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop cinder_scheduler service
+ tags: step2
+ service: name=openstack-cinder-scheduler state=stopped
diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml
index 82e16f39..dd1d3833 100644
--- a/puppet/services/cinder-volume.yaml
+++ b/puppet/services/cinder-volume.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Cinder Volume service configured with Puppet
@@ -20,6 +20,10 @@ parameters:
default: lioadm
description: The iSCSI helper to use with cinder.
type: string
+ CinderISCSIProtocol:
+ default: iscsi
+ description: Whether to use TCP ('iscsi') or iSER RDMA ('iser') for iSCSI
+ type: string
CinderLVMLoopDeviceSize:
default: 10280
description: The size of the loopback file used by the cinder LVM driver.
@@ -97,6 +101,7 @@ outputs:
SERVERS: {get_param: CinderNfsServers}
tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
+ tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
tripleo::profile::base::cinder::volume::rbd::cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name: {get_param: CephClientUserName}
tripleo.cinder_volume.firewall_rules:
@@ -110,3 +115,13 @@ outputs:
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_address: {get_param: [ServiceNetMap, CinderIscsiNetwork]}
step_config: |
include ::tripleo::profile::base::cinder::volume
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-cinder-volume is running"
+ shell: /usr/bin/systemctl show 'openstack-cinder-volume' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop cinder_volume service
+ tags: step2
+ service: name=openstack-cinder-volume state=stopped
+ - name: Sync cinder_volume DB
+ tags: step5
+ command: cinder-manage db sync
diff --git a/puppet/services/database/mongodb-base.yaml b/puppet/services/database/mongodb-base.yaml
index 3f4f106d..c27fcb7f 100644
--- a/puppet/services/database/mongodb-base.yaml
+++ b/puppet/services/database/mongodb-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Configuration details for MongoDB service using composable roles
diff --git a/puppet/services/database/mongodb.yaml b/puppet/services/database/mongodb.yaml
index 01daeafe..63ec4446 100644
--- a/puppet/services/database/mongodb.yaml
+++ b/puppet/services/database/mongodb.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
MongoDb service deployment using puppet
@@ -66,3 +66,10 @@ outputs:
mongodb::server::bind_ip: {get_param: [ServiceNetMap, MongodbNetwork]}
step_config: |
include ::tripleo::profile::base::database::mongodb
+ upgrade_tasks:
+ - name: Stop mongodb service
+ tags: step2
+ service: name=mongod state=stopped
+ - name: Start mongodb service
+ tags: step4
+ service: name=mongod state=started
diff --git a/puppet/services/database/mysql-internal-tls-certmonger.yaml b/puppet/services/database/mysql-internal-tls-certmonger.yaml
new file mode 100644
index 00000000..9f7eaf57
--- /dev/null
+++ b/puppet/services/database/mysql-internal-tls-certmonger.yaml
@@ -0,0 +1,47 @@
+heat_template_version: ocata
+
+description: >
+ MySQL configurations for using TLS via certmonger.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ # The following parameters are not needed by the template but are
+ # required to pass the pep8 tests
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: MySQL configurations for using TLS via certmonger.
+ value:
+ service_name: mysql_internal_tls_certmonger
+ config_settings:
+ generate_service_certificates: true
+ tripleo::profile::base::database::mysql::certificate_specs:
+ service_certificate: '/etc/pki/tls/certs/mysql.crt'
+ service_key: '/etc/pki/tls/private/mysql.key'
+ hostname:
+ str_replace:
+ template: "%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ principal:
+ str_replace:
+ template: "mysql/%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ metadata_settings:
+ - service: mysql
+ network: {get_param: [ServiceNetMap, MysqlNetwork]}
+ type: vip
diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml
index 094a7c9f..8c4042d9 100644
--- a/puppet/services/database/mysql.yaml
+++ b/puppet/services/database/mysql.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
MySQL service deployment using puppet
@@ -35,50 +35,75 @@ parameters:
description: Whether to use Galera instead of regular MariaDB.
type: boolean
+resources:
+
+ MySQLTLS:
+ type: OS::TripleO::Services::MySQLTLS
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+
outputs:
role_data:
description: Service MySQL using composable services.
value:
service_name: mysql
config_settings:
- # The Galera package should work in cluster and
- # non-cluster modes based on the config file.
- # We set the package name here explicitly so
- # that it matches what we pre-install
- # in tripleo-puppet-elements.
- mysql::server::package_name: 'mariadb-galera-server'
- mysql::server::manage_config_file: true
- tripleo.mysql.firewall_rules:
- '104 mysql galera':
- dport:
- - 873
- - 3306
- - 4444
- - 4567
- - 4568
- - 9200
- mysql_max_connections: {get_param: MysqlMaxConnections}
- mysql::server::root_password:
- yaql:
- expression: $.data.passwords.where($ != '').first()
- data:
- passwords:
- - {get_param: MysqlRootPassword}
- - {get_param: [DefaultPasswords, mysql_root_password]}
- mysql_clustercheck_password: {get_param: MysqlClustercheckPassword}
- enable_galera: {get_param: EnableGalera}
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
- # internal_api -> IP
- # internal_api_uri -> [IP]
- # internal_api_subnet - > IP/CIDR
- mysql_bind_host: {get_param: [ServiceNetMap, MysqlNetwork]}
- tripleo::profile::base::database::mysql::bind_address:
- str_replace:
- template:
- '"%{::fqdn_$NETWORK}"'
- params:
- $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ map_merge:
+ - get_attr: [MySQLTLS, role_data, config_settings]
+ -
+ # The Galera package should work in cluster and
+ # non-cluster modes based on the config file.
+ # We set the package name here explicitly so
+ # that it matches what we pre-install
+ # in tripleo-puppet-elements.
+ mysql::server::package_name: 'mariadb-galera-server'
+ mysql::server::manage_config_file: true
+ tripleo.mysql.firewall_rules:
+ '104 mysql galera':
+ dport:
+ - 873
+ - 3306
+ - 4444
+ - 4567
+ - 4568
+ - 9200
+ mysql_max_connections: {get_param: MysqlMaxConnections}
+ mysql::server::root_password:
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: MysqlRootPassword}
+ - {get_param: [DefaultPasswords, mysql_root_password]}
+ mysql_clustercheck_password: {get_param: MysqlClustercheckPassword}
+ enable_galera: {get_param: EnableGalera}
+ # NOTE: bind IP is found in Heat replacing the network name with the
+ # local node IP for the given network; replacement examples
+ # (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet - > IP/CIDR
+ mysql_bind_host: {get_param: [ServiceNetMap, MysqlNetwork]}
+ tripleo::profile::base::database::mysql::bind_address:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ tripleo::profile::base::database::mysql::client_bind_address:
+ {get_param: [ServiceNetMap, MysqlNetwork]}
step_config: |
include ::tripleo::profile::base::database::mysql
+ upgrade_tasks:
+ - name: Check for galera root password
+ tags: step0
+ file: path=/root/.my.cnf state=file
+ - name: Stop service
+ tags: step2
+ service: name=mariadb state=stopped
+ - name: Start service
+ tags: step4
+ service: name=mariadb state=started
+ metadata_settings:
+ get_attr: [MySQLTLS, role_data, metadata_settings]
+
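Unrelated to the TLS wiring itself, the root-password yaql picker (unchanged here, just re-indented under map_merge) takes the first non-empty entry, so an operator-set MysqlRootPassword wins over the generated default. A standalone sketch, assuming the operator left MysqlRootPassword blank:

    mysql::server::root_password:
      yaql:
        expression: $.data.passwords.where($ != '').first()
        data:
          passwords:
            - ''                    # MysqlRootPassword (unset)
            - 'generated-default'   # DefaultPasswords, mysql_root_password
    # evaluates to: generated-default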
diff --git a/puppet/services/database/redis-base.yaml b/puppet/services/database/redis-base.yaml
index 4ed3c007..2b7dd430 100644
--- a/puppet/services/database/redis-base.yaml
+++ b/puppet/services/database/redis-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Redis service configured with Puppet
@@ -39,6 +39,6 @@ outputs:
# internal_api_subnet - > IP/CIDR
redis::bind: {get_param: [ServiceNetMap, RedisNetwork]}
redis::port: 6379
- redis::sentinel::master_name: '"%{hiera(\"bootstrap_nodeid\")}"'
- redis::sentinel::redis_host: '"%{hiera(\"bootstrap_nodeid_ip\")}"'
+ redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
+ redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
diff --git a/puppet/services/database/redis.yaml b/puppet/services/database/redis.yaml
index 1c333b97..5ea25ca8 100644
--- a/puppet/services/database/redis.yaml
+++ b/puppet/services/database/redis.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Redis service configured with Puppet
diff --git a/puppet/services/disabled/glance-registry.yaml b/puppet/services/disabled/glance-registry.yaml
new file mode 100644
index 00000000..4d22bddc
--- /dev/null
+++ b/puppet/services/disabled/glance-registry.yaml
@@ -0,0 +1,30 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Glance Registry service, disabled since ocata
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the disabled Glance Registry role.
+ value:
+ service_name: glance_registry
+ upgrade_tasks:
+ - name: Stop and disable glance_registry service on upgrade
+ tags: step2
+ service: name=openstack-glance-registry state=stopped enabled=no
diff --git a/puppet/services/ec2-api.yaml b/puppet/services/ec2-api.yaml
new file mode 100644
index 00000000..7049d773
--- /dev/null
+++ b/puppet/services/ec2-api.yaml
@@ -0,0 +1,118 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack EC2-API service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ Ec2ApiWorkers:
+ default: 0
+ description: Number of workers for EC2-API service.
+ type: number
+ Ec2ApiPassword:
+ description: The password for the ec2-api service and db account.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ MonitoringSubscriptionEc2Api:
+ default: 'overcloud-ec2-api'
+ type: string
+ Ec2ApiLoggingSource:
+ type: json
+ default:
+ tag: openstack.ec2.api
+ path: /var/log/ec2api/ec2api.log
+ EnablePackageInstall:
+ default: 'false'
+ description: Set to true to enable package installation via Puppet
+ type: boolean
+
+
+conditions:
+ nova_workers_zero: {equals : [{get_param: Ec2ApiWorkers}, 0]}
+
+outputs:
+ role_data:
+ description: Role data for the EC2-API service.
+ value:
+ service_name: ec2_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionEc2Api}
+ logging_source: {get_param: Ec2ApiLoggingSource}
+ logging_groups:
+ - nova
+ config_settings:
+ map_merge:
+ - tripleo.ec2_api.firewall_rules:
+ '113 ec2_api':
+ dport:
+ - 8788
+ - 13788
+ ec2api::keystone::authtoken::project_name: 'service'
+ ec2api::keystone::authtoken::password: {get_param: Ec2ApiPassword}
+ ec2api::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ec2api::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ ec2api::api::enabled: true
+ ec2api::package_manage: {get_param: EnablePackageInstall}
+ ec2api::api::ec2api_listen:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, Ec2ApiNetwork]}
+ ec2api::metadata::metadata_listen:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, Ec2ApiMetadataNetwork]}
+ ec2api::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://ec2_api:'
+ - {get_param: Ec2ApiPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/ec2_api'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ -
+ if:
+ - nova_workers_zero
+ - {}
+ - ec2api::api::ec2api_workers: {get_param: Ec2ApiWorkers}
+ ec2api::metadata::metadata_workers: {get_param: Ec2ApiWorkers}
+ step_config: |
+ include tripleo::profile::base::nova::ec2api
+ service_config_settings:
+ keystone:
+ ec2api::keystone::auth::tenant: 'service'
+ ec2api::keystone::auth::public_url: {get_param: [EndpointMap, Ec2ApiPublic, uri]}
+ ec2api::keystone::auth::internal_url: {get_param: [EndpointMap, Ec2ApiInternal, uri]}
+ ec2api::keystone::auth::admin_url: {get_param: [EndpointMap, Ec2ApiAdmin, uri]}
+ ec2api::keystone::auth::password: {get_param: Ec2ApiPassword}
+ ec2api::keystone::auth::region: {get_param: KeystoneRegion}
+ mysql:
+ ec2api::db::mysql::password: {get_param: Ec2ApiPassword}
+ ec2api::db::mysql::user: ec2_api
+ ec2api::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ ec2api::db::mysql::dbname: ec2_api
+ ec2api::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/etcd.yaml b/puppet/services/etcd.yaml
new file mode 100644
index 00000000..f96fa723
--- /dev/null
+++ b/puppet/services/etcd.yaml
@@ -0,0 +1,58 @@
+heat_template_version: ocata
+
+description: >
+ Etcd service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ EtcdInitialClusterToken:
+ default: 'etcd-tripleo'
+ description: Initial cluster token for the etcd cluster during bootstrap.
+ type: string
+ MonitoringSubscriptionEtcd:
+ default: 'overcloud-etcd'
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the Etcd role.
+ value:
+ service_name: etcd
+ monitoring_subscription: {get_param: MonitoringSubscriptionEtcd}
+ config_settings:
+ etcd::etcd_name:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet - > IP/CIDR
+ tripleo::profile::base::etcd::bind_ip: {get_param: [ServiceNetMap, EtcdNetwork]}
+ tripleo::profile::base::etcd::client_port: '2379'
+ tripleo::profile::base::etcd::peer_port: '2380'
+ etcd::initial_cluster_token: {get_param: EtcdInitialClusterToken}
+ etcd::manage_package: false
+ tripleo.etcd.firewall_rules:
+ '141 etcd':
+ dport:
+ - 2379
+ - 2380
+ step_config: |
+ include ::tripleo::profile::base::etcd
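For illustration, if ServiceNetMap maps EtcdNetwork to internal_api, the settings above resolve roughly as in the following sketch; the network name and the address are assumed example values, and the bind IP is the node's address on that network as described in the NOTE above.

    # hypothetical resolved hieradata for one node, assuming
    # ServiceNetMap: {EtcdNetwork: internal_api}
    etcd::etcd_name: "%{::fqdn_internal_api}"
    tripleo::profile::base::etcd::bind_ip: 192.0.2.10   # example internal_api address
    tripleo::profile::base::etcd::client_port: '2379'
    tripleo::profile::base::etcd::peer_port: '2380'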
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index 33abdbf9..b49b29f7 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Glance API service configured with Puppet
@@ -45,8 +45,23 @@ parameters:
default:
tag: openstack.glance.api
path: /var/log/glance/api.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
resources:
+
+ TLSProxyBase:
+ type: OS::TripleO::Services::TLSProxyBase
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
GlanceBase:
type: ./glance-base.yaml
properties:
@@ -66,6 +81,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [GlanceBase, role_data, config_settings]
+ - get_attr: [TLSProxyBase, role_data, config_settings]
- glance::api::database_connection:
list_join:
- ''
@@ -75,15 +91,13 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/glance'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
glance::api::bind_port: {get_param: [EndpointMap, GlanceInternal, port]}
glance::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
glance::api::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
- glance::api::registry_host:
- str_replace:
- template: "'REGISTRY_HOST'"
- params:
- REGISTRY_HOST: {get_param: [EndpointMap, GlanceRegistryInternal, host]}
- glance::api::registry_client_protocol: {get_param: [EndpointMap, GlanceRegistryInternal, protocol] }
+ glance::api::enable_v1_api: false
+ glance::api::enable_v2_api: true
glance::api::authtoken::password: {get_param: GlancePassword}
glance::api::enable_proxy_headers_parsing: true
glance::api::debug: {get_param: Debug}
@@ -102,8 +116,37 @@ outputs:
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- glance::api::bind_host: {get_param: [ServiceNetMap, GlanceApiNetwork]}
+ tripleo::profile::base::glance::api::tls_proxy_bind_ip:
+ get_param: [ServiceNetMap, GlanceApiNetwork]
+ tripleo::profile::base::glance::api::tls_proxy_fqdn:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, GlanceApiNetwork]}
+ tripleo::profile::base::glance::api::tls_proxy_port:
+ get_param: [EndpointMap, GlanceInternal, port]
+ # Bind to localhost if internal TLS is enabled, since we put a TLS
+ # proxy in front.
+ glance::api::bind_host:
+ if:
+ - use_tls_proxy
+ - 'localhost'
+ - {get_param: [ServiceNetMap, GlanceApiNetwork]}
step_config: |
include ::tripleo::profile::base::glance::api
service_config_settings:
get_attr: [GlanceBase, role_data, service_config_settings]
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-glance-api is running"
+ shell: /usr/bin/systemctl show 'openstack-glance-api' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop glance_api service
+ tags: step2
+ service: name=openstack-glance-api state=stopped
+ - name: Stop and disable glance registry (removed for Ocata)
+ tags: step2
+ service: name=openstack-glance-registry state=stopped enabled=no
+ - name: Sync glance_api DB
+ tags: step5
+ command: glance-manage --config-file=/etc/glance/glance-api.conf db_sync
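For illustration, the database_connection list_join above renders to a single URI whose bind_address query argument is left as a hiera interpolation and resolved on the node; the protocol, host and password below are assumed example values:

    # hypothetical rendered value (protocol, host and password really come
    # from EndpointMap and GlancePassword)
    glance::api::database_connection: mysql+pymysql://glance:EXAMPLEPASS@192.0.2.20/glance?bind_address=%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}

Likewise, glance::api::bind_host ends up as 'localhost' when EnableInternalTLS is true (requests then reach Glance through the TLS proxy) and as the GlanceApiNetwork address otherwise.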
diff --git a/puppet/services/glance-base.yaml b/puppet/services/glance-base.yaml
index 3294fc0f..f5548982 100644
--- a/puppet/services/glance-base.yaml
+++ b/puppet/services/glance-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Glance Common settings with Puppet
@@ -44,6 +44,21 @@ parameters:
type: string
constraints:
- allowed_values: ['swift', 'file', 'rbd']
+ GlanceNfsEnabled:
+ default: false
+ description: >
+ When using GlanceBackend 'file', mount NFS share for image storage.
+ type: boolean
+ GlanceNfsShare:
+ default: ''
+ description: >
+ NFS share to mount for image storage (when GlanceNfsEnabled is true)
+ type: string
+ GlanceNfsOptions:
+ default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
+ description: >
+ NFS mount options for image storage (when GlanceNfsEnabled is true)
+ type: string
GlanceRbdPoolName:
default: images
type: string
@@ -90,8 +105,9 @@ outputs:
glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
glance::notify::rabbitmq::notification_driver: messagingv2
- glance::registry::db::database_db_max_retries: -1
- glance::registry::db::database_max_retries: -1
+ tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
+ tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
+ tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
service_config_settings:
keystone:
glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
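A minimal environment-file sketch for the new NFS parameters, assuming an example export path, could look like this:

    parameter_defaults:
      GlanceBackend: file
      GlanceNfsEnabled: true
      GlanceNfsShare: 192.0.2.1:/export/glance
      # GlanceNfsOptions can be left at its default, which keeps the
      # glance_var_lib_t SELinux context on the mounted share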
diff --git a/puppet/services/glance-registry.yaml b/puppet/services/glance-registry.yaml
deleted file mode 100644
index c45582d4..00000000
--- a/puppet/services/glance-registry.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Glance Registry service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- Debug:
- default: ''
- description: Set to True to enable debugging on all services.
- type: string
- GlancePassword:
- description: The password for the glance service and db account, used by the glance services.
- type: string
- hidden: true
- GlanceWorkers:
- default: ''
- description: |
- Number of worker processes for glance registry. If left unset (empty
- string), the default value will result in the configuration being left
- unset and a system-dependent default value will be chosen (e.g.: number of
- processors). Please note that this will create a large number of processes
- on systems with a large number of CPUs resulting in excess memory
- consumption. It is recommended that a suitable non-default value be
- selected on such systems.
- type: string
- MonitoringSubscriptionGlanceRegistry:
- default: 'overcloud-glance-registry'
- type: string
- GlanceRegistryLoggingSource:
- type: json
- default:
- tag: openstack.glance.registry
- path: /var/log/glance/registry.log
-
-resources:
- GlanceBase:
- type: ./glance-base.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Glance Registry role.
- value:
- service_name: glance_registry
- monitoring_subscription: {get_param: MonitoringSubscriptionGlanceRegistry}
- logging_source: {get_param: GlanceRegistryLoggingSource}
- logging_groups:
- - glance
- config_settings:
- map_merge:
- - get_attr: [GlanceBase, role_data, config_settings]
-
- - glance::registry::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://glance:'
- - {get_param: GlancePassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/glance'
- glance::registry::authtoken::password: {get_param: GlancePassword}
- glance::registry::authtoken::project_name: 'service'
- glance::registry::pipeline: 'keystone'
- glance::registry::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- glance::registry::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
- glance::registry::debug: {get_param: Debug}
- glance::registry::workers: {get_param: GlanceWorkers}
- tripleo.glance_registry.firewall_rules:
- '112 glance_registry':
- dport:
- - 9191
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
- # internal_api -> IP
- # internal_api_uri -> [IP]
- # internal_api_subnet - > IP/CIDR
- glance::registry::bind_host: {get_param: [ServiceNetMap, GlanceRegistryNetwork]}
- step_config: |
- include ::tripleo::profile::base::glance::registry
- service_config_settings:
- get_attr: [GlanceBase, role_data, config_settings]
diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml
index e3397769..2aea29fd 100644
--- a/puppet/services/gnocchi-api.yaml
+++ b/puppet/services/gnocchi-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Gnocchi service configured with Puppet
@@ -41,6 +41,9 @@ parameters:
default:
tag: openstack.gnocchi.api
path: /var/log/gnocchi/app.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
@@ -57,6 +60,7 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
role_data:
@@ -83,11 +87,11 @@ outputs:
gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword}
gnocchi::keystone::authtoken::project_name: 'service'
- gnocchi::wsgi::apache::ssl: false
+ gnocchi::wsgi::apache::ssl: {get_param: EnableInternalTLS}
gnocchi::wsgi::apache::servername:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
tripleo::profile::base::gnocchi::api::gnocchi_backend: {get_param: GnocchiBackend}
@@ -98,7 +102,13 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
- gnocchi::api::host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
+ gnocchi::wsgi::apache::wsgi_process_display_name: 'gnocchi_wsgi'
+ gnocchi::api::host:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
@@ -121,3 +131,15 @@ outputs:
gnocchi::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-gnocchi-api is running"
+ shell: /usr/bin/systemctl show 'openstack-gnocchi-api' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop gnocchi_api service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped
+ - name: Run gnocchi upgrade
+ tags: step5
+ command: gnocchi-upgrade
diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml
index 556baae0..8fddae4b 100644
--- a/puppet/services/gnocchi-base.yaml
+++ b/puppet/services/gnocchi-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Gnocchi service configured with Puppet
@@ -67,7 +67,9 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/gnocchi'
- gnocchi::db::sync::extra_opts: '--skip-storage --create-legacy-resource-types'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ gnocchi::db::sync::extra_opts: '--skip-storage'
gnocchi::storage::swift::swift_user: 'service:gnocchi'
gnocchi::storage::swift::swift_auth_version: 2
gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword}
diff --git a/puppet/services/gnocchi-metricd.yaml b/puppet/services/gnocchi-metricd.yaml
index 1400bc98..1337b0cb 100644
--- a/puppet/services/gnocchi-metricd.yaml
+++ b/puppet/services/gnocchi-metricd.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Gnocchi service configured with Puppet
@@ -22,7 +22,7 @@ parameters:
default: 'overcloud-gnocchi-metricd'
type: string
GnocchiMetricdWorkers:
- default: ''
+ default: '%{::os_workers}'
description: Number of workers for Gnocchi MetricD
type: string
@@ -46,3 +46,10 @@ outputs:
- gnocchi::metricd::workers: {get_param: GnocchiMetricdWorkers}
step_config: |
include ::tripleo::profile::base::gnocchi::metricd
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-gnocchi-metricd is running"
+ shell: /usr/bin/systemctl show 'openstack-gnocchi-metricd' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop gnocchi_metricd service
+ tags: step2
+ service: name=openstack-gnocchi-metricd state=stopped
diff --git a/puppet/services/gnocchi-statsd.yaml b/puppet/services/gnocchi-statsd.yaml
index 04339f46..41222a79 100644
--- a/puppet/services/gnocchi-statsd.yaml
+++ b/puppet/services/gnocchi-statsd.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Gnocchi service configured with Puppet
@@ -39,5 +39,16 @@ outputs:
config_settings:
map_merge:
- get_attr: [GnocchiServiceBase, role_data, config_settings]
+ - tripleo.gnocchi_statsd.firewall_rules:
+ '140 gnocchi-statsd':
+ dport: 8125
+ proto: 'udp'
step_config: |
include ::tripleo::profile::base::gnocchi::statsd
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-gnocchi-statsd is running"
+ shell: /usr/bin/systemctl show 'openstack-gnocchi-statsd' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop gnocchi_statsd service
+ tags: step2
+ service: name=openstack-gnocchi-statsd state=stopped
diff --git a/puppet/services/haproxy-internal-tls-certmonger.yaml b/puppet/services/haproxy-internal-tls-certmonger.yaml
index c6d53542..ae226163 100644
--- a/puppet/services/haproxy-internal-tls-certmonger.yaml
+++ b/puppet/services/haproxy-internal-tls-certmonger.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
HAProxy deployment with TLS enabled, powered by certmonger
@@ -19,6 +19,22 @@ parameters:
via parameter_defaults in the resource registry.
type: json
+resources:
+
+ HAProxyNetworks:
+ type: OS::Heat::Value
+ properties:
+ value:
+ # NOTE(jaosorior) Get unique network names to create
+ # certificates for those. We skip the tenant network since
+ # we don't need a certificate for that, and the external
+ # network will be handled in another template.
+ yaql:
+ expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
+ data:
+ map:
+ get_param: ServiceNetMap
+
outputs:
role_data:
description: Role data for the HAProxy internal TLS via certmonger role.
@@ -39,13 +55,12 @@ outputs:
postsave_cmd: "" # TODO
principal: "haproxy/%{hiera('cloud_name_NETWORK')}"
for_each:
- NETWORK:
- # NOTE(jaosorior) Get unique network names to create
- # certificates for those. We skip the tenant network since
- # we don't need a certificate for that, and the external
- # network will be handled in another template.
- yaql:
- expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
- data:
- map:
- get_param: ServiceNetMap
+ NETWORK: {get_attr: [HAProxyNetworks, value]}
+ metadata_settings:
+ repeat:
+ template:
+ - service: haproxy
+ network: $NETWORK
+ type: vip
+ for_each:
+ $NETWORK: {get_attr: [HAProxyNetworks, value]}
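For illustration, with a ServiceNetMap whose values are ctlplane, internal_api, storage, storage_mgmt, external and tenant, the shared HAProxyNetworks value and the new metadata_settings output would come out roughly as below (the ordering of distinct() is not significant; this is a sketch under that assumed map):

    HAProxyNetworks value: [ctlplane, internal_api, storage, storage_mgmt]
    metadata_settings:
      - {service: haproxy, network: ctlplane, type: vip}
      - {service: haproxy, network: internal_api, type: vip}
      - {service: haproxy, network: storage, type: vip}
      - {service: haproxy, network: storage_mgmt, type: vip}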
diff --git a/puppet/services/haproxy-public-tls-certmonger.yaml b/puppet/services/haproxy-public-tls-certmonger.yaml
index 1551d16a..6013b026 100644
--- a/puppet/services/haproxy-public-tls-certmonger.yaml
+++ b/puppet/services/haproxy-public-tls-certmonger.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
HAProxy deployment with TLS enabled, powered by certmonger
@@ -35,3 +35,7 @@ outputs:
hostname: "%{hiera('cloud_name_external')}"
postsave_cmd: "" # TODO
principal: "haproxy/%{hiera('cloud_name_external')}"
+ metadata_settings:
+ - service: haproxy
+ network: external
+ type: vip
diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml
index 0813cb7e..358698dd 100644
--- a/puppet/services/haproxy.yaml
+++ b/puppet/services/haproxy.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
HAproxy service configured with Puppet
@@ -34,16 +34,6 @@ parameters:
description: The password for Redis
type: string
hidden: true
- ControlVirtualInterface:
- default: 'br-ex'
- description: Interface where virtual ip will be assigned.
- type: string
- PublicVirtualInterface:
- default: 'br-ex'
- description: >
- Specifies the interface where the public-facing virtual ip will be assigned.
- This should be int_public when a VLAN is being used.
- type: string
MonitoringSubscriptionHaproxy:
default: 'overcloud-haproxy'
type: string
@@ -81,11 +71,25 @@ outputs:
tripleo::haproxy::haproxy_stats_user: {get_param: HAProxyStatsUser}
tripleo::haproxy::haproxy_stats_password: {get_param: HAProxyStatsPassword}
tripleo::haproxy::redis_password: {get_param: RedisPassword}
- tripleo::haproxy::control_virtual_interface: {get_param: ControlVirtualInterface}
- tripleo::haproxy::public_virtual_interface: {get_param: PublicVirtualInterface}
tripleo::profile::base::haproxy::certificates_specs:
map_merge:
- get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
- get_attr: [HAProxyInternalTLS, role_data, certificates_specs]
step_config: |
include ::tripleo::profile::base::haproxy
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service haproxy is running"
+ shell: /usr/bin/systemctl show 'haproxy' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop haproxy service
+ tags: step1
+ service: name=haproxy state=stopped
+ - name: Start haproxy service
+ tags: step4 # Needed at step 4 for mysql
+ service: name=haproxy state=started
+ metadata_settings:
+ yaql:
+ expression: '[].concat(coalesce($.data.internal, []), coalesce($.data.public, []))'
+ data:
+ public: {get_attr: [HAProxyPublicTLS, role_data, metadata_settings]}
+ internal: {get_attr: [HAProxyInternalTLS, role_data, metadata_settings]}
diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml
index 1a86ec71..7bd2fcf1 100644
--- a/puppet/services/heat-api-cfn.yaml
+++ b/puppet/services/heat-api-cfn.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Heat CloudFormation API service configured with Puppet
@@ -76,9 +76,18 @@ outputs:
include ::tripleo::profile::base::heat::api_cfn
service_config_settings:
keystone:
- heat::keystone::auth_cfn::tenant: 'service'
- heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
- heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
- heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
- heat::keystone::auth_cfn::password: {get_param: HeatPassword}
- heat::keystone::auth_cfn::region: {get_param: KeystoneRegion}
+ map_merge:
+ - get_attr: [HeatBase, role_data, service_config_settings, keystone]
+ - heat::keystone::auth_cfn::tenant: 'service'
+ heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
+ heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
+ heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
+ heat::keystone::auth_cfn::password: {get_param: HeatPassword}
+ heat::keystone::auth_cfn::region: {get_param: KeystoneRegion}
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-heat-api-cfn is running"
+ shell: echo o/ #TODO uncomment when /#/c/423302/ : /usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop heat_api_cfn service
+ tags: step2
+ service: name=openstack-heat-api-cfn state=stopped
diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml
index 6dfeaaf3..0954ad19 100644
--- a/puppet/services/heat-api-cloudwatch.yaml
+++ b/puppet/services/heat-api-cloudwatch.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Heat CloudWatch API service configured with Puppet
@@ -66,3 +66,10 @@ outputs:
heat::api_cloudwatch::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
step_config: |
include ::tripleo::profile::base::heat::api_cloudwatch
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-heat-api-cloudwatch is running"
+ shell: echo o/ #TODO uncomment when /#/c/423302/ : /usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop heat_api_cloudwatch service
+ tags: step2
+ service: name=openstack-heat-api-cloudwatch state=stopped
diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml
index 2ea96fc0..ae656b1e 100644
--- a/puppet/services/heat-api.yaml
+++ b/puppet/services/heat-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Heat API service configured with Puppet
@@ -76,9 +76,18 @@ outputs:
include ::tripleo::profile::base::heat::api
service_config_settings:
keystone:
- heat::keystone::auth::tenant: 'service'
- heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
- heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
- heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
- heat::keystone::auth::password: {get_param: HeatPassword}
- heat::keystone::auth::region: {get_param: KeystoneRegion}
+ map_merge:
+ - get_attr: [HeatBase, role_data, service_config_settings, keystone]
+ - heat::keystone::auth::tenant: 'service'
+ heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
+ heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
+ heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
+ heat::keystone::auth::password: {get_param: HeatPassword}
+ heat::keystone::auth::region: {get_param: KeystoneRegion}
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-heat-api is running"
+ shell: echo o/ #TODO uncomment when /#/c/423302/ : /usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop heat_api service
+ tags: step2
+ service: name=openstack-heat-api state=stopped
diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml
index 7eb58f56..90943751 100644
--- a/puppet/services/heat-base.yaml
+++ b/puppet/services/heat-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Heat base service. Shared for all Heat services.
@@ -44,6 +44,61 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ HeatCronPurgeDeletedEnsure:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Ensure
+ default: 'present'
+ HeatCronPurgeDeletedMinute:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Minute
+ default: '1'
+ HeatCronPurgeDeletedHour:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Hour
+ default: '0'
+ HeatCronPurgeDeletedMonthday:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Month Day
+ default: '*'
+ HeatCronPurgeDeletedMonth:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Month
+ default: '*'
+ HeatCronPurgeDeletedWeekday:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Week Day
+ default: '*'
+ HeatCronPurgeDeletedMaxDelay:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Max Delay
+ default: '3600'
+ HeatCronPurgeDeletedUser:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - User
+ default: 'heat'
+ HeatCronPurgeDeletedAge:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Age
+ default: '30'
+ HeatCronPurgeDeletedAgeType:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Age type
+ default: 'days'
+ HeatCronPurgeDeletedDestination:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Log destination
+ default: '/dev/null'
outputs:
role_data:
@@ -57,6 +112,7 @@ outputs:
heat::rabbit_port: {get_param: RabbitClientPort}
heat::debug: {get_param: Debug}
heat::enable_proxy_headers_parsing: true
+ heat::rpc_response_timeout: 600
# We need this because the default heat policy.json no longer works on TripleO
# https://git.openstack.org/cgit/openstack/heat/commit/?id=ac86702172ddf01f5bdc3f3cd99d2e32ad9b7024
heat::policy::policies:
@@ -71,9 +127,23 @@ outputs:
heat::keystone::domain::domain_name: 'heat_stack'
heat::keystone::domain::domain_admin: 'heat_stack_domain_admin'
heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost'
- heat::cron::purge_deleted::age: 30
- heat::cron::purge_deleted::age_type: 'days'
- heat::cron::purge_deleted::maxdelay: 3600
- heat::cron::purge_deleted::destination: '/dev/null'
heat::db::database_db_max_retries: -1
heat::db::database_max_retries: -1
+ heat::yaql_memory_quota: 100000
+ heat::yaql_limit_iterators: 1000
+ heat::cron::purge_deleted::ensure: {get_param: HeatCronPurgeDeletedEnsure}
+ heat::cron::purge_deleted::minute: {get_param: HeatCronPurgeDeletedMinute}
+ heat::cron::purge_deleted::hour: {get_param: HeatCronPurgeDeletedHour}
+ heat::cron::purge_deleted::monthday: {get_param: HeatCronPurgeDeletedMonthday}
+ heat::cron::purge_deleted::month: {get_param: HeatCronPurgeDeletedMonth}
+ heat::cron::purge_deleted::weekday: {get_param: HeatCronPurgeDeletedWeekday}
+ heat::cron::purge_deleted::maxdelay: {get_param: HeatCronPurgeDeletedMaxDelay}
+ heat::cron::purge_deleted::user: {get_param: HeatCronPurgeDeletedUser}
+ heat::cron::purge_deleted::age: {get_param: HeatCronPurgeDeletedAge}
+ heat::cron::purge_deleted::age_type: {get_param: HeatCronPurgeDeletedAgeType}
+ heat::cron::purge_deleted::destination: {get_param: HeatCronPurgeDeletedDestination}
+ service_config_settings:
+ keystone:
+ tripleo::profile::base::keystone::heat_admin_domain: 'heat_stack'
+ tripleo::profile::base::keystone::heat_admin_user: 'heat_stack_domain_admin'
+ tripleo::profile::base::keystone::heat_admin_email: 'heat_stack_domain_admin@localhost'
diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml
index 20415eef..3b73eb88 100644
--- a/puppet/services/heat-engine.yaml
+++ b/puppet/services/heat-engine.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Openstack Heat Engine service configured with Puppet
@@ -48,6 +48,15 @@ parameters:
default:
tag: openstack.heat.engine
path: /var/log/heat/heat-engine.log
+ HeatConvergenceEngine:
+ type: boolean
+ default: true
+ description: Enables the heat engine with the convergence architecture.
+ HeatMaxResourcesPerStack:
+ type: number
+ default: 1000
+ description: Maximum resources allowed per top-level stack. -1 stands for unlimited.
+
resources:
HeatBase:
@@ -72,6 +81,26 @@ outputs:
- heat::engine::num_engine_workers: {get_param: HeatWorkers}
heat::engine::configure_delegated_roles: false
heat::engine::trusts_delegated_roles: []
+ heat::engine::max_nested_stack_depth: 6
+ heat::engine::max_resources_per_stack: {get_param: HeatMaxResourcesPerStack}
+ heat::engine::heat_metadata_server_url:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, HeatCfnPublic, protocol]}
+ - '://'
+ - {get_param: [EndpointMap, HeatCfnPublic, host]}
+ - ':'
+ - {get_param: [EndpointMap, HeatCfnPublic, port]}
+ heat::engine::heat_waitcondition_server_url:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, HeatCfnPublic, protocol]}
+ - '://'
+ - {get_param: [EndpointMap, HeatCfnPublic, host]}
+ - ':'
+ - {get_param: [EndpointMap, HeatCfnPublic, port]}
+ - '/v1/waitcondition'
+ heat::engine::convergence_engine: {get_param: HeatConvergenceEngine}
tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge}
heat::database_connection:
list_join:
@@ -82,6 +111,8 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/heat'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
heat::engine::auth_encryption_key:
@@ -105,4 +136,14 @@ outputs:
- "%{hiera('mysql_bind_host')}"
keystone:
# This is needed because the keystone profile handles creating the domain
- heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
+ tripleo::profile::base::keystone::heat_admin_password: {get_param: HeatStackDomainAdminPassword}
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-heat-engine is running"
+ shell: echo o/ #TODO uncomment when /#/c/423302/ : /usr/bin/systemctl show 'openstack-heat-engine' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop heat_engine service
+ tags: step2
+ service: name=openstack-heat-engine state=stopped
+ - name: Sync heat_engine DB
+ tags: step5
+ command: heat-manage --config-file /etc/heat/heat.conf db_sync
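For illustration, the two list_join expressions above build plain URLs from the HeatCfnPublic endpoint; assuming that endpoint is http on 203.0.113.10 port 8000, they render as:

    heat::engine::heat_metadata_server_url: http://203.0.113.10:8000
    heat::engine::heat_waitcondition_server_url: http://203.0.113.10:8000/v1/waitcondition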
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index 1e08415c..2111021b 100644
--- a/puppet/services/horizon.yaml
+++ b/puppet/services/horizon.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Horizon service configured with Puppet
@@ -24,19 +24,22 @@ parameters:
type: json
HorizonAllowedHosts:
default: '*'
- description: A list of IP/Hostname for the server Horizonis running on.
+ description: A list of IP/Hostname for the server Horizon is running on.
Used for header checks.
type: comma_delimited_list
+ HorizonPasswordValidator:
+ description: Regex for password validation
+ type: string
+ default: ''
+ HorizonPasswordValidatorHelp:
+ description: Help text for password validation
+ type: string
+ default: ''
HorizonSecret:
description: Secret key for Django
type: string
hidden: true
default: ''
- NeutronMechanismDrivers:
- default: 'openvswitch'
- description: |
- The mechanism drivers for the Neutron tenant network.
- type: comma_delimited_list
MemcachedIPv6:
default: false
description: Enable IPv6 features in Memcached.
@@ -45,6 +48,10 @@ parameters:
default: 'overcloud-horizon'
type: string
+conditions:
+
+ debug_empty: {equals : [{get_param: Debug}, '']}
+
outputs:
role_data:
description: Role data for the Horizon role.
@@ -52,33 +59,40 @@ outputs:
service_name: horizon
monitoring_subscription: {get_param: MonitoringSubscriptionHorizon}
config_settings:
- horizon::allowed_hosts: {get_param: HorizonAllowedHosts}
- neutron::plugins::ml2::mechanism_drivers:
- str_replace:
- template: MECHANISMS
- params:
- MECHANISMS: {get_param: NeutronMechanismDrivers}
- tripleo.horizon.firewall_rules:
- '126 horizon':
- dport:
- - 80
- - 443
- horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
- horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
- horizon::vhost_extra_params:
- add_listen: false
- priority: 10
- access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
- horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]}
- horizon::django_debug: {get_param: Debug}
- horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
- horizon::secret_key:
- yaql:
- expression: $.data.passwords.where($ != '').first()
- data:
- passwords:
- - {get_param: HorizonSecret}
- - {get_param: [DefaultPasswords, horizon_secret]}
- memcached_ipv6: {get_param: MemcachedIPv6}
+ map_merge:
+ - horizon::allowed_hosts: {get_param: HorizonAllowedHosts}
+ tripleo.horizon.firewall_rules:
+ '126 horizon':
+ dport:
+ - 80
+ - 443
+ horizon::enable_secure_proxy_ssl_header: true
+ horizon::disable_password_reveal: true
+ horizon::enforce_password_check: true
+ horizon::disallow_iframe_embed: true
+ horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
+ horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
+ horizon::vhost_extra_params:
+ add_listen: false
+ priority: 10
+ access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
+ options: ['FollowSymLinks','MultiViews']
+ horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]}
+ horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ horizon::password_validator: {get_param: [HorizonPasswordValidator]}
+ horizon::password_validator_help: {get_param: [HorizonPasswordValidatorHelp]}
+ horizon::secret_key:
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: HorizonSecret}
+ - {get_param: [DefaultPasswords, horizon_secret]}
+ memcached_ipv6: {get_param: MemcachedIPv6}
+ -
+ if:
+ - debug_empty
+ - {}
+ - horizon::django_debug: {get_param: Debug}
step_config: |
include ::tripleo::profile::base::horizon
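The debug_empty condition keeps the puppet module's own default when Debug is empty: the second branch of the map_merge is an empty map, so horizon::django_debug is only emitted when Debug carries a value. Sketched:

    # Debug: ''      -> map_merge(<fixed settings>, {})                              # django_debug left unset
    # Debug: 'True'  -> map_merge(<fixed settings>, {horizon::django_debug: 'True'})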
diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml
index c8a2e833..ff91eb63 100644
--- a/puppet/services/ironic-api.yaml
+++ b/puppet/services/ironic-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ironic API configured with Puppet
@@ -81,3 +81,7 @@ outputs:
ironic::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ upgrade_tasks:
+ - name: Stop ironic_api service
+ tags: step2
+ service: name=openstack-ironic-api state=stopped
diff --git a/puppet/services/ironic-base.yaml b/puppet/services/ironic-base.yaml
index 0ff393c6..ad7ef6ea 100644
--- a/puppet/services/ironic-base.yaml
+++ b/puppet/services/ironic-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ironic services configured with Puppet
@@ -60,6 +60,8 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ironic'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
ironic::debug: {get_param: Debug}
ironic::rabbit_userid: {get_param: RabbitUserName}
ironic::rabbit_password: {get_param: RabbitPassword}
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index 4ac9fc30..a10c03a5 100644
--- a/puppet/services/ironic-conductor.yaml
+++ b/puppet/services/ironic-conductor.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ironic conductor configured with Puppet
@@ -68,7 +68,7 @@ outputs:
list_join:
- ''
- - 'http://'
- - '%{hiera("ironic_conductor_http_host")}:'
+ - "%{hiera('ironic_conductor_http_host')}:"
- {get_param: IronicIPXEPort}
ironic::drivers::pxe::ipxe_enabled: {get_param: IronicIPXEEnabled}
ironic::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
@@ -98,3 +98,10 @@ outputs:
step_config: |
include ::tripleo::profile::base::ironic::conductor
+ upgrade_tasks:
+ - name: Stop ironic_conductor service
+ tags: step2
+ service: name=openstack-ironic-conductor state=stopped
+ - name: Sync ironic_conductor DB
+ tags: step5
+ command: ironic-dbsync
diff --git a/puppet/services/keepalived.yaml b/puppet/services/keepalived.yaml
index 38cfbe22..38f9f3be 100644
--- a/puppet/services/keepalived.yaml
+++ b/puppet/services/keepalived.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Keepalived service configured with Puppet
@@ -19,19 +19,28 @@ parameters:
via parameter_defaults in the resource registry.
type: json
ControlVirtualInterface:
- default: 'br-ex'
- description: Interface where virtual ip will be assigned.
+ default: ''
+ description: >
+ Interface where the virtual IP will be assigned. This value is
+ normally set automatically by the deployment tool; setting it here
+ overrides the automatic choice.
type: string
PublicVirtualInterface:
- default: 'br-ex'
+ default: ''
description: >
- Specifies the interface where the public-facing virtual ip will be assigned.
- This should be int_public when a VLAN is being used.
+ Interface where the public-facing virtual IP will be assigned. This
+ value is normally set automatically by the deployment tool; setting it
+ here overrides the automatic choice.
type: string
MonitoringSubscriptionKeepalived:
default: 'overcloud-keepalived'
type: string
+conditions:
+
+ control_iface_empty: {equals : [{get_param: ControlVirtualInterface}, '']}
+ public_iface_empty: {equals : [{get_param: PublicVirtualInterface}, '']}
+
outputs:
role_data:
description: Role data for the Keepalived role.
@@ -39,10 +48,27 @@ outputs:
service_name: keepalived
monitoring_subscription: {get_param: MonitoringSubscriptionKeepalived}
config_settings:
- tripleo::keepalived::control_virtual_interface: {get_param: ControlVirtualInterface}
- tripleo::keepalived::public_virtual_interface: {get_param: PublicVirtualInterface}
- tripleo.keepalived.firewall_rules:
- '106 keepalived vrrp':
- proto: vrrp
+ map_merge:
+ - tripleo.keepalived.firewall_rules:
+ '106 keepalived vrrp':
+ proto: vrrp
+ -
+ if:
+ - control_iface_empty
+ - {}
+ - tripleo::keepalived::control_virtual_interface: {get_param: ControlVirtualInterface}
+ -
+ if:
+ - public_iface_empty
+ - {}
+ - tripleo::keepalived::public_virtual_interface: {get_param: PublicVirtualInterface}
step_config: |
include ::tripleo::profile::base::keepalived
+ upgrade_tasks:
+ - name: Stop keepalived service
+ tags: step1
+ service: name=keepalived state=stopped
+ - name: Start keepalived service
+ tags: step4 # Needed at step 4 for mysql
+ service: name=keepalived state=started
+
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
index 1fc88bf1..29157959 100644
--- a/puppet/services/kernel.yaml
+++ b/puppet/services/kernel.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Load kernel modules with kmod and configure kernel options with sysctl.
@@ -18,6 +18,10 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ KernelPidMax:
+ default: 1048576
+ description: Configures sysctl kernel.pid_max key
+ type: number
outputs:
role_data:
@@ -49,5 +53,7 @@ outputs:
value: 0
net.core.netdev_max_backlog:
value: 10000
+ kernel.pid_max:
+ value: {get_param: KernelPidMax}
step_config: |
include ::tripleo::profile::base::kernel
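KernelPidMax feeds the sysctl entry added above; a one-line override in an environment file might look like this (the value is an example):

    parameter_defaults:
      KernelPidMax: 4194304   # raises kernel.pid_max above the 1048576 default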
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index 1f83b680..b989d502 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Keystone service configured with Puppet
@@ -32,6 +32,12 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ KeystoneTokenProvider:
+ description: The keystone token format
+ type: string
+ default: 'uuid'
+ constraints:
+ - allowed_values: ['uuid', 'fernet']
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -83,9 +89,9 @@ parameters:
KeystoneWorkers:
type: string
description: Set the number of workers for keystone::wsgi::apache
- default: '"%{::os_workers}"'
+ default: '%{::os_workers}'
MonitoringSubscriptionKeystone:
- default: 'overcloud-kestone'
+ default: 'overcloud-keystone'
type: string
KeystoneCredential0:
type: string
@@ -93,6 +99,12 @@ parameters:
KeystoneCredential1:
type: string
description: The second Keystone credential key. Must be a valid key.
+ KeystoneFernetKey0:
+ type: string
+ description: The first Keystone fernet key. Must be a valid key.
+ KeystoneFernetKey1:
+ type: string
+ description: The second Keystone fernet key. Must be a valid key.
KeystoneLoggingSource:
type: json
default:
@@ -101,6 +113,51 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ KeystoneCronTokenFlushEnsure:
+ type: string
+ description: >
+ Cron to purge expired tokens - Ensure
+ default: 'present'
+ KeystoneCronTokenFlushMinute:
+ type: string
+ description: >
+ Cron to purge expired tokens - Minute
+ default: '1'
+ KeystoneCronTokenFlushHour:
+ type: string
+ description: >
+ Cron to purge expired tokens - Hour
+ default: '0'
+ KeystoneCronTokenFlushMonthday:
+ type: string
+ description: >
+ Cron to purge expired tokens - Month Day
+ default: '*'
+ KeystoneCronTokenFlushMonth:
+ type: string
+ description: >
+ Cron to purge expired tokens - Month
+ default: '*'
+ KeystoneCronTokenFlushWeekday:
+ type: string
+ description: >
+ Cron to purge expired tokens - Week Day
+ default: '*'
+ KeystoneCronTokenFlushMaxDelay:
+ type: string
+ description: >
+ Cron to purge expired tokens - Max Delay
+ default: '0'
+ KeystoneCronTokenFlushDestination:
+ type: string
+ description: >
+ Cron to purge expired tokens - Log destination
+ default: '/var/log/keystone/keystone-tokenflush.log'
+ KeystoneCronTokenFlushUser:
+ type: string
+ description: >
+ Cron to purge expired tokens - User
+ default: 'keystone'
resources:
@@ -112,6 +169,9 @@ resources:
EndpointMap: {get_param: EndpointMap}
EnableInternalTLS: {get_param: EnableInternalTLS}
+conditions:
+ keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]}
+
outputs:
role_data:
description: Role data for the Keystone role.
@@ -133,11 +193,15 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/keystone'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
keystone::admin_token: {get_param: AdminToken}
keystone::admin_password: {get_param: AdminPassword}
keystone::roles::admin::password: {get_param: AdminPassword}
keystone_ssl_certificate: {get_param: KeystoneSSLCertificate}
keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
+ keystone::token_provider: {get_param: KeystoneTokenProvider}
+ keystone::enable_fernet_setup: {if: [keystone_fernet_tokens, true, false]}
keystone::enable_proxy_headers_parsing: true
keystone::enable_credential_setup: true
keystone::credential_keys:
@@ -145,6 +209,11 @@ outputs:
content: {get_param: KeystoneCredential0}
'/etc/keystone/credential-keys/1':
content: {get_param: KeystoneCredential1}
+ keystone::fernet_keys:
+ '/etc/keystone/fernet-keys/0':
+ content: {get_param: KeystoneFernetKey0}
+ '/etc/keystone/fernet-keys/1':
+ content: {get_param: KeystoneFernetKey1}
keystone::debug: {get_param: Debug}
keystone::rabbit_userid: {get_param: RabbitUserName}
keystone::rabbit_password: {get_param: RabbitPassword}
@@ -173,13 +242,13 @@ outputs:
keystone::wsgi::apache::servername:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}
keystone::wsgi::apache::servername_admin:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}
keystone::wsgi::apache::workers: {get_param: KeystoneWorkers}
@@ -197,13 +266,13 @@ outputs:
keystone::admin_bind_host:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}
keystone::public_bind_host:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}
# NOTE: bind IP is found in Heat replacing the network name with the
@@ -215,6 +284,16 @@ outputs:
# NOTE: this applies to all 2 bind IP settings below...
keystone::wsgi::apache::bind_host: {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}
keystone::wsgi::apache::admin_bind_host: {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}
+ keystone::cron::token_flush::ensure: {get_param: KeystoneCronTokenFlushEnsure}
+ keystone::cron::token_flush::minute: {get_param: KeystoneCronTokenFlushMinute}
+ keystone::cron::token_flush::hour: {get_param: KeystoneCronTokenFlushHour}
+ keystone::cron::token_flush::monthday: {get_param: KeystoneCronTokenFlushMonthday}
+ keystone::cron::token_flush::month: {get_param: KeystoneCronTokenFlushMonth}
+ keystone::cron::token_flush::weekday: {get_param: KeystoneCronTokenFlushWeekday}
+ keystone::cron::token_flush::maxdelay: {get_param: KeystoneCronTokenFlushMaxDelay}
+ keystone::cron::token_flush::destination: {get_param: KeystoneCronTokenFlushDestination}
+ keystone::cron::token_flush::user: {get_param: KeystoneCronTokenFlushUser}
+
step_config: |
include ::tripleo::profile::base::keystone
service_config_settings:
@@ -226,3 +305,16 @@ outputs:
keystone::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ # Ansible tasks to handle upgrade
+ upgrade_tasks:
+ - name: Stop keystone service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped
+ - name: Sync keystone DB
+ tags: step5
+ command: keystone-manage db_sync
+ - name: Start keystone service (running under httpd)
+ tags: step6
+ service: name=httpd state=started
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
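Selecting fernet tokens is driven by the new parameters; the keystone_fernet_tokens condition then turns on enable_fernet_setup, while the default 'uuid' provider leaves it off. A sketch, with placeholders standing in for real fernet keys:

    parameter_defaults:
      KeystoneTokenProvider: fernet
      KeystoneFernetKey0: '<valid fernet key>'   # placed at /etc/keystone/fernet-keys/0
      KeystoneFernetKey1: '<valid fernet key>'   # placed at /etc/keystone/fernet-keys/1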
diff --git a/puppet/services/logging/fluentd-base.yaml b/puppet/services/logging/fluentd-base.yaml
index c8f67556..65ad80ed 100644
--- a/puppet/services/logging/fluentd-base.yaml
+++ b/puppet/services/logging/fluentd-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: Fluentd base service
diff --git a/puppet/services/logging/fluentd-client.yaml b/puppet/services/logging/fluentd-client.yaml
index 3ae7110f..769ab68f 100644
--- a/puppet/services/logging/fluentd-client.yaml
+++ b/puppet/services/logging/fluentd-client.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: Fluentd client configured with Puppet
diff --git a/puppet/services/logging/fluentd-config.yaml b/puppet/services/logging/fluentd-config.yaml
index 58b423fd..68f98aff 100644
--- a/puppet/services/logging/fluentd-config.yaml
+++ b/puppet/services/logging/fluentd-config.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: Fluentd logging configuration
diff --git a/puppet/services/manila-api.yaml b/puppet/services/manila-api.yaml
index 4d3fd47c..f1cddbd0 100644
--- a/puppet/services/manila-api.yaml
+++ b/puppet/services/manila-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Manila-api service configured with Puppet
@@ -51,6 +51,11 @@ outputs:
manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
manila::keystone::authtoken::project_name: 'service'
+ tripleo.manila_api.firewall_rules:
+ '150 manila':
+ dport:
+ - 8786
+ - 13786
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
# (eg. for internal_api):
@@ -59,24 +64,19 @@ outputs:
# internal_api_subnet - > IP/CIDR
manila::api::bind_host: {get_param: [ServiceNetMap, ManilaApiNetwork]}
manila::api::enable_proxy_headers_parsing: true
+ manila::api::default_share_type: 'default'
step_config: |
include ::tripleo::profile::base::manila::api
service_config_settings:
- keystone:
- manila::keystone::auth::tenant: 'service'
- manila::keystone::auth::public_url: {get_param: [EndpointMap, ManilaV1Public, uri]}
- manila::keystone::auth::internal_url: {get_param: [EndpointMap, ManilaV1Internal, uri]}
- manila::keystone::auth::admin_url: {get_param: [EndpointMap, ManilaV1Admin, uri]}
- manila::keystone::auth::public_url_v2: {get_param: [EndpointMap, ManilaPublic, uri]}
- manila::keystone::auth::internal_url_v2: {get_param: [EndpointMap, ManilaInternal, uri]}
- manila::keystone::auth::admin_url_v2: {get_param: [EndpointMap, ManilaAdmin, uri]}
- manila::keystone::auth::password: {get_param: ManilaPassword}
- manila::keystone::auth::region: {get_param: KeystoneRegion}
- mysql:
- manila::db::mysql::password: {get_param: ManilaPassword}
- manila::db::mysql::user: manila
- manila::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- manila::db::mysql::dbname: manila
- manila::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
+ map_merge:
+ - get_attr: [ManilaBase, role_data, service_config_settings]
+ - keystone:
+ manila::keystone::auth::tenant: 'service'
+ manila::keystone::auth::public_url: {get_param: [EndpointMap, ManilaV1Public, uri]}
+ manila::keystone::auth::internal_url: {get_param: [EndpointMap, ManilaV1Internal, uri]}
+ manila::keystone::auth::admin_url: {get_param: [EndpointMap, ManilaV1Admin, uri]}
+ manila::keystone::auth::public_url_v2: {get_param: [EndpointMap, ManilaPublic, uri]}
+ manila::keystone::auth::internal_url_v2: {get_param: [EndpointMap, ManilaInternal, uri]}
+ manila::keystone::auth::admin_url_v2: {get_param: [EndpointMap, ManilaAdmin, uri]}
+ manila::keystone::auth::password: {get_param: ManilaPassword}
+ manila::keystone::auth::region: {get_param: KeystoneRegion}
diff --git a/puppet/services/manila-backend-cephfs.yaml b/puppet/services/manila-backend-cephfs.yaml
index 0fc39e2a..36ef1ea9 100644
--- a/puppet/services/manila-backend-cephfs.yaml
+++ b/puppet/services/manila-backend-cephfs.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Manila Cephfs backend
@@ -40,6 +40,20 @@ parameters:
ManilaCephFSNativeCephFSEnableSnapshots:
type: boolean
default: true
+ ManilaCephFSDataPoolName:
+ default: manila_data
+ type: string
+ ManilaCephFSMetadataPoolName:
+ default: manila_metadata
+ type: string
+ # (jprovazn) default value is set to assure this templates works with an
+ # external ceph too (user/key is created only when ceph is deployed by
+ # TripleO)
+ CephManilaClientKey:
+ default: ''
+ description: The Ceph client key. Can be created with ceph-authtool --gen-print-key.
+ type: string
+ hidden: true
outputs:
role_data:
@@ -54,4 +68,8 @@ outputs:
manila::backend::cephfsnative::cephfs_auth_id: {get_param: ManilaCephFSNativeCephFSAuthId}
manila::backend::cephfsnative::cephfs_cluster_name: {get_param: ManilaCephFSNativeCephFSClusterName}
manila::backend::cephfsnative::cephfs_enable_snapshots: {get_param: ManilaCephFSNativeCephFSEnableSnapshots}
+ manila::backend::cephfsnative::ceph_client_key: {get_param: CephManilaClientKey}
+ ceph::profile::params::fs_data_pool: {get_param: ManilaCephFSDataPoolName}
+ ceph::profile::params::fs_metadata_pool: {get_param: ManilaCephFSMetadataPoolName}
+ ceph::profile::params::fs_name: {get_param: ManilaCephFSNativeShareBackendName}
step_config:
diff --git a/puppet/services/manila-backend-generic.yaml b/puppet/services/manila-backend-generic.yaml
index c527666e..23831a6a 100644
--- a/puppet/services/manila-backend-generic.yaml
+++ b/puppet/services/manila-backend-generic.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Manila generic backend.
diff --git a/puppet/services/manila-backend-netapp.yaml b/puppet/services/manila-backend-netapp.yaml
index e6d2f250..1f6fcf4f 100644
--- a/puppet/services/manila-backend-netapp.yaml
+++ b/puppet/services/manila-backend-netapp.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Manila netapp backend.
diff --git a/puppet/services/manila-base.yaml b/puppet/services/manila-base.yaml
index d228577a..2a9745a2 100644
--- a/puppet/services/manila-base.yaml
+++ b/puppet/services/manila-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Manila base service. Shared by manila-api/scheduler/share services
@@ -40,6 +40,10 @@ parameters:
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
+ ManilaPassword:
+ description: The password for the manila service account.
+ type: string
+ hidden: true
outputs:
role_data:
@@ -54,3 +58,23 @@ outputs:
manila::debug: {get_param: Debug}
manila::db::database_db_max_retries: -1
manila::db::database_max_retries: -1
+ manila::sql_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://manila:'
+ - {get_param: ManilaPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/manila'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ service_config_settings:
+ mysql:
+ manila::db::mysql::password: {get_param: ManilaPassword}
+ manila::db::mysql::user: manila
+ manila::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ manila::db::mysql::dbname: manila
+ manila::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/manila-scheduler.yaml b/puppet/services/manila-scheduler.yaml
index 474cc24f..c8114f2b 100644
--- a/puppet/services/manila-scheduler.yaml
+++ b/puppet/services/manila-scheduler.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Manila-scheduler service configured with Puppet
@@ -57,14 +57,5 @@ outputs:
manila::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
manila::network::neutron::neutron_admin_auth_url: {get_param: [EndpointMap, NeutronAdmin, uri]}
manila::network::neutron::neutron_admin_password: {get_param: NeutronPassword}
- manila::sql_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://manila:'
- - {get_param: ManilaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/manila'
step_config: |
include ::tripleo::profile::base::manila::scheduler
diff --git a/puppet/services/manila-share.yaml b/puppet/services/manila-share.yaml
index e42d2fae..e38fe675 100644
--- a/puppet/services/manila-share.yaml
+++ b/puppet/services/manila-share.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Manila-share service configured with Puppet
@@ -21,6 +21,10 @@ parameters:
MonitoringSubscriptionManilaShare:
default: 'overcloud-manila-share'
type: string
+ ManilaPassword:
+ description: The password for the manila service account.
+ type: string
+ hidden: true
resources:
ManilaBase:
@@ -40,5 +44,11 @@ outputs:
map_merge:
- get_attr: [ManilaBase, role_data, config_settings]
- manila::volume::cinder::cinder_admin_tenant_name: 'service'
+ manila::keystone::authtoken::password: {get_param: ManilaPassword}
+ manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ manila::keystone::authtoken::project_name: 'service'
+ service_config_settings:
+ get_attr: [ManilaBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::manila::share
diff --git a/puppet/services/memcached.yaml b/puppet/services/memcached.yaml
index 9e3f6375..ffa969e0 100644
--- a/puppet/services/memcached.yaml
+++ b/puppet/services/memcached.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Memcached service configured with Puppet
diff --git a/puppet/services/mistral-api.yaml b/puppet/services/mistral-api.yaml
index 44d30358..daa1dc7c 100644
--- a/puppet/services/mistral-api.yaml
+++ b/puppet/services/mistral-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Mistral API service configured with Puppet
diff --git a/puppet/services/mistral-base.yaml b/puppet/services/mistral-base.yaml
index a11624c0..e678b14f 100644
--- a/puppet/services/mistral-base.yaml
+++ b/puppet/services/mistral-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Mistral base service. Shared for all Mistral services.
@@ -65,6 +65,8 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/mistral'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
mistral::rabbit_userid: {get_param: RabbitUserName}
mistral::rabbit_password: {get_param: RabbitPassword}
mistral::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
diff --git a/puppet/services/mistral-engine.yaml b/puppet/services/mistral-engine.yaml
index 10af670d..4a92b863 100644
--- a/puppet/services/mistral-engine.yaml
+++ b/puppet/services/mistral-engine.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Mistral Engine service configured with Puppet
diff --git a/puppet/services/mistral-executor.yaml b/puppet/services/mistral-executor.yaml
index 7afaf0db..6e273b92 100644
--- a/puppet/services/mistral-executor.yaml
+++ b/puppet/services/mistral-executor.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Mistral API service configured with Puppet
diff --git a/puppet/services/monitoring/sensu-base.yaml b/puppet/services/monitoring/sensu-base.yaml
index d7350d07..a8303a59 100644
--- a/puppet/services/monitoring/sensu-base.yaml
+++ b/puppet/services/monitoring/sensu-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: Sensu base service
@@ -43,7 +43,19 @@ parameters:
description: The RabbitMQ vhost used for monitoring purposes.
type: string
default: '/sensu'
-
+ SensuRedactVariables:
+ description: Sensu configuration variables that have to be redacted.
+ type: comma_delimited_list
+ default:
+ - password
+ - passwd
+ - pass
+ - api_key
+ - api_token
+ - access_key
+ - secret_key
+ - private_key
+ - secret
outputs:
role_data:
@@ -61,8 +73,7 @@ outputs:
sensu::rabbitmq_ssl: {get_param: MonitoringRabbitUseSSL}
sensu::rabbitmq_user: {get_param: MonitoringRabbitUserName}
sensu::rabbitmq_vhost: {get_param: MonitoringRabbitVhost}
- #sensu::redis_host: {get_param: MonitoringRedisHost}
- #sensu::redis_password: {get_param: MonitoringRedisPassword}
+ sensu::redact: {get_param: SensuRedactVariables}
sensu::sensu_plugin_provider: 'yum'
sensu::sensu_plugin_name: 'rubygem-sensu-plugin'
sensu::version: 'present'
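
Because SensuRedactVariables is an ordinary comma_delimited_list parameter, the redaction list can be extended from an environment file; a minimal sketch, where the extra proxy_password entry is purely illustrative:

  parameter_defaults:
    SensuRedactVariables:
      - password
      - passwd
      - pass
      - api_key
      - api_token
      - access_key
      - secret_key
      - private_key
      - secret
      - proxy_password   # illustrative extra entry, not part of the default
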
diff --git a/puppet/services/monitoring/sensu-client.yaml b/puppet/services/monitoring/sensu-client.yaml
index a26c7458..76ba59c1 100644
--- a/puppet/services/monitoring/sensu-client.yaml
+++ b/puppet/services/monitoring/sensu-client.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: Sensu client configured with Puppet
diff --git a/puppet/services/network/contrail-analytics.yaml b/puppet/services/network/contrail-analytics.yaml
index 1c2331fa..ad14d315 100644
--- a/puppet/services/network/contrail-analytics.yaml
+++ b/puppet/services/network/contrail-analytics.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Contrail Analytics service deployment using puppet, this YAML file
diff --git a/puppet/services/network/contrail-base.yaml b/puppet/services/network/contrail-base.yaml
index 03dbea5b..b49b2add 100644
--- a/puppet/services/network/contrail-base.yaml
+++ b/puppet/services/network/contrail-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Base parameters for all Contrail Services.
diff --git a/puppet/services/network/contrail-config.yaml b/puppet/services/network/contrail-config.yaml
index 0987fc75..03774480 100644
--- a/puppet/services/network/contrail-config.yaml
+++ b/puppet/services/network/contrail-config.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Contrail Config service deployment using puppet, this YAML file
diff --git a/puppet/services/network/contrail-control.yaml b/puppet/services/network/contrail-control.yaml
index 9356e9e9..7c28d283 100644
--- a/puppet/services/network/contrail-control.yaml
+++ b/puppet/services/network/contrail-control.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Contrail Control service deployment using puppet, this YAML file
diff --git a/puppet/services/network/contrail-database.yaml b/puppet/services/network/contrail-database.yaml
index e5712618..c56b90a2 100644
--- a/puppet/services/network/contrail-database.yaml
+++ b/puppet/services/network/contrail-database.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Contrail Database service deployment using puppet, this YAML file
diff --git a/puppet/services/network/contrail-webui.yaml b/puppet/services/network/contrail-webui.yaml
index 72b9e1c0..72cc6fa5 100644
--- a/puppet/services/network/contrail-webui.yaml
+++ b/puppet/services/network/contrail-webui.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Contrail WebUI service deployment using puppet, this YAML file
diff --git a/puppet/services/neutron-api.yaml b/puppet/services/neutron-api.yaml
index 408eb795..65fa0d8f 100644
--- a/puppet/services/neutron-api.yaml
+++ b/puppet/services/neutron-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Neutron Server configured with Puppet
@@ -57,26 +57,23 @@ parameters:
default:
tag: openstack.neutron.api
path: /var/log/neutron/server.log
- ControllerCount:
- description: |
- Under normal conditions, this should not be overridden manually and is
- set at deployment time. The default value is present to allow the
- template to be used in environments that do not override it.
- default: 1
- type: number
# DEPRECATED: the following options are deprecated and are currently maintained
# for backwards compatibility. They will be removed in the Ocata cycle.
NeutronL3HA:
- default: false
+ default: ''
+ type: string
description: |
- Whether to enable HA for virtual routers. While the default value is
- 'false', L3 HA will be automatically enabled if the number of nodes
- hosting controller configurations and DVR is disabled. This parameter is
- being deprecated in Newton and is scheduled to be removed in Ocata.
- Future releases will enable L3 HA by default if it is appropriate for the
- deployment type. Alternate mechanisms will be available to override.
+ Whether to enable HA for virtual routers. When not set, L3 HA will be
+ automatically enabled if the number of nodes hosting controller
+ configurations is more than one and DVR is disabled. Valid values are
+ 'true' or 'false'. This parameter is being deprecated in Newton and is
+ scheduled to be removed in Ocata. Future releases will enable L3 HA by
+ default if it is appropriate for the deployment type. Alternate
+ mechanisms will be available to override.
+ EnableInternalTLS:
type: boolean
+ default: false
parameter_groups:
- label: deprecated
@@ -88,8 +85,19 @@ parameter_groups:
parameters:
- NeutronL3HA
+conditions:
+ use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
+
resources:
+ TLSProxyBase:
+ type: OS::TripleO::Services::TLSProxyBase
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
NeutronBase:
type: ./neutron-base.yaml
properties:
@@ -97,18 +105,6 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
-conditions:
-
- auto_enable_l3_ha:
- and:
- - not:
- equals:
- - get_param: ControllerCount
- - 1
- - equals:
- - get_param: NeutronEnableDVR
- - false
-
outputs:
role_data:
description: Role data for the Neutron Server agent service.
@@ -121,6 +117,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
+ - get_attr: [TLSProxyBase, role_data, config_settings]
- neutron::server::database_connection:
list_join:
- ''
@@ -130,12 +127,13 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ovs_neutron'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
neutron::server::api_workers: {get_param: NeutronWorkers}
neutron::server::rpc_workers: {get_param: NeutronWorkers}
neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
- neutron::server::l3_ha: {if: ["auto_enable_l3_ha", true, {get_param: NeutronL3HA}]}
neutron::server::enable_proxy_headers_parsing: true
neutron::keystone::authtoken::password: {get_param: NeutronPassword}
@@ -157,7 +155,24 @@ outputs:
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- neutron::bind_host: {get_param: [ServiceNetMap, NeutronApiNetwork]}
+ tripleo::profile::base::neutron::server::tls_proxy_bind_ip:
+ get_param: [ServiceNetMap, NeutronApiNetwork]
+ tripleo::profile::base::neutron::server::tls_proxy_fqdn:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NeutronApiNetwork]}
+ tripleo::profile::base::neutron::server::tls_proxy_port:
+ get_param: [EndpointMap, NeutronInternal, port]
+ # Bind to localhost if internal TLS is enabled, since we put a TLS
+ # proxy in front.
+ neutron::bind_host:
+ if:
+ - use_tls_proxy
+ - 'localhost'
+ - {get_param: [ServiceNetMap, NeutronApiNetwork]}
+ tripleo::profile::base::neutron::server::l3_ha_override: {get_param: NeutronL3HA}
step_config: |
include tripleo::profile::base::neutron::server
service_config_settings:
@@ -176,3 +191,13 @@ outputs:
neutron::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service neutron-server is running"
+ shell: /usr/bin/systemctl show 'neutron-server' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop neutron_api service
+ tags: step2
+ service: name=neutron-server state=stopped
+ - name: Sync neutron_api DB
+ tags: step5
+ command: neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
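
As a sketch of how the new use_tls_proxy condition resolves (the address below is a placeholder, assuming NeutronApiNetwork maps to internal_api):

  # With EnableInternalTLS: true, the API binds to localhost and the TLS
  # proxy owns the network-facing port:
  neutron::bind_host: localhost
  # With the default (false), it binds directly to the NeutronApiNetwork IP:
  # neutron::bind_host: 172.16.2.11
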
diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml
index 6bb4ba08..43657bd9 100644
--- a/puppet/services/neutron-base.yaml
+++ b/puppet/services/neutron-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron base service. Shared for all Neutron agents.
@@ -50,16 +50,13 @@ parameters:
to false may result in configuration remnants after updates/upgrades.
NeutronGlobalPhysnetMtu:
type: number
- default: 1496
+ default: 1500
description: |
MTU of the underlying physical network. Neutron uses this value to
calculate MTU for all virtual network components. For flat and VLAN
networks, neutron uses this value without modification. For overlay
networks such as VXLAN, neutron automatically subtracts the overlay
- protocol overhead from this value. The default value of 1496 is
- currently in effect to compensate for some additional overhead when
- deploying with some network configurations (e.g. network isolation over
- single network interfaces)
+ protocol overhead from this value.
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -87,16 +84,12 @@ outputs:
neutron::rabbit_port: {get_param: RabbitClientPort}
neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
neutron::core_plugin: {get_param: NeutronCorePlugin}
- neutron::service_plugins:
- str_replace:
- template: PLUGINS
- params:
- PLUGINS: {get_param: NeutronServicePlugins}
+ neutron::service_plugins: {get_param: NeutronServicePlugins}
neutron::debug: {get_param: Debug}
neutron::purge_config: {get_param: EnableConfigPurge}
neutron::allow_overlapping_ips: true
neutron::rabbit_heartbeat_timeout_threshold: 60
- neutron::host: '"%{::fqdn}"' #NOTE: extra quoting is needed
+ neutron::host: '%{::fqdn}'
neutron::db::database_db_max_retries: -1
neutron::db::database_max_retries: -1
neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu}
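
With the str_replace wrappers removed, comma_delimited_list parameters are handed to Puppet as real lists; a minimal override sketch (the plugin aliases shown are common upstream ones, used here only as an example):

  parameter_defaults:
    NeutronServicePlugins:
      - router
      - qos
      - trunk
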
diff --git a/puppet/services/neutron-compute-plugin-midonet.yaml b/puppet/services/neutron-compute-plugin-midonet.yaml
index 26b6fa6b..5b6fcca6 100644
--- a/puppet/services/neutron-compute-plugin-midonet.yaml
+++ b/puppet/services/neutron-compute-plugin-midonet.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Compute Midonet plugin
diff --git a/puppet/services/neutron-compute-plugin-nuage.yaml b/puppet/services/neutron-compute-plugin-nuage.yaml
index c4f8ad12..04431e28 100644
--- a/puppet/services/neutron-compute-plugin-nuage.yaml
+++ b/puppet/services/neutron-compute-plugin-nuage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Compute Nuage plugin
diff --git a/puppet/services/neutron-compute-plugin-opencontrail.yaml b/puppet/services/neutron-compute-plugin-opencontrail.yaml
index 9f2fd13c..bbe4a051 100644
--- a/puppet/services/neutron-compute-plugin-opencontrail.yaml
+++ b/puppet/services/neutron-compute-plugin-opencontrail.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Compute OpenContrail plugin
diff --git a/puppet/services/neutron-compute-plugin-ovn.yaml b/puppet/services/neutron-compute-plugin-ovn.yaml
index 95e05dd4..ce28b5c3 100644
--- a/puppet/services/neutron-compute-plugin-ovn.yaml
+++ b/puppet/services/neutron-compute-plugin-ovn.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Compute OVN agent
diff --git a/puppet/services/neutron-compute-plugin-plumgrid.yaml b/puppet/services/neutron-compute-plugin-plumgrid.yaml
index 31a0a08b..09aa6191 100644
--- a/puppet/services/neutron-compute-plugin-plumgrid.yaml
+++ b/puppet/services/neutron-compute-plugin-plumgrid.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Compute Plumgrid plugin
diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml
index 2cd08f98..062edaa4 100644
--- a/puppet/services/neutron-dhcp.yaml
+++ b/puppet/services/neutron-dhcp.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron DHCP agent configured with Puppet
@@ -39,6 +39,10 @@ parameters:
default:
tag: openstack.neutron.agent.dhcp
path: /var/log/neutron/dhcp-agent.log
+ NeutronDhcpAgentDnsmasqDnsServers:
+ default: []
+ description: List of servers to use as dnsmasq forwarders
+ type: comma_delimited_list
resources:
@@ -64,6 +68,7 @@ outputs:
- neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
neutron::agents::dhcp::enable_force_metadata: {get_param: NeutronEnableForceMetadata}
neutron::agents::dhcp::enable_metadata_network: {get_param: NeutronEnableMetadataNetwork}
+ neutron::agents::dhcp::dnsmasq_dns_servers: {get_param: NeutronDhcpAgentDnsmasqDnsServers}
tripleo.neutron_dhcp.firewall_rules:
'115 neutron dhcp input':
proto: 'udp'
@@ -74,3 +79,10 @@ outputs:
dport: 68
step_config: |
include tripleo::profile::base::neutron::dhcp
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service neutron-dhcp-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-dhcp-agent' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop neutron_dhcp service
+ tags: step2
+ service: name=neutron-dhcp-agent state=stopped
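
A minimal sketch of setting the new dnsmasq forwarders parameter from an environment file (the resolver addresses are placeholders):

  parameter_defaults:
    NeutronDhcpAgentDnsmasqDnsServers:
      - 192.0.2.1
      - 192.0.2.2
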
diff --git a/puppet/services/neutron-l3-compute-dvr.yaml b/puppet/services/neutron-l3-compute-dvr.yaml
index b6c29116..06927fe0 100644
--- a/puppet/services/neutron-l3-compute-dvr.yaml
+++ b/puppet/services/neutron-l3-compute-dvr.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron L3 agent for DVR enabled compute nodes
diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml
index a89e3d75..69803551 100644
--- a/puppet/services/neutron-l3.yaml
+++ b/puppet/services/neutron-l3.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron L3 agent configured with Puppet
@@ -43,6 +43,10 @@ parameters:
tag: openstack.neutron.agent.l3
path: /var/log/neutron/l3-agent.log
+conditions:
+
+ external_network_bridge_empty: {equals : [{get_param: NeutronExternalNetworkBridge}, "''"]}
+
resources:
NeutronBase:
@@ -63,12 +67,22 @@ outputs:
- neutron
config_settings:
map_merge:
- - get_attr: [NeutronBase, role_data, config_settings]
+ - get_attr: [NeutronBase, role_data, config_settings]
+ - neutron::agents::l3::agent_mode: {get_param: NeutronL3AgentMode}
+ tripleo.neutron_l3.firewall_rules:
+ '106 neutron_l3 vrrp':
+ proto: vrrp
+ -
+ if:
+ - external_network_bridge_empty
+ - {}
- neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
- neutron::agents::l3::router_delete_namespaces: True
- neutron::agents::l3::agent_mode : {get_param: NeutronL3AgentMode}
- tripleo.neutron_l3.firewall_rules:
- '106 neutron_l3 vrrp':
- proto: vrrp
step_config: |
include tripleo::profile::base::neutron::l3
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service neutron-l3-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-l3-agent' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop neutron_l3 service
+ tags: step2
+ service: name=neutron-l3-agent state=stopped
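
The external_network_bridge_empty condition above compares against the literal string "''", so an operator who wants the deprecated option left out of l3_agent.ini would set exactly that value; a minimal sketch:

  parameter_defaults:
    # The quotes inside the string are intentional: the condition checks for
    # the literal value "''".
    NeutronExternalNetworkBridge: "''"
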
diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml
index 8be4c6d6..199b5809 100644
--- a/puppet/services/neutron-metadata.yaml
+++ b/puppet/services/neutron-metadata.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Metadata agent configured with Puppet
@@ -72,6 +72,13 @@ outputs:
neutron::agents::metadata::auth_password: {get_param: NeutronPassword}
neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
neutron::agents::metadata::auth_tenant: 'service'
- neutron::agents::metadata::metadata_ip: '"%{hiera(\"nova_metadata_vip\")}"'
+ neutron::agents::metadata::metadata_ip: "%{hiera('nova_metadata_vip')}"
step_config: |
include tripleo::profile::base::neutron::metadata
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service neutron-metadata-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-metadata-agent' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop neutron_metadata service
+ tags: step2
+ service: name=neutron-metadata-agent state=stopped
diff --git a/puppet/services/neutron-midonet.yaml b/puppet/services/neutron-midonet.yaml
index 0de256c0..9198f352 100644
--- a/puppet/services/neutron-midonet.yaml
+++ b/puppet/services/neutron-midonet.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Midonet plugin and services
diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml
index cca0deee..c27bb909 100644
--- a/puppet/services/neutron-ovs-agent.yaml
+++ b/puppet/services/neutron-ovs-agent.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron OVS agent configured with Puppet
@@ -70,6 +70,9 @@ parameters:
tag: openstack.neutron.agent.openvswitch
path: /var/log/neutron/openvswitch-agent.log
+conditions:
+ no_firewall_driver: {equals : [{get_param: NeutronOVSFirewallDriver}, '']}
+
resources:
NeutronBase:
@@ -94,21 +97,9 @@ outputs:
- neutron::agents::ml2::ovs::l2_population: {get_param: NeutronEnableL2Pop}
neutron::agents::ml2::ovs::enable_distributed_routing: {get_param: NeutronEnableDVR}
neutron::agents::ml2::ovs::arp_responder: {get_param: NeutronEnableARPResponder}
- neutron::agents::ml2::ovs::bridge_mappings:
- str_replace:
- template: MAPPINGS
- params:
- MAPPINGS: {get_param: NeutronBridgeMappings}
- neutron::agents::ml2::ovs::tunnel_types:
- str_replace:
- template: TYPES
- params:
- TYPES: {get_param: NeutronTunnelTypes}
- neutron::agents::ml2::ovs::extensions:
- str_replace:
- template: AGENT_EXTENSIONS
- params:
- AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions}
+ neutron::agents::ml2::ovs::bridge_mappings: {get_param: NeutronBridgeMappings}
+ neutron::agents::ml2::ovs::tunnel_types: {get_param: NeutronTunnelTypes}
+ neutron::agents::ml2::ovs::extensions: {get_param: NeutronAgentExtensions}
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
# (eg. for internal_api):
@@ -116,12 +107,23 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
- neutron::agents::ml2::ovs::firewall_driver: {get_param: NeutronOVSFirewallDriver}
tripleo.neutron_ovs_agent.firewall_rules:
'118 neutron vxlan networks':
proto: 'udp'
dport: 4789
'136 neutron gre networks':
proto: 'gre'
+ -
+ if:
+ - no_firewall_driver
+ - {}
+ - neutron::agents::ml2::ovs::firewall_driver: {get_param: NeutronOVSFirewallDriver}
step_config: |
include ::tripleo::profile::base::neutron::ovs
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop neutron_ovs_agent service
+ tags: step2
+ service: name=neutron-openvswitch-agent state=stopped
diff --git a/puppet/services/neutron-ovs-dpdk-agent.yaml b/puppet/services/neutron-ovs-dpdk-agent.yaml
index fdfa1c03..e25bc495 100644
--- a/puppet/services/neutron-ovs-dpdk-agent.yaml
+++ b/puppet/services/neutron-ovs-dpdk-agent.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron OVS DPDK configured with Puppet for Compute Role
@@ -18,6 +18,11 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ HostCpusList:
+ description: List of cores to be used for host process
+ type: string
+ constraints:
+ - allowed_pattern: "'[0-9,-]+'"
NeutronDpdkCoreList:
description: List of cores to be used for DPDK Poll Mode Driver
type: string
@@ -68,7 +73,8 @@ outputs:
- neutron::agents::ml2::ovs::enable_dpdk: true
neutron::agents::ml2::ovs::datapath_type: {get_param: NeutronDatapathType}
neutron::agents::ml2::ovs::vhostuser_socket_dir: {get_param: NeutronVhostuserSocketDir}
- vswitch::dpdk::core_list: {get_param: NeutronDpdkCoreList}
+ vswitch::dpdk::host_core_list: {get_param: HostCpusList}
+ vswitch::dpdk::pmd_core_list: {get_param: NeutronDpdkCoreList}
vswitch::dpdk::memory_channels: {get_param: NeutronDpdkMemoryChannels}
vswitch::dpdk::socket_mem: {get_param: NeutronDpdkSocketMemory}
vswitch::dpdk::driver_type: {get_param: NeutronDpdkDriverType}
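
Note the allowed_pattern on HostCpusList expects the single quotes to be part of the value itself; a minimal environment sketch (core numbers are placeholders, and the same quoting convention is assumed for NeutronDpdkCoreList):

  parameter_defaults:
    HostCpusList: "'0,1'"
    NeutronDpdkCoreList: "'2,3,4,5'"
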
diff --git a/puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml b/puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml
new file mode 100644
index 00000000..becd25c9
--- /dev/null
+++ b/puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml
@@ -0,0 +1,73 @@
+heat_template_version: ocata
+
+description: >
+ Configure hieradata for Fujitsu C-Fabric plugin configuration
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NeutronFujitsuCfabAddress:
+ description: 'The address of the C-Fabric to telnet to.'
+ type: string
+ NeutronFujitsuCfabUserName:
+ description: 'The C-Fabric username to use.'
+ type: string
+ NeutronFujitsuCfabPassword:
+ description: 'The C-Fabric password to use.'
+ type: string
+ hidden: true
+ NeutronFujitsuCfabPhysicalNetworks:
+ description: 'List of <physical_network>:<vfab_id> tuples specifying physical_network names and corresponding vfab ids.'
+ type: comma_delimited_list
+ default: ''
+ NeutronFujitsuCfabSharePprofile:
+ description: '"Whether to share a C-Fabric pprofile among Neutron ports using the same VLAN ID.'
+ type: boolean
+ default: false
+ NeutronFujitsuCfabPprofilePrefix:
+ description: 'The prefix string for pprofile name.'
+ type: string
+ default: ''
+ NeutronFujitsuCfabSaveConfig:
+ description: 'Whether to save configuration.'
+ type: boolean
+ default: true
+
+resources:
+
+ NeutronMl2Base:
+ type: ./neutron-plugin-ml2.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for Fujitsu Cfab ML2 Driver
+ value:
+ service_name: neutron_plugin_ml2_fujitsu_cfab
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronMl2Base, role_data, config_settings]
+ - neutron::plugins::ml2::fujitsu::cfab::address: {get_param: NeutronFujitsuCfabAddress}
+ neutron::plugins::ml2::fujitsu::cfab::username: {get_param: NeutronFujitsuCfabUserName}
+ neutron::plugins::ml2::fujitsu::cfab::password: {get_param: NeutronFujitsuCfabPassword}
+ neutron::plugins::ml2::fujitsu::cfab::physical_networks: {get_param: NeutronFujitsuCfabPhysicalNetworks}
+ neutron::plugins::ml2::fujitsu::cfab::share_pprofile: {get_param: NeutronFujitsuCfabSharePprofile}
+ neutron::plugins::ml2::fujitsu::cfab::pprofile_prefix: {get_param: NeutronFujitsuCfabPprofilePrefix}
+ neutron::plugins::ml2::fujitsu::cfab::save_config: {get_param: NeutronFujitsuCfabSaveConfig}
+ step_config: |
+ include ::tripleo::profile::base::neutron::plugins::ml2
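
A hypothetical environment wiring for the new C-Fabric service; the resource_registry key name here is an assumption and should match whatever registry entry accompanies this template, and all values are placeholders:

  resource_registry:
    OS::TripleO::Services::NeutronML2FujitsuCfab: ../puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml
  parameter_defaults:
    NeutronFujitsuCfabAddress: 192.0.2.50
    NeutronFujitsuCfabUserName: admin
    NeutronFujitsuCfabPassword: example-password
    NeutronFujitsuCfabPhysicalNetworks:
      - physnet1:1
      - physnet2:2
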
diff --git a/puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml b/puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml
new file mode 100644
index 00000000..85971f17
--- /dev/null
+++ b/puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml
@@ -0,0 +1,78 @@
+heat_template_version: ocata
+
+description: Configure hieradata for Fujitsu fossw plugin configuration
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NeutronFujitsuFosswIps:
+ description: 'The list of IP addresses of all fos switches.'
+ type: comma_delimited_list
+ NeutronFujitsuFosswUserName:
+ description: 'The username of the fos switches.'
+ type: string
+ NeutronFujitsuFosswPassword:
+ description: 'The password of the fos switches.'
+ type: string
+ hidden: true
+ NeutronFujitsuFosswPort:
+ description: 'The port number used for SSH connection.'
+ type: number
+ default: 22
+ NeutronFujitsuFosswTimeout:
+ description: 'The timeout of the SSH connection.'
+ type: number
+ default: 30
+ NeutronFujitsuFosswUdpDestPort:
+ description: 'The UDP destination port number for VXLAN on the fos switches.'
+ type: number
+ default: 4789
+ NeutronFujitsuFosswOvsdbVlanidRangeMin:
+ description: 'The minimum VLAN ID in the range that is used for binding VNI and physical port.'
+ type: number
+ default: 2
+ NeutronFujitsuFosswOvsdbPort:
+ description: 'The port number on which the OVSDB server on the fos switches listens.'
+ type: number
+ default: 6640
+
+resources:
+
+ NeutronMl2Base:
+ type: ./neutron-plugin-ml2.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for Fujitsu Fossw ML2 Driver
+ value:
+ service_name: neutron_plugin_ml2_fujitsu_fossw
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronMl2Base, role_data, config_settings]
+ - neutron::plugins::ml2::fujitsu::fossw::fossw_ips: {get_param: NeutronFujitsuFosswIps}
+ neutron::plugins::ml2::fujitsu::fossw::username: {get_param: NeutronFujitsuFosswUserName}
+ neutron::plugins::ml2::fujitsu::fossw::password: {get_param: NeutronFujitsuFosswPassword}
+ neutron::plugins::ml2::fujitsu::fossw::port: {get_param: NeutronFujitsuFosswPort}
+ neutron::plugins::ml2::fujitsu::fossw::timeout: {get_param: NeutronFujitsuFosswTimeout}
+ neutron::plugins::ml2::fujitsu::fossw::udp_dest_port: {get_param: NeutronFujitsuFosswUdpDestPort}
+ neutron::plugins::ml2::fujitsu::fossw::ovsdb_vlanid_range_min: {get_param: NeutronFujitsuFosswOvsdbVlanidRangeMin}
+ neutron::plugins::ml2::fujitsu::fossw::ovsdb_port: {get_param: NeutronFujitsuFosswOvsdbPort}
+ step_config: |
+ include ::tripleo::profile::base::neutron::plugins::ml2
+
diff --git a/puppet/services/neutron-plugin-ml2-ovn.yaml b/puppet/services/neutron-plugin-ml2-ovn.yaml
index e98ed497..59346edc 100644
--- a/puppet/services/neutron-plugin-ml2-ovn.yaml
+++ b/puppet/services/neutron-plugin-ml2-ovn.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron ML2/OVN plugin configured with Puppet
@@ -18,13 +18,6 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- OVNDbHost:
- description: IP address on which the OVN DB servers are listening
- type: string
- OVNNorthboundServerPort:
- description: Port of the OVN Northbound DB server
- type: number
- default: 6641
OVNDbConnectionTimeout:
description: Timeout in seconds for the OVSDB connection transaction
type: number
@@ -68,9 +61,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronMl2Base, role_data, config_settings]
- - ovn::northbound::port: {get_param: OVNNorthboundServerPort}
- tripleo::profile::base::neutron::plugins::ml2::ovn::ovn_db_host: {get_param: OVNDbHost}
- neutron::plugins::ovn::ovsdb_connection_timeout: {get_param: OVNDbConnectionTimeout}
+ - neutron::plugins::ovn::ovsdb_connection_timeout: {get_param: OVNDbConnectionTimeout}
neutron::plugins::ovn::neutron_sync_mode: {get_param: OVNNeutronSyncMode}
neutron::plugins::ovn::ovn_l3_mode: true
neutron::plugins::ovn::vif_type: {get_param: OVNVifType}
diff --git a/puppet/services/neutron-plugin-ml2.yaml b/puppet/services/neutron-plugin-ml2.yaml
index 5dbae3dc..71a0076f 100644
--- a/puppet/services/neutron-plugin-ml2.yaml
+++ b/puppet/services/neutron-plugin-ml2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron ML2 Plugin configured with Puppet
@@ -83,47 +83,18 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- - neutron::plugins::ml2::mechanism_drivers:
- str_replace:
- template: MECHANISMS
- params:
- MECHANISMS: {get_param: NeutronMechanismDrivers}
- neutron::plugins::ml2::type_drivers:
- str_replace:
- template: DRIVERS
- params:
- DRIVERS: {get_param: NeutronTypeDrivers}
- neutron::plugins::ml2::flat_networks:
- str_replace:
- template: NETWORKS
- params:
- NETWORKS: {get_param: NeutronFlatNetworks}
- neutron::plugins::ml2::extension_drivers:
- str_replace:
- template: PLUGIN_EXTENSIONS
- params:
- PLUGIN_EXTENSIONS: {get_param: NeutronPluginExtensions}
- neutron::plugins::ml2::network_vlan_ranges:
- str_replace:
- template: RANGES
- params:
- RANGES: {get_param: NeutronNetworkVLANRanges}
- neutron::plugins::ml2::tunnel_id_ranges:
- str_replace:
- template: RANGES
- params:
- RANGES: {get_param: NeutronTunnelIdRanges}
- neutron::plugins::ml2::vni_ranges:
- str_replace:
- template: RANGES
- params:
- RANGES: {get_param: NeutronVniRanges}
- neutron::plugins::ml2::tenant_network_types:
- str_replace:
- template: TYPES
- params:
- TYPES: {get_param: NeutronNetworkType}
+ - neutron::plugins::ml2::mechanism_drivers: {get_param: NeutronMechanismDrivers}
+ neutron::plugins::ml2::type_drivers: {get_param: NeutronTypeDrivers}
+ neutron::plugins::ml2::flat_networks: {get_param: NeutronFlatNetworks}
+ neutron::plugins::ml2::extension_drivers: {get_param: NeutronPluginExtensions}
+ neutron::plugins::ml2::network_vlan_ranges: {get_param: NeutronNetworkVLANRanges}
+ neutron::plugins::ml2::tunnel_id_ranges: {get_param: NeutronTunnelIdRanges}
+ neutron::plugins::ml2::vni_ranges: {get_param: NeutronVniRanges}
+ neutron::plugins::ml2::tenant_network_types: {get_param: NeutronNetworkType}
neutron::plugins::ml2::supported_pci_vendor_devs: {get_param: NeutronSupportedPCIVendorDevs}
step_config: |
include ::tripleo::profile::base::neutron::plugins::ml2
+ service_config_settings:
+ horizon:
+ neutron::plugins::ml2::mechanism_drivers: {get_param: NeutronMechanismDrivers}
diff --git a/puppet/services/neutron-plugin-nuage.yaml b/puppet/services/neutron-plugin-nuage.yaml
index 838ec5ea..e09cd704 100644
--- a/puppet/services/neutron-plugin-nuage.yaml
+++ b/puppet/services/neutron-plugin-nuage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Nuage plugin
diff --git a/puppet/services/neutron-plugin-opencontrail.yaml b/puppet/services/neutron-plugin-opencontrail.yaml
index 098c9d05..976e5f19 100644
--- a/puppet/services/neutron-plugin-opencontrail.yaml
+++ b/puppet/services/neutron-plugin-opencontrail.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Opencontrail plugin
diff --git a/puppet/services/neutron-plugin-plumgrid.yaml b/puppet/services/neutron-plugin-plumgrid.yaml
index 30af8a3f..bd078074 100644
--- a/puppet/services/neutron-plugin-plumgrid.yaml
+++ b/puppet/services/neutron-plugin-plumgrid.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Plumgrid plugin
@@ -100,6 +100,8 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ovs_neutron'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
neutron::plugins::plumgrid::controller_priv_host: {get_param: [EndpointMap, KeystoneAdmin, host]}
neutron::plugins::plumgrid::admin_password: {get_param: AdminPassword}
neutron::plugins::plumgrid::metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
diff --git a/puppet/services/neutron-sriov-agent.yaml b/puppet/services/neutron-sriov-agent.yaml
index 44f7f242..d3c82d88 100644
--- a/puppet/services/neutron-sriov-agent.yaml
+++ b/puppet/services/neutron-sriov-agent.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron SR-IOV nic agent configured with Puppet
@@ -25,6 +25,7 @@ parameters:
All physical networks listed in network_vlan_ranges
on the server should have mappings to appropriate
interfaces on each agent.
+ Example "tenant0:ens2f0,tenant1:ens2f1"
type: comma_delimited_list
default: ""
NeutronExcludeDevices:
@@ -40,8 +41,8 @@ parameters:
NeutronSriovNumVFs:
description: >
Provide the list of VFs to be reserved for each SR-IOV interface.
- Format "<interface_name1>:<numvfs1>","<interface_name2>:<numvfs2>"
- Example "eth1:4096","eth2:128"
+ Format "<interface_name1>:<numvfs1>,<interface_name2>:<numvfs2>"
+ Example "eth1:4096,eth2:128"
type: comma_delimited_list
default: ""
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index ba7fb2e1..892e63dd 100644
--- a/puppet/services/nova-api.yaml
+++ b/puppet/services/nova-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Nova API service configured with Puppet
@@ -51,6 +51,13 @@ parameters:
default:
tag: openstack.nova.api
path: /var/log/nova/nova-api.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ NovaDefaultFloatingPool:
+ default: 'public'
+ description: Default pool for floating IP addresses
+ type: string
conditions:
nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
@@ -62,6 +69,7 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
NovaBase:
type: ./nova-base.yaml
@@ -83,13 +91,11 @@ outputs:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- get_attr: [ApacheServiceBase, role_data, config_settings]
- - nova::cron::archive_deleted_rows::hour: '"*/12"'
- nova::cron::archive_deleted_rows::destination: '"/dev/null"'
+ - nova::cron::archive_deleted_rows::hour: '*/12'
+ nova::cron::archive_deleted_rows::destination: '/dev/null'
tripleo.nova_api.firewall_rules:
'113 nova_api':
dport:
- - 6080
- - 13080
- 8773
- 3773
- 8774
@@ -100,25 +106,29 @@ outputs:
nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
nova::api::enabled: true
- nova::api::default_floating_pool: 'public'
+ nova::api::default_floating_pool: {get_param: NovaDefaultFloatingPool}
nova::api::sync_db_api: true
nova::api::enable_proxy_headers_parsing: true
+ nova::api::api_bind_address:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::api::service_name: 'httpd'
+ nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- nova::api::api_bind_address: {get_param: [ServiceNetMap, NovaApiNetwork]}
- nova::api::service_name: 'httpd'
- nova::wsgi::apache::ssl: false
- nova::wsgi::apache::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
- nova::wsgi::apache::servername:
+ nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::wsgi::apache_api::servername:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
- $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
- nova::wsgi::apache::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
nova::api::instance_name_template: {get_param: InstanceNameTemplate}
nova_enable_db_purge: {get_param: NovaEnableDBPurge}
@@ -127,10 +137,27 @@ outputs:
- nova_workers_zero
- {}
- nova::api::osapi_compute_workers: {get_param: NovaWorkers}
- nova::wsgi::apache::workers: {get_param: NovaWorkers}
+ nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
step_config: |
include tripleo::profile::base::nova::api
service_config_settings:
+ mysql:
+ map_merge:
+ - {get_attr: [NovaBase, role_data, service_config_settings, mysql]}
+ - nova::db::mysql::password: {get_param: NovaPassword}
+ nova::db::mysql::user: nova
+ nova::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ nova::db::mysql::dbname: nova
+ nova::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+ nova::db::mysql_api::password: {get_param: NovaPassword}
+ nova::db::mysql_api::user: nova_api
+ nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ nova::db::mysql_api::dbname: nova_api
+ nova::db::mysql_api::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
keystone:
nova::keystone::auth::tenant: 'service'
nova::keystone::auth::public_url: {get_param: [EndpointMap, NovaPublic, uri]}
@@ -138,18 +165,5 @@ outputs:
nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
nova::keystone::auth::password: {get_param: NovaPassword}
nova::keystone::auth::region: {get_param: KeystoneRegion}
- mysql:
- nova::db::mysql::password: {get_param: NovaPassword}
- nova::db::mysql::user: nova
- nova::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- nova::db::mysql::dbname: nova
- nova::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
- nova::db::mysql_api::password: {get_param: NovaPassword}
- nova::db::mysql_api::user: nova_api
- nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- nova::db::mysql_api::dbname: nova_api
- nova::db::mysql_api::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
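
For reference, the str_replace calls above only substitute the network name into a hiera lookup; assuming NovaApiNetwork maps to internal_api, the generated hieradata would be roughly:

  nova::api::api_bind_address: "%{hiera('fqdn_internal_api')}"
  nova::wsgi::apache_api::servername: "%{hiera('fqdn_internal_api')}"
  # Puppet resolves fqdn_internal_api to the node's FQDN on that network at
  # catalog compile time.
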
diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml
index 8db00d8f..c448bf49 100644
--- a/puppet/services/nova-base.yaml
+++ b/puppet/services/nova-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Nova base service. Shared for all Nova services.
@@ -18,6 +18,10 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
NovaPassword:
description: The password for the nova service and db account, used by nova-api.
type: string
@@ -66,6 +70,56 @@ parameters:
type: string
description: Nova Compute upgrade level
default: ''
+ NovaCronArchiveDeleteRowsMinute:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Minute
+ default: '1'
+ NovaCronArchiveDeleteRowsHour:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Hour
+ default: '0'
+ NovaCronArchiveDeleteRowsMonthday:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Month Day
+ default: '*'
+ NovaCronArchiveDeleteRowsMonth:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Month
+ default: '*'
+ NovaCronArchiveDeleteRowsWeekday:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Week Day
+ default: '*'
+ NovaCronArchiveDeleteRowsMaxRows:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Max Rows
+ default: '100'
+ NovaCronArchiveDeleteRowsUser:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - User
+ default: 'nova'
+ NovaCronArchiveDeleteRowsDestination:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Log destination
+ default: '/var/log/nova/nova-rowsflush.log'
+ NovaCronArchiveDeleteRowsUntilComplete:
+ type: boolean
+ description: >
+ Cron to move deleted instances to another table - Until complete
+ default: false
+
+
+conditions:
+
+ compute_upgrade_level_empty: {equals : [{get_param: UpgradeLevelNovaCompute}, '']}
outputs:
role_data:
@@ -73,45 +127,88 @@ outputs:
value:
service_name: nova_base
config_settings:
- nova::rabbit_password: {get_param: RabbitPassword}
- nova::rabbit_userid: {get_param: RabbitUserName}
- nova::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- nova::rabbit_port: {get_param: RabbitClientPort}
- nova::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://nova:'
- - {get_param: NovaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/nova'
- nova::api_database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://nova_api:'
- - {get_param: NovaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/nova_api'
- nova::debug: {get_param: Debug}
- nova::purge_config: {get_param: EnableConfigPurge}
- nova::network::neutron::neutron_project_name: 'service'
- nova::network::neutron::neutron_username: 'neutron'
- nova::network::neutron::dhcp_domain: ''
- nova::network::neutron::neutron_password: {get_param: NeutronPassword}
- nova::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
- nova::network::neutron::neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]}
- nova::rabbit_heartbeat_timeout_threshold: 60
- nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL'
- nova::host: '"%{::fqdn}"' # NOTE: extra quoting is needed.
- nova::notify_on_state_change: 'vm_and_task_state'
- nova::notification_driver: messagingv2
- nova::network::neutron::neutron_auth_type: 'v3password'
- nova::db::database_db_max_retries: -1
- nova::db::database_max_retries: -1
- nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
- nova::use_ipv6: {get_param: NovaIPv6}
- nova::upgrade_level_compute: {get_param: UpgradeLevelNovaCompute}
- nova::network::neutron::neutron_ovs_bridge: {get_param: NovaOVSBridge}
+ map_merge:
+ - nova::rabbit_password: {get_param: RabbitPassword}
+ nova::rabbit_userid: {get_param: RabbitUserName}
+ nova::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ nova::rabbit_port: {get_param: RabbitClientPort}
+ nova::placement::project_name: 'service'
+ nova::placement::password: {get_param: NovaPassword}
+ nova::placement::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ nova::placement::os_region_name: {get_param: KeystoneRegion}
+ nova::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://nova:'
+ - {get_param: NovaPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/nova'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ nova::api_database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://nova_api:'
+ - {get_param: NovaPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/nova_api'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ nova::placement_database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://nova_placement:'
+ - {get_param: NovaPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/nova_placement'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ nova::debug: {get_param: Debug}
+ nova::purge_config: {get_param: EnableConfigPurge}
+ nova::network::neutron::neutron_project_name: 'service'
+ nova::network::neutron::neutron_username: 'neutron'
+ nova::network::neutron::dhcp_domain: ''
+ nova::network::neutron::neutron_password: {get_param: NeutronPassword}
+ nova::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
+ nova::network::neutron::neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]}
+ nova::rabbit_heartbeat_timeout_threshold: 60
+ nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL'
+ nova::host: '%{::fqdn}'
+ nova::notify_on_state_change: 'vm_and_task_state'
+ nova::notification_driver: messagingv2
+ nova::network::neutron::neutron_auth_type: 'v3password'
+ nova::db::database_db_max_retries: -1
+ nova::db::database_max_retries: -1
+ nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
+ nova::use_ipv6: {get_param: NovaIPv6}
+ nova::network::neutron::neutron_ovs_bridge: {get_param: NovaOVSBridge}
+ nova::cron::archive_deleted_rows::minute: {get_param: NovaCronArchiveDeleteRowsMinute}
+ nova::cron::archive_deleted_rows::hour: {get_param: NovaCronArchiveDeleteRowsHour}
+ nova::cron::archive_deleted_rows::monthday: {get_param: NovaCronArchiveDeleteRowsMonthday}
+ nova::cron::archive_deleted_rows::month: {get_param: NovaCronArchiveDeleteRowsMonth}
+ nova::cron::archive_deleted_rows::weekday: {get_param: NovaCronArchiveDeleteRowsWeekday}
+ nova::cron::archive_deleted_rows::max_rows: {get_param: NovaCronArchiveDeleteRowsMaxRows}
+ nova::cron::archive_deleted_rows::user: {get_param: NovaCronArchiveDeleteRowsUser}
+ nova::cron::archive_deleted_rows::destination: {get_param: NovaCronArchiveDeleteRowsDestination}
+ nova::cron::archive_deleted_rows::until_complete: {get_param: NovaCronArchiveDeleteRowsUntilComplete}
+ -
+ if:
+ - compute_upgrade_level_empty
+ - {}
+ - nova::upgrade_level_compute: {get_param: UpgradeLevelNovaCompute}
+ service_config_settings:
+ mysql:
+ # NOTE(aschultz): this should be configurable if/when we support more
+ # complex cell v2 configurations. For now, this is the default cell
+ # created for the cell v2 configuration
+ nova::db::mysql_api::setup_cell0: true
+ nova::rabbit_password: {get_param: RabbitPassword}
+ nova::rabbit_userid: {get_param: RabbitUserName}
+ nova::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ nova::rabbit_port: {get_param: RabbitClientPort}
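
The new cron parameters map one-to-one onto nova::cron::archive_deleted_rows; a minimal override sketch (the values shown are examples, not new defaults):

  parameter_defaults:
    NovaCronArchiveDeleteRowsHour: '2'
    NovaCronArchiveDeleteRowsMaxRows: '500'
    NovaCronArchiveDeleteRowsUntilComplete: true
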
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index f7f2510e..2312b635 100644
--- a/puppet/services/nova-compute.yaml
+++ b/puppet/services/nova-compute.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Nova Compute service configured with Puppet
@@ -52,7 +52,7 @@ parameters:
For different formats, refer to the nova.conf documentation for
pci_passthrough_whitelist configuration
type: json
- default: ''
+ default: {}
NovaVcpuPinSet:
description: >
A list or range of physical CPU cores to reserve for virtual machine
@@ -97,11 +97,7 @@ outputs:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- nova::compute::libvirt::manage_libvirt_services: false
- nova::compute::pci_passthrough:
- str_replace:
- template: "'JSON_PARAM'"
- params:
- JSON_PARAM: {get_param: NovaPCIPassthrough}
+ nova::compute::pci_passthrough: {get_param: NovaPCIPassthrough}
nova::compute::vcpu_pin_set: {get_param: NovaVcpuPinSet}
nova::compute::reserved_host_memory: {get_param: NovaReservedHostMemory}
# we manage migration in nova common puppet profile
@@ -117,7 +113,7 @@ outputs:
- '.'
- - 'client'
- {get_param: CephClientUserName}
- nova::compute::rbd::libvirt_rbd_secret_uuid: '"%{hiera(\"ceph::profile::params::fsid\")}"'
+ nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
nova::compute::instance_usage_audit: true
nova::compute::instance_usage_audit_period: 'hour'
nova::compute::rbd::ephemeral_storage: {get_param: NovaEnableRbdBackend}
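
With the str_replace/JSON_PARAM wrapper dropped, NovaPCIPassthrough is passed through as native JSON; a minimal sketch of a whitelist entry (the vendor/product IDs and physical network name are placeholders):

  parameter_defaults:
    NovaPCIPassthrough:
      - vendor_id: "8086"
        product_id: "154d"
        physical_network: physnet_sriov
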
diff --git a/puppet/services/nova-conductor.yaml b/puppet/services/nova-conductor.yaml
index a10d9560..b96bf6e6 100644
--- a/puppet/services/nova-conductor.yaml
+++ b/puppet/services/nova-conductor.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Nova Conductor service configured with Puppet
diff --git a/puppet/services/nova-consoleauth.yaml b/puppet/services/nova-consoleauth.yaml
index 85e60420..79969ded 100644
--- a/puppet/services/nova-consoleauth.yaml
+++ b/puppet/services/nova-consoleauth.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Nova Consoleauth service configured with Puppet
diff --git a/puppet/services/nova-ironic.yaml b/puppet/services/nova-ironic.yaml
index bf7639dd..306c6b6f 100644
--- a/puppet/services/nova-ironic.yaml
+++ b/puppet/services/nova-ironic.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Nova Compute service configured with Puppet and using Ironic
diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml
index 31732580..a9b2b3f9 100644
--- a/puppet/services/nova-libvirt.yaml
+++ b/puppet/services/nova-libvirt.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Libvirt service configured with Puppet
@@ -21,6 +21,14 @@ parameters:
NovaComputeLibvirtType:
type: string
default: kvm
+ LibvirtEnabledPerfEvents:
+ type: comma_delimited_list
+ default: []
+ description: List of performance events to enable for monitoring, for
+ example ``enabled_perf_events = cmt, mbml, mbmt``. The supported events
+ are listed at https://libvirt.org/html/libvirt-libvirt-domain.html
+ (search for the ``VIR_PERF_PARAM_*`` keywords).
MonitoringSubscriptionNovaLibvirt:
default: 'overcloud-nova-libvirt'
type: string
@@ -50,10 +58,17 @@ outputs:
tripleo::profile::base::nova::libvirt_enabled: true
nova::compute::libvirt::services::libvirt_virt_type: {get_param: NovaComputeLibvirtType}
nova::compute::libvirt::libvirt_virt_type: {get_param: NovaComputeLibvirtType}
+ nova::compute::libvirt::libvirt_enabled_perf_events: {get_param: LibvirtEnabledPerfEvents}
+ nova::compute::libvirt::qemu::configure_qemu: true
+ nova::compute::libvirt::qemu::max_files: 32768
+ nova::compute::libvirt::qemu::max_processes: 131072
tripleo.nova_libvirt.firewall_rules:
'200 nova_libvirt':
dport:
- 16509
+ - 16514
+ - '49152-49215'
+ - '5900-5999'
step_config: |
include tripleo::profile::base::nova::libvirt
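
A minimal sketch of enabling the new perf-events knob from an environment file (the event selection mirrors the example in the parameter description):

  parameter_defaults:
    LibvirtEnabledPerfEvents:
      - cmt
      - mbml
      - mbmt
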
diff --git a/puppet/services/nova-metadata.yaml b/puppet/services/nova-metadata.yaml
index 40931da6..376f95b1 100644
--- a/puppet/services/nova-metadata.yaml
+++ b/puppet/services/nova-metadata.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Nova API service configured with Puppet
diff --git a/puppet/services/nova-placement.yaml b/puppet/services/nova-placement.yaml
new file mode 100644
index 00000000..82b83561
--- /dev/null
+++ b/puppet/services/nova-placement.yaml
@@ -0,0 +1,120 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Nova Placement API service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NovaWorkers:
+ default: 0
+ description: Number of workers for Nova Placement API service.
+ type: number
+ NovaPassword:
+ description: The password for the nova service and db account, used by nova-placement.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ MonitoringSubscriptionNovaPlacement:
+ default: 'overcloud-nova-placement'
+ type: string
+ NovaPlacementLoggingSource:
+ type: json
+ default:
+ tag: openstack.nova.placement
+ path: /var/log/httpd/nova_placement_wsgi_error_ssl.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
+
+resources:
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
+ NovaBase:
+ type: ./nova-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Placement API service.
+ value:
+ service_name: nova_placement
+ monitoring_subscription: {get_param: MonitoringSubscriptionNovaPlacement}
+ logging_source: {get_param: NovaPlacementLoggingSource}
+ logging_groups:
+ - nova
+ config_settings:
+ map_merge:
+ - get_attr: [NovaBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.nova_placement.firewall_rules:
+ '138 nova_placement':
+ dport:
+ - 8778
+ - 13778
+ nova::wsgi::apache_placement::api_port: '8778'
+ nova::wsgi::apache_placement::ssl: {get_param: EnableInternalTLS}
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet - > IP/CIDR
+ nova::wsgi::apache_placement::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::wsgi::apache_placement::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ -
+ if:
+ - nova_workers_zero
+ - {}
+ - nova::wsgi::apache_placement::workers: {get_param: NovaWorkers}
+ step_config: |
+ include tripleo::profile::base::nova::placement
+ service_config_settings:
+ keystone:
+ nova::keystone::auth_placement::tenant: 'service'
+ nova::keystone::auth_placement::public_url: {get_param: [EndpointMap, NovaPlacementPublic, uri]}
+ nova::keystone::auth_placement::internal_url: {get_param: [EndpointMap, NovaPlacementInternal, uri]}
+ nova::keystone::auth_placement::admin_url: {get_param: [EndpointMap, NovaPlacementAdmin, uri]}
+ nova::keystone::auth_placement::password: {get_param: NovaPassword}
+ nova::keystone::auth_placement::region: {get_param: KeystoneRegion}
+ mysql:
+ map_merge:
+ - {get_attr: [NovaBase, role_data, service_config_settings, mysql]}
+ - nova::db::mysql_placement::password: {get_param: NovaPassword}
+ nova::db::mysql_placement::user: nova_placement
+ nova::db::mysql_placement::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ nova::db::mysql_placement::dbname: nova_placement
+ nova::db::mysql_placement::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
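Note: the new template leans on the Heat conditions/if intrinsics so that the workers key is only emitted when NovaWorkers is non-zero. A stripped-down illustration of the same pattern, shown here as a self-contained sketch rather than anything taken from the tree:

    heat_template_version: ocata
    parameters:
      NovaWorkers:
        type: number
        default: 0
    conditions:
      nova_workers_zero: {equals: [{get_param: NovaWorkers}, 0]}
    outputs:
      workers_setting:
        description: Empty map when NovaWorkers is 0, otherwise the hiera key.
        value:
          if:
            - nova_workers_zero
            - {}
            - nova::wsgi::apache_placement::workers: {get_param: NovaWorkers}
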
diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml
index d89e3e11..353a75ac 100644
--- a/puppet/services/nova-scheduler.yaml
+++ b/puppet/services/nova-scheduler.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Nova Scheduler service configured with Puppet
@@ -58,7 +58,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- - nova::scheduler::filter::ram_allocation_ratio: '1.0'
+ - nova::ram_allocation_ratio: '1.0'
nova::scheduler::filter::scheduler_available_filters: {get_param: NovaSchedulerAvailableFilters}
nova::scheduler::filter::scheduler_default_filters: {get_param: NovaSchedulerDefaultFilters}
step_config: |
diff --git a/puppet/services/nova-vnc-proxy.yaml b/puppet/services/nova-vnc-proxy.yaml
index 85d59ae6..bf244943 100644
--- a/puppet/services/nova-vnc-proxy.yaml
+++ b/puppet/services/nova-vnc-proxy.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Nova Vncproxy service configured with Puppet
@@ -57,5 +57,10 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
nova::vncproxy::host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ tripleo.nova_vnc_proxy.firewall_rules:
+ '137 nova_vnc_proxy':
+ dport:
+ - 6080
+ - 13080
step_config: |
include tripleo::profile::base::nova::vncproxy
diff --git a/puppet/services/octavia-api.yaml b/puppet/services/octavia-api.yaml
new file mode 100644
index 00000000..4c6f4c37
--- /dev/null
+++ b/puppet/services/octavia-api.yaml
@@ -0,0 +1,97 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Octavia API service.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OctaviaPassword:
+ description: The password for the Octavia's database account.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ MonitoringSubscriptionOctaviaApi:
+ default: 'overcloud-octavia-api'
+ type: string
+ OctaviaApiLoggingSource:
+ type: json
+ default:
+ tag: openstack.octavia.api
+ path: /var/log/octavia/api.log
+
+resources:
+
+ OctaviaBase:
+ type: ./octavia-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Octavia API service.
+ value:
+ service_name: octavia_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionOctaviaApi}
+ logging_source: {get_param: OctaviaApiLoggingSource}
+ logging_groups:
+ - octavia
+ config_settings:
+ map_merge:
+ - get_attr: [OctaviaBase, role_data, config_settings]
+ - octavia::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ octavia::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://octavia:'
+ - {get_param: OctaviaPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/octavia'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ octavia::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ octavia::keystone::authtoken::project_name: 'service'
+ octavia::api::sync_db: true
+ tripleo.octavia_api.firewall_rules:
+ '120 octavia api':
+ dport:
+ - 9876
+ - 13876
+ octavia::host: {get_param: [ServiceNetMap, OctaviaApiNetwork]}
+ step_config: |
+ include tripleo::profile::base::octavia::api
+ service_config_settings:
+ keystone:
+ octavia::keystone::auth::tenant: 'service'
+ octavia::keystone::auth::public_url: {get_param: [EndpointMap, OctaviaPublic, uri]}
+ octavia::keystone::auth::internal_url: { get_param: [ EndpointMap, OctaviaInternal, uri ] }
+ octavia::keystone::auth::admin_url: { get_param: [ EndpointMap, OctaviaAdmin, uri ] }
+ octavia::keystone::auth::password: {get_param: OctaviaPassword}
+ octavia::keystone::auth::region: {get_param: KeystoneRegion}
+ mysql:
+ octavia::db::mysql::password: {get_param: OctaviaPassword}
+ octavia::db::mysql::user: octavia
+ octavia::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ octavia::db::mysql::dbname: octavia
+ octavia::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
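Note: the list_join above assembles a standard oslo.db URL. Assuming the usual mysql+pymysql protocol from EndpointMap, the resolved value looks roughly like the following (placeholders, not real values):

    octavia::db::database_connection: mysql+pymysql://octavia:<OctaviaPassword>@<mysql internal host>/octavia?bind_address=<client bind address>
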
diff --git a/puppet/services/octavia-base.yaml b/puppet/services/octavia-base.yaml
new file mode 100644
index 00000000..b537a2bc
--- /dev/null
+++ b/puppet/services/octavia-base.yaml
@@ -0,0 +1,62 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Octavia base service. Shared for all Octavia services
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ Debug:
+ type: string
+ default: ''
+ description: Set to True to enable debugging on all services.
+ EnableConfigPurge:
+ type: boolean
+ default: true
+ description: >
+ Remove configuration that is not generated by TripleO. Setting
+ to false may result in configuration remnants after updates/upgrades.
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+
+outputs:
+ role_data:
+ description: Base role data for Octavia services
+ value:
+ service_name: octavia_base
+ config_settings:
+ octavia::debug: {get_param: Debug}
+ octavia::purge_config: {get_param: EnableConfigPurge}
+ octavia::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ tripleo::profile::base::octavia::rabbit_user: {get_param: RabbitUserName}
+ tripleo::profile::base::octavia::rabbit_password: {get_param: RabbitPassword}
+ tripleo::profile::base::octavia::rabbit_port: {get_param: RabbitClientPort}
+
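Note: the Rabbit parameters mirror the knobs used by the other service templates. A hypothetical environment snippet switching Octavia's messaging to TLS (illustrative only; 5671 is the conventional AMQP-over-TLS port and is not set anywhere in this change):

    parameter_defaults:
      RabbitClientUseSSL: true
      RabbitClientPort: 5671
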
diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml
index 318c898e..0ed9d206 100644
--- a/puppet/services/opendaylight-api.yaml
+++ b/puppet/services/opendaylight-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenDaylight SDN Controller.
@@ -17,10 +17,6 @@ parameters:
type: string
description: The password for the opendaylight server.
hidden: true
- OpenDaylightEnableL3:
- description: Knob to enable/disable ODL L3
- type: string
- default: 'no'
OpenDaylightEnableDHCP:
description: Knob to enable/disable ODL DHCP Server
type: boolean
@@ -56,9 +52,14 @@ outputs:
opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
opendaylight::username: {get_param: OpenDaylightUsername}
opendaylight::password: {get_param: OpenDaylightPassword}
- opendaylight::enable_l3: {get_param: OpenDaylightEnableL3}
opendaylight::extra_features: {get_param: OpenDaylightFeatures}
opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP}
- opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpenDaylightApiNetwork]}
+ opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
+ tripleo.opendaylight_api.firewall_rules:
+ '137 opendaylight api':
+ dport:
+ - {get_param: OpenDaylightPort}
+ - 6640
+ - 6653
step_config: |
include tripleo::profile::base::neutron::opendaylight
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index 268ca244..cfec3c48 100644
--- a/puppet/services/opendaylight-ovs.yaml
+++ b/puppet/services/opendaylight-ovs.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenDaylight OVS Configuration.
@@ -8,6 +8,15 @@ parameters:
default: 8081
description: Set opendaylight service port
type: number
+ OpenDaylightUsername:
+ default: 'admin'
+ description: The username for the opendaylight server.
+ type: string
+ OpenDaylightPassword:
+ default: 'admin'
+ type: string
+ description: The password for the opendaylight server.
+ hidden: true
OpenDaylightConnectionProtocol:
description: L7 protocol used for REST access
type: string
@@ -46,6 +55,8 @@ outputs:
service_name: opendaylight_ovs
config_settings:
opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
+ opendaylight::username: {get_param: OpenDaylightUsername}
+ opendaylight::password: {get_param: OpenDaylightPassword}
opendaylight_check_url: {get_param: OpenDaylightCheckURL}
opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
@@ -54,5 +65,11 @@ outputs:
template: MAPPINGS
params:
MAPPINGS: {get_param: OpenDaylightProviderMappings}
+ tripleo.opendaylight_ovs.firewall_rules:
+ '118 neutron vxlan networks':
+ proto: 'udp'
+ dport: 4789
+ '136 neutron gre networks':
+ proto: 'gre'
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
diff --git a/puppet/services/ovn-dbs.yaml b/puppet/services/ovn-dbs.yaml
new file mode 100644
index 00000000..7f81afde
--- /dev/null
+++ b/puppet/services/ovn-dbs.yaml
@@ -0,0 +1,40 @@
+heat_template_version: ocata
+
+description: >
+ OVN databases configured with puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OVNNorthboundServerPort:
+ description: Port of the OVN Northbound DB server
+ type: number
+ default: 6641
+ OVNSouthboundServerPort:
+ description: Port of the OVN Southbound DB server
+ type: number
+ default: 6642
+
+outputs:
+ role_data:
+ description: Role data for the OVN northd service
+ value:
+ service_name: ovn_dbs
+ config_settings:
+ ovn::northbound::port: {get_param: OVNNorthboundServerPort}
+ ovn::southbound::port: {get_param: OVNSouthboundServerPort}
+ ovn::northd::dbs_listen_ip: {get_param: [ServiceNetMap, OvnDbsNetwork]}
+ step_config: |
+ include ::tripleo::profile::base::neutron::ovn_northd
diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml
index abfb9c80..a8a9fb99 100644
--- a/puppet/services/pacemaker.yaml
+++ b/puppet/services/pacemaker.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Pacemaker service configured with Puppet
@@ -29,6 +29,11 @@ parameters:
default: false
description: Whether to enable fencing in Pacemaker or not.
type: boolean
+ PacemakerRemoteAuthkey:
+ type: string
+ description: The authkey for the pacemaker remote service.
+ hidden: true
+ default: ''
PcsdPassword:
type: string
description: The password for the 'pcsd' user for pacemaker.
@@ -112,5 +117,6 @@ outputs:
passwords:
- {get_param: PcsdPassword}
- {get_param: [DefaultPasswords, pcsd_password]}
+ tripleo::profile::base::pacemaker::remote_authkey: {get_param: PacemakerRemoteAuthkey}
step_config: |
include ::tripleo::profile::base::pacemaker
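Note: PacemakerRemoteAuthkey defaults to an empty string, so deployments using pacemaker remote nodes are expected to supply their own key. A hypothetical parameter_defaults entry (the value is a placeholder, not a recommendation):

    parameter_defaults:
      PacemakerRemoteAuthkey: 'replace-with-a-long-random-secret'
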
diff --git a/puppet/services/pacemaker/ceilometer-agent-central.yaml b/puppet/services/pacemaker/ceilometer-agent-central.yaml
deleted file mode 100644
index 5dcb62ca..00000000
--- a/puppet/services/pacemaker/ceilometer-agent-central.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Ceilometer Central Agent service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- MonitoringSubscriptionCeilometerCentral:
- default: 'overcloud-ceilometer-agent-central'
- type: string
-
-resources:
- CeilometerServiceBase:
- type: ../ceilometer-agent-central.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Ceilometer Central Agent pacemaker role.
- value:
- service_name: ceilometer_agent_central
- monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCentral}
- config_settings:
- map_merge:
- - get_attr: [CeilometerServiceBase, role_data, config_settings]
- - ceilometer::agent::central::manage_service: false
- ceilometer::agent::central::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::ceilometer::agent::central
diff --git a/puppet/services/pacemaker/ceilometer-agent-notification.yaml b/puppet/services/pacemaker/ceilometer-agent-notification.yaml
deleted file mode 100644
index dbe14499..00000000
--- a/puppet/services/pacemaker/ceilometer-agent-notification.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Ceilometer Notification Agent service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- MonitoringSubscriptionCeilometerNotification:
- default: 'overcloud-ceilometer-agent-notification'
- type: string
-
-resources:
- CeilometerServiceBase:
- type: ../ceilometer-agent-notification.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Ceilometer Notification Agent pacemaker role.
- value:
- service_name: ceilometer_agent_notification
- monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerNotification}
- config_settings:
- map_merge:
- - get_attr: [CeilometerServiceBase, role_data, config_settings]
- - ceilometer::agent::notification::manage_service: false
- ceilometer::agent::notification::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::ceilometer::agent::notification
diff --git a/puppet/services/pacemaker/ceilometer-collector.yaml b/puppet/services/pacemaker/ceilometer-collector.yaml
deleted file mode 100644
index 4c919515..00000000
--- a/puppet/services/pacemaker/ceilometer-collector.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Ceilometer Collector service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- MonitoringSubscriptionCeilometerCollector:
- default: 'overcloud-ceilometer-collector'
- type: string
-
-resources:
- CeilometerServiceBase:
- type: ../ceilometer-collector.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Ceilometer Collector pacemaker role.
- value:
- service_name: ceilometer_collector
- monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCollector}
- config_settings:
- map_merge:
- - get_attr: [CeilometerServiceBase, role_data, config_settings]
- - ceilometer::collector::manage_service: false
- ceilometer::collector::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::ceilometer::collector
diff --git a/puppet/services/pacemaker/gnocchi-api.yaml b/puppet/services/pacemaker/ceph-rbdmirror.yaml
index 6a9161fa..7686028d 100644
--- a/puppet/services/pacemaker/gnocchi-api.yaml
+++ b/puppet/services/pacemaker/ceph-rbdmirror.yaml
@@ -1,7 +1,7 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
- Gnocchi service configured with Puppet
+ Ceph RBD mirror service.
parameters:
ServiceNetMap:
@@ -18,13 +18,13 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- MonitoringSubscriptionGnocchiApi:
- default: 'overcloud-gnocchi-api'
+ CephClientUserName:
+ default: openstack
type: string
resources:
- GnocchiServiceBase:
- type: ../gnocchi-api.yaml
+ CephBase:
+ type: ../ceph-base.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
@@ -32,14 +32,16 @@ resources:
outputs:
role_data:
- description: Role data for the Gnocchi role.
+  description: Role data for the Ceph RBD mirror service.
value:
- service_name: gnocchi_api
- monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiApi}
+ service_name: ceph_rbdmirror
config_settings:
map_merge:
- - get_attr: [GnocchiServiceBase, role_data, config_settings]
- - gnocchi::metricd::manage_service: false
- gnocchi::metricd::enabled: false
+ - get_attr: [CephBase, role_data, config_settings]
+ - tripleo::profile::pacemaker::ceph::rbdmirror::client_name: {get_param: CephClientUserName}
+ tripleo.ceph_rbdmirror.firewall_rules:
+ '113 ceph_rbdmirror':
+ dport:
+ - '6800-7300'
step_config: |
- include ::tripleo::profile::pacemaker::gnocchi::api
+    include ::tripleo::profile::pacemaker::ceph::rbdmirror
\ No newline at end of file
diff --git a/puppet/services/pacemaker/cinder-api.yaml b/puppet/services/pacemaker/cinder-api.yaml
deleted file mode 100644
index 6823789e..00000000
--- a/puppet/services/pacemaker/cinder-api.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Cinder API service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- CinderApiBase:
- type: ../cinder-api.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Cinder API role.
- value:
- service_name: cinder_api
- monitoring_subscription: {get_attr: [CinderApiBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [CinderApiBase, role_data, logging_source]}
- logging_groups: {get_attr: [CinderApiBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [CinderApiBase, role_data, config_settings]
- - cinder::api::manage_service: false
- cinder::api::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::cinder::api
diff --git a/puppet/services/pacemaker/cinder-backup.yaml b/puppet/services/pacemaker/cinder-backup.yaml
index 2ebc7680..e75ac973 100644
--- a/puppet/services/pacemaker/cinder-backup.yaml
+++ b/puppet/services/pacemaker/cinder-backup.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Cinder Backup service with Pacemaker configured with Puppet
diff --git a/puppet/services/pacemaker/cinder-scheduler.yaml b/puppet/services/pacemaker/cinder-scheduler.yaml
deleted file mode 100644
index 15e44be2..00000000
--- a/puppet/services/pacemaker/cinder-scheduler.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Cinder Scheduler service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- CinderSchedulerBase:
- type: ../cinder-scheduler.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Cinder Scheduler role.
- value:
- service_name: cinder_scheduler
- monitoring_subscription: {get_attr: [CinderSchedulerBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [CinderSchedulerBase, role_data, logging_source]}
- logging_groups: {get_attr: [CinderSchedulerBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [CinderSchedulerBase, role_data, config_settings]
- - cinder::scheduler::manage_service: false
- cinder::scheduler::enabled: false
- step_config:
- include ::tripleo::profile::pacemaker::cinder::scheduler
diff --git a/puppet/services/pacemaker/cinder-volume.yaml b/puppet/services/pacemaker/cinder-volume.yaml
index d91a0181..bef47a57 100644
--- a/puppet/services/pacemaker/cinder-volume.yaml
+++ b/puppet/services/pacemaker/cinder-volume.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Cinder Volume service with Pacemaker configured with Puppet
diff --git a/puppet/services/pacemaker/database/mongodb.yaml b/puppet/services/pacemaker/database/mongodb.yaml
deleted file mode 100644
index 982b6064..00000000
--- a/puppet/services/pacemaker/database/mongodb.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- MongoDb service deployment using puppet
-
-parameters:
- #Parameters not used EndpointMap
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
- MongoDbBase:
- type: ../../database/mongodb.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Service mongodb using composable services.
- value:
- service_name: mongodb
- config_settings:
- map_merge:
- - get_attr: [MongoDbBase, role_data, config_settings]
- - tripleo::profile::pacemaker::database::mongodb::mongodb_replset: {get_attr: [MongoDbBase, aux_parameters, rplset_name]}
- mongodb::server::service_manage: False
- step_config: |
- include ::tripleo::profile::pacemaker::database::mongodb
diff --git a/puppet/services/pacemaker/database/mysql.yaml b/puppet/services/pacemaker/database/mysql.yaml
index 7deaf0ca..93bf5967 100644
--- a/puppet/services/pacemaker/database/mysql.yaml
+++ b/puppet/services/pacemaker/database/mysql.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
MySQL with Pacemaker service deployment using puppet
@@ -40,7 +40,7 @@ outputs:
- tripleo::profile::pacemaker::database::mysql::bind_address:
str_replace:
template:
- '"%{::fqdn_$NETWORK}"'
+ "%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
# NOTE: bind IP is found in Heat replacing the network name with the
@@ -53,3 +53,9 @@ outputs:
get_param: [ServiceNetMap, MysqlNetwork]
step_config: |
include ::tripleo::profile::pacemaker::database::mysql
+ metadata_settings:
+ get_attr: [MysqlBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: Check for galera root password
+ tags: step0
+ file: path=/root/.my.cnf state=file
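Note: the upgrade task uses Ansible's key=value shorthand; expanded into long form it is equivalent to the task below (illustrative only, assuming root's .my.cnf carries the galera credentials the check relies on):

    - name: Check for galera root password
      tags: step0
      file:
        path: /root/.my.cnf
        state: file
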
diff --git a/puppet/services/pacemaker/database/redis.yaml b/puppet/services/pacemaker/database/redis.yaml
index 196754eb..e702d28b 100644
--- a/puppet/services/pacemaker/database/redis.yaml
+++ b/puppet/services/pacemaker/database/redis.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Redis service configured with Puppet
diff --git a/puppet/services/pacemaker/glance-api.yaml b/puppet/services/pacemaker/glance-api.yaml
deleted file mode 100644
index 20a439f6..00000000
--- a/puppet/services/pacemaker/glance-api.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Glance API service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- GlanceFilePcmkDevice:
- default: ''
- description: >
- An exported storage device that should be mounted by Pacemaker
- as Glance storage. Effective when GlanceFilePcmkManage is true.
- type: string
- GlanceFilePcmkFstype:
- default: 'nfs'
- description: >
- Filesystem type for Pacemaker mount used as Glance storage.
- Effective when GlanceFilePcmkManage is true.
- type: string
- GlanceFilePcmkManage:
- default: false
- description: >
- Whether to make Glance file backend a mount managed by Pacemaker.
- Effective when GlanceBackend is 'file'.
- type: boolean
- GlanceFilePcmkOptions:
- default: ''
- description: >
- Mount options for Pacemaker mount used as Glance storage.
- Effective when GlanceFilePcmkManage is true.
- type: string
-
-resources:
-
- GlanceApiBase:
- type: ../glance-api.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Glance role.
- value:
- service_name: glance_api
- monitoring_subscription: {get_attr: [GlanceApiBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [GlanceApiBase, role_data, logging_source]}
- logging_groups: {get_attr: [GlanceApiBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [GlanceApiBase, role_data, config_settings]
- - glance_file_pcmk_device: {get_param: GlanceFilePcmkDevice}
- glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype}
- glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage}
- glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions}
- glance_file_pcmk_directory: '/var/lib/glance/images'
- glance::api::manage_service: false
- glance::api::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::glance
diff --git a/puppet/services/pacemaker/glance-registry.yaml b/puppet/services/pacemaker/glance-registry.yaml
deleted file mode 100644
index 41f89fdd..00000000
--- a/puppet/services/pacemaker/glance-registry.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Glance Registry service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- GlanceRegistryBase:
- type: ../glance-registry.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Glance role.
- value:
- service_name: glance_registry
- monitoring_subscription: {get_attr: [GlanceRegistryBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [GlanceRegistryBase, role_data, logging_source]}
- logging_groups: {get_attr: [GlanceRegistryBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [GlanceRegistryBase, role_data, config_settings]
- - glance::registry::manage_service: false
- glance::registry::enabled: false
- # No puppet manifests since glance-registry is included in
- # ::tripleo::profile::pacemaker::glance which is maintained alongside of
- # pacemaker/glance-api.yaml.
- step_config:
diff --git a/puppet/services/pacemaker/gnocchi-metricd.yaml b/puppet/services/pacemaker/gnocchi-metricd.yaml
deleted file mode 100644
index 0f36b5d5..00000000
--- a/puppet/services/pacemaker/gnocchi-metricd.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Gnocchi service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- MonitoringSubscriptionGnocchiMetricd:
- default: 'overcloud-gnocchi-metricd'
- type: string
-
-resources:
- GnocchiServiceBase:
- type: ../gnocchi-metricd.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Gnocchi role.
- value:
- service_name: gnocchi_metricd
- monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiMetricd}
- config_settings:
- map_merge:
- - get_attr: [GnocchiServiceBase, role_data, config_settings]
- - gnocchi::metricd::manage_service: false
- gnocchi::metricd::enabled: false
- tripleo::profile::pacemaker::gnocchi::gnocchi_indexer_backend: {get_attr: [GnocchiServiceBase, aux_parameters, gnocchi_indexer_backend]}
-
- step_config: |
- include ::tripleo::profile::pacemaker::gnocchi::metricd
diff --git a/puppet/services/pacemaker/gnocchi-statsd.yaml b/puppet/services/pacemaker/gnocchi-statsd.yaml
deleted file mode 100644
index b9afc590..00000000
--- a/puppet/services/pacemaker/gnocchi-statsd.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Gnocchi service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- MonitoringSubscriptionGnocchiStatsd:
- default: 'overcloud-gnocchi-statsd'
- type: string
-
-resources:
- GnocchiServiceBase:
- type: ../gnocchi-statsd.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Gnocchi role.
- value:
- service_name: gnocchi_statsd
- monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiStatsd}
- config_settings:
- map_merge:
- - get_attr: [GnocchiServiceBase, role_data, config_settings]
- - gnocchi::statsd::manage_service: false
- gnocchi::statsd::enabled: false
- tripleo::profile::pacemaker::gnocchi::gnocchi_indexer_backend: {get_attr: [GnocchiServiceBase, aux_parameters, gnocchi_indexer_backend]}
- step_config: |
- include ::tripleo::profile::pacemaker::gnocchi::statsd
diff --git a/puppet/services/pacemaker/haproxy.yaml b/puppet/services/pacemaker/haproxy.yaml
index 52104a71..598deaef 100644
--- a/puppet/services/pacemaker/haproxy.yaml
+++ b/puppet/services/pacemaker/haproxy.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
HAproxy service with Pacemaker configured with Puppet
@@ -38,7 +38,7 @@ outputs:
- get_attr: [LoadbalancerServiceBase, role_data, config_settings]
- tripleo::haproxy::haproxy_service_manage: false
tripleo::haproxy::mysql_clustercheck: true
- enable_keepalived: false
- tripleo::haproxy::keepalived: false
step_config: |
include ::tripleo::profile::pacemaker::haproxy
+ metadata_settings:
+ get_attr: [LoadbalancerServiceBase, role_data, metadata_settings]
diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml
deleted file mode 100644
index dd25905b..00000000
--- a/puppet/services/pacemaker/heat-api-cfn.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Openstack Heat CloudFormation API service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
- HeatApiCfnBase:
- type: ../heat-api-cfn.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Heat CloudFormation API role.
- value:
- service_name: heat_api_cfn
- monitoring_subscription: {get_attr: [HeatApiCfnBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [HeatApiCfnBase, role_data, logging_source]}
- logging_groups: {get_attr: [HeatApiCfnBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [HeatApiCfnBase, role_data, config_settings]
- - heat::api_cfn::manage_service: false
- heat::api_cfn::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::heat::api_cfn
diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
deleted file mode 100644
index 18d2a0d5..00000000
--- a/puppet/services/pacemaker/heat-api-cloudwatch.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Openstack Heat CloudWatch API service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
- HeatApiCloudwatchBase:
- type: ../heat-api-cloudwatch.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Heat Cloudwatch API role.
- value:
- service_name: heat_api_cloudwatch
- monitoring_subscription: {get_attr: [HeatApiCloudwatchBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [HeatApiCloudwatchBase, role_data, logging_source]}
- logging_groups: {get_attr: [HeatApiCloudwatchBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [HeatApiCloudwatchBase, role_data, config_settings]
- - heat::api_cloudwatch::manage_service: false
- heat::api_cloudwatch::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::heat::api_cloudwatch
diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml
deleted file mode 100644
index 43122cb0..00000000
--- a/puppet/services/pacemaker/heat-api.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Openstack Heat API service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
- HeatApiBase:
- type: ../heat-api.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Heat API role.
- value:
- service_name: heat_api
- monitoring_subscription: {get_attr: [HeatApiBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [HeatApiBase, role_data, logging_source]}
- logging_groups: {get_attr: [HeatApiBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [HeatApiBase, role_data, config_settings]
- - heat::api::manage_service: false
- heat::api::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::heat::api
diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml
deleted file mode 100644
index 54bfdad2..00000000
--- a/puppet/services/pacemaker/heat-engine.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Openstack Heat Engine service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
- HeatEngineBase:
- type: ../heat-engine.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-
-outputs:
- role_data:
- description: Role data for the Heat engine role.
- value:
- service_name: heat_engine
- monitoring_subscription: {get_attr: [HeatEngineBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [HeatEngineBase, role_data, logging_source]}
- logging_groups: {get_attr: [HeatEngineBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [HeatEngineBase, role_data, config_settings]
- - heat::engine::manage_service: false
- heat::engine::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::heat::engine
diff --git a/puppet/services/pacemaker/horizon.yaml b/puppet/services/pacemaker/horizon.yaml
deleted file mode 100644
index 18de23ae..00000000
--- a/puppet/services/pacemaker/horizon.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Horizon service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- HorizonBase:
- type: ../horizon.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Horizon role.
- value:
- service_name: horizon
- monitoring_subscription: {get_attr: [HorizonBase, role_data, monitoring_subscription]}
- config_settings:
- get_attr: [HorizonBase, role_data, config_settings]
- step_config: |
- include ::tripleo::profile::base::horizon
- include ::tripleo::profile::pacemaker::apache
diff --git a/puppet/services/pacemaker/keystone.yaml b/puppet/services/pacemaker/keystone.yaml
deleted file mode 100644
index 908b9bbd..00000000
--- a/puppet/services/pacemaker/keystone.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Keystone service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- KeystoneServiceBase:
- type: ../keystone.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Keystone pacemaker role.
- value:
- service_name: keystone
- monitoring_subscription: {get_attr: [KeystoneServiceBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [KeystoneServiceBase, role_data, logging_source]}
- logging_groups: {get_attr: [KeystoneServiceBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [KeystoneServiceBase, role_data, config_settings]
- - keystone::manage_service: false
- keystone::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::keystone
diff --git a/puppet/services/pacemaker/manila-share.yaml b/puppet/services/pacemaker/manila-share.yaml
index cabc31a0..ddc13df3 100644
--- a/puppet/services/pacemaker/manila-share.yaml
+++ b/puppet/services/pacemaker/manila-share.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
The manila-share service with Pacemaker configured with Puppet
diff --git a/puppet/services/pacemaker/memcached.yaml b/puppet/services/pacemaker/memcached.yaml
deleted file mode 100644
index 04b895b6..00000000
--- a/puppet/services/pacemaker/memcached.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Mecached service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- MemcachedServiceBase:
- type: ../memcached.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Memcached pacemaker role.
- value:
- service_name: memcached
- monitoring_subscription: {get_attr: [MemcachedServiceBase, role_data, monitoring_subscription]}
- config_settings:
- map_merge:
- - get_attr: [MemcachedServiceBase, role_data, config_settings]
- - memcached::service_manage: false
- step_config: |
- include ::tripleo::profile::pacemaker::memcached
diff --git a/puppet/services/pacemaker/neutron-dhcp.yaml b/puppet/services/pacemaker/neutron-dhcp.yaml
deleted file mode 100644
index 7fca73d6..00000000
--- a/puppet/services/pacemaker/neutron-dhcp.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron DHCP service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronDhcpBase:
- type: ../neutron-dhcp.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron DHCP role.
- value:
- service_name: neutron_dhcp
- monitoring_subscription: {get_attr: [NeutronDhcpBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NeutronDhcpBase, role_data, logging_source]}
- logging_groups: {get_attr: [NeutronDhcpBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NeutronDhcpBase, role_data, config_settings]
- - tripleo::profile::pacemaker::neutron::enable_dhcp: True
- neutron::agents::dhcp::enabled: false
- neutron::agents::dhcp::manage_service: false
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::dhcp
diff --git a/puppet/services/pacemaker/neutron-l3.yaml b/puppet/services/pacemaker/neutron-l3.yaml
deleted file mode 100644
index cdb87f50..00000000
--- a/puppet/services/pacemaker/neutron-l3.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron L3 service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronL3Base:
- type: ../neutron-l3.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron L3 role.
- value:
- service_name: neutron_l3
- monitoring_subscription: {get_attr: [NeutronL3Base, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NeutronL3Base, role_data, logging_source]}
- logging_groups: {get_attr: [NeutronL3Base, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NeutronL3Base, role_data, config_settings]
- - tripleo::profile::pacemaker::neutron::enable_l3: True
- neutron::agents::l3::enabled: false
- neutron::agents::l3::manage_service: false
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::l3
diff --git a/puppet/services/pacemaker/neutron-metadata.yaml b/puppet/services/pacemaker/neutron-metadata.yaml
deleted file mode 100644
index 49a31eb5..00000000
--- a/puppet/services/pacemaker/neutron-metadata.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron Metadata service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronMetadataBase:
- type: ../neutron-metadata.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron Metadata role.
- value:
- service_name: neutron_metadata
- monitoring_subscription: {get_attr: [NeutronMetadataBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NeutronMetadataBase, role_data, logging_source]}
- logging_groups: {get_attr: [NeutronMetadataBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NeutronMetadataBase, role_data, config_settings]
- - tripleo::profile::pacemaker::neutron::enable_metadata: True
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::metadata
diff --git a/puppet/services/pacemaker/neutron-midonet.yaml b/puppet/services/pacemaker/neutron-midonet.yaml
deleted file mode 100644
index fdd5dafb..00000000
--- a/puppet/services/pacemaker/neutron-midonet.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron Midonet with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronMidonetBase:
- type: ../neutron-midonet.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron Midonet plugin.
- value:
- service_name: neutron_midonet
- monitoring_subscription: {get_attr: [NeutronMidonetBase, role_data, monitoring_subscription]}
- config_settings:
- map_merge:
- - get_attr: [NeutronMidonetBase, role_data, config_settings]
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::plugins::midonet
diff --git a/puppet/services/pacemaker/neutron-ovs-agent.yaml b/puppet/services/pacemaker/neutron-ovs-agent.yaml
deleted file mode 100644
index a2bd7c83..00000000
--- a/puppet/services/pacemaker/neutron-ovs-agent.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron OVS agent with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronOvsBase:
- type: ../neutron-ovs-agent.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron OVS agent service.
- value:
- service_name: neutron_ovs_agent
- monitoring_subscription: {get_attr: [NeutronOvsBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NeutronOvsBase, role_data, logging_source]}
- logging_groups: {get_attr: [NeutronOvsBase, role_data, logging_groups]}
- config_settings:
- get_attr: [NeutronOvsBase, role_data, config_settings]
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::ovs
diff --git a/puppet/services/pacemaker/neutron-plugin-ml2.yaml b/puppet/services/pacemaker/neutron-plugin-ml2.yaml
deleted file mode 100644
index 234f116e..00000000
--- a/puppet/services/pacemaker/neutron-plugin-ml2.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron ML2 Plugin with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronMl2Base:
- type: ../neutron-plugin-ml2.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron ML2 plugin.
- value:
- service_name: neutron_plugin_ml2
- config_settings:
- map_merge:
- - get_attr: [NeutronMl2Base, role_data, config_settings]
- - neutron::agents::ml2::ovs::enabled: false
- neutron::agents::ml2::ovs::manage_service: false
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::plugins::ml2
diff --git a/puppet/services/pacemaker/neutron-plugin-nuage.yaml b/puppet/services/pacemaker/neutron-plugin-nuage.yaml
deleted file mode 100644
index 9fca2cc3..00000000
--- a/puppet/services/pacemaker/neutron-plugin-nuage.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron Nuage Plugin with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronPluginNuageBase:
- type: ../neutron-plugin-nuage.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron Nuage plugin.
- value:
- service_name: neutron_plugin_nuage
- config_settings:
- map_merge:
- - get_attr: [NeutronPluginNuageBase, role_data, config_settings]
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::plugins::nuage
diff --git a/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml b/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml
deleted file mode 100644
index 80d6ed92..00000000
--- a/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron OpenContrail Plugin with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronPluginOpenContrail:
- type: ../neutron-plugin-nuage.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron OpenContrail plugin.
- value:
- service_name: neutron_plugin_opencontrail
- config_settings:
- map_merge:
- - get_attr: [NeutronPluginOpenContrail, role_data, config_settings]
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::plugins::opencontrail
diff --git a/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml b/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml
deleted file mode 100644
index 5dd4e588..00000000
--- a/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron PLUMgrid Plugin with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronPluginPlumgridBase:
- type: ../neutron-plugin-ml2.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron PLUMgrid plugin.
- value:
- service_name: neutron_plugin_plumgrid
- config_settings:
- map_merge:
- - get_attr: [NeutronPluginPlumgridBase, role_data, config_settings]
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::plugins::plumgrid
diff --git a/puppet/services/pacemaker/neutron-server.yaml b/puppet/services/pacemaker/neutron-server.yaml
deleted file mode 100644
index 33bc2d99..00000000
--- a/puppet/services/pacemaker/neutron-server.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron Server with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- NeutronL3HA:
- default: true
- description: Whether to enable HA for virtual routers
- type: boolean
-
-resources:
-
- NeutronServerBase:
- type: ../neutron-server.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron Server.
- value:
- service_name: neutron_server
- monitoring_subscription: {get_attr: [NeutronServerBase, role_data, monitoring_subscription]}
- config_settings:
- map_merge:
- - get_attr: [NeutronServerBase, role_data, config_settings]
- - neutron::server::enabled: false
- neutron::server::manage_service: false
- neutron::server::l3_ha: {get_param: NeutronL3HA}
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::server
diff --git a/puppet/services/pacemaker/nova-api.yaml b/puppet/services/pacemaker/nova-api.yaml
deleted file mode 100644
index b86e438a..00000000
--- a/puppet/services/pacemaker/nova-api.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Nova API service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NovaApiBase:
- type: ../nova-api.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Nova API role.
- value:
- service_name: nova_api
- monitoring_subscription: {get_attr: [NovaApiBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NovaApiBase, role_data, logging_source]}
- logging_groups: {get_attr: [NovaApiBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NovaApiBase, role_data, config_settings]
- - nova::api::manage_service: false
- nova::api::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::nova::api
diff --git a/puppet/services/pacemaker/nova-conductor.yaml b/puppet/services/pacemaker/nova-conductor.yaml
deleted file mode 100644
index a0a766ec..00000000
--- a/puppet/services/pacemaker/nova-conductor.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Nova Conductor service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NovaConductorBase:
- type: ../nova-conductor.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Nova Conductor role.
- value:
- service_name: nova_conductor
- monitoring_subscription: {get_attr: [NovaConductorBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NovaConductorBase, role_data, logging_source]}
- logging_groups: {get_attr: [NovaConductorBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NovaConductorBase, role_data, config_settings]
- - nova::conductor::manage_service: false
- nova::conductor::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::nova::conductor
diff --git a/puppet/services/pacemaker/nova-consoleauth.yaml b/puppet/services/pacemaker/nova-consoleauth.yaml
deleted file mode 100644
index 5d51eb47..00000000
--- a/puppet/services/pacemaker/nova-consoleauth.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Nova Consoleauth service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NovaConsoleauthBase:
- type: ../nova-consoleauth.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Nova Consoleauth role.
- value:
- service_name: nova_consoleauth
- monitoring_subscription: {get_attr: [NovaConsoleauthBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NovaConsoleauthBase, role_data, logging_source]}
- logging_groups: {get_attr: [NovaConsoleauthBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NovaConsoleauthBase, role_data, config_settings]
- - nova::consoleauth::manage_service: false
- nova::consoleauth::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::nova::consoleauth
diff --git a/puppet/services/pacemaker/nova-scheduler.yaml b/puppet/services/pacemaker/nova-scheduler.yaml
deleted file mode 100644
index 8828ee11..00000000
--- a/puppet/services/pacemaker/nova-scheduler.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Nova Scheduler service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NovaSchedulerBase:
- type: ../nova-scheduler.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Nova Scheduler role.
- value:
- service_name: nova_scheduler
- monitoring_subscription: {get_attr: [NovaSchedulerBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NovaSchedulerBase, role_data, logging_source]}
- logging_groups: {get_attr: [NovaSchedulerBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NovaSchedulerBase, role_data, config_settings]
- - nova::scheduler::manage_service: false
- nova::scheduler::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::nova::scheduler
diff --git a/puppet/services/pacemaker/nova-vnc-proxy.yaml b/puppet/services/pacemaker/nova-vnc-proxy.yaml
deleted file mode 100644
index ebe84a03..00000000
--- a/puppet/services/pacemaker/nova-vnc-proxy.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Nova Vncproxy service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NovaVncproxyBase:
- type: ../nova-vnc-proxy.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Nova Vncproxy role.
- value:
- service_name: nova_vnc_proxy
- monitoring_subscription: {get_attr: [NovaVncproxyBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NovaVncproxyBase, role_data, logging_source]}
- logging_groups: {get_attr: [NovaVncproxyBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NovaVncproxyBase, role_data, config_settings]
- - nova::vncproxy::manage_service: false
- nova::vncproxy::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::nova::vncproxy
diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml
index f3fa2d28..03c2c83f 100644
--- a/puppet/services/pacemaker/rabbitmq.yaml
+++ b/puppet/services/pacemaker/rabbitmq.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
RabbitMQ service with Pacemaker configured with Puppet
diff --git a/puppet/services/pacemaker/sahara-api.yaml b/puppet/services/pacemaker/sahara-api.yaml
deleted file mode 100644
index 3dfb7d94..00000000
--- a/puppet/services/pacemaker/sahara-api.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Sahara API service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- SaharaApiBase:
- type: ../sahara-api.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Sahara API role.
- value:
- service_name: sahara_api
- monitoring_subscription: {get_attr: [SaharaApiBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [SaharaApiBase, role_data, logging_source]}
- logging_groups: {get_attr: [SaharaApiBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [SaharaApiBase, role_data, config_settings]
- - sahara::service::api::manage_service: false
- sahara::service::api::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::sahara::api
diff --git a/puppet/services/pacemaker/sahara-engine.yaml b/puppet/services/pacemaker/sahara-engine.yaml
deleted file mode 100644
index a06d11b3..00000000
--- a/puppet/services/pacemaker/sahara-engine.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Sahara Engine service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- SaharaEngineBase:
- type: ../sahara-engine.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Sahara Engine role.
- value:
- service_name: sahara_engine
- monitoring_subscription: {get_attr: [SaharaEngineBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [SaharaEngineBase, role_data, logging_source]}
- logging_groups: {get_attr: [SaharaEngineBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [SaharaEngineBase, role_data, config_settings]
- - sahara::service::engine::manage_service: false
- sahara::service::engine::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::sahara::engine
diff --git a/puppet/services/pacemaker_remote.yaml b/puppet/services/pacemaker_remote.yaml
new file mode 100644
index 00000000..daee43e6
--- /dev/null
+++ b/puppet/services/pacemaker_remote.yaml
@@ -0,0 +1,57 @@
+heat_template_version: ocata
+
+description: >
+ Pacemaker remote service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ PacemakerRemoteAuthkey:
+ type: string
+ description: The authkey for the pacemaker remote service.
+ hidden: true
+ default: ''
+ MonitoringSubscriptionPacemakerRemote:
+ default: 'overcloud-pacemaker_remote'
+ type: string
+ PacemakerRemoteLoggingSource:
+ type: json
+ default:
+ tag: system.pacemaker_remote
+ path: /var/log/pacemaker.log
+ format: >-
+ /^(?<time>[^ ]*\s*[^ ]* [^ ]*)
+ \[(?<pid>[^ ]*)\]
+ (?<host>[^ ]*)
+ (?<message>.*)$/
+
+outputs:
+ role_data:
+ description: Role data for the Pacemaker remote role.
+ value:
+ service_name: pacemaker_remote
+ monitoring_subscription: {get_param: MonitoringSubscriptionPacemakerRemote}
+ logging_groups:
+ - haclient
+ logging_source: {get_param: PacemakerRemoteLoggingSource}
+ config_settings:
+ tripleo.pacemaker_remote.firewall_rules:
+ '130 pacemaker_remote tcp':
+ proto: 'tcp'
+ dport:
+ - 3121
+ tripleo::profile::base::pacemaker_remote::remote_authkey: {get_param: PacemakerRemoteAuthkey}
+ step_config: |
+ include ::tripleo::profile::base::pacemaker_remote
diff --git a/puppet/services/panko-api.yaml b/puppet/services/panko-api.yaml
new file mode 100644
index 00000000..4b74ad45
--- /dev/null
+++ b/puppet/services/panko-api.yaml
@@ -0,0 +1,86 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Panko API service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MonitoringSubscriptionPankoApi:
+ default: 'overcloud-ceilometer-panko-api'
+ type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+resources:
+ PankoBase:
+ type: ./panko-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
+outputs:
+ role_data:
+ description: Role data for the Panko API service.
+ value:
+ service_name: panko_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionPankoApi}
+ config_settings:
+ map_merge:
+ - get_attr: [PankoBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - panko::wsgi::apache::ssl: {get_param: EnableInternalTLS}
+ panko::wsgi::apache::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, PankoApiNetwork]}
+ panko::api::service_name: 'httpd'
+ panko::api::enable_proxy_headers_parsing: true
+ tripleo.panko_api.firewall_rules:
+ '140 panko-api':
+ dport:
+ - 8779
+ - 13779
+ panko::api::host:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, PankoApiNetwork]}
+ # NOTE: bind IP is found in Heat replacing the network name with the
+ # local node IP for the given network; replacement examples
+ # (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet - > IP/CIDR
+ panko::wsgi::apache::bind_host: {get_param: [ServiceNetMap, PankoApiNetwork]}
+ service_config_settings:
+ get_attr: [PankoBase, role_data, service_config_settings]
+ step_config: |
+ include tripleo::profile::base::panko::api
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
diff --git a/puppet/services/panko-base.yaml b/puppet/services/panko-base.yaml
new file mode 100644
index 00000000..6e25d796
--- /dev/null
+++ b/puppet/services/panko-base.yaml
@@ -0,0 +1,75 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Panko service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ PankoPassword:
+ description: The password for the panko services.
+ type: string
+ hidden: true
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+
+outputs:
+ role_data:
+ description: Role data for the Panko role.
+ value:
+ service_name: panko_base
+ config_settings:
+ panko::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://panko:'
+ - {get_param: PankoPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/panko'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
+ panko::debug: {get_param: Debug}
+ panko::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
+ panko::keystone::authtoken::project_name: 'service'
+ panko::keystone::authtoken::password: {get_param: PankoPassword}
+ panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ panko::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ panko::auth::auth_password: {get_param: PankoPassword}
+ panko::auth::auth_region: 'regionOne'
+ panko::auth::auth_tenant_name: 'service'
+ service_config_settings:
+ keystone:
+ panko::keystone::auth::public_url: {get_param: [EndpointMap, PankoPublic, uri]}
+ panko::keystone::auth::internal_url: {get_param: [EndpointMap, PankoInternal, uri]}
+ panko::keystone::auth::admin_url: {get_param: [EndpointMap, PankoAdmin, uri]}
+ panko::keystone::auth::password: {get_param: PankoPassword}
+ panko::keystone::auth::region: {get_param: KeystoneRegion}
+ panko::keystone::auth::tenant: 'service'
+ mysql:
+ panko::db::mysql::user: panko
+ panko::db::mysql::password: {get_param: PankoPassword}
+ panko::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ panko::db::mysql::dbname: panko
+ panko::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index 5387529d..2c4ccbc9 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
RabbitMQ service configured with Puppet
@@ -69,16 +69,20 @@ outputs:
rabbitmq::delete_guest_user: false
rabbitmq::wipe_db_on_cookie_change: true
rabbitmq::port: '5672'
+ rabbitmq::package_provider: yum
rabbitmq::package_source: undef
rabbitmq::repos_ensure: false
+ rabbitmq::tcp_keepalive: true
rabbitmq_environment:
+ NODE_PORT: ''
+ NODE_IP_ADDRESS: ''
RABBITMQ_NODENAME: "rabbit@%{::hostname}"
RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
+ 'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}"
rabbitmq_kernel_variables:
inet_dist_listen_min: '25672'
inet_dist_listen_max: '25672'
rabbitmq_config_variables:
- tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]'
cluster_partition_handling: 'pause_minority'
queue_master_locator: '<<"min-masters">>'
loopback_users: '[]'
@@ -95,8 +99,15 @@ outputs:
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- rabbitmq::node_ip_address: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
-
step_config: |
include ::tripleo::profile::base::rabbitmq
+ upgrade_tasks:
+ - name: Stop rabbitmq service
+ tags: step2
+ service: name=rabbitmq-server state=stopped
+ - name: Start rabbitmq service
+ tags: step4
+ service: name=rabbitmq-server state=started
+
diff --git a/puppet/services/sahara-api.yaml b/puppet/services/sahara-api.yaml
index 54e63df4..8573ea81 100644
--- a/puppet/services/sahara-api.yaml
+++ b/puppet/services/sahara-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Sahara API service configured with Puppet
@@ -90,3 +90,7 @@ outputs:
sahara::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ upgrade_tasks:
+ - name: Stop sahara_api service
+ tags: step2
+ service: name=openstack-sahara-api state=stopped
diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml
index 5fc8ed61..b4307053 100644
--- a/puppet/services/sahara-base.yaml
+++ b/puppet/services/sahara-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Sahara base service. Shared for all Sahara services.
@@ -44,6 +44,10 @@ parameters:
type: string
default: ''
description: Set to True to enable debugging on all services.
+ SaharaPlugins:
+ default: ["ambari","cdh","mapr","vanilla","spark","storm"]
+ description: Sahara enabled plugin list
+ type: comma_delimited_list
outputs:
role_data:
@@ -60,6 +64,8 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/sahara'
+ - '?bind_address='
+ - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
sahara::rabbit_password: {get_param: RabbitPassword}
sahara::rabbit_user: {get_param: RabbitUserName}
sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
@@ -69,13 +75,7 @@ outputs:
sahara::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
sahara::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
sahara::use_neutron: true
- sahara::plugins:
- - ambari
- - cdh
- - mapr
- - vanilla
- - spark
- - storm
+ sahara::plugins: {get_param: SaharaPlugins}
sahara::rpc_backend: rabbit
sahara::admin_tenant_name: 'service'
sahara::db::database_db_max_retries: -1
diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml
index 287c1c05..987fe25b 100644
--- a/puppet/services/sahara-engine.yaml
+++ b/puppet/services/sahara-engine.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Sahara Engine service configured with Puppet
@@ -49,3 +49,10 @@ outputs:
- get_attr: [SaharaBase, role_data, config_settings]
step_config: |
include ::tripleo::profile::base::sahara::engine
+ upgrade_tasks:
+ - name: Stop sahara_engine service
+ tags: step2
+ service: name=openstack-sahara-engine state=stopped
+ - name: Sync sahara_engine DB
+ tags: step5
+ command: sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
index 176fd235..80da5352 100644
--- a/puppet/services/services.yaml
+++ b/puppet/services/services.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Utility stack to convert an array of services into a set of combined
@@ -42,6 +42,11 @@ resources:
LoggingConfiguration:
type: OS::TripleO::LoggingConfiguration
+ ServiceServerMetadataHook:
+ type: OS::TripleO::ServiceServerMetadataHook
+ properties:
+ RoleData: {get_attr: [ServiceChain, role_data]}
+
outputs:
role_data:
description: Combined Role data for this set of services.
@@ -54,8 +59,8 @@ outputs:
data: {s_names: {get_attr: [ServiceChain, role_data, service_name]}}
monitoring_subscriptions:
yaql:
- expression: list($.data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
- data: {get_attr: [ServiceChain, role_data]}
+ expression: list($.data.role_data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
logging_sources:
# Transform the individual logging_source configuration from
# each service in the chain into a global list, adding some
@@ -78,8 +83,9 @@ outputs:
sources:
- {get_attr: [LoggingConfiguration, LoggingDefaultSources]}
- yaql:
- expression: list($.data.where($ != null).select($.get('logging_source')).where($ != null))
- data: {get_attr: [ServiceChain, role_data]}
+ expression: list($.data.role_data.where($ != null).select($.get('logging_source')).where($ != null))
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
+
- {get_attr: [LoggingConfiguration, LoggingExtraSources]}
default_format: {get_attr: [LoggingConfiguration, LoggingDefaultFormat]}
pos_file_path: {get_attr: [LoggingConfiguration, LoggingPosFilePath]}
@@ -93,17 +99,28 @@ outputs:
groups:
- [{get_attr: [LoggingConfiguration, LoggingDefaultGroups]}]
- yaql:
- expression: list($.data.where($ != null).select($.get('logging_groups')).where($ != null))
- data: {get_attr: [ServiceChain, role_data]}
+ expression: list($.data.role_data.where($ != null).select($.get('logging_groups')).where($ != null))
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
- [{get_attr: [LoggingConfiguration, LoggingExtraGroups]}]
config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
global_config_settings:
map_merge:
yaql:
- expression: list($.data.where($ != null).select($.get('global_config_settings')).where($ != null))
- data: {get_attr: [ServiceChain, role_data]}
+ expression: list($.data.role_data.where($ != null).select($.get('global_config_settings')).where($ != null))
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
service_config_settings:
yaql:
- expression: $.data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
- data: {get_attr: [ServiceChain, role_data]}
+ expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]}
+ upgrade_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
+ expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+ upgrade_batch_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
+ expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+ service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
diff --git a/puppet/services/snmp.yaml b/puppet/services/snmp.yaml
index 4d01632d..fd6ed818 100644
--- a/puppet/services/snmp.yaml
+++ b/puppet/services/snmp.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
SNMP client configured with Puppet, to facilitate Ceilometer Hardware
@@ -43,3 +43,7 @@ outputs:
proto: 'udp'
step_config: |
include ::tripleo::profile::base::snmp
+ upgrade_tasks:
+ - name: Stop snmp service
+ tags: step2
+ service: name=snmpd state=stopped
diff --git a/puppet/services/pacemaker/core.yaml b/puppet/services/sshd.yaml
index 9eca1de3..41e144a0 100644
--- a/puppet/services/pacemaker/core.yaml
+++ b/puppet/services/sshd.yaml
@@ -1,7 +1,7 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
- OpenStack Core (fake) service with Pacemaker configured with Puppet.
+ Configure sshd_config
parameters:
ServiceNetMap:
@@ -18,12 +18,17 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ BannerText:
+ default: ''
+ description: Configures Banner text in sshd_config
+ type: string
outputs:
role_data:
- description: Role data for the Core role.
+  description: Role data for the sshd service
value:
- service_name: core
- config_settings: {}
+ service_name: sshd
+ config_settings:
+ BannerText: {get_param: BannerText}
step_config: |
- include ::tripleo::profile::pacemaker::core \ No newline at end of file
+ include ::tripleo::profile::base::sshd
diff --git a/puppet/services/swift-base.yaml b/puppet/services/swift-base.yaml
index 741adb4d..6046d5e8 100644
--- a/puppet/services/swift-base.yaml
+++ b/puppet/services/swift-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Swift Proxy service configured with Puppet
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index de8daea5..31a4c178 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Swift Proxy service configured with Puppet
@@ -49,6 +49,24 @@ parameters:
default: guest
description: The username for RabbitMQ
type: string
+ SwiftCeilometerPipelineEnabled:
+ description: Set to False to disable the swift proxy ceilometer pipeline.
+ default: True
+ type: boolean
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+
+conditions:
+
+ ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, True]}
resources:
SwiftBase:
@@ -74,9 +92,13 @@ outputs:
swift::proxy::authtoken::project_name: 'service'
swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
swift::proxy::workers: {get_param: SwiftWorkers}
- swift::proxy::ceilometer::rabbit_host: {get_param: [ServiceNetMap, RabbitmqNetwork]}
swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
+ swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
+ swift::proxy::ceilometer::nonblocking_notify: true
+ tripleo::profile::base::swift::proxy::rabbit_port: {get_param: RabbitClientPort}
+ tripleo::profile::base::swift::proxy::ceilometer_messaging_use_ssl: {get_param: RabbitClientUseSSL}
+ tripleo::profile::base::swift::proxy::ceilometer_enabled: {get_param: SwiftCeilometerPipelineEnabled}
tripleo.swift_proxy.firewall_rules:
'122 swift proxy':
dport:
@@ -88,21 +110,34 @@ outputs:
- ResellerAdmin
swift::proxy::versioned_writes::allow_versioned_writes: true
swift::proxy::pipeline:
- - 'ceilometer'
- - 'catch_errors'
- - 'healthcheck'
- - 'proxy-logging'
- - 'cache'
- - 'ratelimit'
- - 'bulk'
- - 'tempurl'
- - 'formpost'
- - 'authtoken'
- - 'keystone'
- - 'staticweb'
- - 'versioned_writes'
- - 'proxy-logging'
- - 'proxy-server'
+ yaql:
+ expression: $.data.pipeline.where($ != '')
+ data:
+ pipeline:
+ - 'catch_errors'
+ - 'healthcheck'
+ - 'proxy-logging'
+ - 'cache'
+ - 'ratelimit'
+ - 'bulk'
+ - 'tempurl'
+ - 'formpost'
+ - 'authtoken'
+ - 'keystone'
+ - 'staticweb'
+ - 'copy'
+ - 'container_quotas'
+ - 'account_quotas'
+ - 'slo'
+ - 'dlo'
+ - 'versioned_writes'
+ -
+ if:
+ - ceilometer_pipeline_enabled
+ - 'ceilometer'
+ - ''
+ - 'proxy-logging'
+ - 'proxy-server'
swift::proxy::account_autocreate: true
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
@@ -129,3 +164,7 @@ outputs:
- admin
- swiftoperator
- ResellerAdmin
+ upgrade_tasks:
+ - name: Stop swift_proxy service
+ tags: step2
+ service: name=openstack-swift-proxy state=stopped
diff --git a/puppet/services/swift-ringbuilder.yaml b/puppet/services/swift-ringbuilder.yaml
index 8ed4e9f4..2e3c818f 100644
--- a/puppet/services/swift-ringbuilder.yaml
+++ b/puppet/services/swift-ringbuilder.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Swift Ringbuilder
@@ -38,7 +38,20 @@ parameters:
default: {}
description: 'A hash of additional raw devices to use as Swift backend (eg. {sdb: {}})'
type: json
+ SwiftUseLocalDir:
+ default: true
+ description: 'Use a local directory for Swift storage services when building rings'
+ type: boolean
+conditions:
+ swift_use_local_dir:
+ and:
+ - equals:
+ - get_param: SwiftUseLocalDir
+ - true
+ - equals:
+ - get_param: SwiftRawDisks
+ - {}
outputs:
role_data:
@@ -56,7 +69,7 @@ outputs:
expression: $.data.raw_disk_lists.flatten()
data:
raw_disk_lists:
- - [':%PORT%/d1']
+ - {if: [swift_use_local_dir, [':%PORT%/d1'], []]}
- repeat:
template: ':%PORT%/DEVICE'
for_each:
diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml
index 7fbb8d90..247b23ff 100644
--- a/puppet/services/swift-storage.yaml
+++ b/puppet/services/swift-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Swift Storage service configured with Puppet
@@ -56,6 +56,17 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+conditions:
+ swift_mount_check:
+ or:
+ - equals:
+ - get_param: SwiftMountCheck
+ - true
+ - not:
+ equals:
+ - get_param: SwiftRawDisks
+ - {}
+
outputs:
role_data:
description: Role data for the Swift Proxy role.
@@ -65,7 +76,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [SwiftBase, role_data, config_settings]
- - swift::storage::all::mount_check: {get_param: SwiftMountCheck}
+ - swift::storage::all::mount_check: {if: [swift_mount_check, true, false]}
tripleo::profile::base::swift::storage::enable_swift_storage: {get_param: ControllerEnableSwiftStorage}
tripleo.swift_storage.firewall_rules:
'123 swift storage':
@@ -86,7 +97,24 @@ outputs:
swift::storage::all::account_pipeline:
- healthcheck
- account-server
- swift::storage::disks: {get_param: SwiftRawDisks}
+ swift::storage::disks::args: {get_param: SwiftRawDisks}
swift::storage::all::storage_local_net_ip: {get_param: [ServiceNetMap, SwiftStorageNetwork]}
step_config: |
include ::tripleo::profile::base::swift::storage
+ upgrade_tasks:
+ - name: Stop swift storage services
+ tags: step2
+ service: name={{ item }} state=stopped
+ with_items:
+ - openstack-swift-account-auditor
+ - openstack-swift-account-reaper
+ - openstack-swift-account-replicator
+ - openstack-swift-account
+ - openstack-swift-container-auditor
+ - openstack-swift-container-replicator
+ - openstack-swift-container-updater
+ - openstack-swift-container
+ - openstack-swift-object-auditor
+ - openstack-swift-object-replicator
+ - openstack-swift-object-updater
+ - openstack-swift-object
diff --git a/puppet/services/time/ntp.yaml b/puppet/services/time/ntp.yaml
index eb5237fe..88ab90cb 100644
--- a/puppet/services/time/ntp.yaml
+++ b/puppet/services/time/ntp.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
NTP service deployment using puppet, this YAML file
@@ -38,4 +38,4 @@ outputs:
dport: 123
proto: udp
step_config: |
- include ::ntp
+ include ::tripleo::profile::base::time::ntp
diff --git a/puppet/services/time/timezone.yaml b/puppet/services/time/timezone.yaml
index 384b5191..5d0eeae3 100644
--- a/puppet/services/time/timezone.yaml
+++ b/puppet/services/time/timezone.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Composable Timezone service
diff --git a/puppet/services/tripleo-firewall.yaml b/puppet/services/tripleo-firewall.yaml
index 7eb39905..67e14d9c 100644
--- a/puppet/services/tripleo-firewall.yaml
+++ b/puppet/services/tripleo-firewall.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
TripleO Firewall settings
diff --git a/puppet/services/tripleo-packages.yaml b/puppet/services/tripleo-packages.yaml
index 124f5fe8..737be829 100644
--- a/puppet/services/tripleo-packages.yaml
+++ b/puppet/services/tripleo-packages.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
TripleO Package installation settings
@@ -32,3 +32,15 @@ outputs:
tripleo::packages::enable_install: {get_param: EnablePackageInstall}
step_config: |
include ::tripleo::packages
+ upgrade_tasks:
+ - name: Check yum for rpm-python present
+ tags: step0
+ yum: "name=rpm-python state=present"
+ register: rpm_python_check
+ - name: Fail when rpm-python wasn't present
+ fail: msg="rpm-python package was not present before this run! Check environment before re-running"
+ when: rpm_python_check.changed != false
+ tags: step0
+ - name: Update all packages
+ tags: step3
+ yum: name=* state=latest
diff --git a/puppet/services/vip-hosts.yaml b/puppet/services/vip-hosts.yaml
deleted file mode 100644
index a9d757ee..00000000
--- a/puppet/services/vip-hosts.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- If the deployer doesn't have a DNS server for the overcloud nodes. This will
- populate the node-names and IPs for the VIPs of the overcloud.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-outputs:
- role_data:
- description: role data for the VIP hosts role
- value:
- service_name: vip_hosts
- config_settings:
- tripleo::vip_hosts::hosts_spec:
- external:
- name: "%{hiera('cloud_name_external')}"
- ip: "%{hiera('public_virtual_ip')}"
- ensure: present
- comment: FQDN of the external VIP
- internal_api:
- name: "%{hiera('cloud_name_internal_api')}"
- ip: "%{hiera('internal_api_virtual_ip')}"
- ensure: present
- comment: FQDN of the internal api VIP
- storage:
- name: "%{hiera('cloud_name_storage')}"
- ip: "%{hiera('storage_virtual_ip')}"
- ensure: present
- comment: FQDN of the storage VIP
- storage_mgmt:
- name: "%{hiera('cloud_name_storage_mgmt')}"
- ip: "%{hiera('storage_mgmt_virtual_ip')}"
- ensure: present
- comment: FQDN of the storage mgmt VIP
- ctlplane:
- name: "%{hiera('cloud_name_ctlplane')}"
- ip: "%{hiera('controller_virtual_ip')}"
- ensure: present
- comment: FQDN of the ctlplane VIP
- step_config: |
- include ::tripleo::vip_hosts
diff --git a/puppet/services/zaqar.yaml b/puppet/services/zaqar.yaml
new file mode 100644
index 00000000..0224ac13
--- /dev/null
+++ b/puppet/services/zaqar.yaml
@@ -0,0 +1,66 @@
+heat_template_version: ocata
+
+description: >
+  OpenStack Zaqar service configured with Puppet.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ ZaqarPassword:
+ description: The password for Zaqar
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+
+
+outputs:
+ role_data:
+    description: Role data for the Zaqar service.
+ value:
+ service_name: zaqar
+ config_settings:
+ zaqar::keystone::authtoken::password: {get_param: ZaqarPassword}
+ zaqar::keystone::authtoken::project_name: 'service'
+ zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ zaqar::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ zaqar::debug: {get_param: Debug}
+ zaqar::transport::websocket::bind: {get_param: [EndpointMap, ZaqarInternal, host]}
+ zaqar::transport::wsgi::bind: {get_param: [ServiceNetMap, ZaqarApiNetwork]}
+ zaqar::message_pipeline: 'zaqar.notification.notifier'
+ zaqar::unreliable: true
+ service_config_settings:
+ keystone:
+ zaqar::keystone::auth::password: {get_param: ZaqarPassword}
+ zaqar::keystone::auth::public_url: {get_param: [EndpointMap, ZaqarPublic, uri]}
+ zaqar::keystone::auth::admin_url: {get_param: [EndpointMap, ZaqarAdmin, uri]}
+ zaqar::keystone::auth::internal_url: {get_param: [EndpointMap, ZaqarInternal, uri]}
+ zaqar::keystone::auth::region: {get_param: KeystoneRegion}
+ zaqar::keystone::auth::tenant: 'service'
+ zaqar::keystone::auth_websocket::password: {get_param: ZaqarPassword}
+ zaqar::keystone::auth_websocket::public_url: {get_param: [EndpointMap, ZaqarWebSocketPublic, uri]}
+ zaqar::keystone::auth_websocket::admin_url: {get_param: [EndpointMap, ZaqarWebSocketAdmin, uri]}
+ zaqar::keystone::auth_websocket::internal_url: {get_param: [EndpointMap, ZaqarWebSocketInternal, uri]}
+ zaqar::keystone::auth_websocket::region: {get_param: KeystoneRegion}
+ zaqar::keystone::auth_websocket::tenant: 'service'
+
+ step_config: |
+ include ::tripleo::profile::base::zaqar
diff --git a/puppet/upgrade_config.yaml b/puppet/upgrade_config.yaml
new file mode 100644
index 00000000..c37cc033
--- /dev/null
+++ b/puppet/upgrade_config.yaml
@@ -0,0 +1,58 @@
+heat_template_version: ocata
+description: 'Upgrade via ansible by applying a step-related tag'
+
+parameters:
+ UpgradeStepConfig:
+ type: json
+ description: Config (ansible yaml) that will be used to step through the deployment.
+ default: ''
+
+ step:
+ type: string
+ description: Step number of the upgrade
+
+ SkipUpgradeConfigTags:
+ type: comma_delimited_list
+ description: Ansible tags to skip during upgrade, e.g validation skips pre-upgrade validations
+ default: []
+
+resources:
+
+ AnsibleConfig:
+ type: OS::Heat::Value
+ properties:
+ value:
+ str_replace:
+ template: CONFIG
+ params:
+ CONFIG:
+ - hosts: localhost
+ connection: local
+ tasks: {get_param: UpgradeStepConfig}
+
+ AnsibleUpgradeConfigImpl:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: ansible
+ options:
+ skip_tags:
+ list_join:
+ - ","
+ - {get_param: SkipUpgradeConfigTags}
+ tags:
+ str_replace:
+ template: "stepSTEP"
+ params:
+ STEP: {get_param: step}
+ modulepath: /usr/share/ansible-modules
+ inputs:
+ - name: role
+ config: {get_attr: [AnsibleConfig, value]}
+
+outputs:
+ OS::stack_id:
+ description: The software config which runs ansible with tags
+ value: {get_resource: AnsibleUpgradeConfigImpl}
+ upgrade_config:
+ description: The configuration file used for upgrade
+ value: {get_attr: [AnsibleConfig, value]}
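
The template above wraps a role's UpgradeStepConfig into a single local ansible play and selects tasks per step through a stepN tag, with SkipUpgradeConfigTags turned into a skip_tags list. A minimal sketch of the kind of tagged tasks it ends up running, modelled on the upgrade_tasks added to the service templates in this change (the service name is illustrative):

    # Illustrative tasks as collected from service templates; only the tasks
    # whose tag matches the current step are executed by the generated play.
    - name: Stop example API service
      tags: step2
      service: name=openstack-example-api state=stopped
    - name: Start example API service
      tags: step4
      service: name=openstack-example-api state=started
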
diff --git a/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml b/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml
new file mode 100644
index 00000000..069cbd23
--- /dev/null
+++ b/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml
@@ -0,0 +1,95 @@
+---
+prelude: >
+ 6.0.0 is the final release for Ocata.
+ It's the first release where release notes are added.
+features:
+ - Fujitsu Neutron plugin for FOS support. Users can deploy
+    Neutron with this plugin by using the
+ environments/neutron-ml2-fujitsu-fossw.yaml environment file.
+  - Expose InstanceDiscoveryMethod parameter to configure the Ceilometer
+    method used to discover instances running on compute nodes.
+    The default value is 'libvirt_metadata'. Allowed values are 'naive',
+    'libvirt_metadata' and 'workload_partitioning'.
+  - Make ServiceNetMap support custom network names.
+    Note that operators will still be expected to pass any ServiceNetMap
+    overrides with the "new" network name, e.g. whatever NetName specifies,
+    otherwise environment files could get very confusing.
+ - Nova Placement API support. As this new service is required, deploy it
+ by default in WSGI with Apache, like other API services.
+ - Cinder pass-through iSER backend support.
+ - etcd composable services, used by networking-vpp ML2 driver as the
+ messaging mechanism.
+  - Allow configuring cron parameters for the Cinder, Heat, Keystone and Nova
+    crontabs.
+  - Export NovaDefaultFloatingPool parameter to configure the default pool
+    of floating IP addresses available. Defaults to 'public' for backward
+    compatibility.
+ - Bump Heat Templates to 'ocata' version, to match Heat requirements.
+ - Configure OVS agent firewall driver only if NeutronOVSFirewallDriver
+ is set.
+ - Expose RbdDefaultFeatures parameter to configure the default features
+ enabled when creating a block device image.
+ Only applies to format '2' images. Set to '1' for Jewel clients using
+ older Ceph servers.
+ - Cinder HPELeftHandISCSIDriver backend support.
+  - Pacemaker no longer manages Ceilometer, Cinder API,
+    Cinder Scheduler, MongoDB, Glance, Gnocchi, Heat, Apache, Memcached,
+    Neutron, Nova and Sahara.
+  - Ceph MDS service support. The service can be enabled with the
+    environments/services/ceph-mds.yaml environment file.
+ - Expose HeatConvergenceEngine and HeatMaxResourcesPerStack parameters
+ to configure Heat.
+ - Add pre-network hook and example showing config-then-reboot.
+  - Expose LibvirtEnabledPerfEvents parameter in the Nova Compute service.
+    Defaults to an empty array.
+    This is a list of performance events that can be monitored.
+ - Increase libvirt/qemu.conf max_files to 32768 and max_processes to
+ 131072.
+ - Split OVN northd and ml2 plugin, so we can deploy OVNDBs and Northd
+ services on different nodes.
+ - Add hook to generate metadata from service profiles.
+ This is useful for nova vendordata plugins that can parse said metadata.
+ - Expose EventPipelinePublishers to Ceilometer and set the default to
+ 'notifier://?topic=alarm.all'.
+  - Add Panko service support. This service is not enabled by default; use
+    environments/services/enable-panko.yaml (see the sketch after these notes).
+ - Add EC2-API composable service support.
+upgrade:
+ - Update OpenDaylight deployment to use networking-odl v2 as a mechanism
+ driver.
+deprecations:
+  - Glance Registry service has been removed and Glance API v2 is now deployed
+    by default. Glance API v1 is no longer supported in TripleO.
+ - Remove CeilometerStoreEvents parameter, which has been removed
+ in Ceilometer.
+ - Ceilometer API service is deprecated and will be removed in a future
+ release. If you would like to disable it, use
+ environments/services/disable-ceilometer-api.yaml environment file.
+  - Remove deprecated OpenDaylight L2-only deployments.
+    Deploying ODL without L3 DVR is no longer supported.
+security:
+  - Disallow iframe embed in Horizon configuration to prevent the dashboard
+    from being embedded within an iframe and exposed to Cross-Frame Scripting
+    (XFS) vulnerabilities on legacy browsers.
+  - Allow management of enforce_password_check in Horizon's configuration to
+    display an 'Admin Password' field on the Change Password form to verify
+    that it is indeed the logged-in admin who wants to change the password.
+  - Allow management of disable_password_reveal in Horizon, to remove the
+    password reveal option.
+  - Enable the secure_proxy_ssl_header option in Horizon's configuration to
+    take the X-Forwarded-Proto header into account when forming URLs.
+fixes:
+ - Fixes `bug 1645898
+    <https://bugs.launchpad.net/tripleo/+bug/1645898>`__ so epmd is bound to
+    the right address, where RabbitMQ is also listening.
+ - Fixes `bug 1652184
+ <https://bugs.launchpad.net/tripleo/+bug/1652184>`__ so swap partitions
+ can be handled from an environment file thanks to AllNodesExtraConfig.
+  - Add retry to RHEL registration, useful when network outages occur during
+    registration.
+ - Fixes `bug 1651476
+ <https://bugs.launchpad.net/tripleo/+bug/1651476>`__ so firewall rules
+    are created for the OpenDaylight API service.
+ - Fixes `bug 1643487
+ <https://bugs.launchpad.net/tripleo/+bug/1643487>`__ to prevent source
+ address from binding to a VIP for database connection.
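
As referenced in the Panko note above, optional composable services are normally enabled through a small environment file that maps the service template into the resource registry. A minimal sketch of that pattern; the exact contents of environments/services/enable-panko.yaml are not shown in this change, so the mapping below is an assumption based on the panko-api.yaml template added here:

    # Illustrative environment file; pass it to the deploy command with -e.
    resource_registry:
      OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml
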
diff --git a/releasenotes/notes/composable-upgrades-d9ec7c634365e8e0.yaml b/releasenotes/notes/composable-upgrades-d9ec7c634365e8e0.yaml
new file mode 100644
index 00000000..55062b04
--- /dev/null
+++ b/releasenotes/notes/composable-upgrades-d9ec7c634365e8e0.yaml
@@ -0,0 +1,14 @@
+---
+features:
+ - |
+ Composable service plugins now support two additional sections,
+ upgrade_tasks and upgrade_batch_tasks. These can be used by service
+ template authors to define the required behavior on upgrade as ansible
+ tasks, for both upgrades that require downtime, and rolling upgrades.
+    See puppet/services/README.rst for more details; a sketch follows below.
+upgrade:
+ - |
+ Please refer to tripleo-docs for full details on the upgrade workflow
+ required for Newton to Ocata upgrades, as it's possible some steps are
+ different to previous releases:
+ http://docs.openstack.org/developer/tripleo-docs/post_deployment/upgrade.html
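
A minimal sketch of the two new sections in a service template's role_data output, modelled on the upgrade_tasks added to the sahara and swift templates in this change (the service name and batch task are illustrative):

    # Illustrative role_data fragment; tasks are plain ansible and are
    # selected per upgrade step via their stepN tag.
    outputs:
      role_data:
        value:
          service_name: example_api
          upgrade_tasks:
            - name: Stop example_api service
              tags: step2
              service: name=openstack-example-api state=stopped
          upgrade_batch_tasks:
            - name: Example task applied in rolling batches
              tags: step1
              debug: msg="runs on a batch of nodes at a time"
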
diff --git a/releasenotes/notes/deployed-servers-fd47f18204cea105.yaml b/releasenotes/notes/deployed-servers-fd47f18204cea105.yaml
new file mode 100644
index 00000000..d05b268c
--- /dev/null
+++ b/releasenotes/notes/deployed-servers-fd47f18204cea105.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - It is now possible to deploy with tripleo-heat-templates using servers that
+ are already provisioned with an operating system, and not necessarily
+ provisioned with Nova and Ironic. This feature is enabled by making use of
+ the environments/deployed-server-environment.yaml environment file. For
+ more information, see
+ http://docs.openstack.org/developer/tripleo-docs/advanced_deployment/deployed_server.html
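
A minimal sketch of what enabling the feature involves; the resource_registry override shown is an assumption about what environments/deployed-server-environment.yaml provides, based on the deployed-server templates in this tree:

    # Illustrative: remap the server resource so Heat drives pre-provisioned
    # hosts instead of Nova/Ironic-managed ones, then pass the file with -e.
    resource_registry:
      OS::TripleO::Server: ../deployed-server/deployed-server.yaml
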
diff --git a/releasenotes/notes/puppet-auditd-6504295e8c6c7a3b.yaml b/releasenotes/notes/puppet-auditd-6504295e8c6c7a3b.yaml
new file mode 100644
index 00000000..1949e4fe
--- /dev/null
+++ b/releasenotes/notes/puppet-auditd-6504295e8c6c7a3b.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+    Adds the ability to manage auditd.service and enter audit.rules via tripleo
+    heat templates. This in turn enforces an audit log of system events such
+    as system time changes, modifications to Discretionary Access Controls,
+    and failed login attempts.
+
+
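
A minimal sketch of the kind of configuration this enables; the AuditdRules parameter name and rule layout below are assumptions for illustration, not a verbatim copy of the shipped environment file:

    # Illustrative parameter_defaults adding a single audit rule.
    parameter_defaults:
      AuditdRules:
        'Record attempts to alter time through adjtimex':
          content: '-a always,exit -F arch=b64 -S adjtimex -k audit_time_rules'
          order: 1
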
diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/releasenotes/source/_static/.placeholder
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
new file mode 100644
index 00000000..8da995b0
--- /dev/null
+++ b/releasenotes/source/conf.py
@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'oslosphinx',
+ 'reno.sphinxext',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'tripleo-heat-templates Release Notes'
+copyright = u'2017, TripleO Developers'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = '6.0.0.0b3'
+# The short X.Y version.
+version = '6.0.0'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'tripleo-heat-templatesReleaseNotesdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ ('index', 'tripleo-heat-templatesReleaseNotes.tex', u'tripleo-heat-templates Release Notes Documentation',
+ u'2016, TripleO Developers', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'tripleo-heat-templatesreleasenotes', u'tripleo-heat-templates Release Notes Documentation',
+ [u'2016, TripleO Developers'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'tripleo-heat-templatesReleaseNotes', u'tripleo-heat-templates Release Notes Documentation',
+ u'2016, TripleO Developers', 'tripleo-heat-templatesReleaseNotes', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+# -- Options for Internationalization output ------------------------------
+locale_dirs = ['locale/']
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
new file mode 100644
index 00000000..9767dad2
--- /dev/null
+++ b/releasenotes/source/index.rst
@@ -0,0 +1,18 @@
+================================================
+Welcome to tripleo-heat-templates Release Notes!
+================================================
+
+Contents
+========
+
+.. toctree::
+ :maxdepth: 2
+
+ unreleased
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`search`
diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst
new file mode 100644
index 00000000..2334dd5c
--- /dev/null
+++ b/releasenotes/source/unreleased.rst
@@ -0,0 +1,5 @@
+==============================
+ Current Series Release Notes
+==============================
+
+ .. release-notes::
diff --git a/requirements.txt b/requirements.txt
index 4e46b891..057aa287 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1,6 @@
-pbr>=0.5.21,<1.0
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+pbr>=1.8 # Apache-2.0
+Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
+six>=1.9.0 # MIT
diff --git a/roles_data.yaml b/roles_data.yaml
index 86d0e4f5..b4291463 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -14,26 +14,33 @@
# defaults to '%stackname%-{{role.name.lower()}}-%index%'
# sets the default for {{role.name}}HostnameFormat parameter in overcloud.yaml
#
+# disable_constraints: (boolean) optional, whether to disable Nova and Glance
+# constraints for each role specified in the templates.
+#
+# upgrade_batch_size: (number) optional, batch size for upgrades where tasks
+# are specified by services to run in batches rather than on all nodes at
+# once. This defaults to 1, but larger batches may be specified here.
+#
# ServicesDefault: (list) optional default list of services to be deployed
# on the role, defaults to an empty list. Sets the default for the
# {{role.name}}Services parameter in overcloud.yaml
-- name: Controller
+- name: Controller # the 'primary' role goes first
CountDefault: 1
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephMds
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::CephRbdMirror
- OS::TripleO::Services::CephRgw
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
- - OS::TripleO::Services::Core
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- - OS::TripleO::Services::GlanceRegistry
- OS::TripleO::Services::HeatApi
- OS::TripleO::Services::HeatApiCfn
- OS::TripleO::Services::HeatApiCloudwatch
@@ -54,15 +61,18 @@
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::MongoDb
- OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
- OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::NovaConsoleauth
- OS::TripleO::Services::NovaVncProxy
+ - OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftProxy
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::CeilometerApi
- OS::TripleO::Services::CeilometerCollector
@@ -73,12 +83,12 @@
- OS::TripleO::Services::GnocchiApi
- OS::TripleO::Services::GnocchiMetricd
- OS::TripleO::Services::GnocchiStatsd
- - OS::Tripleo::Services::ManilaApi
- - OS::Tripleo::Services::ManilaScheduler
- - OS::Tripleo::Services::ManilaBackendGeneric
- - OS::Tripleo::Services::ManilaBackendNetapp
- - OS::Tripleo::Services::ManilaBackendCephFs
- - OS::Tripleo::Services::ManilaShare
+ - OS::TripleO::Services::ManilaApi
+ - OS::TripleO::Services::ManilaScheduler
+ - OS::TripleO::Services::ManilaBackendGeneric
+ - OS::TripleO::Services::ManilaBackendNetapp
+ - OS::TripleO::Services::ManilaBackendCephFs
+ - OS::TripleO::Services::ManilaShare
- OS::TripleO::Services::AodhApi
- OS::TripleO::Services::AodhEvaluator
- OS::TripleO::Services::AodhNotifier
@@ -94,11 +104,20 @@
- OS::TripleO::Services::OpenDaylightOvs
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
- - OS::TripleO::Services::VipHosts
+ - OS::TripleO::Services::BarbicanApi
+ - OS::TripleO::Services::PankoApi
+ - OS::TripleO::Services::Zaqar
+ - OS::TripleO::Services::OVNDBs
+ - OS::TripleO::Services::NeutronML2FujitsuCfab
+ - OS::TripleO::Services::NeutronML2FujitsuFossw
+ - OS::TripleO::Services::CinderHPELeftHandISCSI
+ - OS::TripleO::Services::Etcd
+ - OS::TripleO::Services::AuditD
- name: Compute
CountDefault: 1
HostnameFormatDefault: '%stackname%-novacompute-%index%'
+ disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephClient
@@ -106,6 +125,7 @@
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::Kernel
@@ -120,7 +140,7 @@
- OS::TripleO::Services::OpenDaylightOvs
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
- - OS::TripleO::Services::VipHosts
+ - OS::TripleO::Services::AuditD
- name: BlockStorage
ServicesDefault:
@@ -130,13 +150,15 @@
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
- - OS::TripleO::Services::VipHosts
+ - OS::TripleO::Services::AuditD
- name: ObjectStorage
+ disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::Kernel
@@ -144,22 +166,26 @@
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
- - OS::TripleO::Services::VipHosts
+ - OS::TripleO::Services::AuditD
- name: CephStorage
+ disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
- - OS::TripleO::Services::VipHosts
+ - OS::TripleO::Services::AuditD
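To make the new role keys concrete, a hedged example of a custom role entry
(the role name, hostname format, and numbers are illustrative, not defaults
shipped by this change):

    - name: ExampleRole
      CountDefault: 1
      HostnameFormatDefault: '%stackname%-example-%index%'
      disable_constraints: True      # skip Nova/Glance constraint checks
      upgrade_batch_size: 3          # run upgrade_batch_tasks three nodes at a time
      ServicesDefault:
        - OS::TripleO::Services::Ntp
        - OS::TripleO::Services::Snmp
        - OS::TripleO::Services::Sshd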
diff --git a/roles_data_undercloud.yaml b/roles_data_undercloud.yaml
new file mode 100644
index 00000000..2759429c
--- /dev/null
+++ b/roles_data_undercloud.yaml
@@ -0,0 +1,35 @@
+- name: Undercloud # the 'primary' role goes first
+ CountDefault: 1
+ disable_constraints: True
+ ServicesDefault:
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MongoDb
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::Apache
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::MistralEngine
+ - OS::TripleO::Services::MistralApi
+ - OS::TripleO::Services::MistralExecutor
+ - OS::TripleO::Services::IronicApi
+ - OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::NovaIronic
+ - OS::TripleO::Services::Zaqar
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronApi
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::NeutronDhcpAgent
diff --git a/scripts/hosts-config.sh b/scripts/hosts-config.sh
new file mode 100755
index 00000000..f456b316
--- /dev/null
+++ b/scripts/hosts-config.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+set -eux
+set -o pipefail
+
+write_entries() {
+ local file="$1"
+ local entries="$2"
+
+ # Don't do anything if the file isn't there
+ if [ ! -f "$file" ]; then
+ return
+ fi
+
+ if grep -q "^# HEAT_HOSTS_START" "$file"; then
+ temp=$(mktemp)
+ awk -v v="$entries" '/^# HEAT_HOSTS_START/ {
+ print $0
+ print v
+ f=1
+ }f &&!/^# HEAT_HOSTS_END$/{next}/^# HEAT_HOSTS_END$/{f=0}!f' "$file" > "$temp"
+ echo "INFO: Updating hosts file $file, check below for changes"
+ diff "$file" "$temp" || true
+ cat "$temp" > "$file"
+ else
+ echo -ne "\n# HEAT_HOSTS_START - Do not edit manually within this section!\n" >> "$file"
+ echo "$entries" >> "$file"
+ echo -ne "# HEAT_HOSTS_END\n\n" >> "$file"
+ fi
+
+}
+
+if [ ! -z "$hosts" ]; then
+ for tmpl in /etc/cloud/templates/hosts.*.tmpl ; do
+ write_entries "$tmpl" "$hosts"
+ done
+ write_entries "/etc/hosts" "$hosts"
+else
+ echo "No hosts in Heat, nothing written."
+fi
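After write_entries() has run, the managed block in /etc/hosts looks roughly
like the following (addresses and hostnames are made up for illustration):

    # HEAT_HOSTS_START - Do not edit manually within this section!
    192.0.2.10 overcloud-controller-0.localdomain overcloud-controller-0
    192.0.2.11 overcloud-novacompute-0.localdomain overcloud-novacompute-0
    # HEAT_HOSTS_END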
diff --git a/setup.py b/setup.py
index 70c2b3f3..782bb21f 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,6 +16,14 @@
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+ import multiprocessing # noqa
+except ImportError:
+ pass
+
setuptools.setup(
- setup_requires=['pbr'],
+ setup_requires=['pbr>=1.8'],
pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
index c3726e8b..06bce5a2 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1 +1,9 @@
-pyyaml
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+PyYAML>=3.10.0 # MIT
+Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
+six>=1.9.0 # MIT
+sphinx!=1.3b1,<1.4,>=1.2.1 # BSD
+oslosphinx>=4.7.0 # Apache-2.0
+reno>=1.8.0 # Apache-2.0
diff --git a/tools/process-templates.py b/tools/process-templates.py
new file mode 100755
index 00000000..1c8c4ba6
--- /dev/null
+++ b/tools/process-templates.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import jinja2
+import os
+import shutil
+import six
+import sys
+import yaml
+
+__tht_root_dir = os.path.dirname(os.path.dirname(__file__))
+
+
+def parse_opts(argv):
+ parser = argparse.ArgumentParser(
+ description='Render tripleo-heat-templates Jinja2 (.j2.yaml)'
+ ' templates into Heat templates.')
+ parser.add_argument('-p', '--base_path', metavar='BASE_PATH',
+ help="""base path of templates to process.""",
+ default='.')
+ parser.add_argument('-r', '--roles-data', metavar='ROLES_DATA',
+ help="""relative path to the roles_data.yaml file.""",
+ default='roles_data.yaml')
+ parser.add_argument('--safe',
+ action='store_true',
+ help="""Enable safe mode (do not overwrite files).""",
+ default=False)
+ parser.add_argument('-o', '--output-dir', metavar='OUTPUT_DIR',
+ help="""Output dir for all the templates""",
+ default='')
+ opts = parser.parse_args(argv[1:])
+
+ return opts
+
+
+def _j2_render_to_file(j2_template, j2_data, outfile_name=None,
+ overwrite=True):
+ yaml_f = outfile_name or j2_template.replace('.j2.yaml', '.yaml')
+ print('rendering j2 template to file: %s' % outfile_name)
+
+ if not overwrite and os.path.exists(outfile_name):
+ print('ERROR: path already exists for file: %s' % outfile_name)
+ sys.exit(1)
+
+ # Search for templates relative to the current template path first
+ template_base = os.path.dirname(yaml_f)
+ j2_loader = jinja2.loaders.FileSystemLoader([template_base, __tht_root_dir])
+
+ try:
+ # Render the j2 template
+ template = jinja2.Environment(loader=j2_loader).from_string(
+ j2_template)
+ r_template = template.render(**j2_data)
+ except jinja2.exceptions.TemplateError as ex:
+ error_msg = ("Error rendering template %s : %s"
+ % (yaml_f, six.text_type(ex)))
+ print(error_msg)
+ raise Exception(error_msg)
+ with open(outfile_name, 'w') as out_f:
+ out_f.write(r_template)
+
+
+def process_templates(template_path, role_data_path, output_dir, overwrite):
+
+ with open(role_data_path) as role_data_file:
+ role_data = yaml.safe_load(role_data_file)
+
+ j2_excludes_path = os.path.join(template_path, 'j2_excludes.yaml')
+ with open(j2_excludes_path) as role_data_file:
+ j2_excludes = yaml.safe_load(role_data_file)
+
+ if output_dir and not os.path.isdir(output_dir):
+ if os.path.exists(output_dir):
+ raise RuntimeError('Output dir %s is not a directory' % output_dir)
+ os.mkdir(output_dir)
+
+ role_names = [r.get('name') for r in role_data]
+ r_map = {}
+ for r in role_data:
+ r_map[r.get('name')] = r
+ excl_templates = ['%s/%s' % (template_path, e)
+ for e in j2_excludes.get('name')]
+
+ if os.path.isdir(template_path):
+ for subdir, dirs, files in os.walk(template_path):
+
+ # NOTE(flaper87): Ignore hidden dirs as we don't
+ # generate templates for those.
+ # Note the slice assignment for `dirs` is necessary
+ # because we need to modify the *elements* in the
+ # dirs list rather than the reference to the list.
+ # This way we'll make sure os.walk will iterate over
+ # the shrunk list. os.walk doesn't have an API for
+ # filtering dirs at this point.
+ dirs[:] = [d for d in dirs if not d[0] == '.']
+ files = [f for f in files if not f[0] == '.']
+
+ # NOTE(flaper87): We could have used shutil.copytree
+ # but it requires the dst dir to not be present. This
+ # approach is safer as it doesn't require us to delete
+ # the output_dir in advance and it allows for running
+ # the command multiple times with the same output_dir.
+ out_dir = subdir
+ if output_dir:
+ out_dir = os.path.join(output_dir, subdir)
+ if not os.path.exists(out_dir):
+ os.mkdir(out_dir)
+
+ for f in files:
+ file_path = os.path.join(subdir, f)
+ # We do two templating passes here:
+ # 1. *.role.j2.yaml - we template just the role name
+ # and create multiple files (one per role)
+ # 2. *.j2.yaml - we template with all roles_data,
+ # and create one file common to all roles
+ if f.endswith('.role.j2.yaml'):
+ print("jinja2 rendering role template %s" % f)
+ with open(file_path) as j2_template:
+ template_data = j2_template.read()
+ print("jinja2 rendering roles %s" % ","
+ .join(role_names))
+ for role in role_names:
+ j2_data = {'role': role}
+ # (dprince) For the undercloud installer we don't
+ # want to have heat check nova/glance APIs
+ if r_map[role].get('disable_constraints', False):
+ j2_data['disable_constraints'] = True
+ out_f = "-".join(
+ [role.lower(),
+ os.path.basename(f).replace('.role.j2.yaml',
+ '.yaml')])
+ out_f_path = os.path.join(out_dir, out_f)
+ if not (out_f_path in excl_templates):
+ _j2_render_to_file(template_data, j2_data,
+ out_f_path, overwrite)
+ else:
+ print('skipping rendering of %s' % out_f_path)
+ elif f.endswith('.j2.yaml'):
+ print("jinja2 rendering normal template %s" % f)
+ with open(file_path) as j2_template:
+ template_data = j2_template.read()
+ j2_data = {'roles': role_data}
+ out_f = os.path.basename(f).replace('.j2.yaml', '.yaml')
+ out_f_path = os.path.join(out_dir, out_f)
+ _j2_render_to_file(template_data, j2_data, out_f_path,
+ overwrite)
+ elif output_dir:
+ shutil.copy(os.path.join(subdir, f), out_dir)
+
+ else:
+ print('Unexpected argument %s' % template_path)
+
+opts = parse_opts(sys.argv)
+
+role_data_path = os.path.join(opts.base_path, opts.roles_data)
+
+process_templates(opts.base_path, role_data_path, opts.output_dir, (not opts.safe))
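As a hedged illustration of the two rendering passes (the file and resource
names are hypothetical): with roles Controller and Compute, a per-role
template named config.role.j2.yaml containing

    resources:
      {{role}}Config:
        type: OS::Heat::SoftwareConfig

is rendered once per role into controller-config.yaml and compute-config.yaml,
while a plain example.j2.yaml is rendered a single time against the full
roles_data list into example.yaml.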
diff --git a/tools/releasenotes_tox.sh b/tools/releasenotes_tox.sh
new file mode 100755
index 00000000..4fecfd92
--- /dev/null
+++ b/tools/releasenotes_tox.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+rm -rf releasenotes/build
+
+sphinx-build -a -E -W \
+ -d releasenotes/build/doctrees \
+ -b html \
+ releasenotes/source releasenotes/build/html
+BUILD_RESULT=$?
+
+UNCOMMITTED_NOTES=$(git status --porcelain | \
+ awk '$1 == "M" && $2 ~ /releasenotes\/notes/ {print $2}')
+
+if [ "${UNCOMMITTED_NOTES}" ]
+then
+ cat <<EOF
+
+REMINDER: The following changes to release notes have not been committed:
+
+${UNCOMMITTED_NOTES}
+
+While that may be intentional, keep in mind that release notes are built from
+committed changes, not the working directory.
+
+EOF
+fi
+
+exit ${BUILD_RESULT}
diff --git a/tools/tox_install.sh b/tools/tox_install.sh
new file mode 100755
index 00000000..e61b63a8
--- /dev/null
+++ b/tools/tox_install.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+# Client constraint file contains this client version pin that is in conflict
+# with installing the client from source. We should remove the version pin in
+# the constraints file before applying it for from-source installation.
+
+CONSTRAINTS_FILE="$1"
+shift 1
+
+set -e
+
+# NOTE(tonyb): Place this in the tox environment's log dir so it will get
+# published to logs.openstack.org for easy debugging.
+localfile="$VIRTUAL_ENV/log/upper-constraints.txt"
+
+if [[ "$CONSTRAINTS_FILE" != http* ]]; then
+ CONSTRAINTS_FILE="file://$CONSTRAINTS_FILE"
+fi
+# NOTE(tonyb): need to add curl to bindep.txt if the project supports bindep
+curl "$CONSTRAINTS_FILE" --insecure --progress-bar --output "$localfile"
+
+pip install -c"$localfile" openstack-requirements
+
+# This is the main purpose of the script: Allow local installation of
+# the current repo. It is listed in constraints file and thus any
+# install will be constrained and we need to unconstrain it.
+edit-constraints "$localfile" -- "$CLIENT_NAME"
+
+pip install -c"$localfile" -U "$@"
+exit $?
diff --git a/tools/yaml-nic-config-2-script.py b/tools/yaml-nic-config-2-script.py
new file mode 100755
index 00000000..b8f07e4f
--- /dev/null
+++ b/tools/yaml-nic-config-2-script.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import copy
+import os
+import sys
+import traceback
+import yaml
+import six
+import re
+
+
+#convert comments into 'comment<num>_<indent>: ...' YAML entries
+def to_commented_yaml(filename):
+ out_str = ''
+ last_non_comment_spaces = ''
+ with open(filename, 'r') as f:
+ comment_count = 0
+ for line in f:
+ char_count = 0
+ spaces = ''
+ for char in line:
+ char_count += 1
+ if char == ' ':
+ spaces += ' '
+ continue
+ elif char == '#':
+ comment_count += 1
+ comment = line[char_count:-1]
+ out_str += "%scomment%i_%i: '%s'\n" % (last_non_comment_spaces, comment_count, len(spaces), comment)
+ break
+ else:
+ last_non_comment_spaces = spaces
+ out_str += line
+
+ #inline comments check
+ m = re.match(".*:.*#(.*)", line)
+ if m:
+ comment_count += 1
+ out_str += "%s inline_comment%i: '%s'\n" % (last_non_comment_spaces, comment_count, m.group(1))
+ break
+
+ with open(filename, 'w') as f:
+ f.write(out_str)
+
+ return out_str
+
+#convert back to normal #commented YAML
+def to_normal_yaml(filename):
+
+ with open(filename, 'r') as f:
+ data = f.read()
+
+ out_str = ''
+ next_line_break = False
+ for line in data.split('\n'):
+ m = re.match(" +comment[0-9]+_([0-9]+): '(.*)'.*", line) #normal comments
+ i = re.match(" +inline_comment[0-9]+: '(.*)'.*", line) #inline comments
+ if m:
+ if next_line_break:
+ out_str += '\n'
+ next_line_break = False
+ for x in range(0, int(m.group(1))):
+ out_str += " "
+ out_str += "#%s\n" % m.group(2)
+ elif i:
+ out_str += " #%s\n" % i.group(1)
+ next_line_break = False
+ else:
+ if next_line_break:
+ out_str += '\n'
+ out_str += line
+ next_line_break = True
+
+ if next_line_break:
+ out_str += '\n'
+
+ with open(filename, 'w') as f:
+ f.write(out_str)
+
+ return out_str
+
+
+class description(six.text_type):
+ pass
+
+# FIXME: Some of this duplicates code from build_endpoint_map.py, we should
+# refactor to share the common code
+class TemplateDumper(yaml.SafeDumper):
+ def represent_ordered_dict(self, data):
+ return self.represent_dict(data.items())
+
+ def description_presenter(self, data):
+ if '\n' in data:
+ style = '>'
+ else:
+ style = ''
+ return self.represent_scalar(
+ yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG, data, style=style)
+
+
+# We load mappings into OrderedDict to preserve their order
+class TemplateLoader(yaml.SafeLoader):
+ def construct_mapping(self, node):
+ self.flatten_mapping(node)
+ return collections.OrderedDict(self.construct_pairs(node))
+
+
+TemplateDumper.add_representer(description,
+ TemplateDumper.description_presenter)
+
+TemplateDumper.add_representer(collections.OrderedDict,
+ TemplateDumper.represent_ordered_dict)
+
+
+TemplateLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
+ TemplateLoader.construct_mapping)
+
+def write_template(template, filename=None):
+ with open(filename, 'w') as f:
+ yaml.dump(template, f, TemplateDumper, width=120, default_flow_style=False)
+
+def exit_usage():
+ print('Usage %s <yaml file>' % sys.argv[0])
+ sys.exit(1)
+
+def convert(filename):
+ print('Converting %s' % filename)
+ try:
+ tpl = yaml.load(open(filename).read(), Loader=TemplateLoader)
+ except Exception:
+ print(traceback.format_exc())
+ return 0
+
+ # Check which path we need for run-os-net-config.sh because we have
+ # nic config templates in the top-level and network/config
+ script_paths = ['network/scripts/run-os-net-config.sh',
+ '../../scripts/run-os-net-config.sh']
+ script_path = None
+ for p in script_paths:
+ check_path = os.path.join(os.path.dirname(filename), p)
+ if os.path.isfile(check_path):
+ print("Found %s, using %s" % (check_path, p))
+ script_path = p
+ if script_path is None:
+ print("Error couldn't find run-os-net-config.sh relative to filename")
+ exit_usage()
+
+ for r in six.iteritems(tpl.get('resources', {})):
+ if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
+ r[1].get('properties', {}).get('group') == 'os-apply-config' and
+ r[1].get('properties', {}).get('config', {}).get('os_net_config')):
+ #print("match %s" % r[0])
+ new_r = collections.OrderedDict()
+ new_r['type'] = 'OS::Heat::SoftwareConfig'
+ new_r['properties'] = collections.OrderedDict()
+ new_r['properties']['group'] = 'script'
+ old_net_config = r[1].get(
+ 'properties', {}).get('config', {}).get('os_net_config')
+ new_config = {'str_replace': collections.OrderedDict()}
+ new_config['str_replace']['template'] = {'get_file': script_path}
+ new_config['str_replace']['params'] = {'$network_config': old_net_config}
+ new_r['properties']['config'] = new_config
+ tpl['resources'][r[0]] = new_r
+ else:
+ print("No match %s" % r[0])
+ return 0
+
+ # Preserve typical HOT template key ordering
+ od_result = collections.OrderedDict()
+ # Need to bump the HOT version so str_replace supports serializing to json
+ od_result['heat_template_version'] = "2016-10-14"
+ if tpl.get('description'):
+ od_result['description'] = description(tpl['description'])
+ od_result['parameters'] = tpl['parameters']
+ od_result['resources'] = tpl['resources']
+ od_result['outputs'] = tpl['outputs']
+ #print('Result:')
+ #print('%s' % yaml.dump(od_result, Dumper=TemplateDumper, width=120, default_flow_style=False))
+ #print('---')
+ #replace = raw_input(
+ #"Replace file %s? Answer y/n" % filename).lower() == 'y'
+ #if replace:
+ #print("Replace %s" % filename)
+ write_template(od_result, filename)
+ #else:
+ # print("NOT replacing %s" % filename)
+ # return 0
+ return 1
+
+if len(sys.argv) < 2:
+ exit_usage()
+
+path_args = sys.argv[1:]
+exit_val = 0
+num_converted = 0
+
+for base_path in path_args:
+ if os.path.isfile(base_path) and base_path.endswith('.yaml'):
+ to_commented_yaml(base_path)
+ num_converted += convert(base_path)
+ to_normal_yaml(base_path)
+ else:
+ print('Unexpected argument %s' % base_path)
+ exit_usage()
+if num_converted == 0:
+ exit_val = 1
+sys.exit(exit_val)
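For reference, the conversion rewrites a matched resource roughly as follows
(the resource name and the network_config payload are placeholders):

    # before
    OsNetConfigImpl:
      type: OS::Heat::StructuredConfig
      properties:
        group: os-apply-config
        config:
          os_net_config:
            network_config:
              - type: interface
                name: nic1

    # after
    OsNetConfigImpl:
      type: OS::Heat::SoftwareConfig
      properties:
        group: script
        config:
          str_replace:
            template:
              get_file: network/scripts/run-os-net-config.sh
            params:
              $network_config:
                network_config:
                  - type: interface
                    name: nic1

The heat_template_version is also bumped to 2016-10-14 so that str_replace
can serialize the network config to JSON.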
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index 95c7d025..19e40d19 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -19,12 +19,85 @@ import yaml
required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords']
+envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
+ 'tls-endpoints-public-ip.yaml',
+ 'tls-everywhere-endpoints-dns.yaml']
+ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
+
def exit_usage():
print('Usage %s <yaml file or directory>' % sys.argv[0])
sys.exit(1)
+def get_base_endpoint_map(filename):
+ try:
+ tpl = yaml.load(open(filename).read())
+ return tpl['parameters']['EndpointMap']['default']
+ except Exception:
+ print(traceback.format_exc())
+ return None
+
+
+def get_endpoint_map_from_env(filename):
+ try:
+ tpl = yaml.load(open(filename).read())
+ return {
+ 'file': filename,
+ 'map': tpl['parameter_defaults']['EndpointMap']
+ }
+ except Exception:
+ print(traceback.format_exc())
+ return None
+
+
+def validate_endpoint_map(base_map, env_map):
+ return sorted(base_map.keys()) == sorted(env_map.keys())
+
+
+def validate_mysql_connection(settings):
+ no_op = lambda *args: False
+ error_status = [0]
+
+ def mysql_protocol(items):
+ return items == ['EndpointMap', 'MysqlInternal', 'protocol']
+
+ def client_bind_address(item):
+ return 'bind_address' in item
+
+ def validate_mysql_uri(key, items):
+ # Only consider a connection if it targets mysql
+ if key.endswith('connection') and \
+ search(items, mysql_protocol, no_op):
+ # Assume the "bind_address" option is one of
+ # the token that made up the uri
+ if not search(items, client_bind_address, no_op):
+ error_status[0] = 1
+ return False
+
+ def search(item, check_item, check_key):
+ if check_item(item):
+ return True
+ elif isinstance(item, list):
+ for i in item:
+ if search(i, check_item, check_key):
+ return True
+ elif isinstance(item, dict):
+ for k in item.keys():
+ if check_key(k, item[k]):
+ return True
+ elif search(item[k], check_item, check_key):
+ return True
+ return False
+
+ search(settings, no_op, validate_mysql_uri)
+ return error_status[0]
+
+
def validate_service(filename, tpl):
+ if 'heat_template_version' in tpl and not str(tpl['heat_template_version']).isalpha():
+ print('ERROR: heat_template_version needs to be the release alias not a date: %s'
+ % filename)
+ return 1
if 'outputs' in tpl and 'role_data' in tpl['outputs']:
if 'value' not in tpl['outputs']['role_data']:
print('ERROR: invalid role_data for filename: %s'
@@ -41,6 +114,12 @@ def validate_service(filename, tpl):
print('ERROR: service_name should match file name for service: %s.'
% filename)
return 1
+ # if service connects to mysql, the uri should use option
+ # bind_address to avoid issues with VIP failover
+ if 'config_settings' in role_data and \
+ validate_mysql_connection(role_data['config_settings']):
+ print('ERROR: mysql connection uri should use option bind_address')
+ return 1
if 'parameters' in tpl:
for param in required_params:
if param not in tpl['parameters']:
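A hedged sketch of a config_settings entry that satisfies this check (the
hiera key and database name are illustrative; the validator only looks for
the EndpointMap MysqlInternal protocol and a bind_address token in a key
ending in "connection"):

    config_settings:
      example::database_connection:
        list_join:
          - ''
          - - {get_param: [EndpointMap, MysqlInternal, protocol]}
            - '://example:password@'
            - {get_param: [EndpointMap, MysqlInternal, host]}
            - '/example'
            - '?bind_address='
            - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"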
@@ -83,6 +162,8 @@ if len(sys.argv) < 2:
path_args = sys.argv[1:]
exit_val = 0
failed_files = []
+base_endpoint_map = None
+env_endpoint_maps = list()
for base_path in path_args:
if os.path.isdir(base_path):
@@ -94,6 +175,12 @@ for base_path in path_args:
if failed:
failed_files.append(file_path)
exit_val |= failed
+ if f == ENDPOINT_MAP_FILE:
+ base_endpoint_map = get_base_endpoint_map(file_path)
+ if f in envs_containing_endpoint_map:
+ env_endpoint_map = get_endpoint_map_from_env(file_path)
+ if env_endpoint_map:
+ env_endpoint_maps.append(env_endpoint_map)
elif os.path.isfile(base_path) and base_path.endswith('.yaml'):
failed = validate(base_path)
if failed:
@@ -103,6 +190,30 @@ for base_path in path_args:
print('Unexpected argument %s' % base_path)
exit_usage()
+if base_endpoint_map and \
+ len(env_endpoint_maps) == len(envs_containing_endpoint_map):
+ for env_endpoint_map in env_endpoint_maps:
+ matches = validate_endpoint_map(base_endpoint_map,
+ env_endpoint_map['map'])
+ if not matches:
+ print("ERROR: %s needs to be updated to match changes in base "
+ "endpoint map" % env_endpoint_map['file'])
+ failed_files.append(env_endpoint_map['file'])
+ exit_val |= 1
+ else:
+ print("%s matches base endpoint map" % env_endpoint_map['file'])
+else:
+ print("ERROR: Can't validate endpoint maps since a file is missing. "
+ "If you meant to delete one of these files you should update this "
+ "tool as well.")
+ if not base_endpoint_map:
+ failed_files.append(ENDPOINT_MAP_FILE)
+ if len(env_endpoint_maps) != len(envs_containing_endpoint_map):
+ matched_files = set(os.path.basename(matched_env_file['file'])
+ for matched_env_file in env_endpoint_maps)
+ failed_files.extend(set(envs_containing_endpoint_map) - matched_files)
+ exit_val |= 1
+
if failed_files:
print('Validation failed on:')
for f in failed_files:
diff --git a/tox.ini b/tox.ini
index 5d09e0a4..3796a546 100644
--- a/tox.ini
+++ b/tox.ini
@@ -11,4 +11,13 @@ deps = -r{toxinidir}/requirements.txt
commands = {posargs}
[testenv:pep8]
-commands = python ./tools/yaml-validate.py .
+commands =
+ python ./tools/process-templates.py
+ python ./network/endpoints/build_endpoint_map.py --check
+ python ./tools/yaml-validate.py .
+
+[testenv:templates]
+commands = python ./tools/process-templates.py
+
+[testenv:releasenotes]
+commands = bash -c tools/releasenotes_tox.sh
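With these targets in place, the same checks can be run locally (assuming a
local tox installation): "tox -e pep8" renders the templates, checks the
endpoint map, and validates the YAML; "tox -e templates" only renders the
Jinja2 templates; and "tox -e releasenotes" builds the release notes.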