-rw-r--r--  .gitignore | 3
-rw-r--r--  Gemfile | 24
-rw-r--r--  README.rst | 75
-rw-r--r--  Rakefile | 6
-rw-r--r--  all-nodes-validation.yaml | 2
-rw-r--r--  bootstrap-config.yaml | 2
-rw-r--r--  capabilities-map.yaml | 126
-rw-r--r--  ci/common/net-config-multinode-os-net-config.yaml | 114
-rw-r--r--  ci/common/net-config-multinode.yaml | 6
-rw-r--r--  ci/environments/multinode-3nodes.yaml | 78
-rw-r--r--  ci/environments/multinode-core.yaml | 37
-rw-r--r--  ci/environments/multinode.yaml | 55
-rw-r--r--  ci/environments/multinode_major_upgrade.yaml | 47
-rw-r--r--  ci/environments/scenario001-multinode.yaml | 25
-rw-r--r--  ci/environments/scenario002-multinode.yaml | 8
-rw-r--r--  ci/environments/scenario003-multinode.yaml | 6
-rw-r--r--  ci/environments/scenario004-multinode.yaml | 41
-rw-r--r--  ci/pingtests/scenario001-multinode.yaml | 2
-rw-r--r--  ci/pingtests/scenario002-multinode.yaml | 4
-rw-r--r--  ci/pingtests/scenario003-multinode.yaml | 2
-rw-r--r--  ci/pingtests/scenario004-multinode.yaml | 14
-rw-r--r--  ci/pingtests/tenantvm_floatingip.yaml | 142
-rw-r--r--  ci/scripts/freeipa_setup.sh | 120
-rw-r--r--  default_passwords.yaml | 2
-rw-r--r--  deployed-server/README.rst | 13
-rw-r--r--  deployed-server/ctlplane-port.yaml | 17
-rw-r--r--  deployed-server/deployed-neutron-port.yaml | 67
-rw-r--r--  deployed-server/deployed-server-bootstrap-centos.sh | 16
-rw-r--r--  deployed-server/deployed-server-bootstrap-centos.yaml | 22
-rw-r--r--  deployed-server/deployed-server-bootstrap-rhel.sh | 13
-rw-r--r--  deployed-server/deployed-server-bootstrap-rhel.yaml | 22
-rw-r--r--  deployed-server/deployed-server-config.yaml | 22
-rw-r--r--  deployed-server/deployed-server-roles-data.yaml | 173
-rw-r--r--  deployed-server/deployed-server.yaml | 80
-rwxr-xr-x  deployed-server/scripts/get-occ-config.sh | 44
-rw-r--r--  docker/copy-etc.sh | 3
-rw-r--r--  docker/copy-json.py | 72
-rw-r--r--  docker/create-config-dir.sh | 6
-rwxr-xr-x  docker/docker-puppet.py | 212
-rwxr-xr-x  docker/firstboot/setup_docker_host.sh | 26
-rw-r--r--  docker/firstboot/setup_docker_host.yaml (renamed from docker/firstboot/install_docker_agents.yaml) | 15
-rw-r--r--  docker/firstboot/start_docker_agents.sh | 108
-rw-r--r--  docker/post.j2.yaml | 430
-rw-r--r--  docker/services/README.rst | 145
-rw-r--r--  docker/services/database/mongodb.yaml | 98
-rw-r--r--  docker/services/glance-api.yaml | 96
-rw-r--r--  docker/services/heat-api-cfn.yaml | 90
-rw-r--r--  docker/services/heat-api.yaml | 90
-rw-r--r--  docker/services/heat-engine.yaml | 92
-rw-r--r--  docker/services/ironic-api.yaml | 99
-rw-r--r--  docker/services/ironic-conductor.yaml | 111
-rw-r--r--  docker/services/ironic-pxe.yaml | 131
-rw-r--r--  docker/services/keystone.yaml | 153
-rw-r--r--  docker/services/memcached.yaml | 69
-rw-r--r--  docker/services/mistral-api.yaml | 115
-rw-r--r--  docker/services/mistral-engine.yaml | 87
-rw-r--r--  docker/services/mistral-executor.yaml | 91
-rw-r--r--  docker/services/neutron-api.yaml | 102
-rw-r--r--  docker/services/neutron-dhcp.yaml | 93
-rw-r--r--  docker/services/neutron-ovs-agent.yaml | 51
-rw-r--r--  docker/services/neutron-plugin-ml2.yaml | 58
-rw-r--r--  docker/services/nova-api.yaml | 134
-rw-r--r--  docker/services/nova-compute.yaml | 47
-rw-r--r--  docker/services/nova-conductor.yaml | 85
-rw-r--r--  docker/services/nova-ironic.yaml | 88
-rw-r--r--  docker/services/nova-libvirt.yaml | 49
-rw-r--r--  docker/services/nova-metadata.yaml (renamed from puppet/services/pacemaker/neutron-plugin-ml2.yaml) | 40
-rw-r--r--  docker/services/nova-placement.yaml | 101
-rw-r--r--  docker/services/nova-scheduler.yaml | 84
-rw-r--r--  docker/services/rabbitmq.yaml | 119
-rw-r--r--  docker/services/services.yaml | 16
-rw-r--r--  docker/services/swift-proxy.yaml | 76
-rw-r--r--  docker/services/swift-ringbuilder.yaml | 80
-rw-r--r--  docker/services/swift-storage.yaml | 343
-rw-r--r--  docker/services/zaqar.yaml | 99
-rw-r--r--  environments/auditd.yaml | 119
-rw-r--r--  environments/cinder-dellps-config.yaml | 31
-rw-r--r--  environments/cinder-dellsc-config.yaml | 4
-rw-r--r--  environments/cinder-eqlx-config.yaml | 17
-rw-r--r--  environments/cinder-hpelefthand-config.yaml | 13
-rw-r--r--  environments/cinder-iser.yaml | 19
-rw-r--r--  environments/cinder-scaleio-config.yaml | 35
-rw-r--r--  environments/collectd-environment.yaml | 23
-rw-r--r--  environments/contrail/contrail-net.yaml | 26
-rw-r--r--  environments/contrail/contrail-nic-config-compute.yaml | 167
-rw-r--r--  environments/contrail/contrail-services.yaml | 45
-rw-r--r--  environments/contrail/roles_data_contrail.yaml | 237
-rw-r--r--  environments/deployed-server-bootstrap-environment-centos.yaml | 7
-rw-r--r--  environments/deployed-server-bootstrap-environment-rhel.yaml | 7
-rw-r--r--  environments/deployed-server-environment.yaml | 4
-rw-r--r--  environments/deployed-server-noop-ctlplane.yaml | 4
-rw-r--r--  environments/deployed-server-pacemaker-environment.yaml | 4
-rw-r--r--  environments/docker-network-isolation.yaml | 4
-rw-r--r--  environments/docker.yaml | 52
-rw-r--r--  environments/enable-internal-tls.yaml | 12
-rw-r--r--  environments/enable-swap-partition.yaml | 3
-rw-r--r--  environments/enable-swap.yaml | 3
-rw-r--r--  environments/enable_congress.yaml | 2
-rw-r--r--  environments/enable_tacker.yaml | 2
-rw-r--r--  environments/external-loadbalancer-vip-v6.yaml | 21
-rw-r--r--  environments/external-loadbalancer-vip.yaml | 19
-rw-r--r--  environments/horizon_password_validation.yaml | 5
-rw-r--r--  environments/host-config-pre-network.j2.yaml | 16
-rw-r--r--  environments/low-memory-usage.yaml | 4
-rw-r--r--  environments/major-upgrade-all-in-one.yaml | 2
-rw-r--r--  environments/major-upgrade-composable-steps.yaml | 16
-rw-r--r--  environments/major-upgrade-converge.yaml | 7
-rw-r--r--  environments/net-bond-with-vlans-no-external.yaml | 4
-rw-r--r--  environments/net-bond-with-vlans-v6.yaml | 6
-rw-r--r--  environments/net-bond-with-vlans.yaml | 6
-rw-r--r--  environments/net-single-nic-linux-bridge-with-vlans.yaml | 6
-rw-r--r--  environments/net-single-nic-with-vlans-no-external.yaml | 4
-rw-r--r--  environments/net-single-nic-with-vlans-v6.yaml | 6
-rw-r--r--  environments/net-single-nic-with-vlans.yaml | 6
-rw-r--r--  environments/network-environment.yaml | 4
-rw-r--r--  environments/network-isolation-no-tunneling.yaml | 30
-rw-r--r--  environments/network-isolation.yaml | 3
-rw-r--r--  environments/neutron-ml2-fujitsu-cfab.yaml | 21
-rw-r--r--  environments/neutron-ml2-fujitsu-fossw.yaml | 22
-rw-r--r--  environments/neutron-ml2-ovn.yaml | 11
-rw-r--r--  environments/neutron-opencontrail.yaml | 25
-rw-r--r--  environments/neutron-opendaylight-l3.yaml | 14
-rw-r--r--  environments/neutron-opendaylight.yaml | 6
-rwxr-xr-x  environments/neutron-sriov.yaml | 3
-rw-r--r--  environments/puppet-ceph-external.yaml | 3
-rw-r--r--  environments/puppet-ceph.yaml | 12
-rw-r--r--  environments/puppet-pacemaker.yaml | 2
-rw-r--r--  environments/services/ceph-mds.yaml | 2
-rw-r--r--  environments/services/ceph-rbdmirror.yaml | 2
-rw-r--r--  environments/services/disable-ceilometer-api.yaml | 2
-rw-r--r--  environments/services/ec2-api.yaml | 3
-rw-r--r--  environments/services/etcd.yaml | 2
-rw-r--r--  environments/services/octavia.yaml | 9
-rw-r--r--  environments/sshd-banner.yaml | 13
-rw-r--r--  environments/tls-endpoints-public-dns.yaml | 55
-rw-r--r--  environments/tls-endpoints-public-ip.yaml | 55
-rw-r--r--  environments/tls-everywhere-endpoints-dns.yaml | 55
-rw-r--r--  environments/undercloud.yaml | 18
-rw-r--r--  environments/updates/README.md | 3
-rw-r--r--  environments/updates/update-from-deployed-server-newton.yaml | 2
-rw-r--r--  environments/updates/update-from-keystone-admin-internal-api.yaml | 29
-rw-r--r--  extraconfig/all_nodes/mac_hostname.j2.yaml | 2
-rw-r--r--  extraconfig/all_nodes/random_string.j2.yaml | 2
-rw-r--r--  extraconfig/all_nodes/swap-partition.j2.yaml | 19
-rw-r--r--  extraconfig/all_nodes/swap.j2.yaml | 10
-rw-r--r--  extraconfig/nova_metadata/krb-service-principals.yaml | 84
-rw-r--r--  extraconfig/post_deploy/default.yaml | 2
-rw-r--r--  extraconfig/post_deploy/example.yaml | 2
-rw-r--r--  extraconfig/post_deploy/example_run_on_update.yaml | 2
-rwxr-xr-x  extraconfig/post_deploy/undercloud_post.sh | 126
-rw-r--r--  extraconfig/post_deploy/undercloud_post.yaml | 93
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml | 2
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration | 43
-rw-r--r--  extraconfig/pre_network/ansible_host_config.ansible | 58
-rw-r--r--  extraconfig/pre_network/config_then_reboot.yaml | 48
-rw-r--r--  extraconfig/pre_network/host_config_and_reboot.role.j2.yaml | 100
-rw-r--r--  extraconfig/tasks/major_upgrade_block_storage.sh | 22
-rw-r--r--  extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml | 2
-rwxr-xr-x  extraconfig/tasks/major_upgrade_ceph_mon.sh | 82
-rw-r--r--  extraconfig/tasks/major_upgrade_ceph_storage.sh | 116
-rw-r--r--  extraconfig/tasks/major_upgrade_compute.sh | 46
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh | 13
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh | 4
-rw-r--r--  extraconfig/tasks/major_upgrade_object_storage.sh | 53
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml | 46
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml | 87
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh | 1
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml | 2
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh | 24
-rw-r--r--  extraconfig/tasks/post_puppet_pacemaker.yaml | 2
-rw-r--r--  extraconfig/tasks/post_puppet_pacemaker_restart.yaml | 2
-rw-r--r--  extraconfig/tasks/pre_puppet_pacemaker.yaml | 2
-rwxr-xr-x  extraconfig/tasks/run_puppet.sh | 27
-rw-r--r--  extraconfig/tasks/swift-ring-deploy.yaml | 31
-rw-r--r--  extraconfig/tasks/swift-ring-update.yaml | 42
-rw-r--r--  extraconfig/tasks/tripleo_upgrade_node.sh | 66
-rwxr-xr-x  extraconfig/tasks/yum_update.sh | 20
-rw-r--r--  extraconfig/tasks/yum_update.yaml | 9
-rw-r--r--  extraconfig/tasks/yum_update_noop.yaml | 2
-rw-r--r--  firstboot/install_vrouter_kmod.yaml | 105
-rw-r--r--  firstboot/os-net-config-mappings.yaml | 2
-rw-r--r--  firstboot/userdata_default.yaml | 2
-rw-r--r--  firstboot/userdata_dev_rsync.yaml | 2
-rw-r--r--  firstboot/userdata_example.yaml | 2
-rw-r--r--  firstboot/userdata_heat_admin.yaml | 2
-rw-r--r--  firstboot/userdata_root_password.yaml | 38
-rw-r--r--  hosts-config.yaml | 2
-rw-r--r--  net-config-bond.yaml | 2
-rw-r--r--  net-config-bridge.yaml | 2
-rw-r--r--  net-config-linux-bridge.yaml | 2
-rw-r--r--  net-config-noop.yaml | 6
-rw-r--r--  net-config-static-bridge-with-external-dhcp.yaml | 2
-rw-r--r--  net-config-static-bridge.yaml | 2
-rw-r--r--  net-config-static.yaml | 2
-rw-r--r--  net-config-undercloud.yaml | 77
-rw-r--r--  network/config/bond-with-vlans/ceph-storage.yaml | 2
-rw-r--r--  network/config/bond-with-vlans/cinder-storage.yaml | 2
-rw-r--r--  network/config/bond-with-vlans/compute-dpdk.yaml | 2
-rw-r--r--  network/config/bond-with-vlans/compute.yaml | 2
-rw-r--r--  network/config/bond-with-vlans/controller-no-external.yaml | 2
-rw-r--r--  network/config/bond-with-vlans/controller-v6.yaml | 2
-rw-r--r--  network/config/bond-with-vlans/controller.yaml | 2
-rw-r--r--  network/config/bond-with-vlans/swift-storage.yaml | 2
-rw-r--r--  network/config/multiple-nics/ceph-storage.yaml | 2
-rw-r--r--  network/config/multiple-nics/cinder-storage.yaml | 2
-rw-r--r--  network/config/multiple-nics/compute-dvr.yaml | 162
-rw-r--r--  network/config/multiple-nics/compute.yaml | 2
-rw-r--r--  network/config/multiple-nics/controller-v6.yaml | 2
-rw-r--r--  network/config/multiple-nics/controller.yaml | 2
-rw-r--r--  network/config/multiple-nics/swift-storage.yaml | 2
-rw-r--r--  network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml | 2
-rw-r--r--  network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml | 2
-rw-r--r--  network/config/single-nic-linux-bridge-vlans/compute.yaml | 2
-rw-r--r--  network/config/single-nic-linux-bridge-vlans/controller-v6.yaml | 2
-rw-r--r--  network/config/single-nic-linux-bridge-vlans/controller.yaml | 2
-rw-r--r--  network/config/single-nic-linux-bridge-vlans/swift-storage.yaml | 2
-rw-r--r--  network/config/single-nic-vlans/ceph-storage.yaml | 2
-rw-r--r--  network/config/single-nic-vlans/cinder-storage.yaml | 2
-rw-r--r--  network/config/single-nic-vlans/compute.yaml | 2
-rw-r--r--  network/config/single-nic-vlans/controller-no-external.yaml | 2
-rw-r--r--  network/config/single-nic-vlans/controller-v6.yaml | 2
-rw-r--r--  network/config/single-nic-vlans/controller.yaml | 2
-rw-r--r--  network/config/single-nic-vlans/swift-storage.yaml | 2
-rwxr-xr-x  network/endpoints/build_endpoint_map.py | 7
-rw-r--r--  network/endpoints/endpoint_data.yaml | 138
-rw-r--r--  network/endpoints/endpoint_map.yaml | 3613
-rw-r--r--  network/external.yaml | 2
-rw-r--r--  network/external_v6.yaml | 2
-rw-r--r--  network/internal_api.yaml | 2
-rw-r--r--  network/internal_api_v6.yaml | 2
-rw-r--r--  network/management.yaml | 2
-rw-r--r--  network/management_v6.yaml | 2
-rw-r--r--  network/networks.yaml | 2
-rw-r--r--  network/ports/ctlplane_vip.yaml | 2
-rw-r--r--  network/ports/external.yaml | 2
-rw-r--r--  network/ports/external_from_pool.yaml | 2
-rw-r--r--  network/ports/external_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/external_v6.yaml | 2
-rw-r--r--  network/ports/from_service.yaml | 2
-rw-r--r--  network/ports/from_service_v6.yaml | 2
-rw-r--r--  network/ports/internal_api.yaml | 2
-rw-r--r--  network/ports/internal_api_from_pool.yaml | 2
-rw-r--r--  network/ports/internal_api_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/internal_api_v6.yaml | 2
-rw-r--r--  network/ports/management.yaml | 2
-rw-r--r--  network/ports/management_from_pool.yaml | 2
-rw-r--r--  network/ports/management_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/management_v6.yaml | 2
-rw-r--r--  network/ports/net_ip_list_map.yaml | 19
-rw-r--r--  network/ports/net_ip_map.yaml | 2
-rw-r--r--  network/ports/net_vip_map_external.yaml | 2
-rw-r--r--  network/ports/net_vip_map_external_v6.yaml | 2
-rw-r--r--  network/ports/noop.yaml | 2
-rw-r--r--  network/ports/storage.yaml | 2
-rw-r--r--  network/ports/storage_from_pool.yaml | 2
-rw-r--r--  network/ports/storage_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/storage_mgmt.yaml | 2
-rw-r--r--  network/ports/storage_mgmt_from_pool.yaml | 2
-rw-r--r--  network/ports/storage_mgmt_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/storage_mgmt_v6.yaml | 2
-rw-r--r--  network/ports/storage_v6.yaml | 2
-rw-r--r--  network/ports/tenant.yaml | 2
-rw-r--r--  network/ports/tenant_from_pool.yaml | 2
-rw-r--r--  network/ports/tenant_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/tenant_v6.yaml | 2
-rw-r--r--  network/ports/vip.yaml | 2
-rw-r--r--  network/ports/vip_v6.yaml | 2
-rwxr-xr-x  network/scripts/run-os-net-config.sh | 26
-rw-r--r--  network/service_net_map.j2.yaml | 85
-rw-r--r--  network/storage.yaml | 2
-rw-r--r--  network/storage_mgmt.yaml | 2
-rw-r--r--  network/storage_mgmt_v6.yaml | 2
-rw-r--r--  network/storage_v6.yaml | 2
-rw-r--r--  network/tenant.yaml | 2
-rw-r--r--  network/tenant_v6.yaml | 2
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml | 52
-rw-r--r--  overcloud.j2.yaml | 73
-rw-r--r--  puppet/all-nodes-config.yaml | 5
-rw-r--r--  puppet/blockstorage-role.yaml | 296
-rw-r--r--  puppet/cephstorage-role.yaml | 296
-rw-r--r--  puppet/compute-role.yaml | 296
-rw-r--r--  puppet/config.role.j2.yaml | 2
-rw-r--r--  puppet/controller-config-pacemaker.yaml | 41
-rw-r--r--  puppet/controller-role.yaml | 297
-rw-r--r--  puppet/deploy-artifacts.sh | 2
-rw-r--r--  puppet/deploy-artifacts.yaml | 2
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml | 2
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml | 10
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml | 2
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml | 59
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml | 2
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml | 87
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml | 86
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml | 2
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/multiple.yaml | 2
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml | 2
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml | 2
-rw-r--r--  puppet/extraconfig/pre_deploy/default.yaml | 2
-rw-r--r--  puppet/extraconfig/pre_deploy/per_node.yaml | 2
-rw-r--r--  puppet/extraconfig/tls/ca-inject.yaml | 2
-rw-r--r--  puppet/extraconfig/tls/freeipa-enroll.yaml | 83
-rw-r--r--  puppet/extraconfig/tls/tls-cert-inject.yaml | 2
-rw-r--r--  puppet/major_upgrade_steps.j2.yaml | 213
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp | 21
-rw-r--r--  puppet/manifests/overcloud_role.pp | 4
-rw-r--r--  puppet/objectstorage-role.yaml | 297
-rw-r--r--  puppet/post-upgrade.j2.yaml | 27
-rw-r--r--  puppet/post.j2.yaml | 103
-rw-r--r--  puppet/puppet-steps.j2 | 102
-rw-r--r--  puppet/role.role.j2.yaml | 303
-rw-r--r--  puppet/services/README.rst | 61
-rw-r--r--  puppet/services/aodh-api.yaml | 9
-rw-r--r--  puppet/services/aodh-base.yaml | 5
-rw-r--r--  puppet/services/aodh-evaluator.yaml | 9
-rw-r--r--  puppet/services/aodh-listener.yaml | 9
-rw-r--r--  puppet/services/aodh-notifier.yaml | 9
-rw-r--r--  puppet/services/apache-internal-tls-certmonger.yaml | 41
-rw-r--r--  puppet/services/apache.yaml | 8
-rw-r--r--  puppet/services/auditd.yaml (renamed from puppet/services/pacemaker/core.yaml) | 17
-rw-r--r--  puppet/services/barbican-api.yaml | 15
-rw-r--r--  puppet/services/ca-certs.yaml | 2
-rw-r--r--  puppet/services/ceilometer-agent-central.yaml | 9
-rw-r--r--  puppet/services/ceilometer-agent-compute.yaml | 19
-rw-r--r--  puppet/services/ceilometer-agent-notification.yaml | 9
-rw-r--r--  puppet/services/ceilometer-api.yaml | 8
-rw-r--r--  puppet/services/ceilometer-base.yaml | 22
-rw-r--r--  puppet/services/ceilometer-collector.yaml | 9
-rw-r--r--  puppet/services/ceilometer-expirer.yaml | 2
-rw-r--r--  puppet/services/ceph-base.yaml | 2
-rw-r--r--  puppet/services/ceph-client.yaml | 2
-rw-r--r--  puppet/services/ceph-external.yaml | 43
-rw-r--r--  puppet/services/ceph-mds.yaml (renamed from puppet/services/pacemaker/ceilometer-api.yaml) | 30
-rw-r--r--  puppet/services/ceph-mon.yaml | 34
-rw-r--r--  puppet/services/ceph-osd.yaml | 46
-rw-r--r--  puppet/services/ceph-rgw.yaml | 28
-rw-r--r--  puppet/services/cinder-api.yaml | 23
-rw-r--r--  puppet/services/cinder-backend-dellps.yaml | 85
-rw-r--r--  puppet/services/cinder-backend-dellsc.yaml | 85
-rw-r--r--  puppet/services/cinder-backend-scaleio.yaml | 111
-rw-r--r--  puppet/services/cinder-backup.yaml | 2
-rw-r--r--  puppet/services/cinder-base.yaml | 52
-rw-r--r--  puppet/services/cinder-hpelefthand-iscsi.yaml | 56
-rw-r--r--  puppet/services/cinder-scheduler.yaml | 9
-rw-r--r--  puppet/services/cinder-volume.yaml | 14
-rw-r--r--  puppet/services/congress.yaml | 97
-rw-r--r--  puppet/services/database/mongodb-base.yaml | 2
-rw-r--r--  puppet/services/database/mongodb.yaml | 9
-rw-r--r--  puppet/services/database/mysql-client.yaml | 30
-rw-r--r--  puppet/services/database/mysql-internal-tls-certmonger.yaml | 6
-rw-r--r--  puppet/services/database/mysql.yaml | 26
-rw-r--r--  puppet/services/database/redis-base.yaml | 2
-rw-r--r--  puppet/services/database/redis.yaml | 2
-rw-r--r--  puppet/services/disabled/glance-registry.yaml | 30
-rw-r--r--  puppet/services/ec2-api.yaml | 117
-rw-r--r--  puppet/services/etcd.yaml | 58
-rw-r--r--  puppet/services/glance-api.yaml | 57
-rw-r--r--  puppet/services/glance-base.yaml | 4
-rw-r--r--  puppet/services/glance-registry.yaml | 100
-rw-r--r--  puppet/services/gnocchi-api.yaml | 19
-rw-r--r--  puppet/services/gnocchi-base.yaml | 5
-rw-r--r--  puppet/services/gnocchi-metricd.yaml | 11
-rw-r--r--  puppet/services/gnocchi-statsd.yaml | 9
-rw-r--r--  puppet/services/haproxy-internal-tls-certmonger.yaml | 37
-rw-r--r--  puppet/services/haproxy-public-tls-certmonger.yaml | 6
-rw-r--r--  puppet/services/haproxy.yaml | 11
-rw-r--r--  puppet/services/heat-api-cfn.yaml | 9
-rw-r--r--  puppet/services/heat-api-cloudwatch.yaml | 9
-rw-r--r--  puppet/services/heat-api.yaml | 9
-rw-r--r--  puppet/services/heat-base.yaml | 77
-rw-r--r--  puppet/services/heat-engine.yaml | 39
-rw-r--r--  puppet/services/horizon.yaml | 14
-rw-r--r--  puppet/services/ironic-api.yaml | 13
-rw-r--r--  puppet/services/ironic-base.yaml | 3
-rw-r--r--  puppet/services/ironic-conductor.yaml | 15
-rw-r--r--  puppet/services/keepalived.yaml | 2
-rw-r--r--  puppet/services/kernel.yaml | 2
-rw-r--r--  puppet/services/keystone.yaml | 66
-rw-r--r--  puppet/services/logging/fluentd-base.yaml | 2
-rw-r--r--  puppet/services/logging/fluentd-client.yaml | 11
-rw-r--r--  puppet/services/logging/fluentd-config.yaml | 2
-rw-r--r--  puppet/services/manila-api.yaml | 5
-rw-r--r--  puppet/services/manila-backend-cephfs.yaml | 20
-rw-r--r--  puppet/services/manila-backend-generic.yaml | 2
-rw-r--r--  puppet/services/manila-backend-netapp.yaml | 2
-rw-r--r--  puppet/services/manila-base.yaml | 3
-rw-r--r--  puppet/services/manila-scheduler.yaml | 2
-rw-r--r--  puppet/services/manila-share.yaml | 4
-rw-r--r--  puppet/services/memcached.yaml | 17
-rw-r--r--  puppet/services/metrics/collectd.yaml | 120
-rw-r--r--  puppet/services/mistral-api.yaml | 2
-rw-r--r--  puppet/services/mistral-base.yaml | 5
-rw-r--r--  puppet/services/mistral-engine.yaml | 2
-rw-r--r--  puppet/services/mistral-executor.yaml | 2
-rw-r--r--  puppet/services/monitoring/sensu-base.yaml | 2
-rw-r--r--  puppet/services/monitoring/sensu-client.yaml | 11
-rw-r--r--  puppet/services/network/contrail-analytics-database.yaml (renamed from puppet/services/pacemaker/neutron-midonet.yaml) | 22
-rw-r--r--  puppet/services/network/contrail-analytics.yaml | 59
-rw-r--r--  puppet/services/network/contrail-base.yaml | 87
-rw-r--r--  puppet/services/network/contrail-config.yaml | 30
-rw-r--r--  puppet/services/network/contrail-control.yaml | 23
-rw-r--r--  puppet/services/network/contrail-database.yaml | 14
-rw-r--r--  puppet/services/network/contrail-heat.yaml (renamed from puppet/services/pacemaker/neutron-plugin-nuage.yaml) | 18
-rw-r--r--  puppet/services/network/contrail-neutron-plugin.yaml (renamed from puppet/services/pacemaker/database/mongodb.yaml) | 25
-rw-r--r--  puppet/services/network/contrail-provision.yaml (renamed from puppet/services/pacemaker/neutron-plugin-plumgrid.yaml) | 17
-rw-r--r--  puppet/services/network/contrail-tsn.yaml | 64
-rw-r--r--  puppet/services/network/contrail-vrouter.yaml | 64
-rw-r--r--  puppet/services/network/contrail-webui.yaml | 32
-rw-r--r--  puppet/services/neutron-api.yaml | 47
-rw-r--r--  puppet/services/neutron-base.yaml | 2
-rw-r--r--  puppet/services/neutron-compute-plugin-midonet.yaml | 2
-rw-r--r--  puppet/services/neutron-compute-plugin-nuage.yaml | 2
-rw-r--r--  puppet/services/neutron-compute-plugin-ovn.yaml | 26
-rw-r--r--  puppet/services/neutron-compute-plugin-plumgrid.yaml | 2
-rw-r--r--  puppet/services/neutron-dhcp.yaml | 14
-rw-r--r--  puppet/services/neutron-l3-compute-dvr.yaml | 27
-rw-r--r--  puppet/services/neutron-l3.yaml | 27
-rw-r--r--  puppet/services/neutron-metadata.yaml | 11
-rw-r--r--  puppet/services/neutron-midonet.yaml | 2
-rw-r--r--  puppet/services/neutron-ovs-agent.yaml | 18
-rw-r--r--  puppet/services/neutron-ovs-dpdk-agent.yaml | 10
-rw-r--r--  puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml | 73
-rw-r--r--  puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml | 78
-rw-r--r--  puppet/services/neutron-plugin-ml2-ovn.yaml | 29
-rw-r--r--  puppet/services/neutron-plugin-ml2.yaml | 12
-rw-r--r--  puppet/services/neutron-plugin-nuage.yaml | 2
-rw-r--r--  puppet/services/neutron-plugin-opencontrail.yaml | 74
-rw-r--r--  puppet/services/neutron-plugin-plumgrid.yaml | 5
-rw-r--r--  puppet/services/neutron-sriov-agent.yaml | 7
-rw-r--r--  puppet/services/nova-api.yaml | 178
-rw-r--r--  puppet/services/nova-base.yaml | 96
-rw-r--r--  puppet/services/nova-compute.yaml | 27
-rw-r--r--  puppet/services/nova-conductor.yaml | 22
-rw-r--r--  puppet/services/nova-consoleauth.yaml | 6
-rw-r--r--  puppet/services/nova-ironic.yaml | 10
-rw-r--r--  puppet/services/nova-libvirt.yaml | 15
-rw-r--r--  puppet/services/nova-metadata.yaml | 2
-rw-r--r--  puppet/services/nova-placement.yaml | 133
-rw-r--r--  puppet/services/nova-scheduler.yaml | 9
-rw-r--r--  puppet/services/nova-vnc-proxy.yaml | 6
-rw-r--r--  puppet/services/octavia-api.yaml | 98
-rw-r--r--  puppet/services/octavia-base.yaml | 62
-rw-r--r--  puppet/services/octavia-health-manager.yaml | 61
-rw-r--r--  puppet/services/octavia-housekeeping.yaml | 70
-rw-r--r--  puppet/services/octavia-worker.yaml | 102
-rw-r--r--  puppet/services/opendaylight-api.yaml | 13
-rw-r--r--  puppet/services/opendaylight-ovs.yaml | 13
-rw-r--r--  puppet/services/ovn-dbs.yaml | 40
-rw-r--r--  puppet/services/pacemaker.yaml | 33
-rw-r--r--  puppet/services/pacemaker/ceilometer-agent-central.yaml | 45
-rw-r--r--  puppet/services/pacemaker/ceilometer-agent-notification.yaml | 45
-rw-r--r--  puppet/services/pacemaker/ceilometer-collector.yaml | 45
-rw-r--r--  puppet/services/pacemaker/ceph-rbdmirror.yaml (renamed from puppet/services/pacemaker/gnocchi-api.yaml) | 28
-rw-r--r--  puppet/services/pacemaker/cinder-api.yaml | 45
-rw-r--r--  puppet/services/pacemaker/cinder-backup.yaml | 2
-rw-r--r--  puppet/services/pacemaker/cinder-scheduler.yaml | 45
-rw-r--r--  puppet/services/pacemaker/cinder-volume.yaml | 2
-rw-r--r--  puppet/services/pacemaker/database/mysql.yaml | 8
-rw-r--r--  puppet/services/pacemaker/database/redis.yaml | 2
-rw-r--r--  puppet/services/pacemaker/glance-api.yaml | 74
-rw-r--r--  puppet/services/pacemaker/glance-registry.yaml | 47
-rw-r--r--  puppet/services/pacemaker/gnocchi-metricd.yaml | 47
-rw-r--r--  puppet/services/pacemaker/gnocchi-statsd.yaml | 46
-rw-r--r--  puppet/services/pacemaker/haproxy.yaml | 4
-rw-r--r--  puppet/services/pacemaker/heat-api-cfn.yaml | 44
-rw-r--r--  puppet/services/pacemaker/heat-api-cloudwatch.yaml | 44
-rw-r--r--  puppet/services/pacemaker/heat-api.yaml | 44
-rw-r--r--  puppet/services/pacemaker/heat-engine.yaml | 45
-rw-r--r--  puppet/services/pacemaker/horizon.yaml | 41
-rw-r--r--  puppet/services/pacemaker/keystone.yaml | 45
-rw-r--r--  puppet/services/pacemaker/manila-share.yaml | 2
-rw-r--r--  puppet/services/pacemaker/memcached.yaml | 42
-rw-r--r--  puppet/services/pacemaker/neutron-dhcp.yaml | 46
-rw-r--r--  puppet/services/pacemaker/neutron-l3.yaml | 46
-rw-r--r--  puppet/services/pacemaker/neutron-metadata.yaml | 44
-rw-r--r--  puppet/services/pacemaker/neutron-ovs-agent.yaml | 42
-rw-r--r--  puppet/services/pacemaker/neutron-plugin-opencontrail.yaml | 40
-rw-r--r--  puppet/services/pacemaker/neutron-server.yaml | 48
-rw-r--r--  puppet/services/pacemaker/nova-api.yaml | 45
-rw-r--r--  puppet/services/pacemaker/nova-conductor.yaml | 45
-rw-r--r--  puppet/services/pacemaker/nova-consoleauth.yaml | 45
-rw-r--r--  puppet/services/pacemaker/nova-scheduler.yaml | 45
-rw-r--r--  puppet/services/pacemaker/nova-vnc-proxy.yaml | 45
-rw-r--r--  puppet/services/pacemaker/rabbitmq.yaml | 2
-rw-r--r--  puppet/services/pacemaker/sahara-api.yaml | 45
-rw-r--r--  puppet/services/pacemaker/sahara-engine.yaml | 45
-rw-r--r--  puppet/services/pacemaker_remote.yaml | 57
-rw-r--r--  puppet/services/panko-api.yaml | 4
-rw-r--r--  puppet/services/panko-base.yaml | 6
-rw-r--r--  puppet/services/rabbitmq.yaml | 10
-rw-r--r--  puppet/services/sahara-api.yaml | 6
-rw-r--r--  puppet/services/sahara-base.yaml | 5
-rw-r--r--  puppet/services/sahara-engine.yaml | 9
-rw-r--r--  puppet/services/services.yaml | 21
-rw-r--r--  puppet/services/snmp.yaml | 6
-rw-r--r--  puppet/services/sshd.yaml (renamed from puppet/services/neutron-compute-plugin-opencontrail.yaml) | 15
-rw-r--r--  puppet/services/swift-base.yaml | 2
-rw-r--r--  puppet/services/swift-proxy.yaml | 77
-rw-r--r--  puppet/services/swift-ringbuilder.yaml | 14
-rw-r--r--  puppet/services/swift-storage.yaml | 32
-rw-r--r--  puppet/services/tacker.yaml | 97
-rw-r--r--  puppet/services/time/ntp.yaml | 10
-rw-r--r--  puppet/services/time/timezone.yaml | 2
-rw-r--r--  puppet/services/tripleo-firewall.yaml | 2
-rw-r--r--  puppet/services/tripleo-packages.yaml | 10
-rw-r--r--  puppet/services/zaqar.yaml | 4
-rw-r--r--  puppet/upgrade_config.yaml | 14
-rw-r--r--  releasenotes/notes/6.0.0-b52a14a71fc62788.yaml | 125
-rw-r--r--  releasenotes/notes/add-default-ntp-server-696b8568e09be497.yaml | 6
-rw-r--r--  releasenotes/notes/composable-ha-37e2d7e1f57f5c10.yaml | 12
-rw-r--r--  releasenotes/notes/composable-upgrades-d9ec7c634365e8e0.yaml | 14
-rw-r--r--  releasenotes/notes/deployed-servers-fd47f18204cea105.yaml | 8
-rw-r--r--  releasenotes/notes/ha-by-default-55326e699ee8602c.yaml | 5
-rw-r--r--  releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml | 10
-rw-r--r--  releasenotes/notes/keystone_internal-53cc7b24ebdd9df4.yaml | 9
-rw-r--r--  releasenotes/notes/memcached-max-memory-ef6834d17953fca6.yaml | 7
-rw-r--r--  releasenotes/notes/octavia-service-integration-03bd3eb6cfe1efaf.yaml | 4
-rw-r--r--  releasenotes/notes/puppet-auditd-6504295e8c6c7a3b.yaml | 9
-rw-r--r--  releasenotes/source/_static/.placeholder | 0
-rw-r--r--  releasenotes/source/conf.py | 264
-rw-r--r--  releasenotes/source/index.rst | 19
-rw-r--r--  releasenotes/source/ocata.rst | 6
-rw-r--r--  releasenotes/source/unreleased.rst | 5
-rw-r--r--  requirements.txt | 8
-rw-r--r--  roles_data.yaml | 48
-rw-r--r--  roles_data_undercloud.yaml | 35
-rwxr-xr-x  scripts/hosts-config.sh | 14
-rw-r--r--  setup.py | 11
-rw-r--r--  test-requirements.txt | 10
-rwxr-xr-x  tools/process-templates.py | 55
-rwxr-xr-x  tools/releasenotes_tox.sh | 28
-rwxr-xr-x  tools/tox_install.sh | 30
-rwxr-xr-x  tools/yaml-validate.py | 115
-rw-r--r--  tox.ini | 4
533 files changed, 16973 insertions, 4909 deletions
diff --git a/.gitignore b/.gitignore
index 0925145c..cea6064d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -59,3 +59,6 @@ puppet/compute-config.yaml
puppet/controller-config.yaml
puppet/objectstorage-config.yaml
puppet/post.yaml
+
+# Files created by releasenotes build
+releasenotes/build
diff --git a/Gemfile b/Gemfile
deleted file mode 100644
index 302ef415..00000000
--- a/Gemfile
+++ /dev/null
@@ -1,24 +0,0 @@
-source 'https://rubygems.org'
-
-group :development, :test do
- gem 'puppetlabs_spec_helper', :require => false
-
- gem 'puppet-lint', '~> 1.1'
- gem 'puppet-lint-absolute_classname-check'
- gem 'puppet-lint-absolute_template_path'
- gem 'puppet-lint-trailing_newline-check'
-
- # Puppet 4.x related lint checks
- gem 'puppet-lint-unquoted_string-check'
- gem 'puppet-lint-leading_zero-check'
- gem 'puppet-lint-variable_contains_upcase'
- gem 'puppet-lint-numericvariable'
-end
-
-if puppetversion = ENV['PUPPET_GEM_VERSION']
- gem 'puppet', puppetversion, :require => false
-else
- gem 'puppet', :require => false
-end
-
-# vim:ft=ruby
diff --git a/README.rst b/README.rst
index 288112fc..68fdd0ec 100644
--- a/README.rst
+++ b/README.rst
@@ -53,3 +53,78 @@ A description of the directory layout in TripleO Heat Templates.
* validation-scripts: validation scripts useful to all deployment
configurations
+
+
+Service testing matrix
+----------------------
+
+The configuration for the CI scenarios is defined in `tripleo-heat-templates/ci/`
+and each scenario exercises services according to the following table:
+
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| - | scenario001 | scenario002 | scenario003 | scenario004 | multinode-nonha |
++================+=============+=============+=============+=============+=================+
+| keystone | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| glance | file | swift | file | file | swift |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| cinder | rbd | iscsi | | | iscsi |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| heat | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| mysql | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| neutron | ovs | ovs | ovs | ovs | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| rabbitmq | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| mongodb | X | X | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| redis | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| haproxy | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| keepalived | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| memcached | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| pacemaker | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| nova | qemu | qemu | qemu | qemu | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| ntp | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| snmp | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| timezone | X | X | X | X | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| sahara | | | X | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| mistral | | | X | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| swift | | X | | | X |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| aodh | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| ceilometer | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| gnocchi | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| panko | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| barbican | | X | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| zaqar | | X | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| ec2api | | X | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| cephrgw | | X | | X | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| tacker | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| congress | X | | | | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| cephmds | | | | X | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| manila | | | | X | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
diff --git a/Rakefile b/Rakefile
deleted file mode 100644
index bca6a6c2..00000000
--- a/Rakefile
+++ /dev/null
@@ -1,6 +0,0 @@
-require 'puppetlabs_spec_helper/rake_tasks'
-require 'puppet-lint/tasks/puppet-lint'
-
-PuppetLint.configuration.fail_on_warnings = true
-PuppetLint.configuration.send('disable_80chars')
-PuppetLint.configuration.send('disable_autoloader_layout')
diff --git a/all-nodes-validation.yaml b/all-nodes-validation.yaml
index a7383375..65d01d0f 100644
--- a/all-nodes-validation.yaml
+++ b/all-nodes-validation.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Software Config to drive validations that occur on all nodes.
diff --git a/bootstrap-config.yaml b/bootstrap-config.yaml
index c87670e3..a3fdee96 100644
--- a/bootstrap-config.yaml
+++ b/bootstrap-config.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: 'Bootstrap Config'
parameters:
diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index ae747621..cc22ff92 100644
--- a/capabilities-map.yaml
+++ b/capabilities-map.yaml
@@ -160,6 +160,16 @@ topics:
description: Deploy Mistral service
requires:
- overcloud-resource-registry-puppet.yaml
+ - title: Ceilometer Api
+ description:
+ environments:
+ - file: environments/services/disable-ceilometer-api.yaml
+ title: Ceilometer Api
+        description: Disable the Ceilometer API service. This service is
+          deprecated and will be removed in future releases. Please move
+          to the Gnocchi/Aodh/Panko APIs instead.
+ requires:
+ - overcloud-resource-registry-puppet.yaml
# - title: Network Interface Configuration
# description:
@@ -355,6 +365,16 @@ topics:
description: Enables PLUMgrid extensions
requires:
- overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-ml2-fujitsu-cfab.yaml
+ title: Fujitsu Neutron plugin for C-Fabric
+ description: Enable C-Fabric in the overcloud
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-ml2-fujitsu-fossw.yaml
+ title: Fujitsu Neutron plugin for FOS
+ description: Enable FOS in the overcloud
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- title: Nova Extensions
description:
@@ -393,42 +413,90 @@ topics:
requires:
- overcloud-resource-registry-puppet.yaml
- file: environments/cinder-dellsc-config.yaml
- title: Cinder Dell Storage Center ISCSI backend
+ title: Cinder Dell EMC Storage Center ISCSI backend
+ description: >
+ Enables a Cinder Dell EMC Storage Center ISCSI backend,
+ configured via puppet
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/cinder-hpelefthand-config.yaml
+ title: Cinder HPELeftHandISCSI backend
description: >
- Enables a Cinder Dell Storage Center ISCSI backend, configured
+ Enables a Cinder HPELeftHandISCSI backend, configured
via puppet
requires:
- overcloud-resource-registry-puppet.yaml
- - file: environments/cinder-eqlx-config.yaml
- title: Cinder EQLX backend
+ - file: environments/cinder-dellps-config.yaml
+ title: Cinder Dell EMC PS Series backend
description: >
- Enables a Cinder EQLX backend, configured via puppet
+ Enables a Cinder Dell EMC PS Series backend,
+ configured via puppet
requires:
- overcloud-resource-registry-puppet.yaml
- - title: Externally managed Ceph
+ - file: environments/cinder-iser.yaml
+ title: Cinder iSER backend
+ description: >
+ Enable a Cinder iSER RDMA backend, configured via puppet
+ - file: environments/cinder-scaleio-config.yaml
+ title: Cinder Dell EMC ScaleIO backend
+ description: >
+ Enables a Cinder Dell EMC ScaleIO backend,
+ configured via puppet
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Ceph
description: >
- Enable the use of an externally managed Ceph cluster
+ Enable the use of Ceph in the overcloud
environments:
- file: environments/puppet-ceph-external.yaml
title: Externally managed Ceph
- description:
+ description: >
+ Configures the overcloud to use an externally managed Ceph cluster, via RBD driver.
requires:
- overcloud-resource-registry-puppet.yaml
- - title: Ceph Devel
+ - file: environments/puppet-ceph.yaml
+ title: TripleO managed Ceph
+ description: >
+        Deploys a Ceph cluster via TripleO; requires at least one CephStorage node, or
+        use of the hyperconverged-ceph.yaml environment for the HCI scenario, where CephOSD is
+        colocated with NovaCompute, and configures the overcloud to use it via the RBD driver.
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: CephMDS
description: >
- Enable a Ceph storage cluster using the controller and 2 ceph nodes.
- Rbd backends are enabled for Cinder, Glance, and Nova.
+ Deploys CephMDS via TripleO, an additional Ceph service needed to create shared
+ filesystems hosted in Ceph.
environments:
- - file: environments/puppet-ceph-devel.yaml
- title: Ceph Devel
+ - file: environments/services/ceph-mds.yaml
+ title: Deploys CephMDS
description:
requires:
+ - environments/puppet-ceph.yaml
+ - title: Ceph Rados Gateway
+ description: >
+ Deploys CephRGW via TripleO, transparently replaces Swift providing a compatible API
+ which stores data in the Ceph cluster.
+ environments:
+ - file: environments/ceph-radosgw.yaml
+ title: Deploys CephRGW
+ description:
+ requires:
+ - environments/puppet-ceph.yaml
+ - title: Manila with CephFS
+ description: >
+ Deploys Manila and configures it with the CephFS driver. This requires the deployment of
+ Ceph and CephMDS from TripleO or the use of an external Ceph cluster for the overcloud.
+ environments:
+ - file: environments/manila-cephfsnative-config.yaml
+ title: Deploys Manila with CephFS driver
+ description: Deploys Manila and configures CephFS as its default backend.
+ requires:
- overcloud-resource-registry-puppet.yaml
- title: Storage Environment
description: >
Can be used to set up storage backends. Defaults to Ceph used as a
- backend for Cinder, Glance and Nova ephemeral storage. It configures
- for example which services will use Ceph, or if any of the services
+ backend for Cinder, Glance, Nova ephemeral storage and Gnocchi. It
+ configures which services will use Ceph, or if any of the services
will use NFS. And more. Usually requires to be edited by user first.
tags:
- no-gui
@@ -504,3 +572,31 @@ topics:
description:
requires:
- overcloud-resource-registry-puppet.yaml
+
+ - title: Security Options
+ description: Security Hardening Options
+ environment_groups:
+ - title: SSH Banner Text
+ description: Enables population of SSH Banner Text
+ environments:
+ - file: environments/sshd-banner.yaml
+ title: SSH Banner Text
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Horizon Password Validation
+ description: Enable Horizon Password validation
+ environments:
+ - file: environments/horizon_password_validation.yaml
+ title: Horizon Password Validation
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: AuditD Rules
+ description: Management of AuditD rules
+ environments:
+ - file: environments/auditd.yaml
+ title: AuditD Rule Management
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
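
Each entry added to capabilities-map.yaml above declares its prerequisites through a `requires:` list (for example, environments/services/ceph-mds.yaml requires environments/puppet-ceph.yaml). At deploy time that chain translates into stacking the environment files in order. A hedged sketch reusing only files named in this hunk, assuming the standard deploy client:

    # Order matters: the base Ceph environment first, then the add-ons that require it.
    openstack overcloud deploy \
      --templates /usr/share/openstack-tripleo-heat-templates \
      -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-ceph.yaml \
      -e /usr/share/openstack-tripleo-heat-templates/environments/services/ceph-mds.yaml \
      -e /usr/share/openstack-tripleo-heat-templates/environments/manila-cephfsnative-config.yaml
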
diff --git a/ci/common/net-config-multinode-os-net-config.yaml b/ci/common/net-config-multinode-os-net-config.yaml
new file mode 100644
index 00000000..8c50b641
--- /dev/null
+++ b/ci/common/net-config-multinode-os-net-config.yaml
@@ -0,0 +1,114 @@
+heat_template_version: ocata
+
+description: >
+ Software Config to drive os-net-config for a simple bridge configured
+ with a static IP address for the ctlplane network.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet:
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ OvSBridgeMtu:
+ default: 1300
+ description: The mtu of the OvS bridge
+ type: number
+
+resources:
+
+ OsNetConfigImpl:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - |
+ #!/bin/bash
+ function network_config_hook {
+ primary_private_ip=$(cat /etc/nodepool/primary_node_private)
+ sed -i "s/primary_private_ip/$primary_private_ip/" /etc/os-net-config/config.json
+ subnode_private_ip=$(cat /etc/nodepool/node_private)
+ sed -i "s/subnode_private_ip/$subnode_private_ip/" /etc/os-net-config/config.json
+ # We start with an arbitrarily high vni key so that we don't
+ # overlap with Neutron created values. These will also match the
+ # values that we've been using previously from the devstack-gate
+ # code.
+ vni=1000002
+ subnode_index=$(grep -n $(cat /etc/nodepool/node_private) /etc/nodepool/sub_nodes_private | cut -d: -f1)
+ let vni+=$subnode_index
+ sed -i "s/vni/$vni/" /etc/os-net-config/config.json
+ export interface_name="br-ex_$primary_private_ip"
+ # Until we are fully migrated to os-net-config we need to clean
+ # up the old bridge first created by devstack-gate
+ ovs-vsctl del-br br-ex
+ }
+
+ -
+ str_replace:
+ template:
+ get_file: ../../network/scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: bridge_name
+ mtu:
+ get_param: OvSBridgeMtu
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ list_join:
+ - "/"
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ members:
+ - type: ovs_tunnel
+ name: interface_name
+ tunnel_type: vxlan
+ ovs_options:
+ - list_join:
+ - "="
+ - - key
+ - vni
+ - list_join:
+ - "="
+ - - remote_ip
+ - primary_private_ip
+ - list_join:
+ - "="
+ - - local_ip
+ - subnode_private_ip
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
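
The network_config_hook above derives a unique VXLAN VNI for each subnode by adding the node's 1-based line index in /etc/nodepool/sub_nodes_private to a fixed base of 1000002, then substitutes it into /etc/os-net-config/config.json. A standalone sketch of that arithmetic, with the nodepool file contents assumed purely for illustration:

    # Assumes /etc/nodepool/node_private holds this node's IP and
    # /etc/nodepool/sub_nodes_private lists one subnode IP per line.
    base_vni=1000002
    subnode_index=$(grep -n "$(cat /etc/nodepool/node_private)" /etc/nodepool/sub_nodes_private | cut -d: -f1)
    vni=$((base_vni + subnode_index))   # e.g. the second subnode gets 1000004
    echo "Using VXLAN VNI $vni"
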
diff --git a/ci/common/net-config-multinode.yaml b/ci/common/net-config-multinode.yaml
index 49a06881..dc31235a 100644
--- a/ci/common/net-config-multinode.yaml
+++ b/ci/common/net-config-multinode.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Software Config to drive os-net-config for a simple bridge configured
@@ -47,7 +47,9 @@ resources:
str_replace:
template: |
#!/bin/bash
- ip addr add CONTROLPLANEIP/CONTROLPLANESUBNETCIDR dev $bridge_name
+ if ! ip addr show dev $bridge_name | grep CONTROLPLANEIP/CONTROLPLANESUBNETCIDR; then
+ ip addr add CONTROLPLANEIP/CONTROLPLANESUBNETCIDR dev $bridge_name
+ fi
params:
CONTROLPLANEIP: {get_param: ControlPlaneIp}
CONTROLPLANESUBNETCIDR: {get_param: ControlPlaneSubnetCidr}
diff --git a/ci/environments/multinode-3nodes.yaml b/ci/environments/multinode-3nodes.yaml
new file mode 100644
index 00000000..03065c6a
--- /dev/null
+++ b/ci/environments/multinode-3nodes.yaml
@@ -0,0 +1,78 @@
+# Specifies which roles (groups of nodes) will be deployed
+# Note this is used as an input to the various *.j2.yaml
+# jinja2 templates, so that they are converted into *.yaml
+# during the plan creation (via a mistral action/workflow).
+#
+# The format is a list, with the following format:
+#
+# * name: (string) mandatory, name of the role, must be unique
+#
+# CountDefault: (number) optional, default number of nodes, defaults to 0
+# sets the default for the {{role.name}}Count parameter in overcloud.yaml
+#
+# HostnameFormatDefault: (string) optional default format string for hostname
+# defaults to '%stackname%-{{role.name.lower()}}-%index%'
+# sets the default for {{role.name}}HostnameFormat parameter in overcloud.yaml
+#
+# ServicesDefault: (list) optional default list of services to be deployed
+# on the role, defaults to an empty list. Sets the default for the
+# {{role.name}}Services parameter in overcloud.yaml
+
+- name: ControllerApi
+ CountDefault: 1
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::Core
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronApi
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::NovaConsoleauth
+ - OS::TripleO::Services::NovaVncProxy
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+
+- name: Controller
+ CountDefault: 1
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CinderBackup
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Core
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
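
The header comment in multinode-3nodes.yaml explains that a roles file like this feeds the *.j2.yaml templates during plan creation, setting the defaults for each role's Count, HostnameFormat and Services parameters. A hedged sketch of supplying it at deploy time, assuming a python-tripleoclient version that exposes a roles-file option (not part of this patch):

    # Sketch only: -r/--roles-file availability depends on the client version in use.
    openstack overcloud deploy \
      --templates /usr/share/openstack-tripleo-heat-templates \
      -r /usr/share/openstack-tripleo-heat-templates/ci/environments/multinode-3nodes.yaml
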
diff --git a/ci/environments/multinode-core.yaml b/ci/environments/multinode-core.yaml
new file mode 100644
index 00000000..0c07a1b0
--- /dev/null
+++ b/ci/environments/multinode-core.yaml
@@ -0,0 +1,37 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Core Service
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ Debug:
+ type: string
+ default: ''
+
+resources:
+
+outputs:
+ role_data:
+ description: Role data for the multinode firewall configuration
+ value:
+ service_name: multinode_core
+ config_settings:
+ tripleo.core.firewall_rules:
+ '999 core':
+ proto: 'udp'
+ dport:
+ - 4789
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
new file mode 100644
index 00000000..d89a4942
--- /dev/null
+++ b/ci/environments/multinode.yaml
@@ -0,0 +1,55 @@
+resource_registry:
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+
+parameter_defaults:
+ ControllerServices:
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ ControllerExtraConfig:
+ nova::compute::libvirt::services::libvirt_virt_type: qemu
+ nova::compute::libvirt::libvirt_virt_type: qemu
+ # Required for Centos 7.3 and Qemu 2.6.0
+ nova::compute::libvirt::libvirt_cpu_mode: 'none'
+ #NOTE(gfidente): not great but we need this to deploy on ext4
+ #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
+ ceph::profile::params::osd_max_object_name_len: 256
+ ceph::profile::params::osd_max_object_namespace_len: 64
+ SwiftCeilometerPipelineEnabled: False
+ Debug: True
diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml
new file mode 100644
index 00000000..6710fef7
--- /dev/null
+++ b/ci/environments/multinode_major_upgrade.yaml
@@ -0,0 +1,47 @@
+resource_registry:
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+
+parameter_defaults:
+ ControllerServices:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::GlanceRegistry
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::SaharaApi
+ - OS::TripleO::Services::SaharaEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ ControllerExtraConfig:
+ nova::compute::libvirt::services::libvirt_virt_type: qemu
+ nova::compute::libvirt::libvirt_virt_type: qemu
+ # Required for Centos 7.3 and Qemu 2.6.0
+ nova::compute::libvirt::libvirt_cpu_mode: 'none'
+ heat::rpc_response_timeout: 600
+ SwiftCeilometerPipelineEnabled: False
+ Debug: True
diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml
index ee5bd648..e09ca705 100644
--- a/ci/environments/scenario001-multinode.yaml
+++ b/ci/environments/scenario001-multinode.yaml
@@ -4,18 +4,22 @@ resource_registry:
OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml
OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml
OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml
+ OS::TripleO::Services::PankoApi: /usr/share/openstack-tripleo-heat-templates/puppet/services/panko-api.yaml
+ OS::TripleO::Services::Collectd: /usr/share/openstack-tripleo-heat-templates/puppet/services/metrics/collectd.yaml
+ OS::TripleO::Services::Tacker: /usr/share/openstack-tripleo-heat-templates/puppet/services/tacker.yaml
+ OS::TripleO::Services::Congress: /usr/share/openstack-tripleo-heat-templates/puppet/services/congress.yaml
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- - OS::TripleO::Services::GlanceRegistry
- OS::TripleO::Services::HeatApi
- OS::TripleO::Services::HeatApiCfn
- OS::TripleO::Services::HeatApiCloudwatch
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
@@ -29,10 +33,12 @@ parameter_defaults:
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
- OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
@@ -50,6 +56,7 @@ parameter_defaults:
- OS::TripleO::Services::GnocchiApi
- OS::TripleO::Services::GnocchiMetricd
- OS::TripleO::Services::GnocchiStatsd
+ - OS::TripleO::Services::PankoApi
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::CephClient
@@ -57,6 +64,11 @@ parameter_defaults:
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Collectd
+ - OS::TripleO::Services::Tacker
+ - OS::TripleO::Services::Congress
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
@@ -79,3 +91,14 @@ parameter_defaults:
GlanceBackend: rbd
GnocchiBackend: rbd
CinderEnableIscsiBackend: false
+ BannerText: |
+ ******************************************************************
+ * This system is for the use of authorized users only. Usage of *
+ * this system may be monitored and recorded by system personnel. *
+ * Anyone using this system expressly consents to such monitoring *
+ * and is advised that if such monitoring reveals possible *
+ * evidence of criminal activity, system personnel may provide *
+ * the evidence from such monitoring to law enforcement officials.*
+ ******************************************************************
+ CollectdExtraPlugins:
+ - rrdtool
diff --git a/ci/environments/scenario002-multinode.yaml b/ci/environments/scenario002-multinode.yaml
index 97fec24c..3207d133 100644
--- a/ci/environments/scenario002-multinode.yaml
+++ b/ci/environments/scenario002-multinode.yaml
@@ -3,18 +3,19 @@ resource_registry:
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Services::BarbicanApi: ../../puppet/services/barbican-api.yaml
OS::TripleO::Services::Zaqar: ../../puppet/services/zaqar.yaml
+ OS::TripleO::Services::Ec2Api: ../../puppet/services/ec2-api.yaml
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- - OS::TripleO::Services::GlanceRegistry
- OS::TripleO::Services::HeatApi
- OS::TripleO::Services::HeatApiCfn
- OS::TripleO::Services::HeatApiCloudwatch
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
@@ -28,6 +29,7 @@ parameter_defaults:
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
- OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::Ntp
@@ -45,7 +47,11 @@ parameter_defaults:
- OS::TripleO::Services::BarbicanApi
- OS::TripleO::Services::MongoDb
- OS::TripleO::Services::Zaqar
+ - OS::TripleO::Services::Ec2Api
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
Debug: true
+ SwiftCeilometerPipelineEnabled: false
diff --git a/ci/environments/scenario003-multinode.yaml b/ci/environments/scenario003-multinode.yaml
index 092426cb..1dc8b13d 100644
--- a/ci/environments/scenario003-multinode.yaml
+++ b/ci/environments/scenario003-multinode.yaml
@@ -12,12 +12,12 @@ parameter_defaults:
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- - OS::TripleO::Services::GlanceRegistry
- OS::TripleO::Services::HeatApi
- OS::TripleO::Services::HeatApiCfn
- OS::TripleO::Services::HeatApiCloudwatch
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
@@ -31,6 +31,7 @@ parameter_defaults:
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
- OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::Ntp
@@ -43,6 +44,8 @@ parameter_defaults:
- OS::TripleO::Services::MistralApi
- OS::TripleO::Services::MistralEngine
- OS::TripleO::Services::MistralExecutor
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
@@ -50,3 +53,4 @@ parameter_defaults:
# we don't deploy Swift so we switch to file backend.
GlanceBackend: 'file'
KeystoneTokenProvider: 'fernet'
+ SwiftCeilometerPipelineEnabled: false
diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml
index 4aa18709..dc05ab4e 100644
--- a/ci/environments/scenario004-multinode.yaml
+++ b/ci/environments/scenario004-multinode.yaml
@@ -1,25 +1,44 @@
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
- OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml
- OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml
- OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml
- OS::TripleO::Services::CephRgw: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-rgw.yaml
+ OS::TripleO::Services::CephMds: ../../puppet/services/ceph-mds.yaml
+ OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
+ OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
+ OS::TripleO::Services::CephRgw: ../../puppet/services/ceph-rgw.yaml
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
+ OS::TripleO::Services::ManilaApi: ../../puppet/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaShare: ../../puppet/services/pacemaker/manila-share.yaml
+ OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml
+ # These enable Pacemaker
+ OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
+ OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml
+ OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
+ OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
+ OS::TripleO::Services::Redis: ../../puppet/services/pacemaker/database/redis.yaml
+ OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
+ OS::TripleO::Services::Keepalived: OS::Heat::None
+
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::CephMds
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::CephRgw
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- - OS::TripleO::Services::GlanceRegistry
- OS::TripleO::Services::HeatApi
- OS::TripleO::Services::HeatApiCfn
- OS::TripleO::Services::HeatApiCloudwatch
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
@@ -29,10 +48,15 @@ parameter_defaults:
- OS::TripleO::Services::RabbitMQ
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::ManilaApi
+ - OS::TripleO::Services::ManilaScheduler
+ - OS::TripleO::Services::ManilaBackendCephFs
+ - OS::TripleO::Services::ManilaShare
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
- OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::Ntp
@@ -40,10 +64,8 @@ parameter_defaults:
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- - OS::TripleO::Services::CephMon
- - OS::TripleO::Services::CephOSD
- - OS::TripleO::Services::CephClient
- - OS::TripleO::Services::CephRgw
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
@@ -60,3 +82,4 @@ parameter_defaults:
CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+ SwiftCeilometerPipelineEnabled: false
diff --git a/ci/pingtests/scenario001-multinode.yaml b/ci/pingtests/scenario001-multinode.yaml
index ede83db0..2651c0d0 100644
--- a/ci/pingtests/scenario001-multinode.yaml
+++ b/ci/pingtests/scenario001-multinode.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2013-05-23
+heat_template_version: ocata
description: >
  HOT template to create resources deployed by scenario001.
diff --git a/ci/pingtests/scenario002-multinode.yaml b/ci/pingtests/scenario002-multinode.yaml
index 1ab7eef9..da1ae60c 100644
--- a/ci/pingtests/scenario002-multinode.yaml
+++ b/ci/pingtests/scenario002-multinode.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2013-05-23
+heat_template_version: ocata
description: >
  HOT template to create resources deployed by scenario002.
@@ -81,7 +81,7 @@ resources:
type: OS::Cinder::EncryptedVolumeType
properties:
volume_type: {get_resource: luks_volume_type}
- provider: nova.volume.encryptors.luks.LuksEncryptor
+ provider: luks
cipher: aes-xts-plain64
control_location: front-end
key_size: 256
diff --git a/ci/pingtests/scenario003-multinode.yaml b/ci/pingtests/scenario003-multinode.yaml
index 445c47af..c3ceadaf 100644
--- a/ci/pingtests/scenario003-multinode.yaml
+++ b/ci/pingtests/scenario003-multinode.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2013-05-23
+heat_template_version: ocata
description: >
  HOT template to create resources deployed by scenario003.
diff --git a/ci/pingtests/scenario004-multinode.yaml b/ci/pingtests/scenario004-multinode.yaml
index 17792cd1..ebdfea14 100644
--- a/ci/pingtests/scenario004-multinode.yaml
+++ b/ci/pingtests/scenario004-multinode.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2013-05-23
+heat_template_version: ocata
description: >
  HOT template to create resources deployed by scenario004.
@@ -118,6 +118,18 @@ resources:
ram: 512
vcpus: 1
+ manila_share_type:
+ type: OS::Manila::ShareType
+ properties:
+ name: default
+ driver_handles_share_servers: false
+
+ manila_share:
+ type: OS::Manila::Share
+ properties:
+ share_protocol: CEPHFS
+ size: 1
+
outputs:
server1_private_ip:
description: IP address of server1 in private network
diff --git a/ci/pingtests/tenantvm_floatingip.yaml b/ci/pingtests/tenantvm_floatingip.yaml
new file mode 100644
index 00000000..b910d6c1
--- /dev/null
+++ b/ci/pingtests/tenantvm_floatingip.yaml
@@ -0,0 +1,142 @@
+heat_template_version: ocata
+
+description: >
+ This template resides in tripleo-ci for Mitaka CI jobs only.
+ For Newton and beyond, please look in THT.
+ HOT template to create a new neutron network plus a router to the public
+ network, and for deploying a server into the new network. The template also
+ assigns a floating IP address and sets security group rules. ADAPTED FROM
+ https://raw.githubusercontent.com/openstack/heat-templates/master/hot/servers_in_new_neutron_net.yaml
+parameters:
+ key_name:
+ type: string
+ description: Name of keypair to assign to servers
+ default: 'pingtest_key'
+ image:
+ type: string
+ description: Name of image to use for servers
+ default: 'pingtest_image'
+ public_net_name:
+ type: string
+ default: 'nova'
+ description: >
+ ID or name of public network for which floating IP addresses will be allocated
+ private_net_name:
+ type: string
+ description: Name of private network to be created
+ default: 'default-net'
+ private_net_cidr:
+ type: string
+ description: Private network address (CIDR notation)
+ default: '192.168.2.0/24'
+ private_net_gateway:
+ type: string
+ description: Private network gateway address
+ default: '192.168.2.1'
+ private_net_pool_start:
+ type: string
+ description: Start of private network IP address allocation pool
+ default: '192.168.2.100'
+ private_net_pool_end:
+ type: string
+ default: '192.168.2.200'
+ description: End of private network IP address allocation pool
+
+resources:
+
+ key_pair:
+ type: OS::Nova::KeyPair
+ properties:
+ save_private_key: true
+ name: {get_param: key_name }
+
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: private_net_name }
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: { get_param: private_net_cidr }
+ gateway_ip: { get_param: private_net_gateway }
+ allocation_pools:
+ - start: { get_param: private_net_pool_start }
+ end: { get_param: private_net_pool_end }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: public_net_name }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+ volume1:
+ type: OS::Cinder::Volume
+ properties:
+ name: Volume1
+ image: { get_param: image }
+ size: 1
+
+ server1:
+ type: OS::Nova::Server
+ depends_on: volume1
+ properties:
+ name: Server1
+ block_device_mapping:
+ - device_name: vda
+ volume_id: { get_resource: volume1 }
+ flavor: { get_resource: test_flavor }
+ key_name: { get_resource: key_pair }
+ networks:
+ - port: { get_resource: server1_port }
+
+ server1_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+
+ server1_floating_ip:
+ type: OS::Neutron::FloatingIP
+ # TODO: investigate why we need this depends_on and if we could
+ # replace it by router_id with get_resource: router_interface
+ depends_on: router_interface
+ properties:
+ floating_network: { get_param: public_net_name }
+ port_id: { get_resource: server1_port }
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ description: Add security group rules for server
+ name: pingtest-security-group
+ rules:
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ - remote_ip_prefix: 0.0.0.0/0
+ protocol: icmp
+
+ test_flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 512
+ vcpus: 1
+
+outputs:
+ server1_private_ip:
+ description: IP address of server1 in private network
+ value: { get_attr: [ server1, first_address ] }
+ server1_public_ip:
+ description: Floating IP address of server1 in public network
+ value: { get_attr: [ server1_floating_ip, floating_ip_address ] }
diff --git a/ci/scripts/freeipa_setup.sh b/ci/scripts/freeipa_setup.sh
new file mode 100644
index 00000000..e699841f
--- /dev/null
+++ b/ci/scripts/freeipa_setup.sh
@@ -0,0 +1,120 @@
+#!/bin/bash
+#
+# Used environment variables:
+#
+# - Hostname
+# - FreeIPAIP
+# - DirectoryManagerPassword
+# - AdminPassword
+# - UndercloudFQDN
+# - HostsSecret
+# - ProvisioningCIDR: If set, it adds the given CIDR to the provisioning
+# interface (which is hardcoded to eth1)
+# - UsingNovajoin: If unset, we pre-provision the service principals
+# needed for the overcloud deploy. If set, we skip this,
+# since novajoin will do it.
+#
+set -eux
+
+if [ -f "$HOME/freeipa-setup.env" ]; then
+ source ~/freeipa-setup.env
+elif [ -f "/tmp/freeipa-setup.env" ]; then
+ source /tmp/freeipa-setup.env
+fi
+
+export Hostname=${Hostname:-""}
+export FreeIPAIP=${FreeIPAIP:-""}
+export DirectoryManagerPassword=${DirectoryManagerPassword:-""}
+export AdminPassword=${AdminPassword:-""}
+export UndercloudFQDN=${UndercloudFQDN:-""}
+export HostsSecret=${HostsSecret:-""}
+export ProvisioningCIDR=${ProvisioningCIDR:-""}
+export UsingNovajoin=${UsingNovajoin:-""}
+
+if [ -n "$ProvisioningCIDR" ]; then
+ # Add address to provisioning network interface
+ ip link set dev eth1 up
+ ip addr add $ProvisioningCIDR dev eth1
+fi
+
+# Set DNS servers
+echo "nameserver 8.8.8.8" >> /etc/resolv.conf
+echo "nameserver 8.8.4.4" >> /etc/resolv.conf
+
+yum -q -y remove openstack-dashboard
+
+# Install the needed packages
+yum -q install -y ipa-server ipa-server-dns epel-release rng-tools mod_nss git
+yum -q install -y haveged
+
+# Prepare hostname
+hostnamectl set-hostname --static $Hostname
+
+echo $FreeIPAIP `hostname` | tee -a /etc/hosts
+
+# Set iptables rules
+cat << EOF > freeipa-iptables-rules.txt
+# Firewall configuration written by system-config-firewall
+# Manual customization of this file is not recommended.
+*filter
+:INPUT ACCEPT [0:0]
+:FORWARD ACCEPT [0:0]
+:OUTPUT ACCEPT [0:0]
+-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
+-A INPUT -p icmp -j ACCEPT
+-A INPUT -i lo -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT
+#TCP ports for FreeIPA
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 80 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 443 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 389 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 636 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 88 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 464 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 53 -j ACCEPT
+#UDP ports for FreeIPA
+-A INPUT -m state --state NEW -m udp -p udp --dport 88 -j ACCEPT
+-A INPUT -m state --state NEW -m udp -p udp --dport 464 -j ACCEPT
+-A INPUT -m state --state NEW -m udp -p udp --dport 123 -j ACCEPT
+-A INPUT -m state --state NEW -m udp -p udp --dport 53 -j ACCEPT
+-A INPUT -j REJECT --reject-with icmp-host-prohibited
+-A FORWARD -j REJECT --reject-with icmp-host-prohibited
+COMMIT
+EOF
+
+iptables-restore < freeipa-iptables-rules.txt
+
+# Entropy generation; otherwise, ipa-server-install will lag.
+chkconfig haveged on
+systemctl start haveged
+
+# Remove conflicting httpd configuration
+rm -f /etc/httpd/conf.d/ssl.conf
+
+# Set up FreeIPA
+ipa-server-install -U -r `hostname -d|tr "[a-z]" "[A-Z]"` \
+ -p $DirectoryManagerPassword -a $AdminPassword \
+ --hostname `hostname -f` \
+ --ip-address=$FreeIPAIP \
+ --setup-dns --auto-forwarders --auto-reverse
+
+# Authenticate
+echo $AdminPassword | kinit admin
+
+# Verify we have TGT
+klist
+
+if [ "$?" = '1' ]; then
+ exit 1
+fi
+
+if [ -z "$UsingNovajoin" ]; then
+ # Create undercloud host
+ ipa host-add $UndercloudFQDN --password=$HostsSecret --force
+
+ # Create overcloud nodes and services
+ git clone https://github.com/JAORMX/freeipa-tripleo-incubator.git
+ cd freeipa-tripleo-incubator
+ python create_ipa_tripleo_host_setup.py -w $HostsSecret -d $(hostname -d) \
+ --controller-count 1 --compute-count 1
+fi
diff --git a/default_passwords.yaml b/default_passwords.yaml
index 7a47f443..c85881e5 100644
--- a/default_passwords.yaml
+++ b/default_passwords.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: Passwords we manage at the top level
diff --git a/deployed-server/README.rst b/deployed-server/README.rst
index ce74e77b..e4d8299b 100644
--- a/deployed-server/README.rst
+++ b/deployed-server/README.rst
@@ -119,10 +119,15 @@ from the deployment command, the script should be ready to run:
[NovaCompute]: CREATE_IN_PROGRESS state changed
The user running the script must be able to ssh as root to each server. Define
-the hostnames of the deployed servers you intend to use for each role type::
-
- export controller_hosts="controller0 controller1 controller2"
- export compute_hosts="compute0"
+the names of your custom roles (if applicable) and hostnames of the deployed
+servers you intend to use for each role type. For each role name, a
+corresponding <role-name>_hosts variable should also be defined, e.g.::
+
+ export OVERCLOUD_ROLES="Controller NetworkNode StorageNode Compute"
+ export Controller_hosts="10.0.0.1 10.0.0.2 10.0.0.3"
+ export NetworkNode_hosts="10.0.0.4 10.0.0.5 10.0.0.6"
+ export StorageNode_hosts="10.0.0.7 10.0.0.8"
+ export Compute_hosts="10.0.0.9 10.0.0.10 10.0.0.11"
Then run the script on the undercloud with a stackrc file sourced, and
the script will copy the needed os-collect-config.conf configuration to each
diff --git a/deployed-server/ctlplane-port.yaml b/deployed-server/ctlplane-port.yaml
index eb10fba0..7b5cdf11 100644
--- a/deployed-server/ctlplane-port.yaml
+++ b/deployed-server/ctlplane-port.yaml
@@ -1,8 +1,14 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
parameters:
- Hostname:
+ network:
type: string
+ default: ctlplane
+ name:
+ type: string
+ replacement_policy:
+ type: string
+ default: AUTO
resources:
@@ -13,11 +19,10 @@ resources:
name:
list_join:
- '-'
- - - {get_param: Hostname}
- - ctlplane
+ - - {get_param: name}
- port
replacement_policy: AUTO
outputs:
- ip_address:
- value: {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]}
+ fixed_ips:
+ value: {get_attr: [ControlPlanePort, fixed_ips]}
diff --git a/deployed-server/deployed-neutron-port.yaml b/deployed-server/deployed-neutron-port.yaml
new file mode 100644
index 00000000..bddf8bc1
--- /dev/null
+++ b/deployed-server/deployed-neutron-port.yaml
@@ -0,0 +1,67 @@
+heat_template_version: ocata
+
+description: "
+ A fake OS::Neutron::Port stack which outputs fixed_ips and subnets based on
+ the input from the DeployedServerPortMap (set via parameter_defaults). This
+ lookup requires the use of port naming conventions. In order for this to work
+ with deployed-server the keys should be <hostname>-<network>.
+ Example:
+ parameter_defaults:
+ DeployedServerPortMap:
+ gatsby-ctlplane:
+ fixed_ips:
+ - ip_address: 127.0.0.1
+ subnets:
+ - cidr: 24"
+
+parameters:
+ name:
+ default: ''
+ type: string
+ network:
+ default: ''
+ type: string
+ fixed_ips:
+ default: ''
+ type: comma_delimited_list
+ replacement_policy:
+ default: ''
+ type: string
+ DeployedServerPortMap:
+ default: {}
+ type: json
+
+
+outputs:
+ fixed_ips:
+ value:
+ {get_param: [DeployedServerPortMap, {get_param: name}, fixed_ips]}
+ subnets:
+ value:
+ {get_param: [DeployedServerPortMap, {get_param: name}, subnets]}
+ name:
+ value: {get_param: name}
+ status:
+ value: DOWN
+ allowed_address_pairs:
+ value: {}
+ device_id:
+ value: ''
+ device_owner:
+ value: {get_param: network}
+ dns_assignment:
+ value: ''
+ port_security_enabled:
+ value: False
+ admin_state_up:
+ value: False
+ security_groups:
+ value: {}
+ network_id:
+ value: ''
+ tenant_id:
+ value: ''
+ qos_policy_id:
+ value: ''
+ mac_address:
+ value: ''
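
Note: a minimal environment snippet showing how this fake port template is typically consumed (the relative path and the "gatsby" hostname are illustrative assumptions) could look like:

    resource_registry:
      # map the ctlplane port of deployed servers to the fake Neutron port above
      OS::TripleO::DeployedServer::ControlPlanePort: ../deployed-server/deployed-neutron-port.yaml

    parameter_defaults:
      DeployedServerPortMap:
        # key follows the <hostname>-<network> convention from the description
        gatsby-ctlplane:
          fixed_ips:
            - ip_address: 192.168.24.5
          subnets:
            - cidr: 24
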
diff --git a/deployed-server/deployed-server-bootstrap-centos.sh b/deployed-server/deployed-server-bootstrap-centos.sh
new file mode 100644
index 00000000..7266ca57
--- /dev/null
+++ b/deployed-server/deployed-server-bootstrap-centos.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -eux
+
+yum install -y \
+ jq \
+ python-ipaddr \
+ openstack-puppet-modules \
+ os-net-config \
+ openvswitch \
+ python-heat-agent*
+
+ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
+
+setenforce 0
+sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
diff --git a/deployed-server/deployed-server-bootstrap-centos.yaml b/deployed-server/deployed-server-bootstrap-centos.yaml
new file mode 100644
index 00000000..c1740d78
--- /dev/null
+++ b/deployed-server/deployed-server-bootstrap-centos.yaml
@@ -0,0 +1,22 @@
+heat_template_version: ocata
+
+description: 'Deployed Server Bootstrap Config'
+
+parameters:
+
+ server:
+ type: string
+
+resources:
+
+ DeployedServerBootstrapConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: deployed-server-bootstrap-centos.sh}
+
+ DeployedServerBootstrapDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ config: {get_resource: DeployedServerBootstrapConfig}
+ server: {get_param: server}
diff --git a/deployed-server/deployed-server-bootstrap-rhel.sh b/deployed-server/deployed-server-bootstrap-rhel.sh
new file mode 100644
index 00000000..36ff0077
--- /dev/null
+++ b/deployed-server/deployed-server-bootstrap-rhel.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -eux
+
+yum install -y \
+ jq \
+ python-ipaddr \
+ openstack-puppet-modules \
+ os-net-config \
+ openvswitch \
+ python-heat-agent*
+
+ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
diff --git a/deployed-server/deployed-server-bootstrap-rhel.yaml b/deployed-server/deployed-server-bootstrap-rhel.yaml
new file mode 100644
index 00000000..2d2f5156
--- /dev/null
+++ b/deployed-server/deployed-server-bootstrap-rhel.yaml
@@ -0,0 +1,22 @@
+heat_template_version: ocata
+
+description: 'Deployed Server Bootstrap Config'
+
+parameters:
+
+ server:
+ type: string
+
+resources:
+
+ DeployedServerBootstrapConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: deployed-server-bootstrap-rhel.sh}
+
+ DeployedServerBootstrapDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ config: {get_resource: DeployedServerBootstrapConfig}
+ server: {get_param: server}
diff --git a/deployed-server/deployed-server-config.yaml b/deployed-server/deployed-server-config.yaml
deleted file mode 100644
index 8c59dc72..00000000
--- a/deployed-server/deployed-server-config.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-heat_template_version: 2014-10-16
-parameters:
- user_data_format:
- type: string
- default: SOFTWARE_CONFIG
-
-resources:
- # We just need something which returns a unique ID, but we can't
- # use RandomString because RefId returns the value, not the physical
- # resource ID, SoftwareConfig should work as it returns a UUID
- deployed-server-config:
- type: OS::Heat::SoftwareConfig
-
-outputs:
- # FIXME(shardy) this is needed because TemplateResource returns an
- # ARN not a UUID, which overflows the Deployment server_id column..
- user_data_format:
- value: SOFTWARE_CONFIG
- OS::stack_id:
- value: {get_resource: deployed-server-config}
-
-
diff --git a/deployed-server/deployed-server-roles-data.yaml b/deployed-server/deployed-server-roles-data.yaml
new file mode 100644
index 00000000..04da5565
--- /dev/null
+++ b/deployed-server/deployed-server-roles-data.yaml
@@ -0,0 +1,173 @@
+# Specifies which roles (groups of nodes) will be deployed
+# Note this is used as an input to the various *.j2.yaml
+# jinja2 templates, so that they are converted into *.yaml
+# during the plan creation (via a mistral action/workflow).
+#
+# The format is a list, with the following format:
+#
+# * name: (string) mandatory, name of the role, must be unique
+#
+# CountDefault: (number) optional, default number of nodes, defaults to 0
+# sets the default for the {{role.name}}Count parameter in overcloud.yaml
+#
+# HostnameFormatDefault: (string) optional default format string for hostname
+# defaults to '%stackname%-{{role.name.lower()}}-%index%'
+# sets the default for {{role.name}}HostnameFormat parameter in overcloud.yaml
+#
+# disable_constraints: (boolean) optional, whether to disable Nova and Glance
+# constraints for each role specified in the templates.
+#
+# ServicesDefault: (list) optional default list of services to be deployed
+# on the role, defaults to an empty list. Sets the default for the
+# {{role.name}}Services parameter in overcloud.yaml
+
+- name: ControllerDeployedServer
+ CountDefault: 1
+ disable_constraints: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::CephRgw
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderBackup
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronApi
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::Redis
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::MongoDb
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::NovaConsoleauth
+ - OS::TripleO::Services::NovaVncProxy
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::CeilometerApi
+ - OS::TripleO::Services::CeilometerCollector
+ - OS::TripleO::Services::CeilometerExpirer
+ - OS::TripleO::Services::CeilometerAgentCentral
+ - OS::TripleO::Services::CeilometerAgentNotification
+ - OS::TripleO::Services::Horizon
+ - OS::TripleO::Services::GnocchiApi
+ - OS::TripleO::Services::GnocchiMetricd
+ - OS::TripleO::Services::GnocchiStatsd
+ - OS::TripleO::Services::ManilaApi
+ - OS::TripleO::Services::ManilaScheduler
+ - OS::TripleO::Services::ManilaBackendGeneric
+ - OS::TripleO::Services::ManilaBackendNetapp
+ - OS::TripleO::Services::ManilaBackendCephFs
+ - OS::TripleO::Services::ManilaShare
+ - OS::TripleO::Services::AodhApi
+ - OS::TripleO::Services::AodhEvaluator
+ - OS::TripleO::Services::AodhNotifier
+ - OS::TripleO::Services::AodhListener
+ - OS::TripleO::Services::SaharaApi
+ - OS::TripleO::Services::SaharaEngine
+ - OS::TripleO::Services::IronicApi
+ - OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::NovaIronic
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::OpenDaylightApi
+ - OS::TripleO::Services::OpenDaylightOvs
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::BarbicanApi
+ - OS::TripleO::Services::PankoApi
+ - OS::TripleO::Services::Zaqar
+ - OS::TripleO::Services::OVNDBs
+
+- name: ComputeDeployedServer
+ CountDefault: 1
+ HostnameFormatDefault: '%stackname%-novacompute-%index%'
+ disable_constraints: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephClient
+ - OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::ComputeNeutronCorePlugin
+ - OS::TripleO::Services::ComputeNeutronOvsAgent
+ - OS::TripleO::Services::ComputeCeilometerAgent
+ - OS::TripleO::Services::ComputeNeutronL3Agent
+ - OS::TripleO::Services::ComputeNeutronMetadataAgent
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::NeutronSriovAgent
+ - OS::TripleO::Services::OpenDaylightOvs
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+
+- name: BlockStorageDeployedServer
+ disable_constraints: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::BlockStorageCinderVolume
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+
+- name: ObjectStorageDeployedServer
+ disable_constraints: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+
+- name: CephStorageDeployedServer
+ disable_constraints: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
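
Per the format notes at the top of this file, only the name field is mandatory; a minimal, hypothetical additional role entry would be:

    - name: MinimalDeployedServer
      CountDefault: 0
      disable_constraints: True
      ServicesDefault:
        - OS::TripleO::Services::Kernel
        - OS::TripleO::Services::Ntp
        - OS::TripleO::Services::TripleoPackages
        - OS::TripleO::Services::TripleoFirewall
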
diff --git a/deployed-server/deployed-server.yaml b/deployed-server/deployed-server.yaml
index da5698e5..1e8afb25 100644
--- a/deployed-server/deployed-server.yaml
+++ b/deployed-server/deployed-server.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
parameters:
image:
type: string
@@ -21,7 +21,7 @@ parameters:
default: ''
name:
type: string
- default: ''
+ default: 'deployed-server'
image_update_policy:
type: string
default: ''
@@ -38,28 +38,52 @@ parameters:
type: json
description: Optional scheduler hints to pass to nova
default: {}
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
- # We just need something which returns a unique ID, but we can't
- # use RandomString because RefId returns the value, not the physical
- # resource ID, SoftwareConfig should work as it returns a UUID
deployed-server:
- type: OS::TripleO::DeployedServerConfig
+ type: OS::Heat::DeployedServer
+ properties:
+ name: {get_param: name}
+ software_config_transport: {get_param: software_config_transport}
+
+ UpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ UpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
properties:
- user_data_format: SOFTWARE_CONFIG
+ name: UpgradeInitDeployment
+ server: {get_resource: deployed-server}
+ config: {get_resource: UpgradeInitConfig}
+
InstanceIdConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: apply-config
config:
- instance-id: {get_attr: [deployed-server, "OS::stack_id"]}
+ instance-id: {get_resource: deployed-server}
InstanceIdDeployment:
type: OS::Heat::StructuredDeployment
properties:
config: {get_resource: InstanceIdConfig}
server: {get_resource: deployed-server}
+ depends_on: UpgradeInitDeployment
HostsEntryConfig:
type: OS::Heat::SoftwareConfig
@@ -69,21 +93,10 @@ resources:
#!/bin/bash
set -eux
mkdir -p $heat_outputs_path
- host=$(hostnamectl --static)
- echo -n "$host " > $heat_outputs_path.hosts_entry
- host_ip=$(python -c "import socket; print socket.gethostbyname(\"$host\")")
- echo -n "$host_ip " >> $heat_outputs_path.hosts_entry
- echo >> $heat_outputs_path.hosts_entry
- cat $heat_outputs_path.hosts_entry
- echo -n $host_ip > $heat_outputs_path.ip_address
- cat $heat_outputs_path.ip_address
+ host=$(hostname -s)
echo -n $host > $heat_outputs_path.hostname
cat $heat_outputs_path.hostname
outputs:
- - name: hosts_entry
- description: hosts_entry
- - name: ip_address
- description: ip_address
- name: hostname
description: hostname
@@ -93,23 +106,28 @@ resources:
config: {get_resource: HostsEntryConfig}
server: {get_resource: deployed-server}
+ DeployedServerBootstrapConfig:
+ type: OS::TripleO::DeployedServer::Bootstrap
+ properties:
+ server: {get_resource: deployed-server}
+
ControlPlanePort:
type: OS::TripleO::DeployedServer::ControlPlanePort
properties:
- Hostname: {get_attr: [HostsEntryDeployment, hostname]}
+ network: ctlplane
+ name:
+ list_join:
+ - '-'
+ - - {get_attr: [HostsEntryDeployment, hostname]}
+ - ctlplane
+ replacement_policy: AUTO
outputs:
- # FIXME(shardy) this is needed because TemplateResource returns an
- # ARN not a UUID, which overflows the Deployment server_id column..
OS::stack_id:
- value: {get_attr: [deployed-server, "OS::stack_id"]}
+ value: {get_resource: deployed-server}
networks:
value:
ctlplane:
- - {get_attr: [ControlPlanePort, ip_address]}
+ - {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]}
name:
- value: {get_attr: [HostsEntryDeployment, hostname]}
- hosts_entry:
- value: {get_attr: [HostsEntryDeployment, hosts_entry]}
- ip_address:
- value: {get_attr: [HostsEntryDeployment, ip_address]}
+ value: {get_attr: [HostsEntryDeployment, hostname]}
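
The new UpgradeInitCommand parameter is injected by the UpgradeInitConfig/UpgradeInitDeployment resources above before any other deployment runs; a sketch of an environment file using it for a repository switch (repository names are placeholders only) might be:

    parameter_defaults:
      UpgradeInitCommand: |
        set -eu
        yum clean all
        yum-config-manager --disable 'example-old-release-*'
        yum-config-manager --enable 'example-new-release-*'
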
diff --git a/deployed-server/scripts/get-occ-config.sh b/deployed-server/scripts/get-occ-config.sh
index d6219e85..6c196f97 100755
--- a/deployed-server/scripts/get-occ-config.sh
+++ b/deployed-server/scripts/get-occ-config.sh
@@ -11,14 +11,22 @@ OBJECTSTORAGE_HOSTS=${OBJECTSTORAGE_HOSTS:-""}
CEPHSTORAGE_HOSTS=${CEPHSTORAGE_HOSTS:-""}
SUBNODES_SSH_KEY=${SUBNODES_SSH_KEY:-"~/.ssh/id_rsa"}
SSH_OPTIONS="-tt -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=Verbose -o PasswordAuthentication=no -o ConnectionAttempts=32"
+OVERCLOUD_ROLES=${OVERCLOUD_ROLES:-"Controller Compute BlockStorage ObjectStorage CephStorage"}
+
+# Set the _hosts vars for the default roles based on the old var names that
+# were all caps for backwards compatibility.
+Controller_hosts=${Controller_hosts:-"$CONTROLLER_HOSTS"}
+Compute_hosts=${Compute_hosts:-"$COMPUTE_HOSTS"}
+BlockStorage_hosts=${BlockStorage_hosts:-"$BLOCKSTORAGE_HOSTS"}
+ObjectStorage_hosts=${ObjectStorage_hosts:-"$OBJECTSTORAGE_HOSTS"}
+CephStorage_hosts=${CephStorage_hosts:-"$CEPHSTORAGE_HOSTS"}
+
+# Set the _hosts_a vars for each role defined
+for role in $OVERCLOUD_ROLES; do
+ eval hosts=\${${role}_hosts}
+ read -a ${role}_hosts_a <<< $hosts
+done
-read -a Controller_hosts_a <<< $CONTROLLER_HOSTS
-read -a Compute_hosts_a <<< $COMPUTE_HOSTS
-read -a BlockStorage_hosts_a <<< $BLOCKSTORAGE_HOSTS
-read -a ObjectStorage_hosts_a <<< $OBJECTSTORAGE_HOSTS
-read -a CephStorage_hosts_a <<< $CEPHSTORAGE_HOSTS
-
-roles=${OVERCLOUD_ROLES:-"Controller Compute BlockStorage ObjectStorage CephStorage"}
admin_user_id=$(openstack user show admin -c id -f value)
admin_project_id=$(openstack project show admin -c id -f value)
@@ -44,7 +52,7 @@ function check_stack {
}
-for role in $roles; do
+for role in $OVERCLOUD_ROLES; do
while ! check_stack overcloud; do
sleep $SLEEP_TIME
done
@@ -71,24 +79,26 @@ for role in $roles; do
server_stack=$(openstack stack resource show $stack $server_resource_name -c physical_resource_id -f value)
done
- deployed_server_stack=$(openstack stack resource show $server_stack deployed-server -c physical_resource_id -f value)
+ while true; do
+ deployed_server_metadata_url=$(openstack stack resource metadata $server_stack deployed-server | jq -r '.["os-collect-config"].request.metadata_url')
+ if [ "$deployed_server_metadata_url" = "null" ]; then
+ continue
+ else
+ break
+ fi
+ done
echo "======================"
echo "$role$i os-collect-config.conf configuration:"
config="
[DEFAULT]
-collectors=heat
+collectors=request
command=os-refresh-config
polling_interval=30
-[heat]
-user_id=$admin_user_id
-password=$OS_PASSWORD
-auth_url=$OS_AUTH_URL
-project_id=$admin_project_id
-stack_id=$deployed_server_stack
-resource_name=deployed-server-config"
+[request]
+metadata_url=$deployed_server_metadata_url"
echo "$config"
echo "======================"
diff --git a/docker/copy-etc.sh b/docker/copy-etc.sh
deleted file mode 100644
index 1a6cd520..00000000
--- a/docker/copy-etc.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-echo "Copying agent container /etc to /var/lib/etc-data"
-cp -a /etc/* /var/lib/etc-data/
diff --git a/docker/copy-json.py b/docker/copy-json.py
deleted file mode 100644
index e85ff11e..00000000
--- a/docker/copy-json.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/python
-import json
-import os
-
-data = {}
-file_perms = '0600'
-libvirt_perms = '0644'
-
-libvirt_config = os.getenv('libvirt_config').split(',')
-nova_config = os.getenv('nova_config').split(',')
-neutron_openvswitch_agent_config = os.getenv('neutron_openvswitch_agent_config').split(',')
-
-# Command, Config_files, Owner, Perms
-services = {
- 'nova-libvirt': [
- '/usr/sbin/libvirtd',
- libvirt_config,
- 'root',
- libvirt_perms],
- 'nova-compute': [
- '/usr/bin/nova-compute',
- nova_config,
- 'nova',
- file_perms],
- 'neutron-openvswitch-agent': [
- '/usr/bin/neutron-openvswitch-agent',
- neutron_openvswitch_agent_config,
- 'neutron',
- file_perms],
- 'ovs-vswitchd': [
- '/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/kolla/openvswitch/ovs-vswitchd.log'],
- 'ovsdb-server': [
- '/usr/sbin/ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --remote=ptcp:6640:127.0.0.1 --log-file=/var/log/kolla/openvswitch/ovsdb-server.log']
-}
-
-
-def build_config_files(config, owner, perms):
- config_source = '/var/lib/kolla/config_files/'
- config_files_dict = {}
- source = os.path.basename(config)
- dest = config
- config_files_dict.update({'source': config_source + source,
- 'dest': dest,
- 'owner': owner,
- 'perm': perms})
- return config_files_dict
-
-
-for service in services:
- if service != 'ovs-vswitchd' and service != 'ovsdb-server':
- command = services.get(service)[0]
- config_files = services.get(service)[1]
- owner = services.get(service)[2]
- perms = services.get(service)[3]
- config_files_list = []
- for config_file in config_files:
- if service == 'nova-libvirt':
- command = command + ' --config ' + config_file
- else:
- command = command + ' --config-file ' + config_file
- data['command'] = command
- config_files_dict = build_config_files(config_file, owner, perms)
- config_files_list.append(config_files_dict)
- data['config_files'] = config_files_list
- else:
- data['command'] = services.get(service)[0]
- data['config_files'] = []
-
- json_config_dir = '/var/lib/etc-data/json-config/'
- with open(json_config_dir + service + '.json', 'w') as json_file:
- json.dump(data, json_file, sort_keys=True, indent=4,
- separators=(',', ': '))
diff --git a/docker/create-config-dir.sh b/docker/create-config-dir.sh
new file mode 100644
index 00000000..1be1a56f
--- /dev/null
+++ b/docker/create-config-dir.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+# This is where we stack puppet configuration (for now)...
+mkdir -p /var/lib/config-data
+
+# This is where the docker-puppet configs end up
+mkdir -p /var/lib/docker-puppet
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
new file mode 100755
index 00000000..fe87ce7a
--- /dev/null
+++ b/docker/docker-puppet.py
@@ -0,0 +1,212 @@
+#!/usr/bin/env python
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Tool to run puppet inside of the given docker container image.
+# Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON
+# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
+# that can be used to generate config files or run ad-hoc puppet modules
+# inside of a container.
+
+import json
+import os
+import subprocess
+import sys
+import tempfile
+
+
+# this is to match what we do in deployed-server
+def short_hostname():
+ subproc = subprocess.Popen(['hostname', '-s'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ cmd_stdout, cmd_stderr = subproc.communicate()
+ return cmd_stdout.rstrip()
+
+
+def pull_image(name):
+ print('Pulling image: %s' % name)
+ subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ cmd_stdout, cmd_stderr = subproc.communicate()
+ print(cmd_stdout)
+ print(cmd_stderr)
+
+
+def rm_container(name):
+ print('Removing container: %s' % name)
+ subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ cmd_stdout, cmd_stderr = subproc.communicate()
+ print(cmd_stdout)
+ print(cmd_stderr)
+
+
+config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
+print('docker-puppet')
+print('CONFIG: %s' % config_file)
+with open(config_file) as f:
+ json_data = json.load(f)
+
+# To save time we support configuring 'shared' services at the same
+# time. For example configuring all of the heat services
+# in a single container pass makes sense and will save some time.
+# To support this we merge shared settings together here.
+#
+# We key off of config_volume as this should be the same for a
+# given group of services. We are also now specifying the container
+# in which the services should be configured. This should match
+# in all instances where the volume name is also the same.
+
+configs = {}
+
+for service in (json_data or []):
+ if service is None:
+ continue
+ config_volume = service[0] or ''
+ puppet_tags = service[1] or ''
+ manifest = service[2] or ''
+ config_image = service[3] or ''
+ volumes = service[4] if len(service) > 4 else []
+
+ print('---------')
+ print('config_volume %s' % config_volume)
+ print('puppet_tags %s' % puppet_tags)
+ print('manifest %s' % manifest)
+ print('config_image %s' % config_image)
+ print('volumes %s' % volumes)
+ # We key off of config volume for all configs.
+ if config_volume in configs:
+ # Append puppet tags and manifest.
+ print("Existing service, appending puppet tags and manifest\n")
+ if puppet_tags:
+ configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
+ puppet_tags)
+ if manifest:
+ configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2],
+ manifest)
+ if configs[config_volume][3] != config_image:
+ print("WARNING: Config containers do not match even though"
+ " shared volumes are the same!\n")
+ else:
+ print("Adding new service\n")
+ configs[config_volume] = service
+
+print('Service compilation completed.\n')
+
+for config_volume in configs:
+
+ service = configs[config_volume]
+ puppet_tags = service[1] or ''
+ manifest = service[2] or ''
+ config_image = service[3] or ''
+ volumes = service[4] if len(service) > 4 else []
+
+ if puppet_tags:
+ puppet_tags = "file,file_line,concat,%s" % puppet_tags
+ else:
+ puppet_tags = "file,file_line,concat"
+
+ print('---------')
+ print('config_volume %s' % config_volume)
+ print('puppet_tags %s' % puppet_tags)
+ print('manifest %s' % manifest)
+ print('config_image %s' % config_image)
+ hostname = short_hostname()
+
+ with open('/var/lib/docker-puppet/docker-puppet.sh', 'w') as script_file:
+ os.chmod(script_file.name, 0755)
+ script_file.write("""#!/bin/bash
+ set -ex
+ mkdir -p /etc/puppet
+ cp -a /tmp/puppet-etc/* /etc/puppet
+ rm -Rf /etc/puppet/ssl # not in use and causes permission errors
+ echo '{"step": 6}' > /etc/puppet/hieradata/docker.json
+ TAGS=""
+ if [ -n "%(puppet_tags)s" ]; then
+ TAGS='--tags "%(puppet_tags)s"'
+ fi
+ FACTER_hostname=%(hostname)s FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
+
+ # Disables archiving
+ if [ -z "%(no_archive)s" ]; then
+ rm -Rf /var/lib/config-data/%(name)s
+
+ # copying etc should be enough for most services
+ mkdir -p /var/lib/config-data/%(name)s/etc
+ cp -a /etc/* /var/lib/config-data/%(name)s/etc/
+
+ if [ -d /root/ ]; then
+ cp -a /root/ /var/lib/config-data/%(name)s/root/
+ fi
+ if [ -d /var/lib/ironic/tftpboot/ ]; then
+ mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/
+ cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/tftpboot/
+ fi
+ if [ -d /var/lib/ironic/httpboot/ ]; then
+ mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/
+ cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/httpboot/
+ fi
+
+ # apache services may have files placed in /var/www/
+ if [ -d /var/www/ ]; then
+ mkdir -p /var/lib/config-data/%(name)s/var/www
+ cp -a /var/www/* /var/lib/config-data/%(name)s/var/www/
+ fi
+ fi
+ """ % {'puppet_tags': puppet_tags, 'name': config_volume,
+ 'hostname': hostname,
+ 'no_archive': os.environ.get('NO_ARCHIVE', '')})
+
+ with tempfile.NamedTemporaryFile() as tmp_man:
+ with open(tmp_man.name, 'w') as man_file:
+ man_file.write('include ::tripleo::packages\n')
+ man_file.write(manifest)
+
+ rm_container('docker-puppet-%s' % config_volume)
+ pull_image(config_image)
+
+ dcmd = ['/usr/bin/docker', 'run',
+ '--user', 'root',
+ '--name', 'docker-puppet-%s' % config_volume,
+ '--volume', '%s:/etc/config.pp:ro' % tmp_man.name,
+ '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',
+ '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
+ '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw',
+ '--volume', 'tripleo_logs:/var/log/tripleo/',
+ '--volume', '/var/lib/docker-puppet/docker-puppet.sh:/var/lib/docker-puppet/docker-puppet.sh:ro']
+
+ for volume in volumes:
+ dcmd.extend(['--volume', volume])
+
+ dcmd.extend(['--entrypoint', '/var/lib/docker-puppet/docker-puppet.sh'])
+
+ env = {}
+ if os.environ.get('NET_HOST', 'false') == 'true':
+ print('NET_HOST enabled')
+ dcmd.extend(['--net', 'host', '--volume',
+ '/etc/hosts:/etc/hosts:ro'])
+ dcmd.append(config_image)
+
+ subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, env=env)
+ cmd_stdout, cmd_stderr = subproc.communicate()
+ print(cmd_stdout)
+ print(cmd_stderr)
+ if subproc.returncode != 0:
+ print('Failed running docker-puppet.py for %s' % config_volume)
+ sys.exit(subproc.returncode)
+ else:
+ rm_container('docker-puppet-%s' % config_volume)
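
As described in the header comment, docker-puppet.py reads /var/lib/docker-puppet/docker-puppet.json, a JSON array of [config_volume, puppet_tags, manifest, config_image, [volumes]] entries; entries sharing a config_volume are merged (tags comma-joined, manifests concatenated). A hypothetical two-entry file (image name and manifest snippets are illustrative only) could look like:

    [
      ["heat", "heat_config",
       "include ::tripleo::profile::base::heat::api",
       "tripleoupstream/centos-binary-heat-engine:latest"],
      ["heat", "heat_config",
       "include ::tripleo::profile::base::heat::engine",
       "tripleoupstream/centos-binary-heat-engine:latest"]
    ]

Both entries use the same config_image on purpose: the script warns when entries that share a config_volume specify different images.
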
diff --git a/docker/firstboot/setup_docker_host.sh b/docker/firstboot/setup_docker_host.sh
new file mode 100755
index 00000000..b2287e91
--- /dev/null
+++ b/docker/firstboot/setup_docker_host.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+set -eux
+# TODO This would be better in puppet
+
+# TODO remove this when built image includes docker
+if [ ! -f "/usr/bin/docker" ]; then
+ yum -y install docker
+fi
+
+# NOTE(mandre) $docker_namespace_is_registry is not a bash variable but is
+# a place holder for text replacement done via heat
+if [ "$docker_namespace_is_registry" = "True" ]; then
+ /usr/bin/systemctl stop docker.service
+ # if namespace is used with local registry, trim all namespacing
+ trim_var=$docker_registry
+ registry_host="${trim_var%%/*}"
+ /bin/sed -i -r "s/^[# ]*INSECURE_REGISTRY *=.+$/INSECURE_REGISTRY='--insecure-registry $registry_host'/" /etc/sysconfig/docker
+fi
+
+# enable and start docker
+/usr/bin/systemctl enable docker.service
+/usr/bin/systemctl start docker.service
+
+# Disable libvirtd
+/usr/bin/systemctl disable libvirtd.service
+/usr/bin/systemctl stop libvirtd.service
diff --git a/docker/firstboot/install_docker_agents.yaml b/docker/firstboot/setup_docker_host.yaml
index f6d61e2d..2f258987 100644
--- a/docker/firstboot/install_docker_agents.yaml
+++ b/docker/firstboot/setup_docker_host.yaml
@@ -1,9 +1,6 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
parameters:
- DockerAgentImage:
- type: string
- default: heat-docker-agents
DockerNamespace:
type: string
default: tripleoupstream
@@ -17,22 +14,18 @@ resources:
type: OS::Heat::MultipartMime
properties:
parts:
- - config: {get_resource: install_docker_agents}
+ - config: {get_resource: setup_docker_host}
- install_docker_agents:
+ setup_docker_host:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
str_replace:
params:
- $agent_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerAgentImage} ]
$docker_registry: {get_param: DockerNamespace}
$docker_namespace_is_registry: {get_param: DockerNamespaceIsRegistry}
- template: {get_file: ./start_docker_agents.sh}
+ template: {get_file: ./setup_docker_host.sh}
outputs:
OS::stack_id:
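
To use the renamed firstboot template, an environment along these lines (the OS::TripleO::NodeUserData hook is the conventional firstboot entry point; the registry address is an example) can be passed at deploy time:

    resource_registry:
      OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml

    parameter_defaults:
      # host:port prefix of a local registry; setup_docker_host.sh trims the
      # namespace and marks the host as an insecure registry
      DockerNamespace: '192.168.24.1:8787/tripleoupstream'
      DockerNamespaceIsRegistry: true
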
diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh
deleted file mode 100644
index acb44ce5..00000000
--- a/docker/firstboot/start_docker_agents.sh
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/bin/bash
-set -eux
-
-/sbin/setenforce 0
-/sbin/modprobe ebtables
-
-# CentOS sets ptmx to 000. Without it being 666, we can't use Cinder volumes
-chmod 666 /dev/pts/ptmx
-
-# We need hostname -f to return in a centos container for the puppet hook
-HOSTNAME=$(hostname)
-echo "127.0.0.1 $HOSTNAME.localdomain $HOSTNAME" >> /etc/hosts
-
-# update docker for local insecure registry(optional)
-# Note: This is different for different docker versions
-# For older docker versions < 1.4.x use commented line
-#echo "OPTIONS='--insecure-registry $docker_registry'" >> /etc/sysconfig/docker
-#echo "ADD_REGISTRY='--registry-mirror $docker_registry'" >> /etc/sysconfig/docker
-
-# Local docker registry 1.8
-# NOTE(mandre) $docker_namespace_is_registry is not a bash variable but is
-# a place holder for text replacement done via heat
-if [ "$docker_namespace_is_registry" = True ]; then
- /usr/bin/systemctl stop docker.service
- # if namespace is used with local registry, trim all namespacing
- trim_var=$docker_registry
- registry_host="${trim_var%%/*}"
- /bin/sed -i -r "s/^[# ]*INSECURE_REGISTRY *=.+$/INSECURE_REGISTRY='--insecure-registry $registry_host'/" /etc/sysconfig/docker
- /usr/bin/systemctl start --no-block docker.service
-fi
-
-/usr/bin/docker pull $agent_image &
-DOCKER_PULL_PID=$!
-
-mkdir -p /var/lib/etc-data/json-config #FIXME: this should be a docker data container
-
-# NOTE(flaper87): Heat Agent required mounts
-AGENT_COMMAND_MOUNTS="-v /var/lib/etc-data:/var/lib/etc-data \
- -v /run:/run \
- -v /etc:/host/etc \
- -v /usr/bin/atomic:/usr/bin/atomic \
- -v /var/lib/dhclient:/var/lib/dhclient \
- -v /var/lib/cloud:/var/lib/cloud \
- -v /var/lib/heat-cfntools:/var/lib/heat-cfntools \
- -v /etc/sysconfig/docker:/etc/sysconfig/docker \
- -v /usr/lib64/libseccomp.so.2:/usr/lib64/libseccomp.so.2"
-
-
-# NOTE(flaper87): Some of these commands may not be present depending on the
-# atomic version.
-for docker_cmd in docker docker-current docker-latest; do
- if [ -f "/usr/bin/$docker_cmd" ]; then
- AGENT_COMMAND_MOUNTS+=" -v /usr/bin/$docker_cmd:/usr/bin/$docker_cmd"
- fi
-done
-
-# heat-docker-agents service
-cat <<EOF > /etc/systemd/system/heat-docker-agents.service
-
-[Unit]
-Description=Heat Docker Agent Container
-After=docker.service
-Requires=docker.service
-
-[Service]
-User=root
-Restart=on-failure
-ExecStartPre=-/usr/bin/docker kill heat-agents
-ExecStartPre=-/usr/bin/docker rm heat-agents
-ExecStart=/usr/bin/docker run --name heat-agents --privileged --net=host \
- $AGENT_COMMAND_MOUNTS \
- --entrypoint=/usr/bin/os-collect-config $agent_image
-ExecStop=/usr/bin/docker stop heat-agents
-
-[Install]
-WantedBy=multi-user.target
-
-EOF
-
-# enable and start heat-docker-agents
-chmod 0640 /etc/systemd/system/heat-docker-agents.service
-/usr/bin/systemctl enable heat-docker-agents.service
-/usr/bin/systemctl start --no-block heat-docker-agents.service
-
-# Disable NetworkManager and let the ifup/down scripts work properly.
-/usr/bin/systemctl disable NetworkManager
-/usr/bin/systemctl stop NetworkManager
-
-# Atomic's root partition & logical volume defaults to 3G. In order to launch
-# larger VMs, we need to enlarge the root logical volume and scale down the
-# docker_pool logical volume. We are allocating 80% of the disk space for
-# vm data and the remaining 20% for docker images.
-ATOMIC_ROOT='/dev/mapper/atomicos-root'
-ROOT_DEVICE=`pvs -o vg_name,pv_name --no-headings | grep atomicos | awk '{ print $2}'`
-
-growpart $( echo "${ROOT_DEVICE}" | sed -r 's/([^0-9]*)([0-9]+)/\1 \2/' )
-pvresize "${ROOT_DEVICE}"
-lvresize -l +80%FREE "${ATOMIC_ROOT}"
-xfs_growfs "${ATOMIC_ROOT}"
-
-cat <<EOF > /etc/sysconfig/docker-storage-setup
-GROWPART=true
-AUTO_EXTEND_POOL=yes
-POOL_AUTOEXTEND_PERCENT=30
-POOL_AUTOEXTEND_THRESHOLD=70
-EOF
-
-wait $DOCKER_PULL_PID
diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml
index de17cffe..3473f4ca 100644
--- a/docker/post.j2.yaml
+++ b/docker/post.j2.yaml
@@ -1,4 +1,8 @@
-heat_template_version: 2016-10-14
+# certain initialization steps (run in a container) will occur
+# on the first role listed in the roles file
+{% set primary_role_name = roles[0].name -%}
+
+heat_template_version: ocata
description: >
Post-deploy configuration steps via puppet for all roles,
@@ -8,46 +12,82 @@ parameters:
servers:
type: json
description: Mapping of Role name e.g Controller to a list of servers
-
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
-
DeployIdentifier:
default: ''
type: string
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
- DockerNamespace:
- description: namespace
- default: 'tripleoupstream'
- type: string
-
- DockerOpenvswitchDBImage:
- description: image
- default: 'centos-binary-openvswitch-db-server'
- type: string
-
- DockerOvsVswitchdImage:
- description: image
- default: 'centos-binary-openvswitch-vswitchd'
- type: string
+resources:
- LibvirtConfig:
- type: string
- default: "/etc/libvirt/libvirtd.conf"
+ # These utility tasks use docker-puppet.py to execute tasks via puppet
+ # We only execute these on the first node in the primary role
+ {{primary_role_name}}DockerPuppetTasks:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ yaql:
+ expression:
+ dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1]))
+ data:
+ docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]}
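+ # Illustrative only (not part of the template): after the groupBy above,
+ # the value is a mapping of step name to the docker_puppet_tasks entries
+ # contributed by services on the primary role, roughly
+ #   {"step_2": [[<task name>, <puppet tags>, <manifest>, <config image>, ...], ...],
+ #    "step_4": [...]}
+ # Each per-step list is written out as docker-puppet-tasks<step>.json below.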
+
+# BEGIN primary_role_name docker-puppet-tasks (run only on a single node)
+{% for step in range(1, 6) %}
+
+ {{primary_role_name}}DockerPuppetJsonConfig{{step}}:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json:
+ {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']}
+
+ {{primary_role_name}}DockerPuppetJsonDeployment{{step}}:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ server: {get_param: [servers, {{primary_role_name}}, '0']}
+ config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}}
- NovaConfig:
- type: string
- default: "/etc/nova/nova.conf,/etc/nova/rootwrap.conf"
+ {{primary_role_name}}DockerPuppetTasksConfig{{step}}:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: docker-puppet.py}
+ inputs:
+ - name: CONFIG
+ - name: NET_HOST
+ - name: NO_ARCHIVE
- NeutronOpenvswitchAgentConfig:
- type: string
- default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
+ {{primary_role_name}}DockerPuppetTasksDeployment{{step}}:
+ type: OS::Heat::SoftwareDeployment
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step}}
+ - {{dep.name}}ContainersDeployment_Step{{step}}
+ {% endfor %}
+ - {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
+ properties:
+ name: {{primary_role_name}}DockerPuppetTasksDeployment{{step}}
+ server: {get_param: [servers, {{primary_role_name}}, '0']}
+ config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}}
+ input_values:
+ CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
+ NET_HOST: 'true'
+ NO_ARCHIVE: 'true'
-resources:
+{% endfor %}
+# END primary_role_name docker-puppet-tasks
{% for role in roles %}
# Post deployment steps for all roles
@@ -69,87 +109,180 @@ resources:
input_values:
update_identifier: {get_param: DeployIdentifier}
- {{role.name}}Config:
- type: OS::TripleO::{{role.name}}Config
+ {{role.name}}CreateConfigDir:
+ type: OS::Heat::SoftwareConfig
properties:
- StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
- {% if role.name.lower() == 'compute' %}
- PuppetTags: {get_param: [role_data, {{role.name}}, puppet_tags]}
- {% endif %}
+ group: script
+ config: {get_file: create-config-dir.sh}
- # Step through a series of configuration steps
- {{role.name}}Deployment_Step1:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ {{role.name}}CreateConfigDirDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
properties:
- name: {{role.name}}Deployment_Step1
servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: 1
- update_identifier: {get_param: DeployIdentifier}
+ config: {get_resource: {{role.name}}CreateConfigDir}
- {{role.name}}Deployment_Step2:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step1
- {% endfor %}
+ # this creates a JSON config file for our docker-puppet.py script
+ {{role.name}}GenPuppetConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ /var/lib/docker-puppet/docker-puppet.json:
+ yaql:
+ # select only services that have a non-null config_image and
+ # non-null puppet_tags
+ expression:
+ $.data.config_volume.zip($.data.puppet_tags, $.data.step_config, $.data.config_image).where($[3] != null and $[1] != null)
+ data:
+ config_volume: {get_param: [role_data, {{role.name}}, config_volume]}
+ step_config: {get_param: [role_data, {{role.name}}, step_config]}
+ puppet_tags: {get_param: [role_data, {{role.name}}, puppet_tags]}
+ config_image: {get_param: [role_data, {{role.name}}, config_image]}
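+ # Illustrative only (not part of the template): each entry written to
+ # docker-puppet.json by the expression above is a 4-item list consumed
+ # by docker-puppet.py, roughly
+ #   [<config_volume>, <puppet_tags>, <step_config manifest>, <config_image>]
+ # e.g. a hypothetical service might contribute
+ #   ["example", "example_config",
+ #    "include ::tripleo::profile::base::example",
+ #    "tripleoupstream/centos-binary-example:latest"]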
+
+ {{role.name}}GenPuppetDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
properties:
- name: {{role.name}}Deployment_Step2
servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
+ config: {get_resource: {{role.name}}GenPuppetConfig}
- {{role.name}}Deployment_Step3:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step2
- {% endfor %}
+ {{role.name}}GenerateConfig:
+ type: OS::Heat::SoftwareConfig
properties:
- name: {{role.name}}Deployment_Step3
+ group: script
+ config: {get_file: docker-puppet.py}
+
+ {{role.name}}GenerateConfigDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment]
+ properties:
+ name: {{role.name}}GenerateConfigDeployment
servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
+ config: {get_resource: {{role.name}}GenerateConfig}
+
+ {{role.name}}PuppetStepConfig:
+ type: OS::Heat::Value
+ properties:
+ type: string
+ value:
+ yaql:
+ expression:
+ # select 'step_config' only from services that do not have a docker_image
+ $.data.service_names.zip($.data.step_config, $.data.docker_image).where($[2] = null).where($[1] != null).select($[1]).join("\n")
+ data:
+ service_names: {get_param: [role_data, {{role.name}}, service_names]}
+ step_config: {get_param: [role_data, {{role.name}}, step_config]}
+ docker_image: {get_param: [role_data, {{role.name}}, docker_image]}
+
+ {{role.name}}DockerConfig:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ yaql:
+ expression:
+ # select 'docker_config' only from services that have a docker_image
+ $.data.service_names.zip($.data.docker_config, $.data.docker_image).where($[2] != null).select($[1]).reduce($1.mergeWith($2), {})
+ data:
+ service_names: {get_param: [role_data, {{role.name}}, service_names]}
+ docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
+ docker_image: {get_param: [role_data, {{role.name}}, docker_image]}
+
+ {{role.name}}KollaJsonConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ {get_param: [role_data, {{role.name}}, kolla_config]}
+
+ {{role.name}}KollaJsonDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ name: {{role.name}}KollaJsonDeployment
+ config: {get_resource: {{role.name}}KollaJsonConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+
+ # BEGIN BAREMETAL CONFIG STEPS
+
+ {% if role.name == 'Controller' %}
+ ControllerPrePuppet:
+ type: OS::TripleO::Tasks::ControllerPrePuppet
+ properties:
+ servers: {get_param: [servers, Controller]}
input_values:
- step: 3
update_identifier: {get_param: DeployIdentifier}
+ {% endif %}
+
+ {{role.name}}Config:
+ type: OS::TripleO::{{role.name}}Config
+ properties:
+ StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]}
- {{role.name}}Deployment_Step4:
+ {% for step in range(1, 6) %}
+
+ {{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
+ {% if step == 1 %}
+ depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ {% else %}
depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step3
- {% endfor %}
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ - {{dep.name}}ContainersDeployment_Step{{step -1}}
+ {% endfor %}
+ - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
+ {% endif %}
properties:
- name: {{role.name}}Deployment_Step4
+ name: {{role.name}}Deployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: {{role.name}}Config}
input_values:
- step: 4
+ step: {{step}}
update_identifier: {get_param: DeployIdentifier}
- {{role.name}}Deployment_Step5:
+ {% endfor %}
+ # END BAREMETAL CONFIG STEPS
+
+ # BEGIN CONTAINER CONFIG STEPS
+ {% for step in range(1, 6) %}
+
+ {{role.name}}ContainersConfig_Step{{step}}:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: docker-cmd
+ config:
+ {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]}
+
+ {{role.name}}ContainersDeployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
+ {% if step == 1 %}
depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step4
- {% endfor %}
+ - {{role.name}}PreConfig
+ - {{role.name}}KollaJsonDeployment
+ - {{role.name}}GenPuppetDeployment
+ - {{role.name}}GenerateConfigDeployment
+ {% else %}
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}ContainersDeployment_Step{{step -1}}
+ - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
+ {% endif %}
properties:
- name: {{role.name}}Deployment_Step5
+ name: {{role.name}}ContainersDeployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: 5
- update_identifier: {get_param: DeployIdentifier}
+ config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}}
+
+ {% endfor %}
+ # END CONTAINER CONFIG STEPS
{{role.name}}PostConfig:
type: OS::TripleO::Tasks::{{role.name}}PostConfig
depends_on:
{% for dep in roles %}
- {{dep.name}}Deployment_Step5
+ - {{primary_role_name}}DockerPuppetTasksDeployment5
{% endfor %}
properties:
servers: {get_param: servers}
@@ -167,142 +300,15 @@ resources:
properties:
servers: {get_param: [servers, {{role.name}}]}
- {% if role.name.lower() == 'compute' %}
- CopyEtcConfig:
- type: OS::Heat::SoftwareConfig
- depends_on: {{role.name}}PostConfig
- properties:
- group: script
- outputs:
- - name: result
- config: {get_file: ../docker/copy-etc.sh}
-
- CopyEtcDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- name: CopyEtcDeployment
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: CopyEtcConfig}
-
- CopyJsonConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- inputs:
- - name: libvirt_config
- - name: nova_config
- - name: neutron_openvswitch_agent_config
- config: {get_file: ../docker/copy-json.py}
-
- CopyJsonDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CopyEtcDeployment
+ {% if role.name == 'Controller' %}
+ ControllerPostPuppet:
+ depends_on:
+ - ControllerExtraConfigPost
+ type: OS::TripleO::Tasks::ControllerPostPuppet
properties:
- name: CopyJsonDeployment
- config: {get_resource: CopyJsonConfig}
- servers: {get_param: [servers, {{role.name}}]}
+ servers: {get_param: [servers, Controller]}
input_values:
- libvirt_config: {get_param: LibvirtConfig}
- nova_config: {get_param: NovaConfig}
- neutron_openvswitch_agent_config: {get_param: NeutronOpenvswitchAgentConfig}
-
- NovaComputeContainersDeploymentOVS:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: CopyJsonDeployment
- properties:
- name: NovaComputeContainersDeploymentOVS
- config: {get_resource: NovaComputeContainersConfigOVS}
- servers: {get_param: [servers, {{role.name}}]}
-
- NovaComputeContainersConfigOVS:
- type: OS::Heat::StructuredConfig
- properties:
- group: docker-cmd
- config:
- openvswitchdb:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchDBImage} ]
- net: host
- restart: always
- volumes:
- - /var/lib/etc-data/json-config/ovsdb-server.json:/var/lib/kolla/config_files/config.json
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - logs:/var/log/kolla/
- - openvswitch_db:/var/lib/openvswitch/
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
-
- ovsvswitchd:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerOvsVswitchdImage} ]
- net: host
- privileged: true
- restart: always
- volumes:
- - /var/lib/etc-data/json-config/ovs-vswitchd.json:/var/lib/kolla/config_files/config.json
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /run:/run
- - logs:/var/log/kolla/
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
-
- NovaComputeContainersDeploymentNetconfig:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: NovaComputeContainersDeploymentOVS
- properties:
- name: NovaComputeContainersDeploymentNetconfig
- config: {get_resource: NovaComputeContainersConfigNetconfig}
- servers: {get_param: [servers, {{role.name}}]}
-
- # We run os-net-config here because we depend on the ovs containers to be up
- # and running before we configure the network. This allows explicit timing
- # of the network configuration.
- NovaComputeContainersConfigNetconfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- outputs:
- - name: result
- config: |
- #!/bin/bash
- /usr/local/bin/run-os-net-config
-
- {{role.name}}ContainersConfig_Step1:
- type: OS::Heat::StructuredConfig
- depends_on: CopyJsonDeployment
- properties:
- group: docker-cmd
- config:
- {get_param: [role_data, {{role.name}}, docker_config, step_1]}
-
- {{role.name}}ContainersConfig_Step2:
- type: OS::Heat::StructuredConfig
- depends_on: CopyJsonDeployment
- properties:
- group: docker-cmd
- config:
- {get_param: [role_data, {{role.name}}, docker_config, step_2]}
-
- {{role.name}}ContainersDeployment_Step1:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy, NovaComputeContainersDeploymentNetconfig]
- properties:
- name: {{role.name}}ContainersDeployment_Step1
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}ContainersConfig_Step1}
-
- {{role.name}}ContainersDeployment_Step2:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: {{role.name}}ContainersDeployment_Step1
- properties:
- name: {{role.name}}ContainersDeployment_Step2
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}ContainersConfig_Step2}
+ update_identifier: {get_param: DeployIdentifier}
{% endif %}
+
{% endfor %}
diff --git a/docker/services/README.rst b/docker/services/README.rst
index 8d1f9e86..881a2a37 100644
--- a/docker/services/README.rst
+++ b/docker/services/README.rst
@@ -1,60 +1,115 @@
-========
-services
-========
+===============
+Docker Services
+===============
-A TripleO nested stack Heat template that encapsulates generic configuration
-data to configure a specific service. This generally includes everything
-needed to configure the service excluding the local bind ports which
-are still managed in the per-node role templates directly (controller.yaml,
-compute.yaml, etc.). All other (global) service settings go into
-the puppet/service templates.
+TripleO docker services are currently built on top of the puppet services.
+To do this, each of the docker services includes the output of the
+t-h-t puppet/service templates where appropriate.
-Input Parameters
-----------------
+In general, global docker-specific service settings should reside in these
+templates (templates in the docker/services directory). The required and
+optional items are specified in the docker settings section below.
-Each service may define its own input parameters and defaults.
-Operators will use the parameter_defaults section of any Heat
-environment to set per service parameters.
+If you are adding a config setting that applies to both docker and
+baremetal, that setting should (so long as we use puppet) go into the
+puppet/services templates themselves.
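+
+As a rough sketch (the "Example" names and example.yaml path below are
+placeholders, not a real service), a docker service template wraps its
+puppet counterpart and re-exposes that template's role_data:
+
+.. code-block:: yaml
+
+   resources:
+     ExamplePuppetBase:
+       type: ../../puppet/services/example.yaml
+       properties:
+         EndpointMap: {get_param: EndpointMap}
+         ServiceNetMap: {get_param: ServiceNetMap}
+         DefaultPasswords: {get_param: DefaultPasswords}
+
+   outputs:
+     role_data:
+       value:
+         service_name: {get_attr: [ExamplePuppetBase, role_data, service_name]}
+         config_settings: {get_attr: [ExamplePuppetBase, role_data, config_settings]}
+         step_config: {get_attr: [ExamplePuppetBase, role_data, step_config]}
+         # docker specific settings follow (see the settings section below)
+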
-Config Settings
----------------
-
-Each service may define a config_settings output variable which returns
-Hiera settings to be configured.
-
-Steps
------
-
-Each service may define an output variable which returns a puppet manifest
-snippet that will run at each of the following steps. Earlier manifests
-are re-asserted when applying latter ones.
-
- * config_settings: Custom hiera settings for this service. These are
- used to generate configs.
+Building Kolla Images
+---------------------
- * step_config: A puppet manifest that is used to step through the deployment
- sequence. Each sequence is given a "step" (via hiera('step') that provides
- information for when puppet classes should activate themselves.
+TripleO currently relies on Kolla docker containers. Kolla supports container
+customization and we are making use of this feature within TripleO to inject
+puppet (our configuration tool of choice) into the Kolla base images. The
+undercloud nova-scheduler also requires openstack-tripleo-common to
+provide custom filters.
- * docker_compose:
+To build Kolla images for TripleO, adjust your kolla config to build your
+centos base image with puppet using the example below:
- * container_name:
+.. code-block:: bash
- * volumes:
+
+   $ cat template-overrides.j2
+   {% extends parent_template %}
+   {% set base_centos_binary_packages_append = ['puppet'] %}
+   {% set nova_scheduler_packages_append = ['openstack-tripleo-common'] %}
-Steps correlate to the following:
-
- 1) Service configuration generation with puppet.
+
+   kolla-build --base centos --template-override template-overrides.j2
- 2) Early Openstack Service setup (database init?)
- 3) Early containerized networking services startup (OVS)
- 4) Network configuration
-
- 5) General OpenStack Services
+Docker settings
+---------------
+Each service may define an output variable which returns a puppet manifest
+snippet that will run at each of the following steps. Earlier manifests
+are re-asserted when applying later ones. A minimal sketch combining the
+settings below follows this list.
- 6) Service activation (Pacemaker)
+ * config_settings: This setting is generally inherited from the
+ puppet/services templates and only needs to be appended
+ to on occasion when docker-specific config settings are required.
+
+ * step_config: This setting controls the manifest that is used to
+ create docker config files via puppet. The puppet tags below are
+ used along with this manifest to generate a config directory for
+ this container.
+
+ * kolla_config: Contains YAML that represents how to map config files
+ into the kolla container. This config file is typically mapped into
+ the container itself at the /var/lib/kolla/config_files/config.json
+ location and drives how kolla's external config mechanisms work.
+
+ * docker_image: The full name of the docker image that will be used.
+
+ * docker_config: Data that is passed to the docker-cmd hook to configure
+ a container, or set of containers, at each step. See the available steps
+ below and the related docker-cmd hook documentation in the heat-agents
+ project.
+
+ * puppet_tags: Puppet resource tag names that are used to generate config
+ files with puppet. Only the named config resources are used to generate
+ a config file. Any service that specifies tags will have the default
+ tags of 'file,concat,file_line' appended to the setting.
+ Example: keystone_config
+
+ * config_volume: The name of the volume (directory) where config files
+ will be generated for this service. Use this as the location to
+ bind mount into the running Kolla container for configuration.
+
+ * config_image: The name of the docker image that will be used for
+ generating configuration files. This is often the same value as
+ 'docker_image' above but some containers share a common set of
+ config files which are generated in a common base container.
+
+ * docker_puppet_tasks: This section provides data to drive the
+ docker-puppet.py tool directly. The task is executed only once
+ within the cluster (not on each node) and is useful for several
+ puppet snippets we require for initialization of things like
+ keystone endpoints, database users, etc. See docker-puppet.py
+ for formatting.
+
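+Continuing the hypothetical "example" service from the sketch above (all
+names are placeholders and the exact values depend on the service), these
+keys sit alongside service_name and step_config in the role_data output:
+
+.. code-block:: yaml
+
+   # BEGIN DOCKER SETTINGS
+   docker_image: &example_image
+     list_join:
+       - '/'
+       - [ {get_param: DockerNamespace}, 'centos-binary-example:latest' ]
+   puppet_tags: example_config
+   config_volume: example
+   config_image: *example_image
+   kolla_config:
+     /var/lib/kolla/config_files/example.json:
+       command: /usr/bin/example-api --config-file /etc/example/example.conf
+       config_files:
+         - dest: /etc/example/example.conf
+           owner: example
+           perm: '0640'
+           source: /var/lib/kolla/config_files/src/etc/example/example.conf
+   docker_config:
+     step_4:
+       example_api:
+         image: *example_image
+         net: host
+         restart: always
+         volumes:
+           - /var/lib/kolla/config_files/example.json:/var/lib/kolla/config_files/config.json:ro
+           - /var/lib/config-data/example/:/var/lib/kolla/config_files/src:ro
+           - /etc/hosts:/etc/hosts:ro
+           - /etc/localtime:/etc/localtime:ro
+         environment:
+           - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+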
+Docker steps
+------------
+Similar to baremetal, docker containers are brought up in a stepwise manner.
+The current architecture supports bringing up baremetal services alongside
+containers. For each step, the baremetal puppet manifests are executed
+first and then any docker containers are brought up afterwards.
- 7) Fencing (Pacemaker)
+Steps correlate to the following:
+ Pre) Container config files generated per hiera settings.
+ 1) Load Balancer configuration baremetal
+ a) step 1 baremetal
+ b) step 1 containers
+ 2) Core Services (Database/Rabbit/NTP/etc.)
+ a) step 2 baremetal
+ b) step 2 containers
+ 3) Early OpenStack Service setup (Ringbuilder, etc.)
+ a) step 3 baremetal
+ b) step 3 containers
+ 4) General OpenStack Services
+ a) step 4 baremetal
+ b) step 4 containers
+ c) Keystone containers post initialization (tenant, service, endpoint creation)
+ 5) Service activation (Pacemaker)
+ a) step 5 baremetal
+ b) step 5 containers
diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml
new file mode 100644
index 00000000..cc28846a
--- /dev/null
+++ b/docker/services/database/mongodb.yaml
@@ -0,0 +1,98 @@
+heat_template_version: ocata
+
+description: >
+ MongoDB service deployment using puppet and docker
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMongodbImage:
+ description: image
+ default: 'centos-binary-mongodb:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ MongodbPuppetBase:
+ type: ../../../puppet/services/database/mongodb.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Containerized service Mongodb using composable services.
+ value:
+ service_name: {get_attr: [MongodbPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [MongodbPuppetBase, role_data, config_settings]
+ - mongodb::server::fork: false
+ step_config:
+ list_join:
+ - "\n"
+ - - "['Mongodb_database', 'Mongodb_user', 'Mongodb_replset'].each |String $val| { noop_resource($val) }"
+ - {get_attr: [MongodbPuppetBase, role_data, step_config]}
+ upgrade_tasks: {get_attr: [MongodbPuppetBase, role_data, upgrade_tasks]}
+ # BEGIN DOCKER SETTINGS #
+ docker_image: &mongodb_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
+ puppet_tags: file # set this even though file is the default
+ config_volume: mongodb
+ config_image: *mongodb_image
+ kolla_config:
+ /var/lib/kolla/config_files/mongodb.json:
+ command: /usr/bin/mongod --unixSocketPrefix=/var/run/mongodb --config /etc/mongod.conf run
+ config_files:
+ - dest: /etc/mongod.conf
+ source: /var/lib/kolla/config_files/src/etc/mongod.conf
+ owner: mongodb
+ perm: '0600'
+ - dest: /etc/mongos.conf
+ source: /var/lib/kolla/config_files/src/etc/mongos.conf
+ owner: mongodb
+ perm: '0600'
+ docker_config:
+ step_2:
+ mongodb:
+ image: *mongodb_image
+ net: host
+ privileged: false
+ volumes: &mongodb_volumes
+ - /var/lib/kolla/config_files/mongodb.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/config-data/mongodb/:/var/lib/kolla/config_files/src:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log/kolla
+ - mongodb:/var/lib/mongodb/
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ docker_puppet_tasks:
+ # MongoDB database initialization occurs only on a single node
+ step_2:
+ - 'mongodb_init_tasks'
+ - 'mongodb_database,mongodb_user,mongodb_replset'
+ - 'include ::tripleo::profile::base::database::mongodb'
+ - list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
+ - - "mongodb:/var/lib/mongodb"
+ - "logs:/var/log/kolla:ro"
diff --git a/docker/services/glance-api.yaml b/docker/services/glance-api.yaml
new file mode 100644
index 00000000..b8ab9622
--- /dev/null
+++ b/docker/services/glance-api.yaml
@@ -0,0 +1,96 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Glance service configured with Puppet
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGlanceApiImage:
+ description: image
+ default: 'centos-binary-glance-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ GlanceApiPuppetBase:
+ type: ../../puppet/services/glance-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Glance API role.
+ value:
+ service_name: {get_attr: [GlanceApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [GlanceApiPuppetBase, role_data, config_settings]
+ - glance::api::sync_db: false
+ step_config: {get_attr: [GlanceApiPuppetBase, role_data, step_config]}
+ service_config_settings: {get_attr: [GlanceApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ docker_image: &glance_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
+ puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config
+ config_volume: glance_api
+ config_image: *glance_image
+ kolla_config:
+ /var/lib/kolla/config_files/glance-api.json:
+ command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf
+ config_files:
+ - dest: /etc/glance/glance-api.conf
+ owner: glance
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/glance/glance-api.conf
+ - dest: /etc/glance/glance-swift.conf
+ owner: glance
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/glance/glance-swift.conf
+ docker_config:
+ step_3:
+ glance_api_db_sync:
+ image: *glance_image
+ net: host
+ privileged: false
+ detach: false
+ volumes: &glance_volumes
+ - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json
+ - /etc/localtime:/etc/localtime:ro
+ - /lib/modules:/lib/modules:ro
+ - /var/lib/config-data/glance_api/:/var/lib/kolla/config_files/src:ro
+ - /run:/run
+ - /dev:/dev
+ - /etc/hosts:/etc/hosts:ro
+ environment:
+ - KOLLA_BOOTSTRAP=True
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_4:
+ glance_api:
+ image: *glance_image
+ net: host
+ privileged: false
+ restart: always
+ volumes: *glance_volumes
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/heat-api-cfn.yaml b/docker/services/heat-api-cfn.yaml
new file mode 100644
index 00000000..93632166
--- /dev/null
+++ b/docker/services/heat-api-cfn.yaml
@@ -0,0 +1,90 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Heat API CFN service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerHeatApiCfnImage:
+ description: image
+ default: 'centos-binary-heat-api-cfn:latest'
+ type: string
+ # we configure all heat services in the same heat engine container
+ DockerHeatEngineImage:
+ description: image
+ default: 'centos-binary-heat-engine:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ HeatBase:
+ type: ../../puppet/services/heat-api-cfn.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Heat API CFN role.
+ value:
+ service_name: {get_attr: [HeatBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [HeatBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: {get_attr: [HeatBase, role_data, step_config]}
+ service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &heat_api_cfn_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnImage} ]
+ puppet_tags: heat_config,file,concat,file_line
+ config_volume: heat
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/heat_api_cfn.json:
+ command: /usr/bin/heat-api-cfn --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+ config_files:
+ - dest: /etc/heat/heat.conf
+ owner: heat
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+ docker_config:
+ step_4:
+ heat_api_cfn:
+ image: *heat_api_cfn_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /dev:/dev
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/heat-api.yaml b/docker/services/heat-api.yaml
new file mode 100644
index 00000000..2efabb61
--- /dev/null
+++ b/docker/services/heat-api.yaml
@@ -0,0 +1,90 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Heat API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerHeatApiImage:
+ description: image
+ default: 'centos-binary-heat-api:latest'
+ type: string
+ # we configure all heat services in the same heat engine container
+ DockerHeatEngineImage:
+ description: image
+ default: 'centos-binary-heat-engine:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ HeatBase:
+ type: ../../puppet/services/heat-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Heat API role.
+ value:
+ service_name: {get_attr: [HeatBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [HeatBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: {get_attr: [HeatBase, role_data, step_config]}
+ service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &heat_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiImage} ]
+ puppet_tags: heat_config,file,concat,file_line
+ config_volume: heat
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/heat_api.json:
+ command: /usr/bin/heat-api --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+ config_files:
+ - dest: /etc/heat/heat.conf
+ owner: heat
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+ docker_config:
+ step_4:
+ heat_api:
+ image: *heat_api_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /dev:/dev
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/heat-engine.yaml b/docker/services/heat-engine.yaml
new file mode 100644
index 00000000..db8c2be5
--- /dev/null
+++ b/docker/services/heat-engine.yaml
@@ -0,0 +1,92 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Heat Engine service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerHeatEngineImage:
+ description: image
+ default: 'centos-binary-heat-engine:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ HeatBase:
+ type: ../../puppet/services/heat-engine.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Heat Engine role.
+ value:
+ service_name: {get_attr: [HeatBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [HeatBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: {get_attr: [HeatBase, role_data, step_config]}
+ service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &heat_engine_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+ puppet_tags: heat_config,file,concat,file_line
+ config_volume: heat
+ config_image: *heat_engine_image
+ kolla_config:
+ /var/lib/kolla/config_files/heat_engine.json:
+ command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+ config_files:
+ - dest: /etc/heat/heat.conf
+ owner: heat
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+ docker_config:
+ step_3:
+ heat_engine_db_sync:
+ image: *heat_engine_image
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ - /var/lib/config-data/heat/etc/heat:/etc/heat:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ command: ['heat-manage', 'db_sync']
+ step_4:
+ heat_engine:
+ image: *heat_engine_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/ironic-api.yaml b/docker/services/ironic-api.yaml
new file mode 100644
index 00000000..80120568
--- /dev/null
+++ b/docker/services/ironic-api.yaml
@@ -0,0 +1,99 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Ironic API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerIronicApiImage:
+ description: image
+ default: 'centos-binary-ironic-api:latest'
+ type: string
+ DockerIronicConfigImage:
+ description: image
+ default: 'centos-binary-ironic-pxe:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ IronicApiBase:
+ type: ../../puppet/services/ironic-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Ironic API role.
+ value:
+ service_name: {get_attr: [IronicApiBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [IronicApiBase, role_data, config_settings]
+ step_config: {get_attr: [IronicApiBase, role_data, step_config]}
+ service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &ironic_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ]
+ puppet_tags: ironic_config
+ config_volume: ironic
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ironic_api.json:
+ command: /usr/bin/ironic-api
+ config_files:
+ - dest: /etc/ironic/ironic.conf
+ owner: ironic
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
+ docker_config:
+ step_3:
+ ironic_db_sync:
+ image: *ironic_image
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ - /var/lib/config-data/ironic/etc/:/etc/:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ command: ['ironic-dbsync', '--config-file', '/etc/ironic/ironic.conf']
+ step_4:
+ ironic_api:
+ start_order: 10
+ image: *ironic_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/ironic-conductor.yaml b/docker/services/ironic-conductor.yaml
new file mode 100644
index 00000000..945ef3fc
--- /dev/null
+++ b/docker/services/ironic-conductor.yaml
@@ -0,0 +1,111 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Ironic Conductor service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerIronicConductorImage:
+ description: image
+ default: 'centos-binary-ironic-conductor:latest'
+ type: string
+ DockerIronicConfigImage:
+ description: image
+ default: 'centos-binary-ironic-pxe:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ IronicConductorBase:
+ type: ../../puppet/services/ironic-conductor.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Ironic Conductor role.
+ value:
+ service_name: {get_attr: [IronicConductorBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [IronicConductorBase, role_data, config_settings]
+ # to avoid hard linking errors we store these on the same
+ # volume/device as the ironic master_path
+ - ironic::drivers::pxe::tftp_root: /var/lib/ironic/tftpboot
+ - ironic::drivers::pxe::tftp_master_path: /var/lib/ironic/tftpboot/master_images
+ - ironic::pxe::tftp_root: /var/lib/ironic/tftpboot
+ - ironic::pxe::http_root: /var/lib/ironic/httpboot
+ - ironic::conductor::http_root: /var/lib/ironic/httpboot
+ step_config: {get_attr: [IronicConductorBase, role_data, step_config]}
+ service_config_settings: {get_attr: [IronicConductorBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &ironic_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ]
+ puppet_tags: ironic_config
+ config_volume: ironic
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ironic_conductor.json:
+ command: /usr/bin/ironic-conductor
+ config_files:
+ - dest: /etc/ironic/ironic.conf
+ owner: ironic
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
+ permissions:
+ - path: /var/lib/ironic/httpboot
+ owner: ironic:ironic
+ recurse: true
+ - path: /var/lib/ironic/tftpboot
+ owner: ironic:ironic
+ recurse: true
+ docker_config:
+ step_4:
+ ironic-init-dirs:
+ image: *ironic_image
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir /var/lib/ironic/httpboot && mkdir /var/lib/ironic/tftpboot']
+ volumes:
+ - ironic:/var/lib/ironic
+ ironic_conductor:
+ start_order: 80
+ image: *ironic_image
+ net: host
+ privileged: true
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/ironic_conductor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /lib/modules:/lib/modules:ro
+ - /sys:/sys
+ - /dev:/dev
+ - /run:/run #shared?
+ - ironic:/var/lib/ironic
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/ironic-pxe.yaml b/docker/services/ironic-pxe.yaml
new file mode 100644
index 00000000..bc7b4677
--- /dev/null
+++ b/docker/services/ironic-pxe.yaml
@@ -0,0 +1,131 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Ironic PXE service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerIronicPxeImage:
+ description: image
+ default: 'centos-binary-ironic-pxe:latest'
+ type: string
+ DockerIronicConfigImage:
+ description: image
+ default: 'centos-binary-ironic-pxe:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the Ironic PXE role.
+ value:
+ service_name: ironic_pxe
+ config_settings: {}
+ step_config: ''
+ service_config_settings: {}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &ironic_pxe_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicPxeImage} ]
+ puppet_tags: ironic_config
+ config_volume: ironic
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ironic_pxe_http.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ config_files:
+ - dest: /etc/ironic/ironic.conf
+ owner: ironic
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
+ - dest: /etc/httpd/conf.d/10-ipxe_vhost.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-ipxe_vhost.conf
+ - dest: /etc/httpd/conf/httpd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
+ - dest: /etc/httpd/conf/ports.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
+ /var/lib/kolla/config_files/ironic_pxe_tftp.json:
+ command: /usr/sbin/in.tftpd --foreground --user root --address 0.0.0.0:69 --map-file /var/lib/ironic/tftpboot/map-file /var/lib/ironic/tftpboot
+ config_files:
+ - dest: /etc/ironic/ironic.conf
+ owner: ironic
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
+ - dest: /var/lib/ironic/tftpboot/chain.c32
+ owner: ironic
+ perm: '0744'
+ source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/chain.c32
+ - dest: /var/lib/ironic/tftpboot/pxelinux.0
+ owner: ironic
+ perm: '0744'
+ source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/pxelinux.0
+ - dest: /var/lib/ironic/tftpboot/ipxe.efi
+ owner: ironic
+ perm: '0744'
+ source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/ipxe.efi
+ - dest: /var/lib/ironic/tftpboot/undionly.kpxe
+ owner: ironic
+ perm: '0744'
+ source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/undionly.kpxe
+ - dest: /var/lib/ironic/tftpboot/map-file
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/map-file
+ docker_config:
+ step_4:
+ ironic_pxe_tftp:
+ start_order: 90
+ image: *ironic_pxe_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/ironic_pxe_tftp.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /dev/log:/dev/log
+ - ironic:/var/lib/ironic/
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ ironic_pxe_http:
+ start_order: 91
+ image: *ironic_pxe_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/ironic/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - ironic:/var/lib/ironic/
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml
new file mode 100644
index 00000000..1d25da72
--- /dev/null
+++ b/docker/services/keystone.yaml
@@ -0,0 +1,153 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Keystone service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerKeystoneImage:
+ description: image
+ default: 'centos-binary-keystone:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ AdminPassword:
+ description: The password for the keystone admin account, used for monitoring, querying neutron etc.
+ type: string
+ hidden: true
+
+resources:
+
+ KeystoneBase:
+ type: ../../puppet/services/keystone.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Keystone API role.
+ value:
+ service_name: {get_attr: [KeystoneBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [KeystoneBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config:
+ list_join:
+ - "\n"
+ - - "['Keystone_user', 'Keystone_endpoint', 'Keystone_domain', 'Keystone_tenant', 'Keystone_user_role', 'Keystone_role', 'Keystone_service'].each |String $val| { noop_resource($val) }"
+ - {get_attr: [KeystoneBase, role_data, step_config]}
+ service_config_settings: {get_attr: [KeystoneBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &keystone_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
+ puppet_tags: keystone_config
+ config_volume: keystone
+ config_image: *keystone_image
+ kolla_config:
+ /var/lib/kolla/config_files/keystone.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ config_files:
+ - dest: /etc/keystone/keystone.conf
+ owner: keystone
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/keystone/keystone.conf
+ - dest: /etc/keystone/credential-keys/0
+ owner: keystone
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/0
+ - dest: /etc/keystone/credential-keys/1
+ owner: keystone
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/1
+ - dest: /etc/httpd/conf.d/10-keystone_wsgi_admin.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_admin.conf
+ - dest: /etc/httpd/conf.d/10-keystone_wsgi_main.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_main.conf
+ - dest: /etc/httpd/conf/httpd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
+ - dest: /etc/httpd/conf/ports.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
+ - dest: /var/www/cgi-bin/keystone/keystone-admin
+ owner: keystone
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-admin
+ - dest: /var/www/cgi-bin/keystone/keystone-public
+ owner: keystone
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-public
+ docker_config:
+ step_3:
+ keystone-init-log:
+ start_order: 0
+ image: *keystone_image
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir /var/log/httpd && mkdir /var/log/keystone && chown keystone:keystone /var/log/keystone']
+ volumes:
+ - logs:/var/log
+ keystone_db_sync:
+ start_order: 1
+ image: *keystone_image
+ net: host
+ privileged: false
+ detach: false
+ volumes: &keystone_volumes
+ - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/keystone/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/keystone/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log
+ environment:
+ - KOLLA_BOOTSTRAP=True
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ keystone:
+ start_order: 1
+ image: *keystone_image
+ net: host
+ privileged: false
+ restart: always
+ volumes: *keystone_volumes
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ keystone_bootstrap:
+ start_order: 2
+ action: exec
+ command:
+ [ 'keystone', 'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ]
+ docker_puppet_tasks:
+ # Keystone endpoint creation occurs only on a single node
+ step_4:
+ - 'keystone_init_tasks'
+ - 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain'
+ - 'include ::tripleo::profile::base::keystone'
+ - list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
diff --git a/docker/services/memcached.yaml b/docker/services/memcached.yaml
new file mode 100644
index 00000000..d459c825
--- /dev/null
+++ b/docker/services/memcached.yaml
@@ -0,0 +1,69 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Memcached services
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMemcachedImage:
+ description: image
+ default: 'centos-binary-memcached:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ MemcachedBase:
+ type: ../../puppet/services/memcached.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Memcached API role.
+ value:
+ service_name: {get_attr: [MemcachedBase, role_data, service_name]}
+ config_settings: {get_attr: [MemcachedBase, role_data, config_settings]}
+ step_config: {get_attr: [MemcachedBase, role_data, step_config]}
+ service_config_settings: {get_attr: [MemcachedBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &memcached_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
+ puppet_tags: 'file'
+ config_volume: 'memcached'
+ config_image: *memcached_image
+ kolla_config: {}
+ docker_config:
+ step_1:
+ memcached:
+ image: *memcached_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS']
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/mistral-api.yaml b/docker/services/mistral-api.yaml
new file mode 100644
index 00000000..e535a817
--- /dev/null
+++ b/docker/services/mistral-api.yaml
@@ -0,0 +1,115 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Mistral API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMistralApiImage:
+ description: image
+ default: 'centos-binary-mistral-api:latest'
+ type: string
+ DockerMistralConfigImage:
+ description: image
+ default: 'centos-binary-mistral-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ MistralApiBase:
+ type: ../../puppet/services/mistral-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Mistral API role.
+ value:
+ service_name: {get_attr: [MistralApiBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [MistralApiBase, role_data, config_settings]
+ step_config: {get_attr: [MistralApiBase, role_data, step_config]}
+ service_config_settings: {get_attr: [MistralApiBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &mistral_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ]
+ puppet_tags: mistral_config
+ config_volume: mistral
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/mistral_api.json:
+ command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api
+ config_files:
+ - dest: /etc/mistral/mistral.conf
+ owner: mistral
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+ docker_config:
+ step_3:
+ mistral_db_sync:
+ start_order: 1
+ image: *mistral_image
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ - /var/lib/config-data/mistral/etc/:/etc/:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'upgrade', 'head']
+ mistral_db_populate:
+ start_order: 2
+ image: *mistral_image
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ - /var/lib/config-data/mistral/etc/:/etc/:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ # NOTE(dprince): this requires that openstack-tripleo-common be installed
+ # in the Mistral API image so that the tripleo* actions are available
+ command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'populate']
+ step_4:
+ mistral_api:
+ start_order: 15
+ image: *mistral_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/mistral_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
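
The kolla_config entry above describes the config.json that the mistral_api container reads at startup; rendered to disk it would look roughly like the sketch below (the exact serialization is handled by the deployment tooling). With KOLLA_CONFIG_STRATEGY=COPY_ALWAYS the listed files are copied from source to dest on every container start.

    {
      "command": "/usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api",
      "config_files": [
        {
          "source": "/var/lib/kolla/config_files/src/etc/mistral/mistral.conf",
          "dest": "/etc/mistral/mistral.conf",
          "owner": "mistral",
          "perm": "0640"
        }
      ]
    }
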
diff --git a/docker/services/mistral-engine.yaml b/docker/services/mistral-engine.yaml
new file mode 100644
index 00000000..be4c8af7
--- /dev/null
+++ b/docker/services/mistral-engine.yaml
@@ -0,0 +1,87 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Mistral Engine service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMistralEngineImage:
+ description: image
+ default: 'centos-binary-mistral-engine:latest'
+ type: string
+ DockerMistralConfigImage:
+ description: image
+ default: 'centos-binary-mistral-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ MistralBase:
+ type: ../../puppet/services/mistral-engine.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Mistral Engine role.
+ value:
+ service_name: {get_attr: [MistralBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [MistralBase, role_data, config_settings]
+ step_config: {get_attr: [MistralBase, role_data, step_config]}
+ service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &mistral_engine_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ]
+ puppet_tags: mistral_config
+ config_volume: mistral
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/mistral_engine.json:
+ command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/engine.log --server=engine
+ config_files:
+ - dest: /etc/mistral/mistral.conf
+ owner: mistral
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+ docker_config:
+ step_4:
+ mistral_engine:
+ image: *mistral_engine_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/mistral_engine.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/mistral-executor.yaml b/docker/services/mistral-executor.yaml
new file mode 100644
index 00000000..33608a42
--- /dev/null
+++ b/docker/services/mistral-executor.yaml
@@ -0,0 +1,91 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Mistral Executor service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMistralExecutorImage:
+ description: image
+ default: 'centos-binary-mistral-executor:latest'
+ type: string
+ DockerMistralConfigImage:
+ description: image
+ default: 'centos-binary-mistral-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ MistralBase:
+ type: ../../puppet/services/mistral-executor.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Mistral Executor role.
+ value:
+ service_name: {get_attr: [MistralBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [MistralBase, role_data, config_settings]
+ step_config: {get_attr: [MistralBase, role_data, step_config]}
+ service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &mistral_executor_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ]
+ puppet_tags: mistral_config
+ config_volume: mistral
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/mistral_executor.json:
+ command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/executor.log --server=executor
+ config_files:
+ - dest: /etc/mistral/mistral.conf
+ owner: mistral
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+ docker_config:
+ step_4:
+ mistral_executor:
+ image: *mistral_executor_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/mistral_executor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ # FIXME: this mount is required for the Nova cells initialization
+ # workflows on the undercloud to work. It should be excluded on the
+ # overcloud for security reasons.
+ - /var/lib/config-data/nova/etc/nova:/etc/nova:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/neutron-api.yaml b/docker/services/neutron-api.yaml
new file mode 100644
index 00000000..e444f391
--- /dev/null
+++ b/docker/services/neutron-api.yaml
@@ -0,0 +1,102 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Neutron API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNeutronApiImage:
+ description: image
+ default: 'centos-binary-neutron-server:latest'
+ type: string
+ # we configure all neutron services with the same neutron config image
+ DockerNeutronConfigImage:
+ description: image
+ default: 'centos-binary-neutron-openvswitch-agent:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ NeutronBase:
+ type: ../../puppet/services/neutron-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron API role.
+ value:
+ service_name: {get_attr: [NeutronBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ step_config: {get_attr: [NeutronBase, role_data, step_config]}
+ service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &neutron_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
+ puppet_tags: neutron_config,neutron_api_config
+ config_volume: neutron
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/neutron_api.json:
+ command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini
+ config_files:
+ - dest: /etc/neutron/neutron.conf
+ owner: neutron
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
+ - dest: /etc/neutron/plugin.ini
+ owner: neutron
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini
+ docker_config:
+ step_3:
+ neutron_db_sync:
+ image: *neutron_api_image
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro
+ - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ command: ['neutron-db-manage', 'upgrade', 'heads']
+ step_4:
+ neutron_api:
+ image: *neutron_api_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/neutron-dhcp.yaml b/docker/services/neutron-dhcp.yaml
new file mode 100644
index 00000000..fc13b3d1
--- /dev/null
+++ b/docker/services/neutron-dhcp.yaml
@@ -0,0 +1,93 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Neutron DHCP service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNeutronApiImage:
+ description: image
+ default: 'centos-binary-neutron-dhcp-agent:latest'
+ type: string
+ # we configure all neutron services with the same neutron config image
+ DockerNeutronConfigImage:
+ description: image
+ default: 'centos-binary-neutron-openvswitch-agent:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ NeutronBase:
+ type: ../../puppet/services/neutron-dhcp.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron DHCP role.
+ value:
+ service_name: {get_attr: [NeutronBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ step_config: {get_attr: [NeutronBase, role_data, step_config]}
+ service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &neutron_dhcp_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
+ puppet_tags: neutron_config,neutron_dhcp_agent_config
+ config_volume: neutron
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/neutron_dhcp.json:
+ command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log
+ config_files:
+ - dest: /etc/neutron/neutron.conf
+ owner: neutron
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
+ - dest: /etc/neutron/dhcp_agent.ini
+ owner: neutron
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/neutron/dhcp_agent.ini
+ docker_config:
+ step_4:
+ neutron_dhcp:
+ image: *neutron_dhcp_image
+ net: host
+ pid: host
+ privileged: true
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /lib/modules:/lib/modules:ro
+ - /run/:/run
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/neutron-ovs-agent.yaml b/docker/services/neutron-ovs-agent.yaml
index 8d092a34..ab99da5e 100644
--- a/docker/services/neutron-ovs-agent.yaml
+++ b/docker/services/neutron-ovs-agent.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
OpenStack Neutron openvswitch service
@@ -10,14 +10,8 @@ parameters:
type: string
DockerOpenvswitchImage:
description: image
- default: 'centos-binary-neutron-openvswitch-agent'
+ default: 'centos-binary-neutron-openvswitch-agent:latest'
type: string
- NeutronOpenvswitchAgentPluginVolume:
- type: string
- default: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/ovs_neutron_plugin.ini:ro"
- NeutronOpenvswitchAgentOvsVolume:
- type: string
- default: " "
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -38,38 +32,53 @@ resources:
NeutronOvsAgentBase:
type: ../../puppet/services/neutron-ovs-agent.yaml
properties:
+ EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
description: Role data for Neutron openvswitch service
value:
+ service_name: {get_attr: [NeutronOvsAgentBase, role_data, service_name]}
config_settings: {get_attr: [NeutronOvsAgentBase, role_data, config_settings]}
step_config: {get_attr: [NeutronOvsAgentBase, role_data, step_config]}
+ docker_image: &neutron_ovs_agent_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2
+ config_volume: neutron
+ config_image: *neutron_ovs_agent_image
+ kolla_config:
+ /var/lib/kolla/config_files/neutron-openvswitch-agent.json:
+ command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
+ config_files:
+ - dest: /etc/neutron/neutron.conf
+ owner: neutron
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
+ - dest: /etc/neutron/plugins/ml2/openvswitch_agent.ini
+ owner: neutron
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/openvswitch_agent.ini
+ - dest: /etc/neutron/plugins/ml2/ml2_conf.ini
+ owner: neutron
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini
docker_config:
- step_1:
+ step_4:
neutronovsagent:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
+ image: *neutron_ovs_agent_image
net: host
pid: host
privileged: true
restart: always
volumes:
- - /var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json
- - /var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro
- - /var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro
- - {get_param: NeutronOpenvswitchAgentPluginVolume}
- - {get_param: NeutronOpenvswitchAgentOvsVolume}
+ - /var/lib/kolla/config_files/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron:/var/lib/kolla/config_files/src:ro
- /etc/localtime:/etc/localtime:ro
- /lib/modules:/lib/modules:ro
- /run:/run
- - logs:/var/log/kolla/
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- step_2: {}
diff --git a/docker/services/neutron-plugin-ml2.yaml b/docker/services/neutron-plugin-ml2.yaml
new file mode 100644
index 00000000..37ab8db2
--- /dev/null
+++ b/docker/services/neutron-plugin-ml2.yaml
@@ -0,0 +1,58 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Neutron ML2 Plugin configured with Puppet
+
+parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNeutronConfigImage:
+ description: image
+ default: 'centos-binary-neutron-openvswitch-agent:latest'
+ type: string
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ NeutronBase:
+ type: ../../puppet/services/neutron-plugin-ml2.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron ML2 Plugin role.
+ value:
+ service_name: {get_attr: [NeutronBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ step_config: {get_attr: [NeutronBase, role_data, step_config]}
+ service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &docker_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ puppet_tags: ''
+ config_volume: 'neutron'
+ config_image: *docker_image
+ kolla_config: {}
+ docker_config: {}
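
This template defines no containers of its own (kolla_config and docker_config are empty); it only exposes the base template's ML2 settings for the shared 'neutron' config volume. A resource_registry sketch for wiring it in (the registry key is assumed for illustration and is not part of this change):

    resource_registry:
      OS::TripleO::Services::NeutronCorePlugin: docker/services/neutron-plugin-ml2.yaml
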
diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml
new file mode 100644
index 00000000..26ae514a
--- /dev/null
+++ b/docker/services/nova-api.yaml
@@ -0,0 +1,134 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Nova API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaApiImage:
+ description: image
+ default: 'centos-binary-nova-api:latest'
+ type: string
+ DockerNovaBaseImage:
+ description: image
+ default: 'centos-binary-nova-base:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ NovaApiBase:
+ type: ../../puppet/services/nova-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Nova API role.
+ value:
+ service_name: {get_attr: [NovaApiBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [NovaApiBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: {get_attr: [NovaApiBase, role_data, step_config]}
+ service_config_settings: {get_attr: [NovaApiBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &nova_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ]
+ puppet_tags: nova_config
+ config_volume: nova
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_api.json:
+ command: /usr/bin/nova-api
+ config_files:
+ - dest: /etc/nova/nova.conf
+ owner: nova
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ docker_config:
+ step_3:
+ nova_api_db_sync:
+ start_order: 1
+ image: *nova_api_image
+ net: host
+ detach: false
+ volumes: &nova_api_volumes
+ - /var/lib/config-data/nova/etc/:/etc/:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ command: ['/usr/bin/nova-manage', 'api_db', 'sync']
+ # FIXME: we probably want to wait on the 'cell_v2 update' for this to be
+ # capable of upgrading a baremetal setup; this ensures the cell is named
+ # 'default'.
+ nova_api_map_cell0:
+ start_order: 2
+ image: *nova_api_image
+ net: host
+ detach: false
+ volumes: *nova_api_volumes
+ command:
+ - '/usr/bin/nova-manage'
+ - 'cell_v2'
+ - 'map_cell0'
+ nova_api_create_default_cell:
+ start_order: 3
+ image: *nova_api_image
+ net: host
+ detach: false
+ volumes: *nova_api_volumes
+ # NOTE: allowing the exit code 2 is a dirty way of making
+ # this idempotent (if the resource already exists a conflict
+ # is raised)
+ exit_codes: [0,2]
+ command:
+ - '/usr/bin/nova-manage'
+ - 'cell_v2'
+ - 'create_cell'
+ - '--name="default"'
+ nova_db_sync:
+ start_order: 4
+ image: *nova_api_image
+ net: host
+ detach: false
+ volumes: *nova_api_volumes
+ command: ['/usr/bin/nova-manage', 'db', 'sync']
+ step_4:
+ nova_api:
+ start_order: 2
+ image: *nova_api_image
+ net: host
+ user: nova
+ privileged: true
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
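
The step_3 containers above run to completion in start_order before the API itself starts in step_4, so the cell bootstrap reduces to the following command sequence (all commands are taken from the template; exit code 2 is tolerated for create_cell to keep the step idempotent):

    - ['/usr/bin/nova-manage', 'api_db', 'sync']
    - ['/usr/bin/nova-manage', 'cell_v2', 'map_cell0']
    - ['/usr/bin/nova-manage', 'cell_v2', 'create_cell', '--name="default"']
    - ['/usr/bin/nova-manage', 'db', 'sync']
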
diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml
index 5c56aeee..570df95f 100644
--- a/docker/services/nova-compute.yaml
+++ b/docker/services/nova-compute.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
OpenStack containerized Nova Compute service
@@ -10,7 +10,7 @@ parameters:
type: string
DockerNovaComputeImage:
description: image
- default: 'centos-binary-nova-compute'
+ default: 'centos-binary-nova-compute:latest'
type: string
ServiceNetMap:
default: {}
@@ -29,41 +29,58 @@ parameters:
resources:
+
NovaComputeBase:
type: ../../puppet/services/nova-compute.yaml
properties:
EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
outputs:
role_data:
description: Role data for the Nova Compute service.
value:
+ service_name: {get_attr: [NovaComputeBase, role_data, service_name]}
config_settings: {get_attr: [NovaComputeBase, role_data, config_settings]}
step_config: {get_attr: [NovaComputeBase, role_data, step_config]}
puppet_tags: nova_config,nova_paste_api_ini
+ docker_image: &nova_compute_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+ config_volume: nova_libvirt
+ config_image: *nova_compute_image
+ kolla_config:
+ /var/lib/kolla/config_files/nova-compute.json:
+ command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
+ config_files:
+ - dest: /etc/nova/nova.conf
+ owner: nova
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ - dest: /etc/nova/rootwrap.conf
+ owner: nova
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf
docker_config:
- step_1:
+ # FIXME: run discover hosts here
+ step_4:
novacompute:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+ image: *nova_compute_image
net: host
privileged: true
user: root
restart: always
volumes:
- - /var/lib/etc-data/json-config/nova-compute.json:/var/lib/kolla/config_files/config.json
- - /var/lib/etc-data/nova/nova.conf:/var/lib/kolla/config_files/nova.conf:ro
- - /var/lib/etc-data/nova/rootwrap.conf:/var/lib/kolla/config_files/rootwrap.conf:ro
+ - /var/lib/kolla/config_files/nova-compute.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_libvirt:/var/lib/kolla/config_files/src:ro
+ - /dev:/dev
+ - /etc/iscsi:/etc/iscsi
- /etc/localtime:/etc/localtime:ro
- /lib/modules:/lib/modules:ro
- /run:/run
- - /dev:/dev
- - logs:/var/log/kolla/
- - /etc/iscsi:/etc/iscsi
+ - /var/lib/nova:/var/lib/nova
- libvirtd:/var/lib/libvirt
- - nova_compute:/var/lib/nova/
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- step_2: {}
diff --git a/docker/services/nova-conductor.yaml b/docker/services/nova-conductor.yaml
new file mode 100644
index 00000000..aa009b4f
--- /dev/null
+++ b/docker/services/nova-conductor.yaml
@@ -0,0 +1,85 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Nova Conductor service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaConductorImage:
+ description: image
+ default: 'centos-binary-nova-conductor:latest'
+ type: string
+ DockerNovaBaseImage:
+ description: image
+ default: 'centos-binary-nova-base:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+
+resources:
+
+ NovaConductorBase:
+ type: ../../puppet/services/nova-conductor.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Conductor service.
+ value:
+ service_name: {get_attr: [NovaConductorBase, role_data, service_name]}
+ config_settings: {get_attr: [NovaConductorBase, role_data, config_settings]}
+ step_config: {get_attr: [NovaConductorBase, role_data, step_config]}
+ service_config_settings: {get_attr: [NovaConductorBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &nova_conductor_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ]
+ puppet_tags: nova_config
+ config_volume: nova
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_conductor.json:
+ command: /usr/bin/nova-conductor
+ config_files:
+ - dest: /etc/nova/nova.conf
+ owner: nova
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ docker_config:
+ step_4:
+ nova_conductor:
+ image: *nova_conductor_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/nova-ironic.yaml b/docker/services/nova-ironic.yaml
new file mode 100644
index 00000000..c1858ded
--- /dev/null
+++ b/docker/services/nova-ironic.yaml
@@ -0,0 +1,88 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Nova Ironic Compute service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaComputeImage:
+ description: image
+ default: 'centos-binary-nova-compute-ironic:latest'
+ type: string
+ DockerNovaBaseImage:
+ description: image
+ default: 'centos-binary-nova-base:latest'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+
+ NovaIronicBase:
+ type: ../../puppet/services/nova-ironic.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Compute service.
+ value:
+ service_name: {get_attr: [NovaIronicBase, role_data, service_name]}
+ config_settings: {get_attr: [NovaIronicBase, role_data, config_settings]}
+ step_config: {get_attr: [NovaIronicBase, role_data, step_config]}
+ puppet_tags: nova_config,nova_paste_api_ini
+ docker_image: &nova_ironic_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+ config_volume: nova
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_ironic.json:
+ command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
+ config_files:
+ - dest: /etc/nova/nova.conf
+ owner: nova
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ - dest: /etc/nova/rootwrap.conf
+ owner: nova
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf
+ docker_config:
+ step_5:
+ novacompute:
+ image: *nova_ironic_image
+ net: host
+ privileged: true
+ user: root
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/nova_ironic.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova:/var/lib/kolla/config_files/src:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - /dev:/dev
+ - /etc/iscsi:/etc/iscsi
+ - nova_compute:/var/lib/nova/
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml
index 36511557..d6e7dc76 100644
--- a/docker/services/nova-libvirt.yaml
+++ b/docker/services/nova-libvirt.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
OpenStack Libvirt Service
@@ -10,7 +10,13 @@ parameters:
type: string
DockerLibvirtImage:
description: image
- default: 'centos-binary-libvirt'
+ default: 'centos-binary-nova-libvirt:latest'
+ type: string
+ # we configure libvirt via the nova-compute container due to coupling
+ # in the puppet modules
+ DockerNovaComputeImage:
+ description: image
+ default: 'centos-binary-nova-compute:latest'
type: string
ServiceNetMap:
default: {}
@@ -33,37 +39,54 @@ resources:
type: ../../puppet/services/nova-libvirt.yaml
properties:
EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
outputs:
role_data:
description: Role data for the Libvirt service.
value:
+ service_name: {get_attr: [NovaLibvirtBase, role_data, service_name]}
config_settings: {get_attr: [NovaLibvirtBase, role_data, config_settings]}
step_config: {get_attr: [NovaLibvirtBase, role_data, step_config]}
+ docker_image: &libvirt_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
puppet_tags: nova_config
+ config_volume: nova_libvirt
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova-libvirt.json:
+ command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
+ config_files:
+ - dest: /etc/libvirt/libvirtd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/libvirt/libvirtd.conf
docker_config:
- step_1:
+ step_3:
nova_libvirt:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
+ image: *libvirt_image
net: host
pid: host
privileged: true
restart: always
volumes:
- - /var/lib/etc-data/json-config/nova-libvirt.json:/var/lib/kolla/config_files/config.json
- - /var/lib/etc-data/libvirt/libvirtd.conf:/var/lib/kolla/config_files/libvirtd.conf
+ - /var/lib/kolla/config_files/nova-libvirt.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_libvirt:/var/lib/kolla/config_files/src:ro
+ - /dev:/dev
- /etc/localtime:/etc/localtime:ro
- /lib/modules:/lib/modules:ro
- /run:/run
- - /dev:/dev
- /sys/fs/cgroup:/sys/fs/cgroup
- - logs:/var/log/kolla/
+ - /var/lib/nova:/var/lib/nova
+ # Needed to use host's virtlogd
+ - /var/run/libvirt:/var/run/libvirt
- libvirtd:/var/lib/libvirt
- - nova_compute:/var/lib/nova/
- nova_libvirt_qemu:/etc/libvirt/qemu
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- step_2: {}
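
Note the split introduced here: the runtime container uses DockerLibvirtImage, while the configuration is generated with DockerNovaComputeImage into the nova_libvirt config volume, because (per the comment above) the libvirt settings come from the coupled nova puppet modules. A parameter_defaults sketch pinning both images (values mirror the template defaults and are only illustrative):

    parameter_defaults:
      DockerLibvirtImage: centos-binary-nova-libvirt:latest
      DockerNovaComputeImage: centos-binary-nova-compute:latest
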
diff --git a/puppet/services/pacemaker/neutron-plugin-ml2.yaml b/docker/services/nova-metadata.yaml
index 234f116e..a4baaa27 100644
--- a/puppet/services/pacemaker/neutron-plugin-ml2.yaml
+++ b/docker/services/nova-metadata.yaml
@@ -1,9 +1,14 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
- OpenStack Neutron ML2 Plugin with Pacemaker configured with Puppet
+ OpenStack containerized Nova Metadata service
parameters:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -13,30 +18,31 @@ parameters:
DefaultPasswords:
default: {}
type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
+
resources:
- NeutronMl2Base:
- type: ../neutron-plugin-ml2.yaml
+ NovaMetadataBase:
+ type: ../../puppet/services/nova-metadata.yaml
properties:
+ EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
- description: Role data for the Neutron ML2 plugin.
+ description: Role data for the Nova Metadata service.
value:
- service_name: neutron_plugin_ml2
+ service_name: {get_attr: [NovaMetadataBase, role_data, service_name]}
config_settings:
map_merge:
- - get_attr: [NeutronMl2Base, role_data, config_settings]
- - neutron::agents::ml2::ovs::enabled: false
- neutron::agents::ml2::ovs::manage_service: false
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::plugins::ml2
+ - get_attr: [NovaMetadataBase, role_data, config_settings]
+ step_config: {get_attr: [NovaMetadataBase, role_data, step_config]}
+ service_config_settings: {get_attr: [NovaMetadataBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: ''
+ puppet_tags: ''
+ config_volume: ''
+ config_image: ''
+ kolla_config: {}
+ docker_config: {}
diff --git a/docker/services/nova-placement.yaml b/docker/services/nova-placement.yaml
new file mode 100644
index 00000000..f0f7d724
--- /dev/null
+++ b/docker/services/nova-placement.yaml
@@ -0,0 +1,101 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Nova Placement API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaPlacementImage:
+ description: image
+ default: 'centos-binary-nova-placement-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ NovaPlacementBase:
+ type: ../../puppet/services/nova-placement.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Placement API role.
+ value:
+ service_name: {get_attr: [NovaPlacementBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [NovaPlacementBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: {get_attr: [NovaPlacementBase, role_data, step_config]}
+ service_config_settings: {get_attr: [NovaPlacementBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &nova_placement_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
+ puppet_tags: nova_config
+ config_volume: nova_placement
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_placement.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ config_files:
+ - dest: /etc/nova/nova.conf
+ owner: nova
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ - dest: /etc/httpd/conf.d/10-placement_wsgi.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-placement_wsgi.conf
+ - dest: /etc/httpd/conf/httpd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
+ - dest: /etc/httpd/conf/ports.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
+ - dest: /var/www/cgi-bin/nova/nova-placement-api
+ owner: nova
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/www/cgi-bin/nova/nova-placement-api
+ docker_config:
+ # start this early so it is up before computes start reporting
+ step_3:
+ nova_placement:
+ start_order: 1
+ image: *nova_placement_image
+ net: host
+ user: root
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_placement/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/nova_placement/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/nova-scheduler.yaml b/docker/services/nova-scheduler.yaml
new file mode 100644
index 00000000..a1a98b48
--- /dev/null
+++ b/docker/services/nova-scheduler.yaml
@@ -0,0 +1,84 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Nova Scheduler service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaSchedulerImage:
+ description: image
+ default: 'centos-binary-nova-scheduler:latest'
+ type: string
+ DockerNovaBaseImage:
+ description: image
+ default: 'centos-binary-nova-base:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ NovaSchedulerBase:
+ type: ../../puppet/services/nova-scheduler.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Scheduler service.
+ value:
+ service_name: {get_attr: [NovaSchedulerBase, role_data, service_name]}
+ config_settings: {get_attr: [NovaSchedulerBase, role_data, config_settings]}
+ step_config: {get_attr: [NovaSchedulerBase, role_data, step_config]}
+ service_config_settings: {get_attr: [NovaSchedulerBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &nova_scheduler_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaSchedulerImage} ]
+ puppet_tags: nova_config
+ config_volume: nova
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_scheduler.json:
+ command: /usr/bin/nova-scheduler
+ config_files:
+ - dest: /etc/nova/nova.conf
+ owner: nova
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ docker_config:
+ step_4:
+ nova_scheduler:
+ image: *nova_scheduler_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/rabbitmq.yaml b/docker/services/rabbitmq.yaml
new file mode 100644
index 00000000..cea3d8a7
--- /dev/null
+++ b/docker/services/rabbitmq.yaml
@@ -0,0 +1,119 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized RabbitMQ service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerRabbitmqImage:
+ description: image
+ default: 'centos-binary-rabbitmq:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RabbitCookie:
+ type: string
+ default: ''
+ hidden: true
+
+resources:
+
+ RabbitmqBase:
+ type: ../../puppet/services/rabbitmq.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the RabbitMQ role.
+ value:
+ service_name: {get_attr: [RabbitmqBase, role_data, service_name]}
+ config_settings: {get_attr: [RabbitmqBase, role_data, config_settings]}
+ step_config: {get_attr: [RabbitmqBase, role_data, step_config]}
+ service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &rabbitmq_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
+ puppet_tags: file
+ config_volume: rabbitmq
+ config_image: *rabbitmq_image
+ kolla_config:
+ /var/lib/kolla/config_files/rabbitmq.json:
+ command: /usr/lib/rabbitmq/bin/rabbitmq-server
+ config_files:
+ - dest: /etc/rabbitmq/rabbitmq.config
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq.config
+ - dest: /etc/rabbitmq/enabled_plugins
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/rabbitmq/enabled_plugins
+ - dest: /etc/rabbitmq/rabbitmq-env.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq-env.conf
+ - dest: /etc/rabbitmq/rabbitmqadmin.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmqadmin.conf
+ docker_config:
+ step_1:
+ rabbitmq_bootstrap:
+ start_order: 0
+ image: *rabbitmq_image
+ net: host
+ privileged: false
+ volumes:
+ - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - rabbitmq:/var/lib/rabbitmq/
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - KOLLA_BOOTSTRAP=True
+ -
+ list_join:
+ - '='
+ - - 'RABBITMQ_CLUSTER_COOKIE'
+ -
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: RabbitCookie}
+ - {get_param: [DefaultPasswords, rabbit_cookie]}
+ rabbitmq:
+ start_order: 1
+ image: *rabbitmq_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - rabbitmq:/var/lib/rabbitmq/
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
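
The RABBITMQ_CLUSTER_COOKIE entry above is assembled with list_join and a yaql expression that picks the first non-empty value, so an explicitly set RabbitCookie takes precedence over the generated DefaultPasswords entry. A parameter_defaults sketch for pinning it (the value is illustrative):

    parameter_defaults:
      RabbitCookie: 'secretclustercookie'
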
diff --git a/docker/services/services.yaml b/docker/services/services.yaml
index 37e7b655..cd9f4cb5 100644
--- a/docker/services/services.yaml
+++ b/docker/services/services.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Utility stack to convert an array of services into a set of combined
@@ -66,8 +66,14 @@ outputs:
global_config_settings:
{get_attr: [PuppetServices, role_data, global_config_settings]}
step_config:
- {get_attr: [PuppetServices, role_data, step_config]}
- puppet_tags: {list_join: [",", {get_attr: [ServiceChain, role_data, puppet_tags]}]}
+ {get_attr: [ServiceChain, role_data, step_config]}
+ docker_image: {get_attr: [ServiceChain, role_data, docker_image]}
+ puppet_tags: {get_attr: [ServiceChain, role_data, puppet_tags]}
+ config_volume: {get_attr: [ServiceChain, role_data, config_volume]}
+ config_image: {get_attr: [ServiceChain, role_data, config_image]}
+ kolla_config:
+ map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
docker_config:
- step_1: {map_merge: {get_attr: [ServiceChain, role_data, docker_config, step_1]}}
- step_2: {map_merge: {get_attr: [ServiceChain, role_data, docker_config, step_2]}}
+ {get_attr: [ServiceChain, role_data, docker_config]}
+ docker_puppet_tasks:
+ {get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
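
With this change docker_config is no longer limited to fixed step_1/step_2 keys: each service template contributes containers under its own step keys, and the per-service maps collected here are presumably merged per step later in the series (docker/post.j2.yaml). The combined result is shaped roughly like this sketch (entries abbreviated, image values illustrative):

    docker_config:
      step_1:
        memcached:
          image: tripleoupstream/centos-binary-memcached:latest
      step_4:
        nova_api:
          image: tripleoupstream/centos-binary-nova-api:latest
        neutron_api:
          image: tripleoupstream/centos-binary-neutron-server:latest
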
diff --git a/docker/services/swift-proxy.yaml b/docker/services/swift-proxy.yaml
new file mode 100644
index 00000000..09553319
--- /dev/null
+++ b/docker/services/swift-proxy.yaml
@@ -0,0 +1,76 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Swift Proxy service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerSwiftProxyImage:
+ description: image
+ default: 'centos-binary-swift-proxy-server:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ SwiftProxyBase:
+ type: ../../puppet/services/swift-proxy.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the swift proxy.
+ value:
+ service_name: {get_attr: [SwiftProxyBase, role_data, service_name]}
+ config_settings: {get_attr: [SwiftProxyBase, role_data, config_settings]}
+ step_config: {get_attr: [SwiftProxyBase, role_data, step_config]}
+ service_config_settings: {get_attr: [SwiftProxyBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &swift_proxy_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ puppet_tags: swift_proxy_config
+ config_volume: swift
+ config_image: *swift_proxy_image
+ kolla_config:
+ /var/lib/kolla/config_files/swift_proxy.json:
+ command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf
+ docker_config:
+ step_4:
+ swift_proxy:
+ image: *swift_proxy_image
+ net: host
+ user: swift
+ restart: always
+ # NOTE: /etc/swift is mounted read-write here; it is unclear whether the rings are written to at runtime.
+ volumes:
+ - /var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
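
Unlike the services above, this kolla_config carries only a command and no config_files: the generated Swift configuration is bind-mounted directly at /etc/swift rather than copied in, so the resulting config.json would be just (a sketch):

    {
      "command": "/usr/bin/swift-proxy-server /etc/swift/proxy-server.conf"
    }
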
diff --git a/docker/services/swift-ringbuilder.yaml b/docker/services/swift-ringbuilder.yaml
new file mode 100644
index 00000000..de91e7cf
--- /dev/null
+++ b/docker/services/swift-ringbuilder.yaml
@@ -0,0 +1,80 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Swift Ringbuilder
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerSwiftProxyImage:
+ description: image
+ default: 'centos-binary-swift-proxy-server:latest'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ SwiftMinPartHours:
+ type: number
+ default: 1
+ description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance.
+ SwiftPartPower:
+ default: 10
+ description: Partition Power to use when building Swift rings
+ type: number
+ SwiftRingBuild:
+ default: true
+ description: Whether to manage Swift rings or not
+ type: boolean
+ SwiftReplicas:
+ type: number
+ default: 3
+ description: How many replicas to use in the swift rings.
+ SwiftRawDisks:
+ default: {}
+ description: 'A hash of additional raw devices to use as Swift backend (e.g. {sdb: {}})'
+ type: json
+ SwiftUseLocalDir:
+ default: true
+ description: 'Use a local directory for Swift storage services when building rings'
+ type: boolean
+
+resources:
+
+ SwiftRingbuilderBase:
+ type: ../../puppet/services/swift-ringbuilder.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for Swift Ringbuilder configuration in containers.
+ value:
+ service_name: {get_attr: [SwiftRingbuilderBase, role_data, service_name]}
+ config_settings: {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
+ step_config: {get_attr: [SwiftRingbuilderBase, role_data, step_config]}
+ service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]}
+ puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance
+ # BEGIN DOCKER SETTINGS
+ docker_image: &docker_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ config_volume: 'swift'
+ config_image: *docker_image
+ kolla_config: {}
+ docker_config: {}
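
Like the ML2 plugin template, this one starts no containers; its puppet_tags select the ring-building resources so that the rings are generated into the shared 'swift' config volume, which the proxy and storage containers then mount at /etc/swift. The ring geometry can be tuned via parameter_defaults; a sketch using the template defaults:

    parameter_defaults:
      SwiftPartPower: 10
      SwiftReplicas: 3
      SwiftMinPartHours: 1
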
diff --git a/docker/services/swift-storage.yaml b/docker/services/swift-storage.yaml
new file mode 100644
index 00000000..5b2ec6e6
--- /dev/null
+++ b/docker/services/swift-storage.yaml
@@ -0,0 +1,343 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Swift Storage services.
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerSwiftProxyImage:
+ description: image
+ default: 'centos-binary-swift-proxy-server:latest'
+ type: string
+ DockerSwiftAccountImage:
+ description: image
+ default: 'centos-binary-swift-account:latest'
+ type: string
+ DockerSwiftContainerImage:
+ description: image
+ default: 'centos-binary-swift-container:latest'
+ type: string
+ DockerSwiftObjectImage:
+ description: image
+ default: 'centos-binary-swift-object:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+
+resources:
+
+ SwiftStorageBase:
+ type: ../../puppet/services/swift-storage.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the swift storage services.
+ value:
+ service_name: {get_attr: [SwiftStorageBase, role_data, service_name]}
+ config_settings: {get_attr: [SwiftStorageBase, role_data, config_settings]}
+ step_config: {get_attr: [SwiftStorageBase, role_data, step_config]}
+ service_config_settings: {get_attr: [SwiftStorageBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &swift_proxy_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config
+ config_volume: swift
+ config_image: *swift_proxy_image
+ kolla_config:
+ /var/lib/kolla/config_files/swift_account_auditor.json:
+ command: /usr/bin/swift-account-auditor /etc/swift/account-server.conf
+ /var/lib/kolla/config_files/swift_account_reaper.json:
+ command: /usr/bin/swift-account-reaper /etc/swift/account-server.conf
+ /var/lib/kolla/config_files/swift_account_replicator.json:
+ command: /usr/bin/swift-account-replicator /etc/swift/account-server.conf
+ /var/lib/kolla/config_files/swift_account_server.json:
+ command: /usr/bin/swift-account-server /etc/swift/account-server.conf
+ /var/lib/kolla/config_files/swift_container_auditor.json:
+ command: /usr/bin/swift-container-auditor /etc/swift/container-server.conf
+ /var/lib/kolla/config_files/swift_container_replicator.json:
+ command: /usr/bin/swift-container-replicator /etc/swift/container-server.conf
+ /var/lib/kolla/config_files/swift_container_updater.json:
+ command: /usr/bin/swift-container-updater /etc/swift/container-server.conf
+ /var/lib/kolla/config_files/swift_container_server.json:
+ command: /usr/bin/swift-container-server /etc/swift/container-server.conf
+ /var/lib/kolla/config_files/swift_object_auditor.json:
+ command: /usr/bin/swift-object-auditor /etc/swift/object-server.conf
+ /var/lib/kolla/config_files/swift_object_expirer.json:
+ command: /usr/bin/swift-object-expirer /etc/swift/object-expirer.conf
+ /var/lib/kolla/config_files/swift_object_replicator.json:
+ command: /usr/bin/swift-object-replicator /etc/swift/object-server.conf
+ /var/lib/kolla/config_files/swift_object_updater.json:
+ command: /usr/bin/swift-object-updater /etc/swift/object-server.conf
+ /var/lib/kolla/config_files/swift_object_server.json:
+ command: /usr/bin/swift-object-server /etc/swift/object-server.conf
+ docker_config:
+ step_3:
+ # The puppet config sets this up but we don't have a way to mount the named
+ # volume during the configuration stage. We just need to create this
+ # directory and make sure it's owned by swift.
+ swift_setup_srv:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir /srv/node && chown swift:swift /srv/node']
+ volumes:
+ - swift-srv:/srv
+ step_4:
+ swift_account_auditor:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: &kolla_env
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ swift_account_reaper:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: *kolla_env
+ swift_account_replicator:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: *kolla_env
+ swift_account_server:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: *kolla_env
+ swift_container_auditor:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: *kolla_env
+ swift_container_replicator:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: *kolla_env
+ swift_container_updater:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: *kolla_env
+ swift_container_server:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: *kolla_env
+ swift_object_auditor:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: *kolla_env
+ swift_object_expirer:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: *kolla_env
+ swift_object_replicator:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: *kolla_env
+ swift_object_updater:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: *kolla_env
+ swift_object_server:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /run:/run
+ - swift-srv:/srv
+ - /dev:/dev
+ environment: *kolla_env
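The docker_image and config_image values above are assembled by joining DockerNamespace and the per-service image parameter with '/'. As a minimal sketch of how that resolves, assuming the local-registry namespace given as an example in environments/docker.yaml further down:

  parameter_defaults:
    DockerNamespace: '192.168.24.1:8787/tripleoupstream'
    DockerSwiftObjectImage: 'centos-binary-swift-object:latest'
  # list_join with '/' then yields the image reference:
  #   192.168.24.1:8787/tripleoupstream/centos-binary-swift-object:latest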
diff --git a/docker/services/zaqar.yaml b/docker/services/zaqar.yaml
new file mode 100644
index 00000000..9f248ce1
--- /dev/null
+++ b/docker/services/zaqar.yaml
@@ -0,0 +1,99 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Zaqar services
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerZaqarImage:
+ description: image
+ default: 'centos-binary-zaqar:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ZaqarBase:
+ type: ../../puppet/services/zaqar.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Zaqar API role.
+ value:
+ service_name: {get_attr: [ZaqarBase, role_data, service_name]}
+ config_settings: {get_attr: [ZaqarBase, role_data, config_settings]}
+ step_config: {get_attr: [ZaqarBase, role_data, step_config]}
+ service_config_settings: {get_attr: [ZaqarBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ docker_image: &zaqar_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
+ puppet_tags: zaqar_config
+ config_volume: zaqar
+ config_image: *zaqar_image
+ kolla_config:
+ /var/lib/kolla/config_files/zaqar.json:
+ command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf
+ config_files:
+ - dest: /etc/zaqar/zaqar.conf
+ owner: zaqar
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf
+ /var/lib/kolla/config_files/zaqar_websocket.json:
+ command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf --config-file /etc/zaqar/1.conf
+ config_files:
+ - dest: /etc/zaqar/zaqar.conf
+ owner: zaqar
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf
+ - dest: /etc/zaqar/1.conf
+ owner: zaqar
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/zaqar/1.conf
+ docker_config:
+ step_4:
+ zaqar:
+ image: *zaqar_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ zaqar_websocket:
+ image: *zaqar_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
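Each kolla_config entry above is written out under /var/lib/kolla/config_files/ and bind-mounted into its container as config.json (see the volumes lists); with KOLLA_CONFIG_STRATEGY=COPY_ALWAYS the kolla entrypoint copies the listed config_files into place on every start and then runs the command. A minimal sketch of the resulting structure for the zaqar service, shown in YAML here although it is serialized as JSON on disk:

  command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf
  config_files:
    - source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf
      dest: /etc/zaqar/zaqar.conf
      owner: zaqar
      perm: '0640'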
diff --git a/environments/auditd.yaml b/environments/auditd.yaml
new file mode 100644
index 00000000..b358c98a
--- /dev/null
+++ b/environments/auditd.yaml
@@ -0,0 +1,119 @@
+resource_registry:
+ OS::TripleO::Services::AuditD: ../puppet/services/auditd.yaml
+
+parameter_defaults:
+ AuditdRules:
+ 'Record attempts to alter time through adjtimex':
+ content: '-a always,exit -F arch=b64 -S adjtimex -k audit_time_rules'
+ order : 1
+ 'Record attempts to alter time through settimeofday':
+ content: '-a always,exit -F arch=b64 -S settimeofday -k audit_time_rules'
+ order : 2
+ 'Record Attempts to Alter Time Through stime':
+ content: '-a always,exit -F arch=b64 -S stime -k audit_time_rules'
+ order : 3
+ 'Record Attempts to Alter Time Through clock_settime':
+ content: '-a always,exit -F arch=b64 -S clock_settime -k audit_time_rules'
+ order : 4
+ 'Record Attempts to Alter the localtime File':
+ content: '-w /etc/localtime -p wa -k audit_time_rules'
+ order : 5
+ 'Record Events that Modify the Systems Discretionary Access Controls - chmod':
+ content: '-a always,exit -F arch=b64 -S chmod -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 5
+ 'Record Events that Modify the Systems Discretionary Access Controls - chown':
+ content: '-a always,exit -F arch=b64 -S chown -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 6
+ 'Record Events that Modify the Systems Discretionary Access Controls - fchmod':
+ content: '-a always,exit -F arch=b64 -S fchmod -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 7
+ 'Record Events that Modify the Systems Discretionary Access Controls - fchmodat':
+ content: '-a always,exit -F arch=b64 -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 8
+ 'Record Events that Modify the Systems Discretionary Access Controls - fchown':
+ content: '-a always,exit -F arch=b64 -S fchown -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 9
+ 'Record Events that Modify the Systems Discretionary Access Controls - fchownat':
+ content: '-a always,exit -F arch=b64 -S fchownat -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 10
+ 'Record Events that Modify the Systems Discretionary Access Controls - fremovexattr':
+ content: '-a always,exit -F arch=b64 -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 11
+ 'Record Events that Modify the Systems Discretionary Access Controls - fsetxattr':
+ content: '-a always,exit -F arch=b64 -S fsetxattr -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 12
+ 'Record Events that Modify the Systems Discretionary Access Controls - lchown':
+ content: '-a always,exit -F arch=b64 -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 13
+ 'Record Events that Modify the Systems Discretionary Access Controls - lremovexattr':
+ content: '-a always,exit -F arch=b64 -S lremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 14
+ 'Record Events that Modify the Systems Discretionary Access Controls - lsetxattr':
+ content: '-a always,exit -F arch=b64 -S lsetxattr -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 15
+ 'Record Events that Modify the Systems Discretionary Access Controls - removexattr':
+ content: '-a always,exit -F arch=b64 -S removexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 16
+ 'Record Events that Modify the Systems Discretionary Access Controls - setxattr':
+ content: '-a always,exit -F arch=b64 -S setxattr -F auid>=1000 -F auid!=4294967295 -k perm_mod'
+ order : 17
+ 'Record Events that Modify User/Group Information - /etc/group':
+ content: '-w /etc/group -p wa -k audit_rules_usergroup_modification'
+ order : 18
+ 'Record Events that Modify User/Group Information - /etc/passwd':
+ content: '-w /etc/passwd -p wa -k audit_rules_usergroup_modification'
+ order : 19
+ 'Record Events that Modify User/Group Information - /etc/gshadow':
+ content: '-w /etc/gshadow -p wa -k audit_rules_usergroup_modification'
+ order : 20
+ 'Record Events that Modify User/Group Information - /etc/shadow':
+ content: '-w /etc/shadow -p wa -k audit_rules_usergroup_modification'
+ order : 21
+ 'Record Events that Modify User/Group Information - /etc/opasswd':
+ content: '-w /etc/opasswd -p wa -k audit_rules_usergroup_modification'
+ order : 22
+ 'Record Events that Modify the Systems Network Environment - sethostname / setdomainname':
+ content: '-a always,exit -F arch=b64 -S sethostname -S setdomainname -k audit_rules_networkconfig_modification'
+ order : 23
+ 'Record Events that Modify the Systems Network Environment - /etc/issue':
+ content: '-w /etc/issue -p wa -k audit_rules_networkconfig_modification'
+ order : 24
+ 'Record Events that Modify the Systems Network Environment - /etc/issue.net':
+ content: '-w /etc/issue.net -p wa -k audit_rules_networkconfig_modification'
+ order : 25
+ 'Record Events that Modify the Systems Network Environment - /etc/hosts':
+ content: '-w /etc/hosts -p wa -k audit_rules_networkconfig_modification'
+ order : 26
+ 'Record Events that Modify the Systems Network Environment - /etc/sysconfig/network':
+ content: '-w /etc/sysconfig/network -p wa -k audit_rules_networkconfig_modification'
+ order : 27
+ 'Record Events that Modify the Systems Mandatory Access Controls':
+ content: '-w /etc/selinux/ -p wa -k MAC-policy'
+ order : 28
+ 'Ensure auditd Collects Unauthorized Access Attempts to Files (unsuccessful / EACCES)':
+ content: '-a always,exit -F arch=b64 -S creat -S open -S openat -S open_by_handle_at -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access'
+ order : 29
+ 'Ensure auditd Collects Unauthorized Access Attempts to Files (unsuccessful / EPERM)':
+ content: '-a always,exit -F arch=b64 -S creat -S open -S openat -S open_by_handle_at -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access'
+ order : 30
+ 'Ensure auditd Collects Information on the Use of Privileged Commands':
+ content: '-a always,exit -F path=SETUID_PROG_PATH -F perm=x -F auid>=1000 -F auid!=4294967295 -k privileged'
+ order : 31
+ 'Ensure auditd Collects Information on Exporting to Media (successful)':
+ content: '-a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k export'
+ order : 32
+ 'Ensure auditd Collects File Deletion Events by User':
+ content: '-a always,exit -F arch=b64 -S rmdir -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete'
+ order : 33
+ 'Ensure auditd Collects System Administrator Actions':
+ content: '-w /etc/sudoers -p wa -k actions'
+ order : 34
+ 'Ensure auditd Collects Information on Kernel Module Loading and Unloading (insmod)':
+ content: '-w /usr/sbin/insmod -p x -k modules'
+ order : 35
+ 'Ensure auditd Collects Information on Kernel Module Loading and Unloading (rmmod)':
+ content: '-w /usr/sbin/rmmod -p x -k modules'
+ order : 36
+ 'Ensure auditd Collects Information on Kernel Module Loading and Unloading (modprobe)':
+ content: '-w /usr/sbin/modprobe -p x -k modules'
+ order : 37
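Each AuditdRules key above is expected to become one audit rule, with 'content' holding the raw auditd rule text and 'order' controlling its position. A minimal sketch of supplying a custom rule from your own environment file; the rule name and watch path below are hypothetical, and note that a later parameter_defaults assignment of AuditdRules replaces this whole map rather than merging with it:

  parameter_defaults:
    AuditdRules:
      'Record executions of setsebool':    # hypothetical rule
        content: '-w /usr/sbin/setsebool -p x -k privileged'
        order: 38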
diff --git a/environments/cinder-dellps-config.yaml b/environments/cinder-dellps-config.yaml
new file mode 100644
index 00000000..eefd0fd6
--- /dev/null
+++ b/environments/cinder-dellps-config.yaml
@@ -0,0 +1,31 @@
+# Copyright (c) 2016-2017 Dell Inc, or its subsidiaries.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# A Heat environment file which can be used to enable a
+# Cinder Dell EMC PS Series backend, configured via puppet
+resource_registry:
+ OS::TripleO::Services::CinderBackendDellPs: ../puppet/services/cinder-backend-dellps.yaml
+
+parameter_defaults:
+ CinderEnableDellPsBackend: true
+ CinderDellPsBackendName: 'tripleo_dellps'
+ CinderDellPsSanIp: ''
+ CinderDellPsSanLogin: ''
+ CinderDellPsSanPassword: ''
+ CinderDellPsSanThinProvision: true
+ CinderDellPsGroupname: 'group-0'
+ CinderDellPsPool: 'default'
+ CinderDellPsChapLogin: ''
+ CinderDellPsChapPassword: ''
+ CinderDellPsUseChap: false
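The SAN and CHAP settings above are intentionally empty placeholders. A minimal sketch of the kind of deployment-specific environment file that would supply them; every value below is illustrative, not a default from this template:

  parameter_defaults:
    CinderDellPsSanIp: '192.0.2.50'        # illustrative array management IP
    CinderDellPsSanLogin: 'san_admin'      # illustrative credentials
    CinderDellPsSanPassword: 'replace-me'
    CinderDellPsGroupname: 'group-0'
    CinderDellPsPool: 'default'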
diff --git a/environments/cinder-dellsc-config.yaml b/environments/cinder-dellsc-config.yaml
index 92e257d4..617d640c 100644
--- a/environments/cinder-dellsc-config.yaml
+++ b/environments/cinder-dellsc-config.yaml
@@ -1,7 +1,7 @@
# A Heat environment file which can be used to enable a
-# a Cinder Dell Storage Center ISCSI backend, configured via puppet
+# Cinder Dell EMC Storage Center ISCSI backend, configured via puppet
resource_registry:
- OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
+ OS::TripleO::Services::CinderBackendDellSc: ../puppet/services/cinder-backend-dellsc.yaml
parameter_defaults:
CinderEnableDellScBackend: true
diff --git a/environments/cinder-eqlx-config.yaml b/environments/cinder-eqlx-config.yaml
deleted file mode 100644
index ca2c5e5a..00000000
--- a/environments/cinder-eqlx-config.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-# A Heat environment file which can be used to enable a
-# a Cinder eqlx backen, configured via puppet
-resource_registry:
- OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
-
-parameter_defaults:
- CinderEnableEqlxBackend: true
- CinderEqlxBackendName: 'tripleo_eqlx'
- CinderEqlxSanIp: ''
- CinderEqlxSanLogin: ''
- CinderEqlxSanPassword: ''
- CinderEqlxSanThinProvision: true
- CinderEqlxGroupname: 'group-0'
- CinderEqlxPool: 'default'
- CinderEqlxChapLogin: ''
- CinderEqlxChapPassword: ''
- CinderEqlxUseChap: false
diff --git a/environments/cinder-hpelefthand-config.yaml b/environments/cinder-hpelefthand-config.yaml
new file mode 100644
index 00000000..90d0261e
--- /dev/null
+++ b/environments/cinder-hpelefthand-config.yaml
@@ -0,0 +1,13 @@
+# A Heat environment file which can be used to enable a
+# Cinder HPELeftHandISCSI backend, configured via puppet
+resource_registry:
+ OS::TripleO::Services::CinderHPELeftHandISCSI: ../puppet/services/cinder-hpelefthand-iscsi.yaml
+
+parameter_defaults:
+ CinderHPELeftHandISCSIApiUrl: ''
+ CinderHPELeftHandISCSIUserName: ''
+ CinderHPELeftHandISCSIPassword: ''
+ CinderHPELeftHandISCSIBackendName: 'tripleo_hpelefthand'
+ CinderHPELeftHandISCSIChapEnabled: false
+ CinderHPELeftHandClusterName: ''
+ CinderHPELeftHandDebug: false
diff --git a/environments/cinder-iser.yaml b/environments/cinder-iser.yaml
new file mode 100644
index 00000000..5eae7c04
--- /dev/null
+++ b/environments/cinder-iser.yaml
@@ -0,0 +1,19 @@
+parameter_defaults:
+
+ ## Whether to enable iscsi backend for Cinder.
+ CinderEnableIscsiBackend: true
+ CinderISCSIProtocol: 'iser'
+ CinderISCSIHelper: 'lioadm'
+
+ ## Whether to enable rbd (Ceph) backend for Cinder.
+ CinderEnableRbdBackend: false
+
+ ## Whether to enable NFS backend for Cinder.
+ CinderEnableNfsBackend: false
+
+ ## Whether to enable rbd (Ceph) backend for Nova ephemeral storage.
+ NovaEnableRbdBackend: false
+
+ ## Glance backend can be either 'rbd' (Ceph), 'swift' or 'file'.
+ ## GlanceBackend: swift
+
diff --git a/environments/cinder-scaleio-config.yaml b/environments/cinder-scaleio-config.yaml
new file mode 100644
index 00000000..cebd619c
--- /dev/null
+++ b/environments/cinder-scaleio-config.yaml
@@ -0,0 +1,35 @@
+# Copyright (c) 2016-2017 Dell Inc, or its subsidiaries.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# A Heat environment file which can be used to enable a
+# Cinder Dell EMC ScaleIO backend, configured via puppet
+resource_registry:
+ OS::TripleO::Services::CinderBackendScaleIO: ../puppet/services/cinder-backend-scaleio.yaml
+
+parameter_defaults:
+ CinderEnableScaleIOBackend: true
+ CinderScaleIOBackendName: 'tripleo_scaleio'
+ CinderScaleIOSanIp: ''
+ CinderScaleIOSanLogin: ''
+ CinderScaleIOSanPassword: ''
+ CinderScaleIORestServerPort: '443'
+ CinderScaleIOVerifyServerCertificate: false
+ CinderScaleIOServerCertificatePath: ''
+ CinderScaleIOProtectionDomainName: 'domain1'
+ CinderScaleIOStoragePoolName: 'pool1'
+ CinderScaleIOStoragePools: 'domain1:pool1'
+ CinderScaleIORoundVolumeCapacity: true
+ CinderScaleIOUnmapVolumeBeforeDeletion: false
+ CinderScaleIOMaxOverSubscriptionRatio: ''
+ CinderScaleIOSanThinProvision: true
diff --git a/environments/collectd-environment.yaml b/environments/collectd-environment.yaml
new file mode 100644
index 00000000..7780530c
--- /dev/null
+++ b/environments/collectd-environment.yaml
@@ -0,0 +1,23 @@
+resource_registry:
+ OS::TripleO::Services::Collectd: ../puppet/services/metrics/collectd.yaml
+
+# parameter_defaults:
+#
+## You can specify additional plugins to load using the
+## CollectdExtraPlugins key:
+#
+# CollectdExtraPlugins:
+# - disk
+# - df
+#
+## You can use ExtraConfig (or one of the related *ExtraConfig keys)
+## to configure collectd. See the documentation for puppet-collectd at
+## https://github.com/voxpupuli/puppet-collectd for details.
+#
+# ExtraConfig:
+# collectd::plugin::disk::disks:
+# - "/^[vhs]d[a-f][0-9]?$/"
+# collectd::plugin::df::mountpoints:
+# - "/"
+# collectd::plugin::df::ignoreselected: false
+# collectd::plugin::cpu::valuespercentage: true
diff --git a/environments/contrail/contrail-net.yaml b/environments/contrail/contrail-net.yaml
new file mode 100644
index 00000000..1e64f91d
--- /dev/null
+++ b/environments/contrail/contrail-net.yaml
@@ -0,0 +1,26 @@
+resource_registry:
+ OS::TripleO::Compute::Net::SoftwareConfig: contrail-nic-config-compute.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: contrail-nic-config.yaml
+ OS::TripleO::ContrailController::Net::SoftwareConfig: contrail-nic-config.yaml
+ OS::TripleO::ContrailAnalytics::Net::SoftwareConfig: contrail-nic-config.yaml
+ OS::TripleO::ContrailAnalyticsDatabase::Net::SoftwareConfig: contrail-nic-config.yaml
+ OS::TripleO::ContrailTsn::Net::SoftwareConfig: contrail-nic-config-compute.yaml
+
+parameter_defaults:
+ ControlPlaneSubnetCidr: '24'
+ ControlPlaneDefaultRoute: 192.0.2.254
+ InternalApiNetCidr: 10.0.0.0/24
+ InternalApiAllocationPools: [{'start': '10.0.0.10', 'end': '10.0.0.200'}]
+ InternalApiDefaultRoute: 10.0.0.1
+ ManagementNetCidr: 10.1.0.0/24
+ ManagementAllocationPools: [{'start': '10.1.0.10', 'end': '10.1.0.200'}]
+ ManagementInterfaceDefaultRoute: 10.1.0.1
+ ExternalNetCidr: 10.2.0.0/24
+ ExternalAllocationPools: [{'start': '10.2.0.10', 'end': '10.2.0.200'}]
+ EC2MetadataIp: 192.0.2.1 # Generally the IP of the Undercloud
+ DnsServers: ["8.8.8.8","8.8.4.4"]
+ VrouterPhysicalInterface: eth1
+ VrouterGateway: 10.0.0.1
+ VrouterNetmask: 255.255.255.0
+ ControlVirtualInterface: eth0
+ PublicVirtualInterface: vlan10
diff --git a/environments/contrail/contrail-nic-config-compute.yaml b/environments/contrail/contrail-nic-config-compute.yaml
new file mode 100644
index 00000000..3007638a
--- /dev/null
+++ b/environments/contrail/contrail-nic-config-compute.yaml
@@ -0,0 +1,167 @@
+heat_template_version: ocata
+
+description: >
+ Software Config to drive os-net-config to configure multiple interfaces
+ for the compute role. This is an example for a Nova compute node using
+ Contrail vrouter and the vhost0 interface.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ InternalApiDefaultRoute: # Not used by default in this template
+ default: '10.0.0.1'
+ description: The default route of the internal api network.
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ExternalNetworkVlanID:
+ default: 10
+ description: Vlan ID for the external network traffic.
+ type: number
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ TenantNetworkVlanID:
+ default: 50
+ description: Vlan ID for the tenant network traffic.
+ type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ ExternalInterfaceDefaultRoute: # Not used by default in this template
+ default: '10.0.0.1'
+ description: The default route of the external network.
+ type: string
+ ManagementInterfaceDefaultRoute: # Commented out by default in this template
+ default: unset
+ description: The default route of the management network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
+ list_join:
+ - '/'
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - type: interface
+ name: nic2
+ use_dhcp: false
+ - type: interface
+ name: vhost0
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ routes:
+ - default: true
+ next_hop:
+ get_param: InternalApiDefaultRoute
+ - type: linux_bridge
+ name: br0
+ use_dhcp: false
+ members:
+ - type: interface
+ name: nic3
+ - type: vlan
+ vlan_id:
+ get_param: ManagementNetworkVlanID
+ device: br0
+ addresses:
+ - ip_netmask:
+ get_param: ManagementIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: ExternalNetworkVlanID
+ device: br0
+ addresses:
+ - ip_netmask:
+ get_param: ExternalIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageNetworkVlanID
+ device: br0
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: vlan
+ vlan_id:
+ get_param: StorageMgmtNetworkVlanID
+ device: br0
+ addresses:
+ - ip_netmask:
+ get_param: StorageMgmtIpSubnet
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value:
+ get_resource: OsNetConfigImpl
diff --git a/environments/contrail/contrail-services.yaml b/environments/contrail/contrail-services.yaml
new file mode 100644
index 00000000..80ef9d3a
--- /dev/null
+++ b/environments/contrail/contrail-services.yaml
@@ -0,0 +1,45 @@
+# A Heat environment file which can be used to enable OpenContrail
+# extensions, configured via puppet
+resource_registry:
+ OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginContrail
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::TripleO::Services::ComputeNeutronCorePluginContrail
+ OS::TripleO::NodeUserData: ../../firstboot/install_vrouter_kmod.yaml
+ OS::TripleO::Services::ContrailHeat: ../../puppet/services/network/contrail-heat.yaml
+ OS::TripleO::Services::ContrailAnalytics: ../../puppet/services/network/contrail-analytics.yaml
+ OS::TripleO::Services::ContrailAnalyticsDatabase: ../../puppet/services/network/contrail-analytics-database.yaml
+ OS::TripleO::Services::ContrailConfig: ../../puppet/services/network/contrail-config.yaml
+ OS::TripleO::Services::ContrailControl: ../../puppet/services/network/contrail-control.yaml
+ OS::TripleO::Services::ContrailDatabase: ../../puppet/services/network/contrail-database.yaml
+ OS::TripleO::Services::ContrailWebUI: ../../puppet/services/network/contrail-webui.yaml
+ OS::TripleO::Services::ContrailTsn: ../../puppet/services/network/contrail-tsn.yaml
+ OS::TripleO::Services::ComputeNeutronCorePluginContrail: ../../puppet/services/network/contrail-vrouter.yaml
+ OS::TripleO::Services::NeutronCorePluginContrail: ../../puppet/services/network/contrail-neutron-plugin.yaml
+parameter_defaults:
+ ContrailRepo: http://192.168.24.1/contrail-3.2.0.0-19
+ EnablePackageInstall: true
+# ContrailConfigIfmapUserName: api-server
+# ContrailConfigIfmapUserPassword: api-server
+ OvercloudControlFlavor: control
+ OvercloudContrailControllerFlavor: contrail-controller
+ OvercloudContrailAnalyticsFlavor: contrail-analytics
+ OvercloudContrailAnalyticsDatabaseFlavor: contrail-analytics-database
+ OvercloudContrailTsnFlavor: contrail-tsn
+ OvercloudComputeFlavor: compute
+ ControllerCount: 3
+ ContrailControllerCount: 3
+ ContrailAnalyticsCount: 3
+ ContrailAnalyticsDatabaseCount: 3
+ ContrailTsnCount: 1
+ ComputeCount: 3
+ DnsServers: ["8.8.8.8","8.8.4.4"]
+ NtpServer: 10.0.0.1
+ NeutronCorePlugin: neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2
+ NeutronServicePlugins: ''
+ NeutronTunnelTypes: ''
+# NeutronMetadataProxySharedSecret:
+# ContrailControlRNDCSecret: # sha1/256 hmac key, e.g. echo -n "values" | openssl dgst -sha256 -hmac key -binary | base64
diff --git a/environments/contrail/roles_data_contrail.yaml b/environments/contrail/roles_data_contrail.yaml
new file mode 100644
index 00000000..5f6c4691
--- /dev/null
+++ b/environments/contrail/roles_data_contrail.yaml
@@ -0,0 +1,237 @@
+# Specifies which roles (groups of nodes) will be deployed
+# Note this is used as an input to the various *.j2.yaml
+# jinja2 templates, so that they are converted into *.yaml
+# during the plan creation (via a mistral action/workflow).
+#
+# The format is a list; each role entry supports the following keys:
+#
+# * name: (string) mandatory, name of the role, must be unique
+#
+# CountDefault: (number) optional, default number of nodes, defaults to 0
+# sets the default for the {{role.name}}Count parameter in overcloud.yaml
+#
+# HostnameFormatDefault: (string) optional default format string for hostname
+# defaults to '%stackname%-{{role.name.lower()}}-%index%'
+# sets the default for {{role.name}}HostnameFormat parameter in overcloud.yaml
+#
+# disable_constraints: (boolean) optional, whether to disable Nova and Glance
+# constraints for each role specified in the templates.
+#
+# upgrade_batch_size: (number): batch size for upgrades where tasks are
+# specified by services to run in batches vs all nodes at once.
+# This defaults to 1, but larger batches may be specified here.
+#
+# ServicesDefault: (list) optional default list of services to be deployed
+# on the role, defaults to an empty list. Sets the default for the
+# {{role.name}}Services parameter in overcloud.yaml
+
+- name: Controller # the 'primary' role goes first
+ CountDefault: 1
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephMds
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::CephRbdMirror
+ - OS::TripleO::Services::CephRgw
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderBackup
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::ContrailHeat
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::NeutronApi
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::Redis
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::MongoDb
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::NovaConsoleauth
+ - OS::TripleO::Services::NovaVncProxy
+ - OS::TripleO::Services::Ec2Api
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::CeilometerApi
+ - OS::TripleO::Services::CeilometerCollector
+ - OS::TripleO::Services::CeilometerExpirer
+ - OS::TripleO::Services::CeilometerAgentCentral
+ - OS::TripleO::Services::CeilometerAgentNotification
+ - OS::TripleO::Services::Horizon
+ - OS::TripleO::Services::GnocchiApi
+ - OS::TripleO::Services::GnocchiMetricd
+ - OS::TripleO::Services::GnocchiStatsd
+ - OS::TripleO::Services::ManilaApi
+ - OS::TripleO::Services::ManilaScheduler
+ - OS::TripleO::Services::ManilaBackendGeneric
+ - OS::TripleO::Services::ManilaBackendNetapp
+ - OS::TripleO::Services::ManilaBackendCephFs
+ - OS::TripleO::Services::ManilaShare
+ - OS::TripleO::Services::AodhApi
+ - OS::TripleO::Services::AodhEvaluator
+ - OS::TripleO::Services::AodhNotifier
+ - OS::TripleO::Services::AodhListener
+ - OS::TripleO::Services::SaharaApi
+ - OS::TripleO::Services::SaharaEngine
+ - OS::TripleO::Services::IronicApi
+ - OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::NovaIronic
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::OpenDaylightApi
+ - OS::TripleO::Services::OpenDaylightOvs
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::BarbicanApi
+ - OS::TripleO::Services::PankoApi
+ - OS::TripleO::Services::Zaqar
+ - OS::TripleO::Services::OVNDBs
+ - OS::TripleO::Services::CinderHPELeftHandISCSI
+ - OS::TripleO::Services::Etcd
+ - OS::TripleO::Services::AuditD
+
+- name: Compute
+ CountDefault: 1
+ HostnameFormatDefault: '%stackname%-novacompute-%index%'
+ disable_upgrade_deployment: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephClient
+ - OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::ComputeNeutronCorePlugin
+ - OS::TripleO::Services::ComputeNeutronOvsAgent
+ - OS::TripleO::Services::ComputeCeilometerAgent
+ - OS::TripleO::Services::ComputeNeutronL3Agent
+ - OS::TripleO::Services::ComputeNeutronMetadataAgent
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::NeutronSriovAgent
+ - OS::TripleO::Services::OpenDaylightOvs
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::AuditD
+
+- name: BlockStorage
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::BlockStorageCinderVolume
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::AuditD
+
+- name: ObjectStorage
+ disable_upgrade_deployment: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::AuditD
+
+- name: CephStorage
+ disable_upgrade_deployment: True
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::AuditD
+
+- name: ContrailController
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::ContrailConfig
+ - OS::TripleO::Services::ContrailControl
+ - OS::TripleO::Services::ContrailDatabase
+ - OS::TripleO::Services::ContrailWebUI
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+
+- name: ContrailAnalytics
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::ContrailAnalytics
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+
+- name: ContrailAnalyticsDatabase
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::ContrailAnalyticsDatabase
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
+
+- name: ContrailTsn
+ ServicesDefault:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::ContrailTsn
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::FluentdClient
diff --git a/environments/deployed-server-bootstrap-environment-centos.yaml b/environments/deployed-server-bootstrap-environment-centos.yaml
new file mode 100644
index 00000000..ebcdfc2b
--- /dev/null
+++ b/environments/deployed-server-bootstrap-environment-centos.yaml
@@ -0,0 +1,7 @@
+# An environment that can be used with the deployed-server.yaml template to do
+# initial bootstrapping of the deployed servers.
+resource_registry:
+ OS::TripleO::DeployedServer::Bootstrap: ../deployed-server/deployed-server-bootstrap-centos.yaml
+
+parameter_defaults:
+ EnablePackageInstall: True
diff --git a/environments/deployed-server-bootstrap-environment-rhel.yaml b/environments/deployed-server-bootstrap-environment-rhel.yaml
new file mode 100644
index 00000000..f614a91a
--- /dev/null
+++ b/environments/deployed-server-bootstrap-environment-rhel.yaml
@@ -0,0 +1,7 @@
+# An environment that can be used with the deployed-server.yaml template to do
+# initial bootstrapping of the deployed servers.
+resource_registry:
+ OS::TripleO::DeployedServer::Bootstrap: ../deployed-server/deployed-server-bootstrap-rhel.yaml
+
+parameter_defaults:
+ EnablePackageInstall: True
diff --git a/environments/deployed-server-environment.yaml b/environments/deployed-server-environment.yaml
index c63d399a..7bc1bd9b 100644
--- a/environments/deployed-server-environment.yaml
+++ b/environments/deployed-server-environment.yaml
@@ -1,4 +1,4 @@
resource_registry:
OS::TripleO::Server: ../deployed-server/deployed-server.yaml
- OS::TripleO::DeployedServerConfig: ../deployed-server/deployed-server-config.yaml
- OS::TripleO::DeployedServer::ControlPlanePort: ../deployed-server/ctlplane-port.yaml
+ OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port
+ OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
diff --git a/environments/deployed-server-noop-ctlplane.yaml b/environments/deployed-server-noop-ctlplane.yaml
index cfda314d..8835d5b1 100644
--- a/environments/deployed-server-noop-ctlplane.yaml
+++ b/environments/deployed-server-noop-ctlplane.yaml
@@ -1,4 +1,4 @@
resource_registry:
+ OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
OS::TripleO::Server: ../deployed-server/deployed-server.yaml
- OS::TripleO::DeployedServerConfig: ../deployed-server/deployed-server-config.yaml
- OS::TripleO::DeployedServer::ControlPlanePort: OS::Heat::None
+ OS::TripleO::DeployedServer::ControlPlanePort: ../deployed-server/deployed-neutron-port.yaml
diff --git a/environments/deployed-server-pacemaker-environment.yaml b/environments/deployed-server-pacemaker-environment.yaml
new file mode 100644
index 00000000..85fa7d2f
--- /dev/null
+++ b/environments/deployed-server-pacemaker-environment.yaml
@@ -0,0 +1,4 @@
+resource_registry:
+ OS::TripleO::Tasks::ControllerDeployedServerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerDeployedServerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerDeployedServerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
diff --git a/environments/docker-network-isolation.yaml b/environments/docker-network-isolation.yaml
deleted file mode 100644
index 87c81d0b..00000000
--- a/environments/docker-network-isolation.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-parameter_defaults:
- NeutronOpenvswitchAgentConfig: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/openvswitch_agent.ini"
- NeutronOpenvswitchAgentPluginVolume: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro"
- NeutronOpenvswitchAgentOvsVolume: "/var/lib/etc-data/neutron/conf.d/neutron-openvswitch-agent:/etc/neutron/conf.d/neutron-openvswitch-agent:ro"
diff --git a/environments/docker.yaml b/environments/docker.yaml
index 0755c61f..ca3715b4 100644
--- a/environments/docker.yaml
+++ b/environments/docker.yaml
@@ -1,31 +1,49 @@
resource_registry:
- # Docker container with heat agents for containerized compute node.
- OS::TripleO::NodeUserData: ../docker/firstboot/install_docker_agents.yaml
+ OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+
+  # NOTE (dprince): add roles here to be docker-enabled as we add support for them
OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml
OS::TripleO::Services::ComputeNeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
OS::TripleO::Services::NovaCompute: ../docker/services/nova-compute.yaml
- # NOTE (dprince) here we set new roles to be docker enabled as we add support
- #OS::TripleO::ComputePostDeploySteps: ../docker/post.yaml
- # NOTE (mandre) Defining per role post deploy steps doesn't work yet
- # Set a global PostDeploySteps that works for both containerized and
- # non-containerized roles
+ OS::TripleO::Services::Keystone: ../docker/services/keystone.yaml
+ OS::TripleO::Services::GlanceApi: ../docker/services/glance-api.yaml
+ OS::TripleO::Services::HeatApi: ../docker/services/heat-api.yaml
+ OS::TripleO::Services::HeatApiCfn: ../docker/services/heat-api-cfn.yaml
+ OS::TripleO::Services::HeatEngine: ../docker/services/heat-engine.yaml
+ OS::TripleO::Services::NovaApi: ../docker/services/nova-api.yaml
+ OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml
+ OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml
+ OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml
+  # FIXME: these need to go into an environments/services-docker dir?
+ OS::TripleO::Services::NovaIronic: ../docker/services/nova-ironic.yaml
+ OS::TripleO::Services::IronicApi: ../docker/services/ironic-api.yaml
+ OS::TripleO::Services::IronicConductor: ../docker/services/ironic-conductor.yaml
+ OS::TripleO::Services::IronicPxe: ../docker/services/ironic-pxe.yaml
+ OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
+ OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
+ OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
+ OS::TripleO::Services::NeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
+ OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
+ OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
+ OS::TripleO::Services::MistralApi: ../docker/services/mistral-api.yaml
+ OS::TripleO::Services::MistralEngine: ../docker/services/mistral-engine.yaml
+ OS::TripleO::Services::MistralExecutor: ../docker/services/mistral-executor.yaml
+ OS::TripleO::Services::Zaqar: ../docker/services/zaqar.yaml
+ OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
+ OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
+ OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml
+ OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml
+ OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml
+ OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml
+
OS::TripleO::PostDeploySteps: ../docker/post.yaml
OS::TripleO::Services: ../docker/services/services.yaml
parameter_defaults:
- NovaImage: atomic-image
# Defaults to 'tripleoupstream'. Specify a local docker registry
- # Example: 192.0.2.1:8787/tripleoupstream
+ # Example: 192.168.24.1:8787/tripleoupstream
DockerNamespace: tripleoupstream
- # Enable local Docker registry
DockerNamespaceIsRegistry: false
- DockerAgentImage: heat-docker-agents:newton
- # Docker containers
- DockerNovaComputeImage: centos-binary-nova-compute:newton
- DockerLibvirtImage: centos-binary-nova-libvirt:newton
- DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:newton
- DockerOvsVswitchdImage: centos-binary-openvswitch-vswitchd:newton
- DockerOpenvswitchDBImage: centos-binary-openvswitch-db-server:newton
ComputeServices:
- OS::TripleO::Services::NovaCompute
diff --git a/environments/enable-internal-tls.yaml b/environments/enable-internal-tls.yaml
index c01b4888..ff4ecfbe 100644
--- a/environments/enable-internal-tls.yaml
+++ b/environments/enable-internal-tls.yaml
@@ -2,6 +2,18 @@
# a TLS for the internal network via certmonger
parameter_defaults:
EnableInternalTLS: true
+
+ # Required for novajoin to enroll the overcloud nodes
+ ServerMetadata:
+ ipa_enroll: True
+
resource_registry:
+ OS::TripleO::Services::HAProxyInternalTLS: ../puppet/services/haproxy-internal-tls-certmonger.yaml
OS::TripleO::Services::ApacheTLS: ../puppet/services/apache-internal-tls-certmonger.yaml
OS::TripleO::Services::MySQLTLS: ../puppet/services/database/mysql-internal-tls-certmonger.yaml
+ # We use apache as a TLS proxy
+ OS::TripleO::Services::TLSProxyBase: ../puppet/services/apache.yaml
+
+  # Adds nova metadata that is used to create the extra service principals per
+  # node.
+ OS::TripleO::ServiceServerMetadataHook: ../extraconfig/nova_metadata/krb-service-principals.yaml
diff --git a/environments/enable-swap-partition.yaml b/environments/enable-swap-partition.yaml
new file mode 100644
index 00000000..71b70ec9
--- /dev/null
+++ b/environments/enable-swap-partition.yaml
@@ -0,0 +1,3 @@
+# Use this environment to create a swap partition in all Overcloud nodes
+resource_registry:
+ OS::TripleO::AllNodesExtraConfig: ../extraconfig/all_nodes/swap-partition.yaml
diff --git a/environments/enable-swap.yaml b/environments/enable-swap.yaml
new file mode 100644
index 00000000..9ba08642
--- /dev/null
+++ b/environments/enable-swap.yaml
@@ -0,0 +1,3 @@
+# Use this environment to create a swap file in all Overcloud nodes
+resource_registry:
+ OS::TripleO::AllNodesExtraConfig: ../extraconfig/all_nodes/swap.yaml
diff --git a/environments/enable_congress.yaml b/environments/enable_congress.yaml
new file mode 100644
index 00000000..1eea7f5e
--- /dev/null
+++ b/environments/enable_congress.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::Congress: ../puppet/services/congress.yaml
diff --git a/environments/enable_tacker.yaml b/environments/enable_tacker.yaml
new file mode 100644
index 00000000..1f9eca01
--- /dev/null
+++ b/environments/enable_tacker.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::Tacker: ../puppet/services/tacker.yaml
diff --git a/environments/external-loadbalancer-vip-v6.yaml b/environments/external-loadbalancer-vip-v6.yaml
index 5a2ef505..fbd1fb98 100644
--- a/environments/external-loadbalancer-vip-v6.yaml
+++ b/environments/external-loadbalancer-vip-v6.yaml
@@ -1,29 +1,24 @@
resource_registry:
- OS::TripleO::Network::Ports::NetVipMap: ../network/ports/net_vip_map_external_v6.yaml
- OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/from_service_v6.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool_v6.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool_v6.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool_v6.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool_v6.yaml
# OVS doesn't support IPv6 endpoints for tunneling yet, so this remains IPv4 for now.
OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+ # Management network is optional and disabled by default
+ #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management_from_pool_v6.yaml
parameter_defaults:
# When using an external loadbalancer set the following in parameter_defaults
# to control your VIPs (currently one per network)
# NOTE: we will eventually move to one VIP per service
#
- ControlPlaneIP: 192.0.2.251
- ExternalNetworkVip: 2001:db8:fd00:1000:0000:0000:0000:0005
- InternalApiNetworkVip: fd00:fd00:fd00:2000:0000:0000:0000:0005
- StorageNetworkVip: fd00:fd00:fd00:3000:0000:0000:0000:0005
- StorageMgmtNetworkVip: fd00:fd00:fd00:4000:0000:0000:0000:0005
- ServiceVips:
- redis: fd00:fd00:fd00:2000:0000:0000:0000:0006
+ ControlFixedIPs: [{'ip_address':'192.0.2.251'}]
+ PublicVirtualFixedIPs: [{'ip_address':'2001:db8:fd00:1000:0000:0000:0000:0005'}]
+ InternalApiVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0005'}]
+ StorageVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:3000:0000:0000:0000:0005'}]
+ StorageMgmtVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:4000:0000:0000:0000:0005'}]
+ RedisVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0006'}]
ControllerIPs:
external:
- 2001:db8:fd00:1000:0000:0000:0000:0007
diff --git a/environments/external-loadbalancer-vip.yaml b/environments/external-loadbalancer-vip.yaml
index 8656ba1a..1759c04c 100644
--- a/environments/external-loadbalancer-vip.yaml
+++ b/environments/external-loadbalancer-vip.yaml
@@ -1,10 +1,4 @@
resource_registry:
- OS::TripleO::Network::Ports::NetVipMap: ../network/ports/net_vip_map_external.yaml
- OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/noop.yaml
- OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/from_service.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
@@ -18,13 +12,12 @@ parameter_defaults:
# to control your VIPs (currently one per network)
# NOTE: we will eventually move to one VIP per service
#
- ControlPlaneIP: 192.0.2.251
- ExternalNetworkVip: 10.0.0.251
- InternalApiNetworkVip: 172.16.2.251
- StorageNetworkVip: 172.16.1.251
- StorageMgmtNetworkVip: 172.16.3.251
- ServiceVips:
- redis: 172.16.2.252
+ ControlFixedIPs: [{'ip_address':'192.0.2.251'}]
+ PublicVirtualFixedIPs: [{'ip_address':'10.0.0.251'}]
+ InternalApiVirtualFixedIPs: [{'ip_address':'172.16.2.251'}]
+ StorageVirtualFixedIPs: [{'ip_address':'172.16.1.251'}]
+ StorageMgmtVirtualFixedIPs: [{'ip_address':'172.16.3.251'}]
+ RedisVirtualFixedIPs: [{'ip_address':'172.16.2.252'}]
ControllerIPs:
external:
- 10.0.0.253
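
Both external-loadbalancer environments above replace the old one-parameter-per-network VIP settings (ExternalNetworkVip, InternalApiNetworkVip, the ServiceVips map, and so on) with the *FixedIPs list parameters, each taking a list of Neutron fixed-IP specs. A minimal sketch of the new format, with placeholder addresses that would have to match the VIPs already configured on the external load balancer (note that in the v6 variant only the ctlplane address stays IPv4):

    parameter_defaults:
      # Each entry is a fixed-IP spec; the addresses below are placeholders.
      ControlFixedIPs: [{'ip_address': '192.0.2.251'}]
      PublicVirtualFixedIPs: [{'ip_address': '10.0.0.251'}]
      InternalApiVirtualFixedIPs: [{'ip_address': '172.16.2.251'}]
      RedisVirtualFixedIPs: [{'ip_address': '172.16.2.252'}]
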
diff --git a/environments/horizon_password_validation.yaml b/environments/horizon_password_validation.yaml
new file mode 100644
index 00000000..1a0f92cc
--- /dev/null
+++ b/environments/horizon_password_validation.yaml
@@ -0,0 +1,5 @@
+# Use this environment to pass in a validation regex for Horizon's password
+# validation checks
+parameter_defaults:
+ HorizonPasswordValidator: '.*'
+ HorizonPasswordValidatorHelp: 'Your password does not meet the requirements.'
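
The default '.*' regex above accepts any password; the file is a template for supplying a real policy. A sketch with a stricter, purely hypothetical policy (at least eight characters, letters and digits only) and a matching help message:

    parameter_defaults:
      # Hypothetical example policy; adjust the regex to the local password rules.
      HorizonPasswordValidator: '^[A-Za-z0-9]{8,}$'
      HorizonPasswordValidatorHelp: 'Use at least 8 characters, letters and digits only.'
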
diff --git a/environments/host-config-pre-network.j2.yaml b/environments/host-config-pre-network.j2.yaml
new file mode 100644
index 00000000..c79e28b4
--- /dev/null
+++ b/environments/host-config-pre-network.j2.yaml
@@ -0,0 +1,16 @@
+resource_registry:
+# Create registry entries only for roles whose name contains "Compute", e.g. ComputeOvsDpdk, ComputeSriov, etc.
+{%- for role in roles -%}
+{% if "Compute" in role.name %}
+ OS::TripleO::{{role.name}}::PreNetworkConfig: ../extraconfig/pre_network/{{role.name.lower()}}-host_config_and_reboot.yaml
+{%- endif -%}
+{% endfor %}
+
+#parameter_defaults:
+ # Sample parameters for Compute and ComputeOvsDpdk roles
+ #ComputeKernelArgs: ""
+ #ComputeTunedProfileName: ""
+ #ComputeHostCpuList: ""
+ #ComputeOvsDpdkKernelArgs: ""
+ #ComputeOvsDpdkTunedProfileName: ""
+ #ComputeOvsDpdkHostCpuList: ""
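
The commented parameters above are role-prefixed: the Jinja loop registers a PreNetworkConfig resource for every role whose name contains "Compute", and the sample parameters follow the <Role>KernelArgs / <Role>TunedProfileName / <Role>HostCpuList pattern. A sketch of what uncommenting them could look like for a ComputeOvsDpdk role; the hugepage arguments, profile name and CPU list are illustrative values, not recommendations:

    parameter_defaults:
      # Illustrative values only; size hugepages and CPU lists for the actual hardware.
      ComputeOvsDpdkKernelArgs: "default_hugepagesz=1GB hugepagesz=1G hugepages=32 iommu=pt intel_iommu=on"
      ComputeOvsDpdkTunedProfileName: "cpu-partitioning"
      ComputeOvsDpdkHostCpuList: "2,4,6,8"
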
diff --git a/environments/low-memory-usage.yaml b/environments/low-memory-usage.yaml
index 47b2003d..3a606336 100644
--- a/environments/low-memory-usage.yaml
+++ b/environments/low-memory-usage.yaml
@@ -11,8 +11,8 @@ parameter_defaults:
SwiftWorkers: 1
GnocchiMetricdWorkers: 1
- ApacheMaxRequestWorkers: 32
- ApacheServerLimit: 32
+ ApacheMaxRequestWorkers: 100
+ ApacheServerLimit: 100
ControllerExtraConfig:
'nova::network::neutron::neutron_url_timeout': '60'
diff --git a/environments/major-upgrade-all-in-one.yaml b/environments/major-upgrade-all-in-one.yaml
new file mode 100644
index 00000000..4283b212
--- /dev/null
+++ b/environments/major-upgrade-all-in-one.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
diff --git a/environments/major-upgrade-composable-steps.yaml b/environments/major-upgrade-composable-steps.yaml
index 7e10014b..9e3cddba 100644
--- a/environments/major-upgrade-composable-steps.yaml
+++ b/environments/major-upgrade-composable-steps.yaml
@@ -1,3 +1,15 @@
resource_registry:
- OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml
- OS::TripleO::PostDeploySteps: OS::Heat::None
+ OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
+parameter_defaults:
+ UpgradeLevelNovaCompute: auto
+ UpgradeInitCommonCommand: |
+ #!/bin/bash
+ # Newton to Ocata, we need to remove old hiera hook data and
+ # install ansible heat agents and ansible-pacemaker
+ set -eu
+ yum install -y python-heat-agent-*
+ yum install -y ansible-pacemaker
+ rm -f /usr/libexec/os-apply-config/templates/etc/puppet/hiera.yaml
+ rm -f /usr/libexec/os-refresh-config/configure.d/40-hiera-datafiles
+ rm -f /etc/puppet/hieradata/*.yaml
+
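UpgradeInitCommonCommand above runs on the nodes ahead of the composable upgrade steps, and the default body handles the Newton-to-Ocata agent changes. Since it is an ordinary parameter, a later environment file can override it when extra preparation is needed; a hedged sketch, where the extra repository name is purely hypothetical:

    parameter_defaults:
      UpgradeLevelNovaCompute: auto
      UpgradeInitCommonCommand: |
        #!/bin/bash
        set -eu
        # Hypothetical extra step for this site: enable a local mirror first.
        yum-config-manager --enable local-ocata-mirror
        # Then the stock Newton-to-Ocata preparation from above.
        yum install -y python-heat-agent-* ansible-pacemaker
        rm -f /usr/libexec/os-apply-config/templates/etc/puppet/hiera.yaml
        rm -f /usr/libexec/os-refresh-config/configure.d/40-hiera-datafiles
        rm -f /etc/puppet/hieradata/*.yaml
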
diff --git a/environments/major-upgrade-converge.yaml b/environments/major-upgrade-converge.yaml
new file mode 100644
index 00000000..f09fb20e
--- /dev/null
+++ b/environments/major-upgrade-converge.yaml
@@ -0,0 +1,7 @@
+# Use this after the update of all nodes is complete, to reset any
+# mappings that are only used during the upgrade
+resource_registry:
+ OS::TripleO::PostDeploySteps: ../puppet/post.yaml
+parameter_defaults:
+ UpgradeLevelNovaCompute: ''
+ UpgradeInitCommonCommand: ''
diff --git a/environments/net-bond-with-vlans-no-external.yaml b/environments/net-bond-with-vlans-no-external.yaml
index 75959a0b..cc27d4f0 100644
--- a/environments/net-bond-with-vlans-no-external.yaml
+++ b/environments/net-bond-with-vlans-no-external.yaml
@@ -20,7 +20,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller-no-external.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml
-
-# NOTE: with no external interface we should be able to use the
-# default Neutron l3_agent.ini setting for the external bridge (br-ex)
-# i.e. No need to set: NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-bond-with-vlans-v6.yaml b/environments/net-bond-with-vlans-v6.yaml
index 73dda3d9..dc6fdfe3 100644
--- a/environments/net-bond-with-vlans-v6.yaml
+++ b/environments/net-bond-with-vlans-v6.yaml
@@ -12,9 +12,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller-v6.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml
-
-parameter_defaults:
- # This sets 'external_network_bridge' in l3_agent.ini to an empty string
- # so that external networks act like provider bridge networks (they
- # will plug into br-int instead of br-ex)
- NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-bond-with-vlans.yaml b/environments/net-bond-with-vlans.yaml
index de8f8f74..38c31cac 100644
--- a/environments/net-bond-with-vlans.yaml
+++ b/environments/net-bond-with-vlans.yaml
@@ -11,9 +11,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml
-
-parameter_defaults:
- # This sets 'external_network_bridge' in l3_agent.ini to an empty string
- # so that external networks act like provider bridge networks (they
- # will plug into br-int instead of br-ex)
- NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-single-nic-linux-bridge-with-vlans.yaml b/environments/net-single-nic-linux-bridge-with-vlans.yaml
index fd80bb9b..f34cfb92 100644
--- a/environments/net-single-nic-linux-bridge-with-vlans.yaml
+++ b/environments/net-single-nic-linux-bridge-with-vlans.yaml
@@ -11,9 +11,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/controller.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
-
-parameter_defaults:
- # This sets 'external_network_bridge' in l3_agent.ini to an empty string
- # so that external networks act like provider bridge networks (they
- # will plug into br-int instead of br-ex)
- NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-single-nic-with-vlans-no-external.yaml b/environments/net-single-nic-with-vlans-no-external.yaml
index c7594b32..65d38137 100644
--- a/environments/net-single-nic-with-vlans-no-external.yaml
+++ b/environments/net-single-nic-with-vlans-no-external.yaml
@@ -19,7 +19,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller-no-external.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml
-
-# NOTE: with no external interface we should be able to use the
-# default Neutron l3_agent.ini setting for the external bridge (br-ex)
-# i.e. No need to set: NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-single-nic-with-vlans-v6.yaml b/environments/net-single-nic-with-vlans-v6.yaml
index 8210bad3..966e5fe9 100644
--- a/environments/net-single-nic-with-vlans-v6.yaml
+++ b/environments/net-single-nic-with-vlans-v6.yaml
@@ -11,9 +11,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller-v6.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml
-
-parameter_defaults:
- # This sets 'external_network_bridge' in l3_agent.ini to an empty string
- # so that external networks act like provider bridge networks (they
- # will plug into br-int instead of br-ex)
- NeutronExternalNetworkBridge: "''"
diff --git a/environments/net-single-nic-with-vlans.yaml b/environments/net-single-nic-with-vlans.yaml
index a61bc6e1..b087b3e4 100644
--- a/environments/net-single-nic-with-vlans.yaml
+++ b/environments/net-single-nic-with-vlans.yaml
@@ -11,9 +11,3 @@ resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml
-
-parameter_defaults:
- # This sets 'external_network_bridge' in l3_agent.ini to an empty string
- # so that external networks act like provider bridge networks (they
- # will plug into br-int instead of br-ex)
- NeutronExternalNetworkBridge: "''"
diff --git a/environments/network-environment.yaml b/environments/network-environment.yaml
index b02fc198..210b6b03 100644
--- a/environments/network-environment.yaml
+++ b/environments/network-environment.yaml
@@ -43,13 +43,11 @@ parameter_defaults:
ExternalInterfaceDefaultRoute: 10.0.0.1
# Uncomment if using the Management Network (see network-management.yaml)
# ManagementNetCidr: 10.0.1.0/24
- # ManagementAllocationPools: [{'start': '10.0.1.10', 'end', '10.0.1.50'}]
+ # ManagementAllocationPools: [{'start': '10.0.1.10', 'end': '10.0.1.50'}]
# Use either this parameter or ControlPlaneDefaultRoute in the NIC templates
# ManagementInterfaceDefaultRoute: 10.0.1.1
# Define the DNS servers (maximum 2) for the overcloud nodes
DnsServers: ["8.8.8.8","8.8.4.4"]
- # Set to empty string to enable multiple external networks or VLANs
- NeutronExternalNetworkBridge: "''"
# List of Neutron network types for tenant networks (will be used in order)
NeutronNetworkType: 'vxlan,vlan'
# The tunnel type for the tenant network (vxlan or gre). Set to '' to disable tunneling.
diff --git a/environments/network-isolation-no-tunneling.yaml b/environments/network-isolation-no-tunneling.yaml
index 5d2a915b..ff1d7887 100644
--- a/environments/network-isolation-no-tunneling.yaml
+++ b/environments/network-isolation-no-tunneling.yaml
@@ -8,30 +8,54 @@ resource_registry:
OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml
OS::TripleO::Network::Storage: ../network/storage.yaml
+ OS::TripleO::Network::Tenant: ../network/noop.yaml
+ # Management network is optional and disabled by default.
+ # To enable it, include environments/network-management.yaml
+ #OS::TripleO::Network::Management: ../network/management.yaml
+
+ # Port assignments for the VIPs
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
# Port assignments for the controller role
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::Controller::Ports::TenantPort: ../network/ports/noop.yaml
+ #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
# Port assignments for the compute role
+ OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
+ OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::TenantPort: ../network/ports/noop.yaml
+ #OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
# Port assignments for the ceph storage role
+ OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
+ #OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
# Port assignments for the swift storage role
+ OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
+ #OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
# Port assignments for the block storage role
+ OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-
- # Port assignments for service virtual IPs for the controller role
- OS::TripleO::Controller::Ports::RedisVipPort: ../network/ports/vip.yaml
+ OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
+ #OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
diff --git a/environments/network-isolation.yaml b/environments/network-isolation.yaml
index 737d7d36..a6b4b8ae 100644
--- a/environments/network-isolation.yaml
+++ b/environments/network-isolation.yaml
@@ -18,8 +18,6 @@ resource_registry:
OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
- # Port assignments for service virtual IPs for the controller role
- OS::TripleO::Controller::Ports::RedisVipPort: ../network/ports/vip.yaml
# Port assignments for the controller role
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml
@@ -59,4 +57,3 @@ resource_registry:
OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
#OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
diff --git a/environments/neutron-ml2-fujitsu-cfab.yaml b/environments/neutron-ml2-fujitsu-cfab.yaml
new file mode 100644
index 00000000..f14f7ee2
--- /dev/null
+++ b/environments/neutron-ml2-fujitsu-cfab.yaml
@@ -0,0 +1,21 @@
+# A Heat environment file which can be used to enable Fujitsu C-Fabric
+# plugin, configured via puppet
+resource_registry:
+ OS::TripleO::Services::NeutronML2FujitsuCfab: ../puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml
+
+parameter_defaults:
+ # Fixed
+ NeutronMechanismDrivers: ['openvswitch','fujitsu_cfab']
+ NeutronTypeDrivers: 'vlan'
+ NeutronNetworkType: 'vlan'
+
+ # Required
+ NeutronFujitsuCfabAddress: '192.168.0.1'
+ NeutronFujitsuCfabUserName: 'admin'
+ NeutronFujitsuCfabPassword:
+
+ # Optional
+ #NeutronFujitsuCfabPhysicalNetworks:
+ #NeutronFujitsuCfabSharePprofile:
+ #NeutronFujitsuCfabPprofilePrefix:
+ #NeutronFujitsuCfabSaveConfig:
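
NeutronFujitsuCfabPassword above is intentionally left blank and must be supplied before the environment is usable. A filled-in sketch; the address and user name keep the file's own example values and the password is a placeholder, not a default:

    parameter_defaults:
      NeutronFujitsuCfabAddress: '192.168.0.1'
      NeutronFujitsuCfabUserName: 'admin'
      NeutronFujitsuCfabPassword: 'REPLACE_WITH_SWITCH_PASSWORD'  # placeholder only
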
diff --git a/environments/neutron-ml2-fujitsu-fossw.yaml b/environments/neutron-ml2-fujitsu-fossw.yaml
new file mode 100644
index 00000000..8db8da75
--- /dev/null
+++ b/environments/neutron-ml2-fujitsu-fossw.yaml
@@ -0,0 +1,22 @@
+# A Heat environment file which can be used to enable Fujitsu fossw
+# plugin, configured via puppet
+resource_registry:
+ OS::TripleO::Services::NeutronML2FujitsuFossw: ../puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml
+
+parameter_defaults:
+ # Fixed
+ NeutronMechanismDrivers: ['openvswitch','fujitsu_fossw']
+ NeutronTypeDrivers: ['vlan','vxlan']
+ NeutronNetworkType: ['vlan','vxlan']
+
+ # Required
+ NeutronFujitsuFosswIps: '192.168.0.1,192.168.0.2'
+ NeutronFujitsuFosswUserName:
+ NeutronFujitsuFosswPassword:
+
+ # Optional
+ #NeutronFujitsuFosswPort:
+ #NeutronFujitsuFosswTimeout:
+ #NeutronFujitsuFosswUdpDestPort:
+ #NeutronFujitsuFosswOvsdbVlanidRangeMin:
+ #NeutronFujitsuFosswOvsdbPort:
diff --git a/environments/neutron-ml2-ovn.yaml b/environments/neutron-ml2-ovn.yaml
index bafb2a73..7483bdbb 100644
--- a/environments/neutron-ml2-ovn.yaml
+++ b/environments/neutron-ml2-ovn.yaml
@@ -3,19 +3,22 @@
resource_registry:
OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginML2OVN
OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-ovn.yaml
# Disabling Neutron services that overlap with OVN
OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::OVNDBs: ../puppet/services/ovn-dbs.yaml
parameter_defaults:
NeutronMechanismDrivers: ovn
- OVNDbHost: '0.0.0.0'
- OVNSouthboundServerPort: 6642
- OVNNorthboundServerPort: 6641
- OVNDbConnectionTimeout: 60
OVNVifType: ovs
OVNNeutronSyncMode: log
OVNQosDriver: ovn-qos
OVNTunnelEncapType: geneve
+ NeutronEnableDHCPAgent: false
+ NeutronTypeDrivers: 'geneve,vxlan,vlan,flat'
+ NeutronNetworkType: 'geneve'
+ NeutronServicePlugins: 'qos,ovn-router'
+ NeutronVniRanges: ['1:65536', ]
diff --git a/environments/neutron-opencontrail.yaml b/environments/neutron-opencontrail.yaml
deleted file mode 100644
index 51575b86..00000000
--- a/environments/neutron-opencontrail.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-# A Heat environment file which can be used to enable OpenContrail
-# extensions, configured via puppet
-resource_registry:
- OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
- OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
- OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
- OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
- OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
- OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
- # Override the NeutronCorePlugin to use Nuage
- OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginOpencontrail
- OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-opencontrail.yaml
-
-parameter_defaults:
- NeutronCorePlugin: neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2
- NeutronServicePlugins: neutron_plugin_contrail.plugins.opencontrail.loadbalancer.v2.plugin.LoadBalancerPluginV2
- NeutronTunnelTypes: ''
-
- # required params:
- #ContrailApiServerIp:
- #ContrailExtensions: ''
-
- # optional params
- # ContrailApiServerPort: 8082
- # ContrailMultiTenancy: false
diff --git a/environments/neutron-opendaylight-l3.yaml b/environments/neutron-opendaylight-l3.yaml
deleted file mode 100644
index 00be3048..00000000
--- a/environments/neutron-opendaylight-l3.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-# A Heat environment that can be used to deploy OpenDaylight with L3 DVR
-resource_registry:
- OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
- OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
- OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
- OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
- OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
- OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
-
-parameter_defaults:
- NeutronEnableForceMetadata: true
- NeutronMechanismDrivers: 'opendaylight'
- NeutronServicePlugins: "networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin"
- OpenDaylightEnableL3: "'yes'"
diff --git a/environments/neutron-opendaylight.yaml b/environments/neutron-opendaylight.yaml
index 35c90aab..ed7292b7 100644
--- a/environments/neutron-opendaylight.yaml
+++ b/environments/neutron-opendaylight.yaml
@@ -1,11 +1,13 @@
-# A Heat environment that can be used to deploy OpenDaylight
+# A Heat environment that can be used to deploy OpenDaylight with L3 DVR
resource_registry:
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
parameter_defaults:
NeutronEnableForceMetadata: true
- NeutronMechanismDrivers: 'opendaylight'
+ NeutronMechanismDrivers: 'opendaylight_v2'
+ NeutronServicePlugins: 'odl-router_v2,trunk'
diff --git a/environments/neutron-sriov.yaml b/environments/neutron-sriov.yaml
index 9b7e51f9..5e9e15e3 100755
--- a/environments/neutron-sriov.yaml
+++ b/environments/neutron-sriov.yaml
@@ -9,9 +9,6 @@ parameter_defaults:
#NovaSchedulerDefaultFilters: ['RetryFilter','AvailabilityZoneFilter','RamFilter','ComputeFilter','ComputeCapabilitiesFilter','ImagePropertiesFilter','ServerGroupAntiAffinityFilter','ServerGroupAffinityFilter','PciPassthroughFilter']
#NovaSchedulerAvailableFilters: ["nova.scheduler.filters.all_filters","nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter"]
- # Provide the vendorid:productid of the VFs
- #NeutronSupportedPCIVendorDevs: ['8086:154c','8086:10ca','8086:1520']
-
#NeutronPhysicalDevMappings: "datacentre:ens20f2"
# Number of VFs that needs to be configured for a physical interface
diff --git a/environments/puppet-ceph-external.yaml b/environments/puppet-ceph-external.yaml
index 06e4f7aa..5f8b02ad 100644
--- a/environments/puppet-ceph-external.yaml
+++ b/environments/puppet-ceph-external.yaml
@@ -30,5 +30,8 @@ parameter_defaults:
# finally we disable the Cinder LVM backend
CinderEnableIscsiBackend: false
+  # Uncomment if connecting to a pre-Jewel (or RHCS 1.3) Ceph cluster
+ # RbdDefaultFeatures: 1
+
# Backward compatibility setting, will be removed in the future
CephAdminKey: ''
diff --git a/environments/puppet-ceph.yaml b/environments/puppet-ceph.yaml
new file mode 100644
index 00000000..57af540a
--- /dev/null
+++ b/environments/puppet-ceph.yaml
@@ -0,0 +1,12 @@
+resource_registry:
+ OS::TripleO::Services::CephMon: ../puppet/services/ceph-mon.yaml
+ OS::TripleO::Services::CephOSD: ../puppet/services/ceph-osd.yaml
+ OS::TripleO::Services::CephClient: ../puppet/services/ceph-client.yaml
+
+parameter_defaults:
+ CinderEnableIscsiBackend: false
+ CinderEnableRbdBackend: true
+ CinderBackupBackend: ceph
+ NovaEnableRbdBackend: true
+ GlanceBackend: rbd
+ GnocchiBackend: rbd
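
puppet-ceph.yaml both enables the in-overcloud Ceph services and switches the default backends to RBD. The parameter_defaults are ordinary overrides, so a later environment file can still opt an individual service back out; a minimal sketch that keeps the RBD settings for Nova and Cinder but returns Glance to the Swift backend (assuming Swift is deployed and is the desired image store):

    parameter_defaults:
      # Only the keys being changed need to appear; the rest of
      # puppet-ceph.yaml continues to apply.
      GlanceBackend: swift
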
diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml
index b8e93f20..da607a72 100644
--- a/environments/puppet-pacemaker.yaml
+++ b/environments/puppet-pacemaker.yaml
@@ -1,7 +1,6 @@
# An environment which enables configuration of an
# Overcloud controller with Pacemaker.
resource_registry:
- OS::TripleO::ControllerConfig: ../puppet/controller-config-pacemaker.yaml
OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
@@ -13,6 +12,7 @@ resource_registry:
OS::TripleO::Services::RabbitMQ: ../puppet/services/pacemaker/rabbitmq.yaml
OS::TripleO::Services::HAproxy: ../puppet/services/pacemaker/haproxy.yaml
OS::TripleO::Services::Pacemaker: ../puppet/services/pacemaker.yaml
+ OS::TripleO::Services::PacemakerRemote: ../puppet/services/pacemaker_remote.yaml
OS::TripleO::Services::Redis: ../puppet/services/pacemaker/database/redis.yaml
OS::TripleO::Services::MySQL: ../puppet/services/pacemaker/database/mysql.yaml
# Services that are disabled by default (use relevant environment files):
diff --git a/environments/services/ceph-mds.yaml b/environments/services/ceph-mds.yaml
new file mode 100644
index 00000000..2b51374c
--- /dev/null
+++ b/environments/services/ceph-mds.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::CephMds: ../../puppet/services/ceph-mds.yaml
\ No newline at end of file
diff --git a/environments/services/ceph-rbdmirror.yaml b/environments/services/ceph-rbdmirror.yaml
new file mode 100644
index 00000000..b350e4c5
--- /dev/null
+++ b/environments/services/ceph-rbdmirror.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::CephRbdMirror: ../../puppet/services/pacemaker/ceph-rbdmirror.yaml
diff --git a/environments/services/disable-ceilometer-api.yaml b/environments/services/disable-ceilometer-api.yaml
new file mode 100644
index 00000000..94cd8d5d
--- /dev/null
+++ b/environments/services/disable-ceilometer-api.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::CeilometerApi: OS::Heat::None
diff --git a/environments/services/ec2-api.yaml b/environments/services/ec2-api.yaml
new file mode 100644
index 00000000..d751ba23
--- /dev/null
+++ b/environments/services/ec2-api.yaml
@@ -0,0 +1,3 @@
+# A Heat environment file which can be used to enable EC2-API service.
+resource_registry:
+ OS::TripleO::Services::Ec2Api: ../../puppet/services/ec2-api.yaml
diff --git a/environments/services/etcd.yaml b/environments/services/etcd.yaml
new file mode 100644
index 00000000..08d54d58
--- /dev/null
+++ b/environments/services/etcd.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::Etcd: ../../puppet/services/etcd.yaml
diff --git a/environments/services/octavia.yaml b/environments/services/octavia.yaml
new file mode 100644
index 00000000..24c57b8c
--- /dev/null
+++ b/environments/services/octavia.yaml
@@ -0,0 +1,9 @@
+resource_registry:
+ OS::TripleO::Services::OctaviaApi: ../../puppet/services/octavia-api.yaml
+ OS::TripleO::Services::OctaviaHealthManager: ../../puppet/services/octavia-health-manager.yaml
+ OS::TripleO::Services::OctaviaHousekeeping: ../../puppet/services/octavia-housekeeping.yaml
+ OS::TripleO::Services::OctaviaWorker: ../../puppet/services/octavia-worker.yaml
+
+parameter_defaults:
+ NeutronServicePlugins: "qos,router,trunk,lbaasv2"
+ NeutronEnableForceMetadata: true
diff --git a/environments/sshd-banner.yaml b/environments/sshd-banner.yaml
new file mode 100644
index 00000000..041c0990
--- /dev/null
+++ b/environments/sshd-banner.yaml
@@ -0,0 +1,13 @@
+resource_registry:
+ OS::TripleO::Services::Sshd: ../puppet/services/sshd.yaml
+
+parameter_defaults:
+ BannerText: |
+ ******************************************************************
+ * This system is for the use of authorized users only. Usage of *
+ * this system may be monitored and recorded by system personnel. *
+ * Anyone using this system expressly consents to such monitoring *
+ * and is advised that if such monitoring reveals possible *
+ * evidence of criminal activity, system personnel may provide *
+ * the evidence from such monitoring to law enforcement officials.*
+ ******************************************************************
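
BannerText is a plain Heat parameter, so the stock text above can be replaced per deployment. A short sketch with a hypothetical site-specific banner (the contact address is an example, not a real mailbox):

    parameter_defaults:
      BannerText: |
        Authorized users only. All activity on this system is logged.
        Contact cloud-ops@example.com before performing maintenance.
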
diff --git a/environments/tls-endpoints-public-dns.yaml b/environments/tls-endpoints-public-dns.yaml
index e91c7bc3..1b666c5b 100644
--- a/environments/tls-endpoints-public-dns.yaml
+++ b/environments/tls-endpoints-public-dns.yaml
@@ -17,10 +17,48 @@ parameter_defaults:
CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
- GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
@@ -50,9 +88,15 @@ parameter_defaults:
NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
@@ -62,9 +106,12 @@ parameter_defaults:
SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
- ZaqarWebSocketAdmin: {protocol: 'http', port: '9000', host: 'IP_ADDRESS'}
- ZaqarWebSocketInternal: {protocol: 'http', port: '9000', host: 'IP_ADDRESS'}
- ZaqarWebSocketPublic: {protocol: 'https', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
diff --git a/environments/tls-endpoints-public-ip.yaml b/environments/tls-endpoints-public-ip.yaml
index c9096f44..7311a1f9 100644
--- a/environments/tls-endpoints-public-ip.yaml
+++ b/environments/tls-endpoints-public-ip.yaml
@@ -17,10 +17,48 @@ parameter_defaults:
CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderPublic: {protocol: 'https', port: '13776', host: 'IP_ADDRESS'}
+ CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'IP_ADDRESS'}
GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
GlancePublic: {protocol: 'https', port: '13292', host: 'IP_ADDRESS'}
- GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
GnocchiPublic: {protocol: 'https', port: '13041', host: 'IP_ADDRESS'}
@@ -50,9 +88,15 @@ parameter_defaults:
NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
NovaPublic: {protocol: 'https', port: '13774', host: 'IP_ADDRESS'}
+ NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'IP_ADDRESS'}
NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'IP_ADDRESS'}
+ OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'}
@@ -62,9 +106,12 @@ parameter_defaults:
SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
SwiftPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+ TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
ZaqarPublic: {protocol: 'https', port: '13888', host: 'IP_ADDRESS'}
- ZaqarWebSocketAdmin: {protocol: 'http', port: '9000', host: 'IP_ADDRESS'}
- ZaqarWebSocketInternal: {protocol: 'http', port: '9000', host: 'IP_ADDRESS'}
- ZaqarWebSocketPublic: {protocol: 'https', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'IP_ADDRESS'}
diff --git a/environments/tls-everywhere-endpoints-dns.yaml b/environments/tls-everywhere-endpoints-dns.yaml
index 365b0a54..e6608b57 100644
--- a/environments/tls-everywhere-endpoints-dns.yaml
+++ b/environments/tls-everywhere-endpoints-dns.yaml
@@ -17,10 +17,48 @@ parameter_defaults:
CinderAdmin: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
CinderInternal: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ CongressAdmin: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+ CongressInternal: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+ Ec2ApiInternal: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
GlanceAdmin: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
GlanceInternal: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
- GlanceRegistryInternal: {protocol: 'https', port: '9191', host: 'CLOUDNAME'}
GnocchiAdmin: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
GnocchiInternal: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
@@ -50,9 +88,15 @@ parameter_defaults:
NovaAdmin: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
NovaInternal: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaPlacementAdmin: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+ NovaPlacementInternal: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
NovaVNCProxyAdmin: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
NovaVNCProxyInternal: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
@@ -62,9 +106,12 @@ parameter_defaults:
SwiftAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
SwiftInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ TackerAdmin: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+ TackerInternal: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
ZaqarAdmin: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
ZaqarInternal: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
- ZaqarWebSocketAdmin: {protocol: 'https', port: '9000', host: 'CLOUDNAME'}
- ZaqarWebSocketInternal: {protocol: 'https', port: '9000', host: 'CLOUDNAME'}
- ZaqarWebSocketPublic: {protocol: 'https', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketInternal: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
diff --git a/environments/undercloud.yaml b/environments/undercloud.yaml
new file mode 100644
index 00000000..0fd01920
--- /dev/null
+++ b/environments/undercloud.yaml
@@ -0,0 +1,18 @@
+resource_registry:
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/noop.yaml
+ OS::TripleO::Network::Ports::ControlPlaneVipPort: ../deployed-server/deployed-neutron-port.yaml
+ OS::TripleO::Undercloud::Net::SoftwareConfig: ../net-config-undercloud.yaml
+ OS::TripleO::NodeExtraConfigPost: ../extraconfig/post_deploy/undercloud_post.yaml
+
+parameter_defaults:
+ StackAction: CREATE
+ SoftwareConfigTransport: POLL_SERVER_HEAT
+ NeutronTunnelTypes: []
+ NeutronBridgeMappings: ctlplane:br-ctlplane
+ NeutronAgentExtensions: []
+ NeutronFlatNetworks: '*'
+ NovaSchedulerAvailableFilters: 'tripleo_common.filters.list.tripleo_filters'
+ NovaSchedulerDefaultFilters: ['RetryFilter', 'TripleOCapabilitiesFilter', 'ComputeCapabilitiesFilter', 'AvailabilityZoneFilter', 'RamFilter', 'DiskFilter', 'ComputeFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']
+ NeutronDhcpAgentsPerNetwork: 2
+ HeatConvergenceEngine: false
+ HeatMaxResourcesPerStack: -1
diff --git a/environments/updates/README.md b/environments/updates/README.md
index 426d7329..93714ed8 100644
--- a/environments/updates/README.md
+++ b/environments/updates/README.md
@@ -10,3 +10,6 @@ Contents
**update-from-publicvip-on-ctlplane.yaml**
To be used if the PublicVirtualIP resource was deployed as an additional VIP on the 'ctlplane'.
+
+**update-from-deployed-server-newton.yaml**
+  To be used when updating from the Newton version of the deployed-server template.
diff --git a/environments/updates/update-from-deployed-server-newton.yaml b/environments/updates/update-from-deployed-server-newton.yaml
new file mode 100644
index 00000000..6fe3a4cb
--- /dev/null
+++ b/environments/updates/update-from-deployed-server-newton.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::DeployedServer::ControlPlanePort: ../../deployed-server/ctlplane-port.yaml
diff --git a/environments/updates/update-from-keystone-admin-internal-api.yaml b/environments/updates/update-from-keystone-admin-internal-api.yaml
index a5075300..97687c6a 100644
--- a/environments/updates/update-from-keystone-admin-internal-api.yaml
+++ b/environments/updates/update-from-keystone-admin-internal-api.yaml
@@ -2,32 +2,5 @@
# Keystone Admin API service is running on the Internal API network
parameter_defaults:
- ServiceNetMapDefaults:
- NeutronTenantNetwork: tenant
- CeilometerApiNetwork: internal_api
- MongodbNetwork: internal_api
- CinderApiNetwork: internal_api
- CinderIscsiNetwork: storage
- GlanceApiNetwork: storage
- GlanceRegistryNetwork: internal_api
+ ServiceNetMap:
KeystoneAdminApiNetwork: internal_api
- KeystonePublicApiNetwork: internal_api
- NeutronApiNetwork: internal_api
- HeatApiNetwork: internal_api
- NovaApiNetwork: internal_api
- NovaMetadataNetwork: internal_api
- NovaVncProxyNetwork: internal_api
- SwiftMgmtNetwork: storage_mgmt
- SwiftProxyNetwork: storage
- HorizonNetwork: internal_api
- MemcachedNetwork: internal_api
- RabbitmqNetwork: internal_api
- RedisNetwork: internal_api
- MysqlNetwork: internal_api
- CephClusterNetwork: storage_mgmt
- CephPublicNetwork: storage
- ControllerHostnameResolveNetwork: internal_api
- ComputeHostnameResolveNetwork: internal_api
- BlockStorageHostnameResolveNetwork: internal_api
- ObjectStorageHostnameResolveNetwork: internal_api
- CephStorageHostnameResolveNetwork: storage
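
This works because per-service network assignments are now looked up through ServiceNetMapDefaults, with ServiceNetMap merged on top, so an update environment only has to list the keys it actually changes instead of restating the whole map. A sketch of adding a second override under the new scheme, reusing keys that appear in the removed block above:

    parameter_defaults:
      ServiceNetMap:
        KeystoneAdminApiNetwork: internal_api
        # Further overrides follow the same pattern, for example:
        # SwiftProxyNetwork: storage
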
diff --git a/extraconfig/all_nodes/mac_hostname.j2.yaml b/extraconfig/all_nodes/mac_hostname.j2.yaml
index 75ffc9e6..fcf022ae 100644
--- a/extraconfig/all_nodes/mac_hostname.j2.yaml
+++ b/extraconfig/all_nodes/mac_hostname.j2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Example extra config for cluster config
diff --git a/extraconfig/all_nodes/random_string.j2.yaml b/extraconfig/all_nodes/random_string.j2.yaml
index 9ce2ca8a..77d4b381 100644
--- a/extraconfig/all_nodes/random_string.j2.yaml
+++ b/extraconfig/all_nodes/random_string.j2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Example extra config for cluster config
diff --git a/extraconfig/all_nodes/swap-partition.j2.yaml b/extraconfig/all_nodes/swap-partition.j2.yaml
index 36076b0c..b6fef79f 100644
--- a/extraconfig/all_nodes/swap-partition.j2.yaml
+++ b/extraconfig/all_nodes/swap-partition.j2.yaml
@@ -1,11 +1,7 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
-description: >
- Extra config to add swap space to nodes.
+description: Template file to add a swap partition to a node.
-# Parameters passed from the parent template - note if you maintain
-# out-of-tree templates they may require additional parameters if the
-# in-tree templates add a new role.
parameters:
servers:
type: json
@@ -14,9 +10,7 @@ parameters:
description: Swap partition label
default: 'swap1'
-
resources:
-
SwapConfig:
type: OS::Heat::SoftwareConfig
properties:
@@ -25,8 +19,13 @@ resources:
#!/bin/bash
set -eux
swap_partition=$(realpath /dev/disk/by-label/$swap_partition_label)
- swapon $swap_partition
- echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab
+          if [ -b "$swap_partition" ]; then
+ swapon $swap_partition
+ echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab
+ else
+ echo "$swap_partition needs to be a valid path"
+ echo "Check that $swap_partition_label is a valid partition label"
+ fi
inputs:
- name: swap_partition_label
description: Swap partition label
diff --git a/extraconfig/all_nodes/swap.j2.yaml b/extraconfig/all_nodes/swap.j2.yaml
index ce65dacb..044f817c 100644
--- a/extraconfig/all_nodes/swap.j2.yaml
+++ b/extraconfig/all_nodes/swap.j2.yaml
@@ -1,11 +1,7 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
-description: >
- Extra config to add swap space to nodes.
+description: Template file to add a swap file to a node.
-# Parameters passed from the parent template - note if you maintain
-# out-of-tree templates they may require additional parameters if the
-# in-tree templates add a new role.
parameters:
servers:
type: json
@@ -18,9 +14,7 @@ parameters:
description: Full path to location of swap file
default: '/swap'
-
resources:
-
SwapConfig:
type: OS::Heat::SoftwareConfig
properties:
diff --git a/extraconfig/nova_metadata/krb-service-principals.yaml b/extraconfig/nova_metadata/krb-service-principals.yaml
new file mode 100644
index 00000000..c66e6460
--- /dev/null
+++ b/extraconfig/nova_metadata/krb-service-principals.yaml
@@ -0,0 +1,84 @@
+heat_template_version: ocata
+description: 'Generates the relevant service principals for a server'
+
+parameters:
+ RoleData:
+ type: json
+ description: the list containing the 'role_data' output for the ServiceChain
+
+ # Coming from parameter_defaults
+ CloudName:
+ default: overcloud.localdomain
+ description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
+ type: string
+ CloudNameInternal:
+ default: overcloud.internalapi.localdomain
+ description: >
+ The DNS name of this cloud's internal API endpoint. E.g.
+ 'ci-overcloud.internalapi.tripleo.org'.
+ type: string
+ CloudNameStorage:
+ default: overcloud.storage.localdomain
+ description: >
+ The DNS name of this cloud's storage endpoint. E.g.
+ 'ci-overcloud.storage.tripleo.org'.
+ type: string
+ CloudNameStorageManagement:
+ default: overcloud.storagemgmt.localdomain
+ description: >
+ The DNS name of this cloud's storage management endpoint. E.g.
+ 'ci-overcloud.storagemgmt.tripleo.org'.
+ type: string
+ CloudNameCtlplane:
+ default: overcloud.ctlplane.localdomain
+ description: >
+      The DNS name of this cloud's provisioning (ctlplane) endpoint. E.g.
+      'ci-overcloud.ctlplane.tripleo.org'.
+ type: string
+
+resources:
+
+ IncomingMetadataSettings:
+ type: OS::Heat::Value
+ properties:
+ value:
+ yaql:
+          # Filter out null values and values that don't contain
+          # 'metadata_settings', then collect the values from that key and
+          # keep only the unique ones.
+ expression: list($.data.where($ != null).where($.containsKey('metadata_settings')).metadata_settings.flatten().distinct())
+ data: {get_param: RoleData}
+
+ # Generates entries for nova metadata with the following format:
+ # 'managed_service_<id>' : <service>/<fqdn>
+ # Depending on the requested network
+ IndividualServices:
+ type: OS::Heat::Value
+ properties:
+ value:
+ yaql:
+ expression: let(fqdns => $.data.fqdns) -> dict($.data.metadata.where($ != null and $.type = 'vip').select([concat('managed_service_', $.service, $.network), concat($.service, '/', $fqdns.get($.network))]))
+ data:
+ metadata: {get_attr: [IncomingMetadataSettings, value]}
+ fqdns:
+ external: {get_param: CloudName}
+ internal_api: {get_param: CloudNameInternal}
+ storage: {get_param: CloudNameStorage}
+ storage_mgmt: {get_param: CloudNameStorageManagement}
+ ctlplane: {get_param: CloudNameCtlplane}
+
+ CompactServices:
+ type: OS::Heat::Value
+ properties:
+ value:
+ yaql:
+ expression: dict($.data.where($ != null and $.type = 'node').select([$.service, $.network.replace('_', '')]).groupBy($[0], $[1]))
+ data: {get_attr: [IncomingMetadataSettings, value]}
+
+outputs:
+ metadata:
+ description: actual metadata entries that will be passed to the server.
+ value:
+ map_merge:
+ - {get_attr: [IndividualServices, value]}
+ - compact_services: {get_attr: [CompactServices, value]}
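
Given the yaql above, the metadata output is a set of flat managed_service_* keys for VIP-type entries plus a compact_services map for node-type entries. A hedged illustration of the shape of the result, assuming one VIP service named 'haproxy' on the external and ctlplane networks and one node service named 'mysql' on internal_api (the service names are examples; the FQDNs come from the parameter defaults above):

    managed_service_haproxyexternal: haproxy/overcloud.localdomain
    managed_service_haproxyctlplane: haproxy/overcloud.ctlplane.localdomain
    compact_services:
      mysql:
        - internalapi
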
diff --git a/extraconfig/post_deploy/default.yaml b/extraconfig/post_deploy/default.yaml
index ddfe0243..4da54ead 100644
--- a/extraconfig/post_deploy/default.yaml
+++ b/extraconfig/post_deploy/default.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Extra Post Deployment Config'
parameters:
servers:
diff --git a/extraconfig/post_deploy/example.yaml b/extraconfig/post_deploy/example.yaml
index f83dff76..8ac7eb73 100644
--- a/extraconfig/post_deploy/example.yaml
+++ b/extraconfig/post_deploy/example.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Example extra config for post-deployment
diff --git a/extraconfig/post_deploy/example_run_on_update.yaml b/extraconfig/post_deploy/example_run_on_update.yaml
index 234488af..738e263b 100644
--- a/extraconfig/post_deploy/example_run_on_update.yaml
+++ b/extraconfig/post_deploy/example_run_on_update.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Example extra config for post-deployment, this re-runs every update
diff --git a/extraconfig/post_deploy/undercloud_post.sh b/extraconfig/post_deploy/undercloud_post.sh
new file mode 100755
index 00000000..8bcae1d3
--- /dev/null
+++ b/extraconfig/post_deploy/undercloud_post.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+set -eux
+
+ln -sf /etc/puppet/hiera.yaml /etc/hiera.yaml
+
+
+# WRITE OUT STACKRC
+if [ ! -e /root/stackrc ]; then
+ touch /root/stackrc
+ chmod 0600 /root/stackrc
+
+cat >> /root/stackrc <<-EOF_CAT
+export OS_PASSWORD=$admin_password
+export OS_AUTH_URL=$auth_url
+export OS_USERNAME=admin
+export OS_TENANT_NAME=admin
+export COMPUTE_API_VERSION=1.1
+export NOVA_VERSION=1.1
+export OS_BAREMETAL_API_VERSION=1.15
+export OS_NO_CACHE=True
+export OS_CLOUDNAME=undercloud
+EOF_CAT
+
+ if [ -n "$ssl_certificate" ]; then
+cat >> /root/stackrc <<-EOF_CAT
+export PYTHONWARNINGS="ignore:Certificate has no, ignore:A true SSLContext object is not available"
+EOF_CAT
+ fi
+fi
+
+source /root/stackrc
+
+if [ ! -f /root/.ssh/authorized_keys ]; then
+ sudo mkdir -p /root/.ssh
+ sudo chmod 0700 /root/.ssh/
+ sudo touch /root/.ssh/authorized_keys
+ sudo chmod 600 /root/.ssh/authorized_keys
+fi
+
+if [ ! -f /root/.ssh/id_rsa ]; then
+ ssh-keygen -b 1024 -N '' -f /root/.ssh/id_rsa
+fi
+
+if ! grep "$(cat /root/.ssh/id_rsa.pub)" /root/.ssh/authorized_keys; then
+ cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
+fi
+
+PHYSICAL_NETWORK=ctlplane
+
+ctlplane_id=$(openstack network list -f csv -c ID -c Name --quote none | tail -n +2 | grep ctlplane | cut -d, -f1)
+subnet_ids=$(openstack subnet list -f csv -c ID --quote none | tail -n +2)
+subnet_id=
+
+for subnet_id in $subnet_ids; do
+ network_id=$(openstack subnet show -f value -c network_id $subnet_id)
+ if [ "$network_id" = "$ctlplane_id" ]; then
+ break
+ fi
+done
+
+net_create=1
+if [ -n "$subnet_id" ]; then
+ cidr=$(openstack subnet show $subnet_id -f value -c cidr)
+ if [ "$cidr" = "$undercloud_network_cidr" ]; then
+ net_create=0
+ else
+ echo "New cidr $undercloud_network_cidr does not equal old cidr $cidr"
+ echo "Will attempt to delete and recreate subnet $subnet_id"
+ fi
+fi
+
+if [ "$net_create" -eq "1" ]; then
+ # Delete the subnet and network to make sure it doesn't already exist
+ if openstack subnet list | grep start; then
+ openstack subnet delete $(openstack subnet list | grep start | awk '{print $4}')
+ fi
+ if openstack network show ctlplane; then
+ openstack network delete ctlplane
+ fi
+
+
+ NETWORK_ID=$(openstack network create --provider-network-type=flat --provider-physical-network=ctlplane ctlplane | grep " id " | awk '{print $4}')
+
+ NAMESERVER_ARG=""
+ if [ -n "${undercloud_nameserver:-}" ]; then
+ NAMESERVER_ARG="--dns-nameserver $undercloud_nameserver"
+ fi
+
+ openstack subnet create --network=$NETWORK_ID \
+ --gateway=$undercloud_network_gateway \
+ --subnet-range=$undercloud_network_cidr \
+ --allocation-pool start=$undercloud_dhcp_start,end=$undercloud_dhcp_end \
+ --host-route destination=169.254.169.254/32,gateway=$local_ip \
+ $NAMESERVER_ARG ctlplane
+fi
+
+# Disable nova quotas
+openstack quota set --cores -1 --instances -1 --ram -1 $(openstack project show admin | awk '$2=="id" {print $4}')
+
+# MISTRAL WORKFLOW CONFIGURATION
+if [ "$(hiera mistral_api_enabled)" = "true" ]; then
+ # load workflows
+ for workbook in $(openstack workbook list | grep tripleo | cut -f 2 -d ' '); do
+ openstack workbook delete $workbook
+ done
+ for workflow in $(openstack workflow list | grep tripleo | cut -f 2 -d ' '); do
+ openstack workflow delete $workflow
+ done
+ for workbook in $(ls /usr/share/openstack-tripleo-common/workbooks/*); do
+ openstack workbook create $workbook
+ done
+
+ # Store the SNMP password in a mistral environment
+ if ! openstack workflow env show tripleo.undercloud-config &>/dev/null; then
+ TMP_MISTRAL_ENV=$(mktemp)
+ echo "{\"name\": \"tripleo.undercloud-config\", \"variables\": {\"undercloud_ceilometer_snmpd_password\": \"$snmp_readonly_user_password\"}}" > $TMP_MISTRAL_ENV
+ openstack workflow env create $TMP_MISTRAL_ENV
+ fi
+
+fi
+
+# IP forwarding is needed to allow the overcloud nodes access to the outside
+# internet in cases where they are on an isolated network.
+sysctl -w net.ipv4.ip_forward=1
+# Make it persistent
+echo "net.ipv4.ip_forward=1" > /etc/sysctl.d/ip-forward.conf
diff --git a/extraconfig/post_deploy/undercloud_post.yaml b/extraconfig/post_deploy/undercloud_post.yaml
new file mode 100644
index 00000000..38a9181e
--- /dev/null
+++ b/extraconfig/post_deploy/undercloud_post.yaml
@@ -0,0 +1,93 @@
+heat_template_version: ocata
+
+description: >
+ Post-deployment for the TripleO undercloud
+
+parameters:
+ servers:
+ type: json
+ DeployedServerPortMap:
+ default: {}
+ type: json
+ UndercloudDhcpRangeStart:
+ type: string
+ default: '192.168.24.5'
+ UndercloudDhcpRangeEnd:
+ type: string
+ default: '192.168.24.24'
+ UndercloudNetworkCidr:
+ type: string
+ default: '192.168.24.0/24'
+ UndercloudNetworkGateway:
+ type: string
+ default: '192.168.24.1'
+ UndercloudNameserver:
+ type: string
+ default: ''
+ AdminPassword: #supplied by tripleo-undercloud-passwords.yaml
+ type: string
+ description: The password for the keystone admin account, used for monitoring, querying neutron etc.
+ hidden: True
+ SSLCertificate:
+ description: >
+ The content of the SSL certificate (without Key) in PEM format.
+ type: string
+ default: ""
+ hidden: True
+ SnmpdReadonlyUserPassword:
+ description: The password for the read-only SNMPd user running on all Overcloud nodes
+ type: string
+ hidden: true
+
+conditions:
+
+ ssl_disabled: {equals : [{get_param: SSLCertificate}, ""]}
+
+resources:
+
+ UndercloudPostConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: deploy_identifier
+ - name: local_ip
+ - name: undercloud_dhcp_start
+ - name: undercloud_dhcp_end
+ - name: undercloud_network_cidr
+ - name: undercloud_network_gateway
+ - name: undercloud_nameserver
+ - name: admin_password
+ - name: auth_url
+ - name: snmp_readonly_user_password
+ config: {get_file: ./undercloud_post.sh}
+
+ UndercloudPostDeployment:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: UndercloudPostConfig}
+ input_values:
+ local_ip: {get_param: [DeployedServerPortMap, 'control_virtual_ip', fixed_ips, 0, ip_address]}
+ undercloud_dhcp_start: {get_param: UndercloudDhcpRangeStart}
+ undercloud_dhcp_end: {get_param: UndercloudDhcpRangeEnd}
+ undercloud_network_cidr: {get_param: UndercloudNetworkCidr}
+ undercloud_network_gateway: {get_param: UndercloudNetworkGateway}
+ undercloud_nameserver: {get_param: UndercloudNameserver}
+ ssl_certificate: {get_param: SSLCertificate}
+ admin_password: {get_param: AdminPassword}
+ snmp_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
+ # if SSL is enabled we use the public virtual ip as the stackrc endpoint
+ auth_url:
+ if:
+ - ssl_disabled
+ - list_join:
+ - ''
+ - - 'http://'
+ - {get_param: [DeployedServerPortMap, 'control_virtual_ip', fixed_ips, 0, ip_address]}
+ - ':5000/v2.0'
+ - list_join:
+ - ''
+ - - 'https://'
+ - {get_param: [DeployedServerPortMap, 'public_virtual_ip', fixed_ips, 0, ip_address]}
+ - ':13000/v2.0'
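For illustration, the auth_url above resolves to one of two forms, depending on whether an SSL certificate was supplied; the bracketed values stand in for the fixed IPs taken from DeployedServerPortMap:

    http://<control_virtual_ip>:5000/v2.0
    https://<public_virtual_ip>:13000/v2.0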
diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
index 7c65bd8b..fdf2e957 100644
--- a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
RHEL Registration and unregistration software deployments.
diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
index 71ab0767..2650a967 100644
--- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
+++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
@@ -11,6 +11,7 @@ if [ -e $OK ] ; then
exit 0
fi
+retryCount=0
opts=
attach_opts=
sat5_opts=
@@ -96,12 +97,28 @@ if [ -n "${REG_TYPE:-}" ]; then
opts="$opts --type=$REG_TYPE"
fi
+function retry() {
+ if [[ $retryCount < 3 ]]; then
+ $@
+ if ! [[ $? == 0 ]]; then
+ retryCount=$(echo $retryCount + 1 | bc)
+ echo "WARN: Failed to connect when running '$@', retrying..."
+ retry $@
+ else
+ retryCount=0
+ fi
+ else
+ echo "ERROR: Failed to connect after 3 attempts when running '$@'"
+ exit 1
+ fi
+}
+
function detect_satellite_version {
ping_api=$REG_SAT_URL/katello/api/ping
- if curl -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
+ if curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
echo Satellite 6 detected at $REG_SAT_URL
satellite_version=6
- elif curl -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+ elif curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
echo Satellite 5 detected at $REG_SAT_URL
satellite_version=5
else
@@ -112,29 +129,29 @@ function detect_satellite_version {
case "${REG_METHOD:-}" in
portal)
- subscription-manager register $opts
+ retry subscription-manager register $opts
if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then
- subscription-manager attach $attach_opts
+ retry subscription-manager attach $attach_opts
fi
- subscription-manager repos --disable '*'
- subscription-manager $repos
+ retry subscription-manager repos --disable '*'
+ retry subscription-manager $repos
;;
satellite)
detect_satellite_version
if [ "$satellite_version" = "6" ]; then
repos="$repos --enable ${satellite_repo}"
- curl -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+ curl --retry 3 --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
- subscription-manager register $opts
- subscription-manager $repos
- yum install -y katello-agent || true # needed for errata reporting to satellite6
+ retry subscription-manager register $opts
+ retry subscription-manager $repos
+ retry yum install -y katello-agent || true # needed for errata reporting to satellite6
katello-package-upload
- subscription-manager repos --disable ${satellite_repo}
+ retry subscription-manager repos --disable ${satellite_repo}
else
pushd /usr/share/rhn/
- curl -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+ curl --retry 3 --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
popd
- rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
+ retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
fi
;;
disable)
diff --git a/extraconfig/pre_network/ansible_host_config.ansible b/extraconfig/pre_network/ansible_host_config.ansible
new file mode 100644
index 00000000..c126c1a1
--- /dev/null
+++ b/extraconfig/pre_network/ansible_host_config.ansible
@@ -0,0 +1,58 @@
+---
+- name: Configuration to be applied before rebooting the node
+ connection: local
+ hosts: localhost
+
+ tasks:
+ # Kernel Args Configuration
+ - block:
+ - name: Ensure the kernel args ( {{ _KERNEL_ARGS_ }} ) are present as TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS
+ lineinfile:
+ dest: /etc/default/grub
+ regexp: '^TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS.*'
+ insertafter: '^GRUB_CMDLINE_LINUX.*'
+ line: 'TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS=" {{ _KERNEL_ARGS_ }} "'
+ - name: Add TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS to the GRUB_CMDLINE_LINUX parameter
+ lineinfile:
+ dest: /etc/default/grub
+ line: 'GRUB_CMDLINE_LINUX="${GRUB_CMDLINE_LINUX:+$GRUB_CMDLINE_LINUX }${TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS}"'
+ insertafter: '^TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS.*'
+ - name: Generate grub config file
+ command: grub2-mkconfig -o /boot/grub2/grub.cfg
+ become: true
+ when: _KERNEL_ARGS_|default("") != ""
+
+ # Tune-d Configuration
+ - block:
+ - name: Tune-d Configuration
+ lineinfile:
+ dest: /etc/tuned/cpu-partitioning-variables.conf
+ regexp: '^isolated_cores=.*'
+ line: 'isolated_cores={{ _HOST_CPUS_LIST_ }}'
+ when: _HOST_CPUS_LIST_|default("") != ""
+
+ - name: Tune-d profile activation
+ shell: tuned-adm profile {{ _TUNED_PROFILE_NAME_ }}
+ become: true
+ when: _TUNED_PROFILE_NAME_|default("") != ""
+
+ # Provisioning Network workaround
+ # This playbook runs before os-net-config, at which point only the provisioning network has an IP.
+ # BOOTPROTO in all interface config files (except the provisioning one) is set to "none" so the reboot does not fail trying to acquire an IP on the other networks.
+ - block:
+ - find:
+ paths: /etc/sysconfig/network-scripts/
+ patterns: ifcfg-*
+ register: ifcfg_files
+
+ - replace:
+ dest: "{{ item.path }}"
+ regexp: '^BOOTPROTO=.*'
+ replace: 'BOOTPROTO=none'
+ when:
+ - item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') != "lo"
+ # This condition will list all the interfaces except the one with valid IP (which is Provisioning network at this stage)
+ # Simpler Version - hostvars[inventory_hostname]['ansible_' + iface_name ]['ipv4']['address'] is undefined
+ - hostvars[inventory_hostname]['ansible_' + item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') ]['ipv4']['address'] is undefined
+ with_items:
+ - "{{ ifcfg_files.files }}"
diff --git a/extraconfig/pre_network/config_then_reboot.yaml b/extraconfig/pre_network/config_then_reboot.yaml
new file mode 100644
index 00000000..bb0b9511
--- /dev/null
+++ b/extraconfig/pre_network/config_then_reboot.yaml
@@ -0,0 +1,48 @@
+heat_template_version: ocata
+
+description: >
+ Do some configuration, then reboot - sometimes needed for early-boot
+ changes such as modifying kernel configuration
+
+parameters:
+ server:
+ type: string
+
+resources:
+
+ SomeConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ echo "did some config before reboot" > /root/pre-reboot-config
+
+ SomeDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ name: SomeDeployment
+ server: {get_param: server}
+ config: {get_resource: SomeConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+
+ RebootConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ # Stop os-collect-config to avoid any race collecting another
+ # deployment before reboot happens
+ systemctl stop os-collect-config.service
+ /sbin/reboot
+
+ RebootDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: SomeDeployment
+ properties:
+ name: RebootDeployment
+ server: {get_param: server}
+ config: {get_resource: RebootConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+ signal_transport: NO_SIGNAL
diff --git a/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml b/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml
new file mode 100644
index 00000000..4ad53cb8
--- /dev/null
+++ b/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml
@@ -0,0 +1,100 @@
+heat_template_version: ocata
+
+description: >
+ Do some configuration, then reboot - sometimes needed for early-boot
+ changes such as modifying kernel configuration
+
+parameters:
+ server:
+ type: string
+ {{role}}KernelArgs:
+ type: string
+ default: ""
+ {{role}}TunedProfileName:
+ type: string
+ default: ""
+ {{role}}HostCpusList:
+ type: string
+ default: ""
+
+conditions:
+ param_exists:
+ or:
+ - not:
+ equals:
+ - get_param: {{role}}KernelArgs
+ - ""
+ - not:
+ equals:
+ - get_param: {{role}}TunedProfileName
+ - ""
+
+resources:
+
+ HostParametersConfig:
+ type: OS::Heat::SoftwareConfig
+ condition: param_exists
+ properties:
+ group: ansible
+ inputs:
+ - name: _KERNEL_ARGS_
+ - name: _TUNED_PROFILE_NAME_
+ - name: _HOST_CPUS_LIST_
+ outputs:
+ - name: result
+ config:
+ get_file: ansible_host_config.ansible
+
+ HostParametersDeployment:
+ type: OS::Heat::SoftwareDeployment
+ condition: param_exists
+ properties:
+ name: HostParametersDeployment
+ server: {get_param: server}
+ config: {get_resource: HostParametersConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+ input_values:
+ _KERNEL_ARGS_: {get_param: {{role}}KernelArgs}
+ _TUNED_PROFILE_NAME_: {get_param: {{role}}TunedProfileName}
+ _HOST_CPUS_LIST_: {get_param: {{role}}HostCpusList}
+
+ RebootConfig:
+ type: OS::Heat::SoftwareConfig
+ condition: param_exists
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ # Stop os-collect-config to avoid any race collecting another
+ # deployment before reboot happens
+ systemctl stop os-collect-config.service
+ /sbin/reboot
+
+ RebootDeployment:
+ type: OS::Heat::SoftwareDeployment
+ condition: param_exists
+ depends_on: HostParametersDeployment
+ properties:
+ name: RebootDeployment
+ server: {get_param: server}
+ config: {get_resource: RebootConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+ signal_transport: NO_SIGNAL
+
+outputs:
+ result:
+ value:
+ get_attr: [HostParametersDeployment, result]
+ condition: param_exists
+ stdout:
+ value:
+ get_attr: [HostParametersDeployment, deploy_stdout]
+ condition: param_exists
+ stderr:
+ value:
+ get_attr: [HostParametersDeployment, deploy_stderr]
+ condition: param_exists
+ status_code:
+ value:
+ get_attr: [HostParametersDeployment, deploy_status_code]
+ condition: param_exists
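For illustration, when this j2 template is rendered for a hypothetical Compute role, the parameters become ComputeKernelArgs, ComputeTunedProfileName and ComputeHostCpusList, so an environment file along these lines (values are assumptions) would trigger the host configuration and reboot:

    parameter_defaults:
      ComputeKernelArgs: "intel_iommu=on iommu=pt"
      ComputeTunedProfileName: "cpu-partitioning"
      ComputeHostCpusList: "2-23"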
diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh
deleted file mode 100644
index 39861826..00000000
--- a/extraconfig/tasks/major_upgrade_block_storage.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-#
-# This runs an upgrade of Cinder Block Storage nodes.
-#
-set -eu
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y -q update
diff --git a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
index c87e6824..cf5d7a84 100644
--- a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
+++ b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Software-config for ceilometer configuration under httpd during upgrades
diff --git a/extraconfig/tasks/major_upgrade_ceph_mon.sh b/extraconfig/tasks/major_upgrade_ceph_mon.sh
deleted file mode 100755
index e0d160f1..00000000
--- a/extraconfig/tasks/major_upgrade_ceph_mon.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash
-set -eu
-set -o pipefail
-
-echo INFO: starting $(basename "$0")
-
-# Exit if not running
-if ! pidof ceph-mon &> /dev/null; then
- echo INFO: ceph-mon is not running, skipping
- exit 0
-fi
-
-# Exit if not Hammer
-INSTALLED_VERSION=$(ceph --version | awk '{print $3}')
-if ! [[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then
- echo INFO: version of Ceph installed is not 0.94, skipping
- exit 0
-fi
-
-CEPH_STATUS=$(ceph health | awk '{print $1}')
-if [ ${CEPH_STATUS} = HEALTH_ERR ]; then
- echo ERROR: Ceph cluster status is HEALTH_ERR, cannot be upgraded
- exit 1
-fi
-
-# Useful when upgrading with OSDs num < replica size
-if [[ ${ignore_ceph_upgrade_warnings:-False} != [Tt]rue ]]; then
- timeout 300 bash -c "while [ ${CEPH_STATUS} != HEALTH_OK ]; do
- echo WARNING: Waiting for Ceph cluster status to go HEALTH_OK;
- sleep 30;
- CEPH_STATUS=$(ceph health | awk '{print $1}')
- done"
-fi
-
-MON_PID=$(pidof ceph-mon)
-MON_ID=$(hostname -s)
-
-# Stop daemon using Hammer sysvinit script
-service ceph stop mon.${MON_ID}
-
-# Ensure it's stopped
-timeout 60 bash -c "while kill -0 ${MON_PID} 2> /dev/null; do
- sleep 2;
-done"
-
-# Update to Jewel
-yum -y -q update ceph-mon ceph
-
-# Restart/Exit if not on Jewel, only in that case we need the changes
-UPDATED_VERSION=$(ceph --version | awk '{print $3}')
-if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then
- echo WARNING: Ceph was not upgraded, restarting daemons
- service ceph start mon.${MON_ID}
-elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
- # RPM could own some of these but we can't take risks on the pre-existing files
- for d in /var/lib/ceph/mon /var/log/ceph /var/run/ceph /etc/ceph; do
- chown -L -R ceph:ceph $d || echo WARNING: chown of $d failed
- done
-
- # Replay udev events with newer rules
- udevadm trigger
-
- # Enable systemd unit
- systemctl enable ceph-mon.target
- systemctl enable ceph-mon@${MON_ID}
- systemctl start ceph-mon@${MON_ID}
-
- # Wait for daemon to be back in the quorum
- timeout 300 bash -c "until (ceph quorum_status | jq .quorum_names | grep -sq ${MON_ID}); do
- echo WARNING: Waiting for mon.${MON_ID} to re-join quorum;
- sleep 10;
- done"
-
- # if tunables become legacy, cluster status will be HEALTH_WARN causing
- # upgrade to fail on following node
- ceph osd crush tunables default
-
- echo INFO: Ceph was upgraded to Jewel
-else
- echo ERROR: Ceph was upgraded to an unknown release, daemon is stopped, need manual intervention
- exit 1
-fi
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
deleted file mode 100644
index d84cad45..00000000
--- a/extraconfig/tasks/major_upgrade_ceph_storage.sh
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/bin/bash
-#
-# This delivers the ceph-storage upgrade script to be invoked as part of the tripleo
-# major upgrade workflow.
-#
-set -eu
-set -o pipefail
-
-UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
-
-cat > $UPGRADE_SCRIPT << 'ENDOFCAT'
-#!/bin/bash
-### DO NOT MODIFY THIS FILE
-### This file is automatically delivered to the ceph-storage nodes as part of the
-### tripleo upgrades workflow
-set -eu
-
-echo INFO: starting $(basename "$0")
-
-# Exit if not running
-if ! pidof ceph-osd &> /dev/null; then
- echo INFO: ceph-osd is not running, skipping
- exit 0
-fi
-
-# Exit if not Hammer
-INSTALLED_VERSION=$(ceph --version | awk '{print $3}')
-if ! [[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then
- echo INFO: version of Ceph installed is not 0.94, skipping
- exit 0
-fi
-
-OSD_PIDS=$(pidof ceph-osd)
-OSD_IDS=$(ls /var/lib/ceph/osd | awk 'BEGIN { FS = "-" } ; { print $2 }')
-
-# "so that mirrors aren't rebalanced as if the OSD died" - gfidente / leseb
-ceph osd set noout
-ceph osd set norebalance
-ceph osd set nodeep-scrub
-ceph osd set noscrub
-
-# Stop daemon using Hammer sysvinit script
-for OSD_ID in $OSD_IDS; do
- service ceph stop osd.${OSD_ID}
-done
-
-# Nice guy will return non-0 only when all failed
-timeout 60 bash -c "while kill -0 ${OSD_PIDS} 2> /dev/null; do
- sleep 2;
-done"
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
-
-# Update (Ceph to Jewel)
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y update
-
-# Restart/Exit if not on Jewel, only in that case we need the changes
-UPDATED_VERSION=$(ceph --version | awk '{print $3}')
-if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then
- echo WARNING: Ceph was not upgraded, restarting daemon
- for OSD_ID in $OSD_IDS; do
- service ceph start osd.${OSD_ID}
- done
-elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
- # RPM could own some of these but we can't take risks on the pre-existing files
- for d in /var/lib/ceph/osd /var/log/ceph /var/run/ceph /etc/ceph; do
- chown -L -R ceph:ceph $d || echo WARNING: chown of $d failed
- done
-
- # Replay udev events with newer rules
- udevadm trigger && udevadm settle
-
- # If on ext4, we need to enforce lower values for name and namespace len
- # or ceph-osd will refuse to start, see: http://tracker.ceph.com/issues/16187
- for OSD_ID in $OSD_IDS; do
- OSD_FS=$(findmnt -n -o FSTYPE -T /var/lib/ceph/osd/ceph-${OSD_ID})
- if [ ${OSD_FS} = ext4 ]; then
- crudini --set /etc/ceph/ceph.conf global osd_max_object_name_len 256
- crudini --set /etc/ceph/ceph.conf global osd_max_object_namespace_len 64
- fi
- done
-
- # Enable systemd unit
- systemctl enable ceph-osd.target
- for OSD_ID in $OSD_IDS; do
- systemctl enable ceph-osd@${OSD_ID}
- systemctl start ceph-osd@${OSD_ID}
- done
-
- echo INFO: Ceph was upgraded to Jewel
-else
- echo ERROR: Ceph was upgraded to an unknown release, daemon is stopped, need manual intervention
- exit 1
-fi
-
-ceph osd unset noout
-ceph osd unset norebalance
-ceph osd unset nodeep-scrub
-ceph osd unset noscrub
-ENDOFCAT
-
-# ensure the permissions are OK
-chmod 0755 $UPGRADE_SCRIPT
diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh
deleted file mode 100644
index b0d42806..00000000
--- a/extraconfig/tasks/major_upgrade_compute.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-#
-# This delivers the compute upgrade script to be invoked as part of the tripleo
-# major upgrade workflow.
-#
-set -eu
-
-UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
-
-cat > $UPGRADE_SCRIPT << ENDOFCAT
-### DO NOT MODIFY THIS FILE
-### This file is automatically delivered to the compute nodes as part of the
-### tripleo upgrades workflow
-
-set -eu
-
-# pin nova to kilo (messaging +-1) for the nova-compute service
-
-crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y update
-
-# Due to bug#1640177 we need to restart compute agent
-echo "Restarting openstack ceilometer agent compute"
-systemctl restart openstack-ceilometer-compute
-
-ENDOFCAT
-
-# ensure the permissions are OK
-chmod 0755 $UPGRADE_SCRIPT
-
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
index 7cc6735f..6bfe1239 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
@@ -100,18 +100,7 @@ if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
fi
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+special_case_ovs_upgrade_if_needed
yum -y install python-zaqarclient # needed for os-collect-config
yum -y -q update
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
index 6748f891..a3cbd945 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
@@ -57,10 +57,10 @@ if [[ -n $(is_bootstrap_node) ]]; then
# TODO: check if this can be triggered in puppet and removed from here
ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types
cinder-manage db sync
- glance-manage --config-file=/etc/glance/glance-registry.conf db_sync
+ glance-manage db_sync
heat-manage --config-file /etc/heat/heat.conf db_sync
keystone-manage db_sync
- neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+ neutron-db-manage upgrade heads
nova-manage db sync
nova-manage api_db sync
nova-manage db online_data_migrations
diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh
deleted file mode 100644
index 2667bb16..00000000
--- a/extraconfig/tasks/major_upgrade_object_storage.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-#
-# This delivers the swift-storage upgrade script to be invoked as part of the tripleo
-# major upgrade workflow.
-#
-set -eu
-
-UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
-
-cat > $UPGRADE_SCRIPT << ENDOFCAT
-### DO NOT MODIFY THIS FILE
-### This file is automatically delivered to the swift-storage nodes as part of the
-### tripleo upgrades workflow
-
-set -eu
-
-function systemctl_swift {
- action=\$1
- for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \
- openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \
- openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object; do
- systemctl \$action \$S
- done
-}
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
-
-systemctl_swift stop
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y update
-
-systemctl_swift start
-
-
-
-ENDOFCAT
-
-# ensure the permissions are OK
-chmod 0755 $UPGRADE_SCRIPT
-
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index b0418a56..8c91027d 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'Upgrade for Pacemaker deployments'
parameters:
@@ -33,33 +33,6 @@ resources:
# map_merge with input_values instead of feeding params into scripts
# via str_replace on bash snippets
- CephMonUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- ignore_ceph_upgrade_warnings='IGNORE_CEPH_UPGRADE_WARNINGS'
- params:
- IGNORE_CEPH_UPGRADE_WARNINGS: {get_param: IgnoreCephUpgradeWarnings}
- - get_file: major_upgrade_ceph_mon.sh
-
- CephMonUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CephMonUpgradeConfig}
- input_values: {get_param: input_values}
- update_policy:
- batch_create:
- max_batch_size: 1
- rolling_update:
- max_batch_size: 1
-
ControllerPacemakerUpgradeConfig_Step1:
type: OS::Heat::SoftwareConfig
properties:
@@ -86,26 +59,11 @@ resources:
ControllerPacemakerUpgradeDeployment_Step1:
type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CephMonUpgradeDeployment
properties:
servers: {get_param: [servers, Controller]}
config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
input_values: {get_param: input_values}
- BlockStorageUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- depends_on: ControllerPacemakerUpgradeDeployment_Step1
- properties:
- group: script
- config: {get_file: major_upgrade_block_storage.sh}
-
- BlockStorageUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, BlockStorage]}
- config: {get_resource: BlockStorageUpgradeConfig}
- input_values: {get_param: input_values}
-
ControllerPacemakerUpgradeConfig_Step2:
type: OS::Heat::SoftwareConfig
properties:
@@ -132,7 +90,7 @@ resources:
ControllerPacemakerUpgradeDeployment_Step2:
type: OS::Heat::SoftwareDeploymentGroup
- depends_on: BlockStorageUpgradeDeployment
+ depends_on: ControllerPacemakerUpgradeDeployment_Step1
properties:
servers: {get_param: [servers, Controller]}
config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
deleted file mode 100644
index f6aa3066..00000000
--- a/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-heat_template_version: 2014-10-16
-description: 'Upgrade for Pacemaker deployments'
-
-parameters:
-
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-
- UpgradeInitCommand:
- type: string
- description: |
- Command or script snippet to run on all overcloud nodes to
- initialize the upgrade process. E.g. a repository switch.
- default: ''
- UpgradeLevelNovaCompute:
- type: string
- description: Nova Compute upgrade level
- default: ''
-
-resources:
-
- # For the UpgradeInit also rename /etc/resolv.conf.save for +bug/1567004
-
- UpgradeInitConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - "#!/bin/bash\n\n"
- - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- - get_param: UpgradeInitCommand
-
- # TODO(jistr): for Mitaka->Newton upgrades and further we can use
- # map_merge with input_values instead of feeding params into scripts
- # via str_replace on bash snippets
-
- # FIXME(shardy) we have hard-coded per-role *ScriptConfig's here
- # Would be better to have a common config for all roles
- ComputeDeliverUpgradeScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - get_file: major_upgrade_compute.sh
-
- ObjectStorageDeliverUpgradeScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: major_upgrade_object_storage.sh}
-
- CephStorageDeliverUpgradeScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: major_upgrade_ceph_storage.sh}
-
-{% for role in roles %}
- UpgradeInit{{role.name}}Deployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- {% if not role.name in ['Controller', 'BlockStorage'] %}
- {{role.name}}DeliverUpgradeScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}DeliverUpgradeScriptConfig}
- input_values: {get_param: input_values}
- {% endif %}
-{% endfor %}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
index 6d02acc8..ae22a1e7 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
@@ -83,7 +83,6 @@ function services_to_migrate {
openstack-cinder-api-clone
openstack-cinder-scheduler-clone
openstack-glance-api-clone
- openstack-glance-registry-clone
openstack-gnocchi-metricd-clone
openstack-gnocchi-statsd-clone
openstack-heat-api-cfn-clone
diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
index b9a87d33..45933fb7 100644
--- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
+++ b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Software-config for performing aodh data migration
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index 2c7dfc35..aae4a2de 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -297,3 +297,27 @@ function systemctl_swift {
manage_systemd_service $action $service
done
}
+
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+function special_case_ovs_upgrade_if_needed {
+ if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+ echo "Manual upgrade of openvswitch - restart in postun detected"
+ rm -rf OVS_UPGRADE
+ mkdir OVS_UPGRADE && pushd OVS_UPGRADE
+ echo "Attempting to downloading latest openvswitch with yumdownloader"
+ yumdownloader --resolve openvswitch
+ for pkg in $(ls -1 *.rpm); do
+ if rpm -U --test $pkg 2>&1 | grep "already installed" ; then
+ echo "Looks like newer version of $pkg is already installed, skipping"
+ else
+ echo "Updating $pkg with nopostun option"
+ rpm -U --replacepkgs --nopostun $pkg
+ fi
+ done
+ popd
+ else
+ echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
+ fi
+
+}
+
diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.yaml
index b62502f8..a63868c9 100644
--- a/extraconfig/tasks/post_puppet_pacemaker.yaml
+++ b/extraconfig/tasks/post_puppet_pacemaker.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Post-Puppet Config for Pacemaker deployments'
parameters:
diff --git a/extraconfig/tasks/post_puppet_pacemaker_restart.yaml b/extraconfig/tasks/post_puppet_pacemaker_restart.yaml
index 52760c87..475a6688 100644
--- a/extraconfig/tasks/post_puppet_pacemaker_restart.yaml
+++ b/extraconfig/tasks/post_puppet_pacemaker_restart.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Post-Puppet restart config for Pacemaker deployments'
parameters:
diff --git a/extraconfig/tasks/pre_puppet_pacemaker.yaml b/extraconfig/tasks/pre_puppet_pacemaker.yaml
index 82546588..aa7514f9 100644
--- a/extraconfig/tasks/pre_puppet_pacemaker.yaml
+++ b/extraconfig/tasks/pre_puppet_pacemaker.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Pre-Puppet Config for Pacemaker deployments'
parameters:
diff --git a/extraconfig/tasks/run_puppet.sh b/extraconfig/tasks/run_puppet.sh
new file mode 100755
index 00000000..b7771e33
--- /dev/null
+++ b/extraconfig/tasks/run_puppet.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+function run_puppet {
+ set -eux
+ local manifest="$1"
+ local role="$2"
+ local step="$3"
+ local rc=0
+
+ export FACTER_deploy_config_name="${role}Deployment_Step${step}"
+ if [ -e "/etc/puppet/hieradata/heat_config_${FACTER_deploy_config_name}.json" ]; then
+ set +e
+ puppet apply --detailed-exitcodes "${manifest}"
+ rc=$?
+ echo "puppet apply exited with exit code $rc"
+ else
+ echo "Step${step} doesn't exist for ${role}"
+ fi
+ set -e
+
+ if [ $rc -eq 2 -o $rc -eq 0 ]; then
+ set +xu
+ return 0
+ fi
+ set +xu
+ return $rc
+}
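A minimal usage sketch, assuming the function is sourced alongside a per-role manifest delivered to the node (the role name and step number are placeholders):

    source extraconfig/tasks/run_puppet.sh
    run_puppet /root/Compute_puppet_config.pp Compute 2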
diff --git a/extraconfig/tasks/swift-ring-deploy.yaml b/extraconfig/tasks/swift-ring-deploy.yaml
new file mode 100644
index 00000000..d17f78ae
--- /dev/null
+++ b/extraconfig/tasks/swift-ring-deploy.yaml
@@ -0,0 +1,31 @@
+heat_template_version: ocata
+
+parameters:
+ servers:
+ type: json
+ SwiftRingGetTempurl:
+ default: ''
+ description: A temporary Swift URL to download rings from.
+ type: string
+
+resources:
+ SwiftRingDeployConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: swift_ring_get_tempurl
+ config: |
+ #!/bin/sh
+ pushd /
+ curl --insecure --silent "${swift_ring_get_tempurl}" | tar xz || true
+ popd
+
+ SwiftRingDeploy:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ name: SwiftRingDeploy
+ config: {get_resource: SwiftRingDeployConfig}
+ servers: {get_param: servers}
+ input_values:
+ swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl}
diff --git a/extraconfig/tasks/swift-ring-update.yaml b/extraconfig/tasks/swift-ring-update.yaml
new file mode 100644
index 00000000..440c6883
--- /dev/null
+++ b/extraconfig/tasks/swift-ring-update.yaml
@@ -0,0 +1,42 @@
+heat_template_version: ocata
+
+parameters:
+ servers:
+ type: json
+ SwiftRingPutTempurl:
+ default: ''
+ description: A temporary Swift URL to upload rings to.
+ type: string
+
+resources:
+ SwiftRingUpdateConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: swift_ring_put_tempurl
+ config: |
+ #!/bin/sh
+ TMP_DATA=$(mktemp -d)
+ function cleanup {
+ rm -Rf "$TMP_DATA"
+ }
+ trap cleanup EXIT
+ # sanity check in case rings are not consistent within cluster
+ swift-recon --md5 | grep -q "doesn't match" && exit 1
+ pushd ${TMP_DATA}
+ tar -cvzf swift-rings.tar.gz /etc/swift/*.builder /etc/swift/*.ring.gz /etc/swift/backups/*
+ resp=`curl --insecure --silent -X PUT "${swift_ring_put_tempurl}" --write-out "%{http_code}" --data-binary @swift-rings.tar.gz`
+ popd
+ if [ "$resp" != "201" ]; then
+ exit 1
+ fi
+
+ SwiftRingUpdate:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ name: SwiftRingUpdate
+ config: {get_resource: SwiftRingUpdateConfig}
+ servers: {get_param: servers}
+ input_values:
+ swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl}
diff --git a/extraconfig/tasks/tripleo_upgrade_node.sh b/extraconfig/tasks/tripleo_upgrade_node.sh
new file mode 100644
index 00000000..c2565410
--- /dev/null
+++ b/extraconfig/tasks/tripleo_upgrade_node.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+#
+# This delivers the operator driven upgrade script to be invoked as part of
+# the tripleo major upgrade workflow. The utility 'upgrade-non-controller.sh'
+# is used from the undercloud to invoke the /root/tripleo_upgrade_node.sh
+#
+set -eu
+
+UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
+
+cat > $UPGRADE_SCRIPT << ENDOFCAT
+### DO NOT MODIFY THIS FILE
+### This file is automatically delivered to those nodes where the
+### disable_upgrade_deployment flag is set in roles_data.yaml.
+
+set -eu
+NOVA_COMPUTE=""
+if hiera -c /etc/puppet/hiera.yaml service_names | grep nova_compute ; then
+ NOVA_COMPUTE="true"
+fi
+SWIFT_STORAGE=""
+if hiera -c /etc/puppet/hiera.yaml service_names | grep swift_storage ; then
+ SWIFT_STORAGE="true"
+fi
+
+DEBUG="true"
+SCRIPT_NAME=$(basename $0)
+$(declare -f log_debug)
+$(declare -f manage_systemd_service)
+$(declare -f systemctl_swift)
+
+# pin nova messaging +-1 for the nova-compute service
+if [[ -n \$NOVA_COMPUTE ]]; then
+ crudini --set /etc/nova/nova.conf upgrade_levels compute auto
+fi
+
+$(declare -f special_case_ovs_upgrade_if_needed)
+special_case_ovs_upgrade_if_needed
+
+yum -y install python-zaqarclient # needed for os-collect-config
+if [[ -n \$SWIFT_STORAGE ]]; then
+ systemctl_swift stop
+fi
+yum -y update
+if [[ -n \$SWIFT_STORAGE ]]; then
+ systemctl_swift start
+fi
+# Due to bug#1640177 we need to restart compute agent
+if [[ -n \$NOVA_COMPUTE ]]; then
+ echo "Restarting openstack ceilometer agent compute"
+ systemctl restart openstack-ceilometer-compute
+fi
+
+# Apply the puppet manifest to converge the configuration right after the ${ROLE} upgrade
+$(declare -f run_puppet)
+for step in 1 2 3 4 5 6; do
+ if ! run_puppet /root/${ROLE}_puppet_config.pp ${ROLE} \${step}; then
+ echo "Puppet failure at step \${step}"
+ exit 1
+ fi
+done
+ENDOFCAT
+
+# ensure the permissions are OK
+chmod 0755 $UPGRADE_SCRIPT
+
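The delivered /root/tripleo_upgrade_node.sh is then triggered from the undercloud with the upgrade-non-controller.sh utility mentioned above, roughly as follows (the node name is an assumption):

    upgrade-non-controller.sh --upgrade overcloud-compute-0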
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index 8a88ee64..c66dd01f 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -10,6 +10,11 @@
echo "Started yum_update.sh on server $deploy_server_id at `date`"
echo -n "false" > $heat_outputs_path.update_managed_packages
+if [ -f /.dockerenv ]; then
+ echo "Not running due to running inside a container"
+ exit 0
+fi
+
if [[ -z "$update_identifier" ]]; then
echo "Not running due to unset update_identifier"
exit 0
@@ -42,7 +47,7 @@ if [[ "$list_updates" == "" ]]; then
exit 0
fi
-pacemaker_status=$(systemctl is-active pacemaker)
+pacemaker_status=$(systemctl is-active pacemaker || :)
# Fix the redis/rabbit resource start/stop timeouts. See https://bugs.launchpad.net/tripleo/+bug/1633455
# and https://bugs.launchpad.net/tripleo/+bug/1634851
@@ -63,18 +68,7 @@ if [[ "$pacemaker_status" == "active" && \
fi
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+special_case_ovs_upgrade_if_needed
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Pacemaker running, stopping cluster node and doing full package update"
diff --git a/extraconfig/tasks/yum_update.yaml b/extraconfig/tasks/yum_update.yaml
index d313ca9f..8cff838e 100644
--- a/extraconfig/tasks/yum_update.yaml
+++ b/extraconfig/tasks/yum_update.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
Software-config for performing package updates using yum
@@ -9,7 +9,12 @@ resources:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: {get_file: yum_update.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: yum_update.sh
+
inputs:
- name: update_identifier
description: yum will only run for previously unused values of update_identifier
diff --git a/extraconfig/tasks/yum_update_noop.yaml b/extraconfig/tasks/yum_update_noop.yaml
index b759d9c5..9400c1d2 100644
--- a/extraconfig/tasks/yum_update_noop.yaml
+++ b/extraconfig/tasks/yum_update_noop.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'No-op yum update task'
resources:
diff --git a/firstboot/install_vrouter_kmod.yaml b/firstboot/install_vrouter_kmod.yaml
new file mode 100644
index 00000000..e936e605
--- /dev/null
+++ b/firstboot/install_vrouter_kmod.yaml
@@ -0,0 +1,105 @@
+heat_template_version: ocata
+
+parameters:
+ ContrailRepo:
+ type: string
+ default: http://192.168.24.1/contrail
+ VrouterPhysicalInterface:
+ default: 'eth0'
+ description: vRouter physical interface
+ type: string
+
+description: >
+ Prepares vhost0 interface to be used by os-net-config
+
+resources:
+ userdata:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: {get_resource: vrouter_module_config}
+
+ vrouter_module_config:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ config:
+ str_replace:
+ template: |
+ #!/bin/bash
+ sed -i '/\[main\]/a \ \ \ \ \parser = future' /etc/puppet/puppet.conf
+ cat <<EOF > /etc/yum.repos.d/contrail.repo
+ [Contrail]
+ name=Contrail Repo
+ baseurl=$contrail_repo
+ enabled=1
+ gpgcheck=0
+ protect=1
+ EOF
+ if [[ `hostname |awk -F"-" '{print $2}'` == "novacompute" || `hostname |awk -F"-" '{print $2}'` == "contrailtsn" ]]; then
+ yum install -y contrail-vrouter-utils
+ function pkt_setup () {
+ for f in /sys/class/net/$1/queues/rx-*
+ do
+ q="$(echo $f | cut -d '-' -f2)"
+ r=$(($q%32))
+ s=$(($q/32))
+ ((mask=1<<$r))
+ str=(`printf "%x" $mask`)
+ if [ $s -gt 0 ]; then
+ for ((i=0; i < $s; i++))
+ do
+ str+=,00000000
+ done
+ fi
+ echo $str > $f/rps_cpus
+ done
+ ifconfig $1 up
+ }
+ function insert_vrouter() {
+ insmod /tmp/vrouter.ko
+ if [ -f /sys/class/net/pkt1/queues/rx-0/rps_cpus ]; then
+ pkt_setup pkt1
+ fi
+ if [ -f /sys/class/net/pkt2/queues/rx-0/rps_cpus ]; then
+ pkt_setup pkt2
+ fi
+ if [ -f /sys/class/net/pkt3/queues/rx-0/rps_cpus ]; then
+ pkt_setup pkt3
+ fi
+ DEV_MAC=$(cat /sys/class/net/$phy_int/address)
+ vif --create vhost0 --mac $DEV_MAC
+ vif --add $phy_int --mac $DEV_MAC --vrf 0 --vhost-phys --type physical
+ vif --add vhost0 --mac $DEV_MAC --vrf 0 --type vhost --xconnect $phy_int
+ ip link set vhost0 up
+ return 0
+ }
+ yumdownloader contrail-vrouter --destdir /tmp
+ cd /tmp
+ rpm2cpio /tmp/contrail-vrouter*.rpm | cpio -idmv
+ cp `find /tmp/lib/modules -name vrouter.ko |tail -1` /tmp
+ insert_vrouter
+ if [[ `ifconfig $dev |grep "inet "` ]]; then
+ def_gw=''
+ if [[ `ip route show |grep default|grep $dev` ]]; then
+ def_gw=`ip route show |grep default|grep $dev|awk '{print $3}'`
+ fi
+ ip=`ifconfig $dev |grep "inet "|awk '{print $2}'`
+ mask=`ifconfig $dev |grep "inet "|awk '{print $4}'`
+ ip address delete $ip/$mask dev $dev
+ ip address add $ip/$mask dev vhost0
+ if [[ $def_gw ]]; then
+ ip route add default via $def_gw
+ fi
+ fi
+ fi
+ params:
+ $phy_int: {get_param: VrouterPhysicalInterface}
+ $contrail_repo: {get_param: ContrailRepo}
+
+outputs:
+ # This means get_resource from the parent template will get the userdata, see:
+ # http://docs.openstack.org/developer/heat/template_guide/composition.html#making-your-template-resource-more-transparent
+ # Note this is new-for-kilo, an alternative is returning a value then using
+ # get_attr in the parent template instead.
+ OS::stack_id:
+ value: {get_resource: userdata}
diff --git a/firstboot/os-net-config-mappings.yaml b/firstboot/os-net-config-mappings.yaml
index caf18421..f82bc19f 100644
--- a/firstboot/os-net-config-mappings.yaml
+++ b/firstboot/os-net-config-mappings.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Configure os-net-config mappings for specific nodes
diff --git a/firstboot/userdata_default.yaml b/firstboot/userdata_default.yaml
index 140d2bf8..bc379f4c 100644
--- a/firstboot/userdata_default.yaml
+++ b/firstboot/userdata_default.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
This is a default no-op template which provides empty user-data
diff --git a/firstboot/userdata_dev_rsync.yaml b/firstboot/userdata_dev_rsync.yaml
index 7dc7bd4d..d412b93a 100644
--- a/firstboot/userdata_dev_rsync.yaml
+++ b/firstboot/userdata_dev_rsync.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: >
This is first boot configuration for development purposes. It allows
diff --git a/firstboot/userdata_example.yaml b/firstboot/userdata_example.yaml
index a0d8c7ac..a352093f 100644
--- a/firstboot/userdata_example.yaml
+++ b/firstboot/userdata_example.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
# NOTE: You don't need to pass the parameter explicitly from the
# parent template, it can be specified via the parameter_defaults
diff --git a/firstboot/userdata_heat_admin.yaml b/firstboot/userdata_heat_admin.yaml
index 63d5bbf8..ed8302dc 100644
--- a/firstboot/userdata_heat_admin.yaml
+++ b/firstboot/userdata_heat_admin.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
parameters:
# Can be overridden via parameter_defaults in the environment
diff --git a/firstboot/userdata_root_password.yaml b/firstboot/userdata_root_password.yaml
new file mode 100644
index 00000000..63dd5a9c
--- /dev/null
+++ b/firstboot/userdata_root_password.yaml
@@ -0,0 +1,38 @@
+heat_template_version: ocata
+
+description: >
+ Uses cloud-init to enable root logins and set the root password.
+ Note this is less secure than the default configuration and may not be
+ appropriate for production environments; it's intended for illustration
+ and development/debugging only.
+
+parameters:
+ NodeRootPassword:
+ description: Root password for the nodes
+ hidden: true
+ type: string
+
+resources:
+ userdata:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: {get_resource: root_config}
+
+ root_config:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ ssh_pwauth: true
+ disable_root: false
+ chpasswd:
+ list:
+ str_replace:
+ template: "root:PASSWORD"
+ params:
+ PASSWORD: {get_param: NodeRootPassword}
+ expire: False
+
+outputs:
+ OS::stack_id:
+ value: {get_resource: userdata}
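A sketch of how this template is typically wired in, assuming the usual OS::TripleO::NodeUserData firstboot hook and a hypothetical password value:

    resource_registry:
      OS::TripleO::NodeUserData: tripleo-heat-templates/firstboot/userdata_root_password.yaml
    parameter_defaults:
      NodeRootPassword: 'examplepassword'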
diff --git a/hosts-config.yaml b/hosts-config.yaml
index a24b9bb4..5a211716 100644
--- a/hosts-config.yaml
+++ b/hosts-config.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'All Hosts Config'
parameters:
diff --git a/net-config-bond.yaml b/net-config-bond.yaml
index db6ff2c7..3ae09c98 100644
--- a/net-config-bond.yaml
+++ b/net-config-bond.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge.
parameters:
diff --git a/net-config-bridge.yaml b/net-config-bridge.yaml
index e7b96695..10d53880 100644
--- a/net-config-bridge.yaml
+++ b/net-config-bridge.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config for a simple bridge.
parameters:
diff --git a/net-config-linux-bridge.yaml b/net-config-linux-bridge.yaml
index d8274f3c..04664818 100644
--- a/net-config-linux-bridge.yaml
+++ b/net-config-linux-bridge.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config for a simple bridge.
parameters:
diff --git a/net-config-noop.yaml b/net-config-noop.yaml
index 94c492c6..be05cc11 100644
--- a/net-config-noop.yaml
+++ b/net-config-noop.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Software Config to no-op for os-net-config. Using this will allow you
@@ -38,8 +38,8 @@ resources:
OsNetConfigImpl:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
- config:
+ group: apply-config
+ config: {}
outputs:
OS::stack_id:
diff --git a/net-config-static-bridge-with-external-dhcp.yaml b/net-config-static-bridge-with-external-dhcp.yaml
index a1d86728..12374a28 100644
--- a/net-config-static-bridge-with-external-dhcp.yaml
+++ b/net-config-static-bridge-with-external-dhcp.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
parameters:
diff --git a/net-config-static-bridge.yaml b/net-config-static-bridge.yaml
index 1e1498b3..50e541be 100644
--- a/net-config-static-bridge.yaml
+++ b/net-config-static-bridge.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
parameters:
diff --git a/net-config-static.yaml b/net-config-static.yaml
index c67b4e99..a52e22ba 100644
--- a/net-config-static.yaml
+++ b/net-config-static.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config for a simple bridge.
parameters:
diff --git a/net-config-undercloud.yaml b/net-config-undercloud.yaml
new file mode 100644
index 00000000..9be51c0f
--- /dev/null
+++ b/net-config-undercloud.yaml
@@ -0,0 +1,77 @@
+heat_template_version: ocata
+description: >
+ Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet:
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: disable_configure_safe_defaults
+ default: true
+ config:
+ str_replace:
+ template:
+ get_file: network/scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: ovs_bridge
+ name: br-ctlplane
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
+ list_join:
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ members:
+ - type: interface
+ name: eth1
+ # force the MAC address of the bridge to this interface
+ primary: true
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value:
+ get_resource: OsNetConfigImpl
+
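
Note: a net-config template like the new net-config-undercloud.yaml only takes effect once it is wired in through the resource registry. A rough, hypothetical environment-file sketch; the registry key and parameter values below are illustrative assumptions, so check the actual *::Net::SoftwareConfig hook expected by your role before using them:

    resource_registry:
      # Hypothetical mapping; substitute the real role key for your deployment.
      OS::TripleO::Undercloud::Net::SoftwareConfig: ../net-config-undercloud.yaml
    parameter_defaults:
      ControlPlaneSubnetCidr: '24'
      DnsServers: ['8.8.8.8', '8.8.4.4']   # example values only
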
diff --git a/network/config/bond-with-vlans/ceph-storage.yaml b/network/config/bond-with-vlans/ceph-storage.yaml
index 2f92f4b5..703fea08 100644
--- a/network/config/bond-with-vlans/ceph-storage.yaml
+++ b/network/config/bond-with-vlans/ceph-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the ceph storage role.
parameters:
diff --git a/network/config/bond-with-vlans/cinder-storage.yaml b/network/config/bond-with-vlans/cinder-storage.yaml
index 0e53e202..df15cd63 100644
--- a/network/config/bond-with-vlans/cinder-storage.yaml
+++ b/network/config/bond-with-vlans/cinder-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the cinder storage role.
parameters:
diff --git a/network/config/bond-with-vlans/compute-dpdk.yaml b/network/config/bond-with-vlans/compute-dpdk.yaml
index a9b314a4..4677241b 100644
--- a/network/config/bond-with-vlans/compute-dpdk.yaml
+++ b/network/config/bond-with-vlans/compute-dpdk.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the compute role.
parameters:
diff --git a/network/config/bond-with-vlans/compute.yaml b/network/config/bond-with-vlans/compute.yaml
index 4cac448b..f9c926d3 100644
--- a/network/config/bond-with-vlans/compute.yaml
+++ b/network/config/bond-with-vlans/compute.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the compute role.
parameters:
diff --git a/network/config/bond-with-vlans/controller-no-external.yaml b/network/config/bond-with-vlans/controller-no-external.yaml
index 46090974..ce1e8654 100644
--- a/network/config/bond-with-vlans/controller-no-external.yaml
+++ b/network/config/bond-with-vlans/controller-no-external.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role.
parameters:
diff --git a/network/config/bond-with-vlans/controller-v6.yaml b/network/config/bond-with-vlans/controller-v6.yaml
index d07a26ff..bb4ac274 100644
--- a/network/config/bond-with-vlans/controller-v6.yaml
+++ b/network/config/bond-with-vlans/controller-v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role with IPv6
on the External network. The IPv6 default route is on the External network, and the IPv4 default route is on the Control
diff --git a/network/config/bond-with-vlans/controller.yaml b/network/config/bond-with-vlans/controller.yaml
index e2973a72..91515385 100644
--- a/network/config/bond-with-vlans/controller.yaml
+++ b/network/config/bond-with-vlans/controller.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role.
parameters:
diff --git a/network/config/bond-with-vlans/swift-storage.yaml b/network/config/bond-with-vlans/swift-storage.yaml
index 5bdba802..6d4e3681 100644
--- a/network/config/bond-with-vlans/swift-storage.yaml
+++ b/network/config/bond-with-vlans/swift-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the swift storage role.
parameters:
diff --git a/network/config/multiple-nics/ceph-storage.yaml b/network/config/multiple-nics/ceph-storage.yaml
index e9c34213..6a788063 100644
--- a/network/config/multiple-nics/ceph-storage.yaml
+++ b/network/config/multiple-nics/ceph-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure multiple interfaces for the ceph storage role.
parameters:
diff --git a/network/config/multiple-nics/cinder-storage.yaml b/network/config/multiple-nics/cinder-storage.yaml
index f58f1168..d2384445 100644
--- a/network/config/multiple-nics/cinder-storage.yaml
+++ b/network/config/multiple-nics/cinder-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure multiple interfaces for the cinder storage role.
parameters:
diff --git a/network/config/multiple-nics/compute-dvr.yaml b/network/config/multiple-nics/compute-dvr.yaml
new file mode 100644
index 00000000..abfd323f
--- /dev/null
+++ b/network/config/multiple-nics/compute-dvr.yaml
@@ -0,0 +1,162 @@
+heat_template_version: ocata
+description: >
+ Software Config to drive os-net-config to configure multiple interfaces for the
+ compute role with external bridge for DVR.
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ExternalNetworkVlanID:
+ default: 10
+ description: Vlan ID for the external network traffic.
+ type: number
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ TenantNetworkVlanID:
+ default: 50
+ description: Vlan ID for the tenant network traffic.
+ type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
+ ControlPlaneSubnetCidr: # Override this via parameter_defaults
+ default: '24'
+ description: The subnet CIDR of the control plane network.
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ ExternalInterfaceDefaultRoute: # Not used by default in this template
+ default: 10.0.0.1
+ description: The default route of the external network.
+ type: string
+ ManagementInterfaceDefaultRoute: # Commented out by default in this template
+ default: unset
+ description: The default route of the management network.
+ type: string
+ DnsServers: # Override this via parameter_defaults
+ default: []
+ description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+ type: comma_delimited_list
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ str_replace:
+ template:
+ get_file: ../../scripts/run-os-net-config.sh
+ params:
+ $network_config:
+ network_config:
+ - type: interface
+ name: nic1
+ use_dhcp: false
+ dns_servers:
+ get_param: DnsServers
+ addresses:
+ - ip_netmask:
+ list_join:
+ - /
+ - - get_param: ControlPlaneIp
+ - get_param: ControlPlaneSubnetCidr
+ routes:
+ - ip_netmask: 169.254.169.254/32
+ next_hop:
+ get_param: EC2MetadataIp
+ - default: true
+ next_hop:
+ get_param: ControlPlaneDefaultRoute
+ - type: interface
+ name: nic2
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: StorageIpSubnet
+ - type: interface
+ name: nic4
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: InternalApiIpSubnet
+ - type: ovs_bridge
+ name: br-tenant
+ use_dhcp: false
+ addresses:
+ - ip_netmask:
+ get_param: TenantIpSubnet
+ members:
+ - type: interface
+ name: nic5
+ use_dhcp: false
+ primary: true
+ # External bridge for DVR (no IP address required)
+ - type: ovs_bridge
+ name: bridge_name
+ dns_servers:
+ get_param: DnsServers
+ use_dhcp: false
+ members:
+ - type: interface
+ name: nic6
+ primary: true
+ # Uncomment when including environments/network-management.yaml
+ # If setting default route on the Management interface, comment
+ # out the default route on the Control Plane.
+ #-
+ # type: interface
+ # name: nic7
+ # use_dhcp: false
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
+ # routes:
+ # -
+ # default: true
+ # next_hop: {get_param: ManagementInterfaceDefaultRoute}
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value:
+ get_resource: OsNetConfigImpl
+
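
Note: in the new compute-dvr.yaml the external bridge is written literally as bridge_name (substituted with the actual physical bridge name, e.g. br-ex, at deploy time) and deliberately carries no IP address; it exists so that DVR routers hosted on compute nodes can reach the external network directly. A rough environment sketch, assuming the standard Compute net-config hook and the stock bridge mapping; treat the values as assumptions to be aligned with your environment:

    resource_registry:
      OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/multiple-nics/compute-dvr.yaml
    parameter_defaults:
      # Assumed physical network name / bridge pairing; adjust as needed.
      NeutronBridgeMappings: 'datacentre:br-ex'
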
diff --git a/network/config/multiple-nics/compute.yaml b/network/config/multiple-nics/compute.yaml
index 9b0c8c02..101a08d3 100644
--- a/network/config/multiple-nics/compute.yaml
+++ b/network/config/multiple-nics/compute.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure multiple interfaces for the compute role.
parameters:
diff --git a/network/config/multiple-nics/controller-v6.yaml b/network/config/multiple-nics/controller-v6.yaml
index a0ed9f78..4fae435a 100644
--- a/network/config/multiple-nics/controller-v6.yaml
+++ b/network/config/multiple-nics/controller-v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure multiple interfaces for the controller role with IPv6 on the External
network. The IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
diff --git a/network/config/multiple-nics/controller.yaml b/network/config/multiple-nics/controller.yaml
index e38c545c..ba9f8fd4 100644
--- a/network/config/multiple-nics/controller.yaml
+++ b/network/config/multiple-nics/controller.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure multiple interfaces for the controller role.
parameters:
diff --git a/network/config/multiple-nics/swift-storage.yaml b/network/config/multiple-nics/swift-storage.yaml
index 1ad503a7..4019012a 100644
--- a/network/config/multiple-nics/swift-storage.yaml
+++ b/network/config/multiple-nics/swift-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure multiple interfaces for the swift storage role.
parameters:
diff --git a/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml b/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
index 0a6faa79..448df69c 100644
--- a/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
+++ b/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the ceph storage role.
parameters:
diff --git a/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml b/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml
index 5abaea66..465555d3 100644
--- a/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml
+++ b/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the cinder storage role.
parameters:
diff --git a/network/config/single-nic-linux-bridge-vlans/compute.yaml b/network/config/single-nic-linux-bridge-vlans/compute.yaml
index aa63dd3a..a21bc8f9 100644
--- a/network/config/single-nic-linux-bridge-vlans/compute.yaml
+++ b/network/config/single-nic-linux-bridge-vlans/compute.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the compute role.
parameters:
diff --git a/network/config/single-nic-linux-bridge-vlans/controller-v6.yaml b/network/config/single-nic-linux-bridge-vlans/controller-v6.yaml
index 28cf6ced..bb8bb9c2 100644
--- a/network/config/single-nic-linux-bridge-vlans/controller-v6.yaml
+++ b/network/config/single-nic-linux-bridge-vlans/controller-v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the controller role with IPv6 on the External network. The
IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
diff --git a/network/config/single-nic-linux-bridge-vlans/controller.yaml b/network/config/single-nic-linux-bridge-vlans/controller.yaml
index 566f1feb..a9689ce9 100644
--- a/network/config/single-nic-linux-bridge-vlans/controller.yaml
+++ b/network/config/single-nic-linux-bridge-vlans/controller.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the controller role.
parameters:
diff --git a/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml b/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
index fe948ad1..c8e4db29 100644
--- a/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
+++ b/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the swift storage role.
parameters:
diff --git a/network/config/single-nic-vlans/ceph-storage.yaml b/network/config/single-nic-vlans/ceph-storage.yaml
index 6e0a97da..0b5eb0c9 100644
--- a/network/config/single-nic-vlans/ceph-storage.yaml
+++ b/network/config/single-nic-vlans/ceph-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the ceph storage role.
parameters:
diff --git a/network/config/single-nic-vlans/cinder-storage.yaml b/network/config/single-nic-vlans/cinder-storage.yaml
index f58665f7..882d6ebc 100644
--- a/network/config/single-nic-vlans/cinder-storage.yaml
+++ b/network/config/single-nic-vlans/cinder-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the cinder storage role.
parameters:
diff --git a/network/config/single-nic-vlans/compute.yaml b/network/config/single-nic-vlans/compute.yaml
index 40264284..42cfd781 100644
--- a/network/config/single-nic-vlans/compute.yaml
+++ b/network/config/single-nic-vlans/compute.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the compute role.
parameters:
diff --git a/network/config/single-nic-vlans/controller-no-external.yaml b/network/config/single-nic-vlans/controller-no-external.yaml
index b9aec1ea..9e0680ea 100644
--- a/network/config/single-nic-vlans/controller-no-external.yaml
+++ b/network/config/single-nic-vlans/controller-no-external.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the controller role. No external IP is configured.
parameters:
diff --git a/network/config/single-nic-vlans/controller-v6.yaml b/network/config/single-nic-vlans/controller-v6.yaml
index 4f065d1e..1f9a67d6 100644
--- a/network/config/single-nic-vlans/controller-v6.yaml
+++ b/network/config/single-nic-vlans/controller-v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the controller role with IPv6 on the External network. The
IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
diff --git a/network/config/single-nic-vlans/controller.yaml b/network/config/single-nic-vlans/controller.yaml
index 4a615d91..4ac18315 100644
--- a/network/config/single-nic-vlans/controller.yaml
+++ b/network/config/single-nic-vlans/controller.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the controller role.
parameters:
diff --git a/network/config/single-nic-vlans/swift-storage.yaml b/network/config/single-nic-vlans/swift-storage.yaml
index 88f69b4d..605b8ee4 100644
--- a/network/config/single-nic-vlans/swift-storage.yaml
+++ b/network/config/single-nic-vlans/swift-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: '2016-10-14'
+heat_template_version: ocata
description: >
Software Config to drive os-net-config to configure VLANs for the swift storage role.
parameters:
diff --git a/network/endpoints/build_endpoint_map.py b/network/endpoints/build_endpoint_map.py
index 964f58f7..990cbabc 100755
--- a/network/endpoints/build_endpoint_map.py
+++ b/network/endpoints/build_endpoint_map.py
@@ -191,7 +191,7 @@ def template_endpoint_items(config):
def generate_endpoint_map_template(config):
return collections.OrderedDict([
- ('heat_template_version', '2015-04-30'),
+ ('heat_template_version', 'ocata'),
('description', 'A map of OpenStack endpoints. Since the endpoints '
'are URLs, we need to have brackets around IPv6 IP addresses. The '
'inputs to these parameters come from net_ip_uri_map, which will '
@@ -280,8 +280,9 @@ def main():
try:
if options.check:
if not check_up_to_date(options.output_file, options.input_file):
- print('EndpointMap template does not match input data',
- file=sys.stderr)
+ print('EndpointMap template does not match input data. Please '
+ 'run the build_endpoint_map.py tool to update the '
+ 'template.', file=sys.stderr)
sys.exit(2)
else:
build_endpoint_map(options.output_file, options.input_file)
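
Note: endpoint_map.yaml is generated output, so editing it by hand will make the --check mode above fail with the new, more actionable message; the intended workflow is to edit endpoint_data.yaml and re-run build_endpoint_map.py to regenerate the map. For reference, a hypothetical entry of the shape the tool consumes (the "Example" service and its port are illustrative only, mirroring the Congress/Tacker entries added in the next hunk):

    Example:
      Internal:
        net_param: ExampleApi
      Public:
        net_param: Public
      Admin:
        net_param: ExampleApi
      port: 1234
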
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
index 0178c4dd..277bd676 100644
--- a/network/endpoints/endpoint_data.yaml
+++ b/network/endpoints/endpoint_data.yaml
@@ -28,6 +28,96 @@ Ceilometer:
net_param: CeilometerApi
port: 8777
+ContrailConfig:
+ Internal:
+ net_param: ContrailConfig
+ Public:
+ net_param: Public
+ Admin:
+ net_param: ContrailConfig
+ port: 8082
+
+ContrailDiscovery:
+ Internal:
+ net_param: ContrailConfig
+ Public:
+ net_param: Public
+ Admin:
+ net_param: ContrailConfig
+ port: 5998
+
+ContrailAnalyticsCollectorHttp:
+ Internal:
+ net_param: ContrailAnalytics
+ Public:
+ net_param: Public
+ Admin:
+ net_param: ContrailAnalytics
+ port: 8089
+
+ContrailAnalyticsApi:
+ Internal:
+ net_param: ContrailAnalytics
+ Public:
+ net_param: Public
+ Admin:
+ net_param: ContrailAnalytics
+ port: 8081
+
+ContrailAnalyticsHttp:
+ Internal:
+ net_param: ContrailAnalytics
+ Public:
+ net_param: Public
+ Admin:
+ net_param: ContrailAnalytics
+ port: 8090
+
+ContrailAnalyticsCollectorSandesh:
+ Internal:
+ net_param: ContrailAnalytics
+ Public:
+ net_param: Public
+ Admin:
+ net_param: ContrailAnalytics
+ port: 8086
+
+ContrailAnalyticsRedis:
+ Internal:
+ net_param: ContrailAnalytics
+ Public:
+ net_param: Public
+ Admin:
+ net_param: ContrailAnalytics
+ port: 6379
+
+ContrailWebuiHttp:
+ Internal:
+ net_param: ContrailConfig
+ Public:
+ net_param: Public
+ Admin:
+ net_param: ContrailConfig
+ port: 8080
+
+ContrailWebuiHttps:
+ Internal:
+ net_param: ContrailConfig
+ Public:
+ net_param: Public
+ Admin:
+ net_param: ContrailConfig
+ port: 8143
+
+Ec2Api:
+ Internal:
+ net_param: Ec2Api
+ Public:
+ net_param: Public
+ Admin:
+ net_param: Ec2Api
+ port: 8788
+
Gnocchi:
Internal:
net_param: GnocchiApi
@@ -67,6 +157,15 @@ Cinder:
V3: /v3/%(tenant_id)s
port: 8776
+Congress:
+ Internal:
+ net_param: CongressApi
+ Public:
+ net_param: Public
+ Admin:
+ net_param: CongressApi
+ port: 1789
+
Glance:
Internal:
net_param: GlanceApi
@@ -76,11 +175,6 @@ Glance:
net_param: GlanceApi
port: 9292
-GlanceRegistry:
- Internal:
- net_param: GlanceRegistry
- port: 9191
-
Mysql:
Internal:
net_param: Mysql
@@ -205,6 +299,21 @@ Nova:
'': /v2.1
port: 8774
+NovaPlacement:
+ Internal:
+ net_param: NovaPlacement
+ uri_suffixes:
+ '': /placement
+ Public:
+ net_param: Public
+ uri_suffixes:
+ '': /placement
+ Admin:
+ net_param: NovaPlacement
+ uri_suffixes:
+ '': /placement
+ port: 8778
+
NovaVNCProxy:
Internal:
net_param: NovaApi
@@ -262,6 +371,15 @@ Sahara:
'': /v1.1/%(tenant_id)s
port: 8386
+Tacker:
+ Internal:
+ net_param: TackerApi
+ Public:
+ net_param: Public
+ Admin:
+ net_param: TackerApi
+ port: 9890
+
Ironic:
Internal:
net_param: IronicApi
@@ -294,3 +412,13 @@ ZaqarWebSocket:
Admin:
net_param: ZaqarApi
port: 9000
+ protocol: ws
+
+Octavia:
+ Internal:
+ net_param: OctaviaApi
+ Public:
+ net_param: Public
+ Admin:
+ net_param: OctaviaApi
+ port: 9876
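
Note: each entry added to endpoint_data.yaml above fans out into three EndpointMap parameter defaults plus a full set of host/port/protocol/uri outputs in the regenerated endpoint_map.yaml that follows. For Octavia, for example, the generated defaults take this form (as seen further down in the next diff):

    OctaviaAdmin: {protocol: http, port: '9876', host: IP_ADDRESS}
    OctaviaInternal: {protocol: http, port: '9876', host: IP_ADDRESS}
    OctaviaPublic: {protocol: http, port: '9876', host: IP_ADDRESS}
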
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index 7ebb318f..fecac0af 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -2,7 +2,7 @@
### This file is automatically generated from endpoint_data.yaml
### by the script build_endpoint_map.py
-heat_template_version: '2015-04-30'
+heat_template_version: ocata
description: A map of OpenStack endpoints. Since the endpoints are URLs,
we need to have brackets around IPv6 IP addresses. The inputs to these
parameters come from net_ip_uri_map, which will include these brackets
@@ -34,10 +34,48 @@ parameters:
CinderAdmin: {protocol: http, port: '8776', host: IP_ADDRESS}
CinderInternal: {protocol: http, port: '8776', host: IP_ADDRESS}
CinderPublic: {protocol: http, port: '8776', host: IP_ADDRESS}
+ CongressAdmin: {protocol: http, port: '1789', host: IP_ADDRESS}
+ CongressInternal: {protocol: http, port: '1789', host: IP_ADDRESS}
+ CongressPublic: {protocol: http, port: '1789', host: IP_ADDRESS}
+ ContrailAnalyticsApiAdmin: {protocol: http, port: '8081', host: IP_ADDRESS}
+ ContrailAnalyticsApiInternal: {protocol: http, port: '8081', host: IP_ADDRESS}
+ ContrailAnalyticsApiPublic: {protocol: http, port: '8081', host: IP_ADDRESS}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: http, port: '8089',
+ host: IP_ADDRESS}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: http, port: '8089',
+ host: IP_ADDRESS}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: http, port: '8089',
+ host: IP_ADDRESS}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: http, port: '8086',
+ host: IP_ADDRESS}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: http, port: '8086',
+ host: IP_ADDRESS}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: http, port: '8086',
+ host: IP_ADDRESS}
+ ContrailAnalyticsHttpAdmin: {protocol: http, port: '8090', host: IP_ADDRESS}
+ ContrailAnalyticsHttpInternal: {protocol: http, port: '8090', host: IP_ADDRESS}
+ ContrailAnalyticsHttpPublic: {protocol: http, port: '8090', host: IP_ADDRESS}
+ ContrailAnalyticsRedisAdmin: {protocol: http, port: '6379', host: IP_ADDRESS}
+ ContrailAnalyticsRedisInternal: {protocol: http, port: '6379', host: IP_ADDRESS}
+ ContrailAnalyticsRedisPublic: {protocol: http, port: '6379', host: IP_ADDRESS}
+ ContrailConfigAdmin: {protocol: http, port: '8082', host: IP_ADDRESS}
+ ContrailConfigInternal: {protocol: http, port: '8082', host: IP_ADDRESS}
+ ContrailConfigPublic: {protocol: http, port: '8082', host: IP_ADDRESS}
+ ContrailDiscoveryAdmin: {protocol: http, port: '5998', host: IP_ADDRESS}
+ ContrailDiscoveryInternal: {protocol: http, port: '5998', host: IP_ADDRESS}
+ ContrailDiscoveryPublic: {protocol: http, port: '5998', host: IP_ADDRESS}
+ ContrailWebuiHttpAdmin: {protocol: http, port: '8080', host: IP_ADDRESS}
+ ContrailWebuiHttpInternal: {protocol: http, port: '8080', host: IP_ADDRESS}
+ ContrailWebuiHttpPublic: {protocol: http, port: '8080', host: IP_ADDRESS}
+ ContrailWebuiHttpsAdmin: {protocol: http, port: '8143', host: IP_ADDRESS}
+ ContrailWebuiHttpsInternal: {protocol: http, port: '8143', host: IP_ADDRESS}
+ ContrailWebuiHttpsPublic: {protocol: http, port: '8143', host: IP_ADDRESS}
+ Ec2ApiAdmin: {protocol: http, port: '8788', host: IP_ADDRESS}
+ Ec2ApiInternal: {protocol: http, port: '8788', host: IP_ADDRESS}
+ Ec2ApiPublic: {protocol: http, port: '8788', host: IP_ADDRESS}
GlanceAdmin: {protocol: http, port: '9292', host: IP_ADDRESS}
GlanceInternal: {protocol: http, port: '9292', host: IP_ADDRESS}
GlancePublic: {protocol: http, port: '9292', host: IP_ADDRESS}
- GlanceRegistryInternal: {protocol: http, port: '9191', host: IP_ADDRESS}
GnocchiAdmin: {protocol: http, port: '8041', host: IP_ADDRESS}
GnocchiInternal: {protocol: http, port: '8041', host: IP_ADDRESS}
GnocchiPublic: {protocol: http, port: '8041', host: IP_ADDRESS}
@@ -67,9 +105,15 @@ parameters:
NovaAdmin: {protocol: http, port: '8774', host: IP_ADDRESS}
NovaInternal: {protocol: http, port: '8774', host: IP_ADDRESS}
NovaPublic: {protocol: http, port: '8774', host: IP_ADDRESS}
+ NovaPlacementAdmin: {protocol: http, port: '8778', host: IP_ADDRESS}
+ NovaPlacementInternal: {protocol: http, port: '8778', host: IP_ADDRESS}
+ NovaPlacementPublic: {protocol: http, port: '8778', host: IP_ADDRESS}
NovaVNCProxyAdmin: {protocol: http, port: '6080', host: IP_ADDRESS}
NovaVNCProxyInternal: {protocol: http, port: '6080', host: IP_ADDRESS}
NovaVNCProxyPublic: {protocol: http, port: '6080', host: IP_ADDRESS}
+ OctaviaAdmin: {protocol: http, port: '9876', host: IP_ADDRESS}
+ OctaviaInternal: {protocol: http, port: '9876', host: IP_ADDRESS}
+ OctaviaPublic: {protocol: http, port: '9876', host: IP_ADDRESS}
PankoAdmin: {protocol: http, port: '8779', host: IP_ADDRESS}
PankoInternal: {protocol: http, port: '8779', host: IP_ADDRESS}
PankoPublic: {protocol: http, port: '8779', host: IP_ADDRESS}
@@ -79,12 +123,15 @@ parameters:
SwiftAdmin: {protocol: http, port: '8080', host: IP_ADDRESS}
SwiftInternal: {protocol: http, port: '8080', host: IP_ADDRESS}
SwiftPublic: {protocol: http, port: '8080', host: IP_ADDRESS}
+ TackerAdmin: {protocol: http, port: '9890', host: IP_ADDRESS}
+ TackerInternal: {protocol: http, port: '9890', host: IP_ADDRESS}
+ TackerPublic: {protocol: http, port: '9890', host: IP_ADDRESS}
ZaqarAdmin: {protocol: http, port: '8888', host: IP_ADDRESS}
ZaqarInternal: {protocol: http, port: '8888', host: IP_ADDRESS}
ZaqarPublic: {protocol: http, port: '8888', host: IP_ADDRESS}
- ZaqarWebSocketAdmin: {protocol: http, port: '9000', host: IP_ADDRESS}
- ZaqarWebSocketInternal: {protocol: http, port: '9000', host: IP_ADDRESS}
- ZaqarWebSocketPublic: {protocol: http, port: '9000', host: IP_ADDRESS}
+ ZaqarWebSocketAdmin: {protocol: ws, port: '9000', host: IP_ADDRESS}
+ ZaqarWebSocketInternal: {protocol: ws, port: '9000', host: IP_ADDRESS}
+ ZaqarWebSocketPublic: {protocol: ws, port: '9000', host: IP_ADDRESS}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
CloudEndpoints:
@@ -1808,173 +1855,173 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, CinderPublic, port]
- GlanceAdmin:
+ CongressAdmin:
host:
str_replace:
template:
- get_param: [EndpointMap, GlanceAdmin, host]
+ get_param: [EndpointMap, CongressAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, CongressApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceApiNetwork]
+ get_param: [ServiceNetMap, CongressApiNetwork]
template: NETWORK_uri
host_nobrackets:
str_replace:
template:
- get_param: [EndpointMap, GlanceAdmin, host]
+ get_param: [EndpointMap, CongressAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, CongressApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, CongressApiNetwork]
port:
- get_param: [EndpointMap, GlanceAdmin, port]
+ get_param: [EndpointMap, CongressAdmin, port]
protocol:
- get_param: [EndpointMap, GlanceAdmin, protocol]
+ get_param: [EndpointMap, CongressAdmin, protocol]
uri:
list_join:
- ''
- - - get_param: [EndpointMap, GlanceAdmin, protocol]
+ - - get_param: [EndpointMap, CongressAdmin, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlanceAdmin, host]
+ get_param: [EndpointMap, CongressAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, CongressApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceApiNetwork]
+ get_param: [ServiceNetMap, CongressApiNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlanceAdmin, port]
+ - get_param: [EndpointMap, CongressAdmin, port]
uri_no_suffix:
list_join:
- ''
- - - get_param: [EndpointMap, GlanceAdmin, protocol]
+ - - get_param: [EndpointMap, CongressAdmin, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlanceAdmin, host]
+ get_param: [EndpointMap, CongressAdmin, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, CongressApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceApiNetwork]
+ get_param: [ServiceNetMap, CongressApiNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlanceAdmin, port]
- GlanceInternal:
+ - get_param: [EndpointMap, CongressAdmin, port]
+ CongressInternal:
host:
str_replace:
template:
- get_param: [EndpointMap, GlanceInternal, host]
+ get_param: [EndpointMap, CongressInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, CongressApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceApiNetwork]
+ get_param: [ServiceNetMap, CongressApiNetwork]
template: NETWORK_uri
host_nobrackets:
str_replace:
template:
- get_param: [EndpointMap, GlanceInternal, host]
+ get_param: [EndpointMap, CongressInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, CongressApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, CongressApiNetwork]
port:
- get_param: [EndpointMap, GlanceInternal, port]
+ get_param: [EndpointMap, CongressInternal, port]
protocol:
- get_param: [EndpointMap, GlanceInternal, protocol]
+ get_param: [EndpointMap, CongressInternal, protocol]
uri:
list_join:
- ''
- - - get_param: [EndpointMap, GlanceInternal, protocol]
+ - - get_param: [EndpointMap, CongressInternal, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlanceInternal, host]
+ get_param: [EndpointMap, CongressInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, CongressApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceApiNetwork]
+ get_param: [ServiceNetMap, CongressApiNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlanceInternal, port]
+ - get_param: [EndpointMap, CongressInternal, port]
uri_no_suffix:
list_join:
- ''
- - - get_param: [EndpointMap, GlanceInternal, protocol]
+ - - get_param: [EndpointMap, CongressInternal, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlanceInternal, host]
+ get_param: [EndpointMap, CongressInternal, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceApiNetwork]
+ - get_param: [ServiceNetMap, CongressApiNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceApiNetwork]
+ get_param: [ServiceNetMap, CongressApiNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlanceInternal, port]
- GlancePublic:
+ - get_param: [EndpointMap, CongressInternal, port]
+ CongressPublic:
host:
str_replace:
template:
- get_param: [EndpointMap, GlancePublic, host]
+ get_param: [EndpointMap, CongressPublic, host]
params:
CLOUDNAME:
get_param:
@@ -1991,7 +2038,7 @@ outputs:
host_nobrackets:
str_replace:
template:
- get_param: [EndpointMap, GlancePublic, host]
+ get_param: [EndpointMap, CongressPublic, host]
params:
CLOUDNAME:
get_param:
@@ -2002,17 +2049,17 @@ outputs:
- NetIpMap
- get_param: [ServiceNetMap, PublicNetwork]
port:
- get_param: [EndpointMap, GlancePublic, port]
+ get_param: [EndpointMap, CongressPublic, port]
protocol:
- get_param: [EndpointMap, GlancePublic, protocol]
+ get_param: [EndpointMap, CongressPublic, protocol]
uri:
list_join:
- ''
- - - get_param: [EndpointMap, GlancePublic, protocol]
+ - - get_param: [EndpointMap, CongressPublic, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlancePublic, host]
+ get_param: [EndpointMap, CongressPublic, host]
params:
CLOUDNAME:
get_param:
@@ -2027,15 +2074,15 @@ outputs:
get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlancePublic, port]
+ - get_param: [EndpointMap, CongressPublic, port]
uri_no_suffix:
list_join:
- ''
- - - get_param: [EndpointMap, GlancePublic, protocol]
+ - - get_param: [EndpointMap, CongressPublic, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlancePublic, host]
+ get_param: [EndpointMap, CongressPublic, host]
params:
CLOUDNAME:
get_param:
@@ -2050,88 +2097,2776 @@ outputs:
get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlancePublic, port]
- GlanceRegistryInternal:
+ - get_param: [EndpointMap, CongressPublic, port]
+ ContrailAnalyticsApiAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsApiAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsApiAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsApiAdmin, port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsApiAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsApiAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsApiAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsApiAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsApiAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsApiAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsApiAdmin, port]
+ ContrailAnalyticsApiInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsApiInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsApiInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsApiInternal, port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsApiInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsApiInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsApiInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsApiInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsApiInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsApiInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsApiInternal, port]
+ ContrailAnalyticsApiPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsApiPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsApiPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsApiPublic, port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsApiPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsApiPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsApiPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsApiPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsApiPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsApiPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsApiPublic, port]
+ ContrailAnalyticsCollectorHttpAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin,
+ port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin,
+ protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin,
+ port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin,
+ port]
+ ContrailAnalyticsCollectorHttpInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal,
+ port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal,
+ protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal,
+ port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal,
+ port]
+ ContrailAnalyticsCollectorHttpPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic,
+ port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic,
+ protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic,
+ port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic,
+ port]
+ ContrailAnalyticsCollectorSandeshAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin,
+ port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin,
+ protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin,
+ port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin,
+ port]
+ ContrailAnalyticsCollectorSandeshInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal,
+ port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal,
+ protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal,
+ port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal,
+ port]
+ ContrailAnalyticsCollectorSandeshPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic,
+ port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic,
+ protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic,
+ port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic,
+ port]
+ ContrailAnalyticsHttpAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsHttpAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsHttpAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, port]
+ ContrailAnalyticsHttpInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsHttpInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsHttpInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsHttpInternal, port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsHttpInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsHttpInternal,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsHttpInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsHttpInternal,
+ port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsHttpInternal,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsHttpInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsHttpInternal,
+ port]
+ ContrailAnalyticsHttpPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsHttpPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsHttpPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsHttpPublic, port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsHttpPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsHttpPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsHttpPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsHttpPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsHttpPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsHttpPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsHttpPublic, port]
+ ContrailAnalyticsRedisAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsRedisAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsRedisAdmin,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, port]
+ ContrailAnalyticsRedisInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsRedisInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsRedisInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsRedisInternal, port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsRedisInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsRedisInternal,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsRedisInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsRedisInternal,
+ port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsRedisInternal,
+ protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsRedisInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailAnalyticsNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsRedisInternal,
+ port]
+ ContrailAnalyticsRedisPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsRedisPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsRedisPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, ContrailAnalyticsRedisPublic, port]
+ protocol:
+ get_param: [EndpointMap, ContrailAnalyticsRedisPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsRedisPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsRedisPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsRedisPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailAnalyticsRedisPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailAnalyticsRedisPublic,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailAnalyticsRedisPublic, port]
+ ContrailConfigAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailConfigAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailConfigAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ port:
+ get_param: [EndpointMap, ContrailConfigAdmin, port]
+ protocol:
+ get_param: [EndpointMap, ContrailConfigAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailConfigAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailConfigAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailConfigAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailConfigAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailConfigAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailConfigAdmin, port]
+ ContrailConfigInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailConfigInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailConfigInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ port:
+ get_param: [EndpointMap, ContrailConfigInternal, port]
+ protocol:
+ get_param: [EndpointMap, ContrailConfigInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailConfigInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailConfigInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailConfigInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailConfigInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailConfigInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailConfigInternal, port]
+ ContrailConfigPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailConfigPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailConfigPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, ContrailConfigPublic, port]
+ protocol:
+ get_param: [EndpointMap, ContrailConfigPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailConfigPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailConfigPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailConfigPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailConfigPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailConfigPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailConfigPublic, port]
+ ContrailDiscoveryAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailDiscoveryAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailDiscoveryAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ port:
+ get_param: [EndpointMap, ContrailDiscoveryAdmin, port]
+ protocol:
+ get_param: [EndpointMap, ContrailDiscoveryAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailDiscoveryAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailDiscoveryAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailDiscoveryAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailDiscoveryAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailDiscoveryAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailDiscoveryAdmin, port]
+ ContrailDiscoveryInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailDiscoveryInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailDiscoveryInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ port:
+ get_param: [EndpointMap, ContrailDiscoveryInternal, port]
+ protocol:
+ get_param: [EndpointMap, ContrailDiscoveryInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailDiscoveryInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailDiscoveryInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailDiscoveryInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailDiscoveryInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailDiscoveryInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailDiscoveryInternal, port]
+ ContrailDiscoveryPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailDiscoveryPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailDiscoveryPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, ContrailDiscoveryPublic, port]
+ protocol:
+ get_param: [EndpointMap, ContrailDiscoveryPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailDiscoveryPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailDiscoveryPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailDiscoveryPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailDiscoveryPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailDiscoveryPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailDiscoveryPublic, port]
+ ContrailWebuiHttpAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ port:
+ get_param: [EndpointMap, ContrailWebuiHttpAdmin, port]
+ protocol:
+ get_param: [EndpointMap, ContrailWebuiHttpAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailWebuiHttpAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailWebuiHttpAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailWebuiHttpAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailWebuiHttpAdmin, port]
+ ContrailWebuiHttpInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ port:
+ get_param: [EndpointMap, ContrailWebuiHttpInternal, port]
+ protocol:
+ get_param: [EndpointMap, ContrailWebuiHttpInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailWebuiHttpInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailWebuiHttpInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailWebuiHttpInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailWebuiHttpInternal, port]
+ ContrailWebuiHttpPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, ContrailWebuiHttpPublic, port]
+ protocol:
+ get_param: [EndpointMap, ContrailWebuiHttpPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailWebuiHttpPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailWebuiHttpPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailWebuiHttpPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailWebuiHttpPublic, port]
+ ContrailWebuiHttpsAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpsAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpsAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ port:
+ get_param: [EndpointMap, ContrailWebuiHttpsAdmin, port]
+ protocol:
+ get_param: [EndpointMap, ContrailWebuiHttpsAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailWebuiHttpsAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpsAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailWebuiHttpsAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailWebuiHttpsAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpsAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailWebuiHttpsAdmin, port]
+ ContrailWebuiHttpsInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpsInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpsInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ port:
+ get_param: [EndpointMap, ContrailWebuiHttpsInternal, port]
+ protocol:
+ get_param: [EndpointMap, ContrailWebuiHttpsInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailWebuiHttpsInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpsInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailWebuiHttpsInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailWebuiHttpsInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpsInternal,
+ host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, ContrailConfigNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, ContrailConfigNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailWebuiHttpsInternal, port]
+ ContrailWebuiHttpsPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpsPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpsPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, ContrailWebuiHttpsPublic, port]
+ protocol:
+ get_param: [EndpointMap, ContrailWebuiHttpsPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailWebuiHttpsPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpsPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailWebuiHttpsPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, ContrailWebuiHttpsPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, ContrailWebuiHttpsPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, ContrailWebuiHttpsPublic, port]
+ Ec2ApiAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, Ec2ApiAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, Ec2ApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, Ec2ApiAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
+ port:
+ get_param: [EndpointMap, Ec2ApiAdmin, port]
+ protocol:
+ get_param: [EndpointMap, Ec2ApiAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, Ec2ApiAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, Ec2ApiAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, Ec2ApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, Ec2ApiAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, Ec2ApiAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, Ec2ApiAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, Ec2ApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, Ec2ApiAdmin, port]
+ Ec2ApiInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, Ec2ApiInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, Ec2ApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, Ec2ApiInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
+ port:
+ get_param: [EndpointMap, Ec2ApiInternal, port]
+ protocol:
+ get_param: [EndpointMap, Ec2ApiInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, Ec2ApiInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, Ec2ApiInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, Ec2ApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, Ec2ApiInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, Ec2ApiInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, Ec2ApiInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, Ec2ApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, Ec2ApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, Ec2ApiInternal, port]
+ Ec2ApiPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, Ec2ApiPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, Ec2ApiPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, Ec2ApiPublic, port]
+ protocol:
+ get_param: [EndpointMap, Ec2ApiPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, Ec2ApiPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, Ec2ApiPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, Ec2ApiPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, Ec2ApiPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, Ec2ApiPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, Ec2ApiPublic, port]
+ GlanceAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GlanceAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, GlanceApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GlanceAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ port:
+ get_param: [EndpointMap, GlanceAdmin, port]
+ protocol:
+ get_param: [EndpointMap, GlanceAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlanceAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlanceAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, GlanceApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, GlanceAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlanceAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlanceAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, GlanceApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, GlanceAdmin, port]
+ GlanceInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GlanceInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, GlanceApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, GlanceInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ port:
+ get_param: [EndpointMap, GlanceInternal, port]
+ protocol:
+ get_param: [EndpointMap, GlanceInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlanceInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlanceInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, GlanceApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, GlanceInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, GlanceInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, GlanceInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, GlanceApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, GlanceApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, GlanceInternal, port]
+ GlancePublic:
host:
str_replace:
template:
- get_param: [EndpointMap, GlanceRegistryInternal, host]
+ get_param: [EndpointMap, GlancePublic, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ - get_param: [ServiceNetMap, PublicNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
host_nobrackets:
str_replace:
template:
- get_param: [EndpointMap, GlanceRegistryInternal, host]
+ get_param: [EndpointMap, GlancePublic, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ - get_param: [ServiceNetMap, PublicNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- - get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ - get_param: [ServiceNetMap, PublicNetwork]
port:
- get_param: [EndpointMap, GlanceRegistryInternal, port]
+ get_param: [EndpointMap, GlancePublic, port]
protocol:
- get_param: [EndpointMap, GlanceRegistryInternal, protocol]
+ get_param: [EndpointMap, GlancePublic, protocol]
uri:
list_join:
- ''
- - - get_param: [EndpointMap, GlanceRegistryInternal, protocol]
+ - - get_param: [EndpointMap, GlancePublic, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlanceRegistryInternal, host]
+ get_param: [EndpointMap, GlancePublic, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ - get_param: [ServiceNetMap, PublicNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlanceRegistryInternal, port]
+ - get_param: [EndpointMap, GlancePublic, port]
uri_no_suffix:
list_join:
- ''
- - - get_param: [EndpointMap, GlanceRegistryInternal, protocol]
+ - - get_param: [EndpointMap, GlancePublic, protocol]
- ://
- str_replace:
template:
- get_param: [EndpointMap, GlanceRegistryInternal, host]
+ get_param: [EndpointMap, GlancePublic, host]
params:
CLOUDNAME:
get_param:
- CloudEndpoints
- - get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ - get_param: [ServiceNetMap, PublicNetwork]
IP_ADDRESS:
get_param:
- NetIpMap
- str_replace:
params:
NETWORK:
- get_param: [ServiceNetMap, GlanceRegistryNetwork]
+ get_param: [ServiceNetMap, PublicNetwork]
template: NETWORK_uri
- ':'
- - get_param: [EndpointMap, GlanceRegistryInternal, port]
+ - get_param: [EndpointMap, GlancePublic, port]
GnocchiAdmin:
host:
str_replace:
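Every endpoint block added in the hunk above repeats the same pair of host forms: host substitutes IP_ADDRESS from the NetIpMap entry named by the NETWORK_uri template (which keeps IPv6 addresses bracketed so they can sit inside a URL), while host_nobrackets reads the plain per-network NetIpMap entry. A minimal Python sketch of that distinction, with invented NetIpMap contents (only the *_uri key naming is taken from the templates above; the addresses are made-up examples):

# Illustrative only: why the generated map keeps both a bracketed and an
# unbracketed host form. Keys follow the NetIpMap / NETWORK_uri convention
# used above; the addresses themselves are invented.
net_ip_map = {
    "internal_api": "fd00:fd00:fd00:2000::10",        # source for host_nobrackets
    "internal_api_uri": "[fd00:fd00:fd00:2000::10]",  # source for host (URL-safe)
}

network = "internal_api"
host = net_ip_map[network + "_uri"]    # str_replace: NETWORK_uri -> internal_api_uri
host_nobrackets = net_ip_map[network]  # plain get_param: [NetIpMap, <network>]

print("http://%s:9292" % host)  # bracketed form drops straight into a URI
print(host_nobrackets)          # unbracketed form for non-URL consumers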
@@ -5077,6 +7812,252 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, NovaPublic, port]
+ NovaPlacementAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, NovaPlacementNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ port:
+ get_param: [EndpointMap, NovaPlacementAdmin, port]
+ protocol:
+ get_param: [EndpointMap, NovaPlacementAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPlacementAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, NovaPlacementNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, NovaPlacementAdmin, port]
+ - /placement
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPlacementAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, NovaPlacementNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, NovaPlacementAdmin, port]
+ NovaPlacementInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, NovaPlacementNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ port:
+ get_param: [EndpointMap, NovaPlacementInternal, port]
+ protocol:
+ get_param: [EndpointMap, NovaPlacementInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPlacementInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, NovaPlacementNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, NovaPlacementInternal, port]
+ - /placement
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPlacementInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, NovaPlacementNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, NovaPlacementNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, NovaPlacementInternal, port]
+ NovaPlacementPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, NovaPlacementPublic, port]
+ protocol:
+ get_param: [EndpointMap, NovaPlacementPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPlacementPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, NovaPlacementPublic, port]
+ - /placement
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, NovaPlacementPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, NovaPlacementPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, NovaPlacementPublic, port]
NovaVNCProxyAdmin:
host:
str_replace:
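The NovaPlacement entries added in the hunk above are among the few whose uri carries a path component: list_join appends /placement after the port for uri, while uri_no_suffix stops at the port. A minimal sketch of that assembly in Python, with invented protocol, address and port values (the real ones come from the EndpointMap at deploy time):

def join_uri(protocol, host, port, suffix=""):
    # Mirrors list_join: ['', [protocol, '://', host, ':', port, suffix]].
    return "".join([protocol, "://", host, ":", str(port), suffix])

# Hypothetical example values, for illustration only.
print(join_uri("http", "192.0.2.10", 8778, "/placement"))  # uri
print(join_uri("http", "192.0.2.10", 8778))                # uri_no_suffix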
@@ -5320,6 +8301,249 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, NovaVNCProxyPublic, port]
+ OctaviaAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, OctaviaApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ port:
+ get_param: [EndpointMap, OctaviaAdmin, port]
+ protocol:
+ get_param: [EndpointMap, OctaviaAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, OctaviaAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, OctaviaApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, OctaviaAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, OctaviaAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, OctaviaApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, OctaviaAdmin, port]
+ OctaviaInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, OctaviaApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ port:
+ get_param: [EndpointMap, OctaviaInternal, port]
+ protocol:
+ get_param: [EndpointMap, OctaviaInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, OctaviaInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, OctaviaApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, OctaviaInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, OctaviaInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, OctaviaApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, OctaviaApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, OctaviaInternal, port]
+ OctaviaPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, OctaviaPublic, port]
+ protocol:
+ get_param: [EndpointMap, OctaviaPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, OctaviaPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, OctaviaPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, OctaviaPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, OctaviaPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, OctaviaPublic, port]
PankoAdmin:
host:
str_replace:
@@ -6297,6 +9521,249 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, SwiftPublic, port]
+ TackerAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, TackerAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, TackerApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, TackerApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, TackerAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, TackerApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, TackerApiNetwork]
+ port:
+ get_param: [EndpointMap, TackerAdmin, port]
+ protocol:
+ get_param: [EndpointMap, TackerAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, TackerAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, TackerAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, TackerApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, TackerApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, TackerAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, TackerAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, TackerAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, TackerApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, TackerApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, TackerAdmin, port]
+ TackerInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, TackerInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, TackerApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, TackerApiNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, TackerInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, TackerApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, TackerApiNetwork]
+ port:
+ get_param: [EndpointMap, TackerInternal, port]
+ protocol:
+ get_param: [EndpointMap, TackerInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, TackerInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, TackerInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, TackerApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, TackerApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, TackerInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, TackerInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, TackerInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, TackerApiNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, TackerApiNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, TackerInternal, port]
+ TackerPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, TackerPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, TackerPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, TackerPublic, port]
+ protocol:
+ get_param: [EndpointMap, TackerPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, TackerPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, TackerPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, TackerPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, TackerPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, TackerPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, TackerPublic, port]
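The TackerAdmin/TackerInternal/TackerPublic entries added above follow the same endpoint-map pattern as every other service: the host template has its CLOUDNAME or IP_ADDRESS placeholder substituted, and the uri is a list_join of protocol, '://', host, ':' and port. A minimal Python sketch of that composition follows; the endpoint, network and IP values in it are invented stand-ins for EndpointMap, ServiceNetMap, CloudEndpoints and NetIpMap data, not values taken from the templates.

    # Minimal sketch of how an endpoint entry such as TackerAdmin is assembled.
    # All values below are invented stand-ins for data resolved by Heat.
    endpoint = {"host": "IP_ADDRESS", "port": "9890", "protocol": "http"}
    service_net = "internal_api"                      # ServiceNetMap, TackerApiNetwork
    net_ip_map = {"internal_api_uri": "192.0.2.10"}   # NetIpMap, keyed by NETWORK_uri
    cloud_endpoints = {"internal_api": "overcloud.internalapi.example.com"}

    def resolve_host(template):
        # str_replace: swap the CLOUDNAME / IP_ADDRESS placeholders for real values
        return (template
                .replace("CLOUDNAME", cloud_endpoints[service_net])
                .replace("IP_ADDRESS", net_ip_map[service_net + "_uri"]))

    host = resolve_host(endpoint["host"])
    uri = "".join([endpoint["protocol"], "://", host, ":", endpoint["port"]])  # list_join
    print(uri)  # http://192.0.2.10:9890
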
ZaqarAdmin:
host:
str_replace:
diff --git a/network/external.yaml b/network/external.yaml
index 4dfbc77e..21260d3f 100644
--- a/network/external.yaml
+++ b/network/external.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc.
diff --git a/network/external_v6.yaml b/network/external_v6.yaml
index e0736ab7..51000bb7 100644
--- a/network/external_v6.yaml
+++ b/network/external_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc.
diff --git a/network/internal_api.yaml b/network/internal_api.yaml
index 090e38f7..793535c6 100644
--- a/network/internal_api.yaml
+++ b/network/internal_api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Internal API network. Used for most APIs, Database, RPC.
diff --git a/network/internal_api_v6.yaml b/network/internal_api_v6.yaml
index 19d64b0a..53950656 100644
--- a/network/internal_api_v6.yaml
+++ b/network/internal_api_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Internal API network. Used for most APIs, Database, RPC.
diff --git a/network/management.yaml b/network/management.yaml
index 6798e11e..77fcd4ea 100644
--- a/network/management.yaml
+++ b/network/management.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Management network. System administration, SSH, DNS, NTP, etc. This network
diff --git a/network/management_v6.yaml b/network/management_v6.yaml
index a5e70667..e1391ad2 100644
--- a/network/management_v6.yaml
+++ b/network/management_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Management network. System administration, SSH, DNS, NTP, etc. This network
diff --git a/network/networks.yaml b/network/networks.yaml
index d3ae482b..26033ee2 100644
--- a/network/networks.yaml
+++ b/network/networks.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Create networks to split out Overcloud traffic
diff --git a/network/ports/ctlplane_vip.yaml b/network/ports/ctlplane_vip.yaml
index 5ac7d344..0f21e3e8 100644
--- a/network/ports/ctlplane_vip.yaml
+++ b/network/ports/ctlplane_vip.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port for a VIP on the undercloud ctlplane network.
diff --git a/network/ports/external.yaml b/network/ports/external.yaml
index c4f815fb..c33643e7 100644
--- a/network/ports/external.yaml
+++ b/network/ports/external.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the external network. The IP address will be chosen
diff --git a/network/ports/external_from_pool.yaml b/network/ports/external_from_pool.yaml
index 867176e3..893b26d9 100644
--- a/network/ports/external_from_pool.yaml
+++ b/network/ports/external_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/external_from_pool_v6.yaml b/network/ports/external_from_pool_v6.yaml
index e541049d..c67789af 100644
--- a/network/ports/external_from_pool_v6.yaml
+++ b/network/ports/external_from_pool_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
diff --git a/network/ports/external_v6.yaml b/network/ports/external_v6.yaml
index bfe2686f..905974f5 100644
--- a/network/ports/external_v6.yaml
+++ b/network/ports/external_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the external network. The IP address will be chosen
diff --git a/network/ports/from_service.yaml b/network/ports/from_service.yaml
index 782b6b07..69a887ea 100644
--- a/network/ports/from_service.yaml
+++ b/network/ports/from_service.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Returns an IP from a service mapped list of IPs
diff --git a/network/ports/from_service_v6.yaml b/network/ports/from_service_v6.yaml
index 80060b57..c9673dd7 100644
--- a/network/ports/from_service_v6.yaml
+++ b/network/ports/from_service_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Returns an IP from a service mapped list of IPv6 IPs
diff --git a/network/ports/internal_api.yaml b/network/ports/internal_api.yaml
index 1d521a8d..1f96e3f2 100644
--- a/network/ports/internal_api.yaml
+++ b/network/ports/internal_api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the internal_api network.
diff --git a/network/ports/internal_api_from_pool.yaml b/network/ports/internal_api_from_pool.yaml
index d7b67e26..3f16f30c 100644
--- a/network/ports/internal_api_from_pool.yaml
+++ b/network/ports/internal_api_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/internal_api_from_pool_v6.yaml b/network/ports/internal_api_from_pool_v6.yaml
index afb144ba..b36ef235 100644
--- a/network/ports/internal_api_from_pool_v6.yaml
+++ b/network/ports/internal_api_from_pool_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
diff --git a/network/ports/internal_api_v6.yaml b/network/ports/internal_api_v6.yaml
index 14738b33..e236156d 100644
--- a/network/ports/internal_api_v6.yaml
+++ b/network/ports/internal_api_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the internal_api network.
diff --git a/network/ports/management.yaml b/network/ports/management.yaml
index 967b66e1..b626bc20 100644
--- a/network/ports/management.yaml
+++ b/network/ports/management.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the management network. The IP address will be chosen
diff --git a/network/ports/management_from_pool.yaml b/network/ports/management_from_pool.yaml
index 451677b2..05fedb90 100644
--- a/network/ports/management_from_pool.yaml
+++ b/network/ports/management_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/management_from_pool_v6.yaml b/network/ports/management_from_pool_v6.yaml
index 4c1cc216..64758bf9 100644
--- a/network/ports/management_from_pool_v6.yaml
+++ b/network/ports/management_from_pool_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
diff --git a/network/ports/management_v6.yaml b/network/ports/management_v6.yaml
index a94ebc7b..9e6a35b8 100644
--- a/network/ports/management_v6.yaml
+++ b/network/ports/management_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the management network. The IP address will be chosen
diff --git a/network/ports/net_ip_list_map.yaml b/network/ports/net_ip_list_map.yaml
index d7863e02..5782bbe9 100644
--- a/network/ports/net_ip_list_map.yaml
+++ b/network/ports/net_ip_list_map.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
parameters:
ControlPlaneIpList:
@@ -138,3 +138,20 @@ outputs:
SERVICE_short_node_names: {get_param: ServiceHostnameList}
for_each:
SERVICE: {get_attr: [EnabledServicesValue, value]}
+ short_service_bootstrap_hostnames:
+ description: >
+ Map of enabled services to a list of hostnames where they're running,
+ regardless of the network. Used for bootstrap purposes.
+ value:
+ yaql:
+ # If ServiceHostnameList is empty the role is deployed with zero nodes
+ # therefore we don't want to add any *_node_names to the map
+ expression: dict($.data.map.items().where(len($[1]) > 0))
+ data:
+ map:
+ map_merge:
+ repeat:
+ template:
+ SERVICE_short_bootstrap_node_name: {get_param: ServiceHostnameList}
+ for_each:
+ SERVICE: {get_attr: [EnabledServicesValue, value]}
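The short_service_bootstrap_hostnames output added here repeats ServiceHostnameList once per enabled service and then lets yaql drop entries whose hostname list is empty, so roles deployed with zero nodes contribute nothing. Roughly equivalent Python, using invented service names and an empty hostname list:

    # Rough Python equivalent of the repeat/map_merge pattern plus the yaql filter
    # dict($.data.map.items().where(len($[1]) > 0)); the inputs are invented.
    enabled_services = ["haproxy", "keystone"]
    service_hostname_list = []   # this role is deployed with zero nodes

    # repeat + map_merge: one key per enabled service, all pointing at the same list
    merged = {"%s_short_bootstrap_node_name" % svc: service_hostname_list
              for svc in enabled_services}

    # yaql .where(len($[1]) > 0): keep only services that actually have hostnames
    filtered = {key: names for key, names in merged.items() if len(names) > 0}
    print(filtered)  # {} -- nothing is merged into the all-nodes map for this role
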
diff --git a/network/ports/net_ip_map.yaml b/network/ports/net_ip_map.yaml
index fcf2eeee..c8cf733f 100644
--- a/network/ports/net_ip_map.yaml
+++ b/network/ports/net_ip_map.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
parameters:
ControlPlaneIp:
diff --git a/network/ports/net_vip_map_external.yaml b/network/ports/net_vip_map_external.yaml
index 71e6e811..58f96e65 100644
--- a/network/ports/net_vip_map_external.yaml
+++ b/network/ports/net_vip_map_external.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
parameters:
# Set these via parameter defaults to configure external VIPs
diff --git a/network/ports/net_vip_map_external_v6.yaml b/network/ports/net_vip_map_external_v6.yaml
index 8d054349..12db8d2d 100644
--- a/network/ports/net_vip_map_external_v6.yaml
+++ b/network/ports/net_vip_map_external_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
parameters:
# Set these via parameter defaults to configure external VIPs
diff --git a/network/ports/noop.yaml b/network/ports/noop.yaml
index 96c461e0..e2004cb0 100644
--- a/network/ports/noop.yaml
+++ b/network/ports/noop.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Returns the control plane port (provisioning network) as the ip_address.
diff --git a/network/ports/storage.yaml b/network/ports/storage.yaml
index 1ed5cca1..80400412 100644
--- a/network/ports/storage.yaml
+++ b/network/ports/storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the storage network.
diff --git a/network/ports/storage_from_pool.yaml b/network/ports/storage_from_pool.yaml
index 0a3d394c..dfab49ae 100644
--- a/network/ports/storage_from_pool.yaml
+++ b/network/ports/storage_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/storage_from_pool_v6.yaml b/network/ports/storage_from_pool_v6.yaml
index 18faf1bd..a6cde5fc 100644
--- a/network/ports/storage_from_pool_v6.yaml
+++ b/network/ports/storage_from_pool_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
diff --git a/network/ports/storage_mgmt.yaml b/network/ports/storage_mgmt.yaml
index 548d226a..b96fbd0e 100644
--- a/network/ports/storage_mgmt.yaml
+++ b/network/ports/storage_mgmt.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the storage_mgmt API network.
diff --git a/network/ports/storage_mgmt_from_pool.yaml b/network/ports/storage_mgmt_from_pool.yaml
index c3f0f4e2..6ec3dbae 100644
--- a/network/ports/storage_mgmt_from_pool.yaml
+++ b/network/ports/storage_mgmt_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/storage_mgmt_from_pool_v6.yaml b/network/ports/storage_mgmt_from_pool_v6.yaml
index e1145a31..2f3ea196 100644
--- a/network/ports/storage_mgmt_from_pool_v6.yaml
+++ b/network/ports/storage_mgmt_from_pool_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
diff --git a/network/ports/storage_mgmt_v6.yaml b/network/ports/storage_mgmt_v6.yaml
index 9db66964..01e4c31a 100644
--- a/network/ports/storage_mgmt_v6.yaml
+++ b/network/ports/storage_mgmt_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the storage_mgmt API network.
diff --git a/network/ports/storage_v6.yaml b/network/ports/storage_v6.yaml
index adf3595a..1dd76199 100644
--- a/network/ports/storage_v6.yaml
+++ b/network/ports/storage_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the storage network.
diff --git a/network/ports/tenant.yaml b/network/ports/tenant.yaml
index d8f78c49..f6929b81 100644
--- a/network/ports/tenant.yaml
+++ b/network/ports/tenant.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the tenant network.
diff --git a/network/ports/tenant_from_pool.yaml b/network/ports/tenant_from_pool.yaml
index d5fd7080..c72b2278 100644
--- a/network/ports/tenant_from_pool.yaml
+++ b/network/ports/tenant_from_pool.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/tenant_from_pool_v6.yaml b/network/ports/tenant_from_pool_v6.yaml
index d4f0d29c..bc056fa6 100644
--- a/network/ports/tenant_from_pool_v6.yaml
+++ b/network/ports/tenant_from_pool_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Returns an IP from a network mapped list of IPs
diff --git a/network/ports/tenant_v6.yaml b/network/ports/tenant_v6.yaml
index 21ba1efa..84101828 100644
--- a/network/ports/tenant_v6.yaml
+++ b/network/ports/tenant_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port on the tenant network.
diff --git a/network/ports/vip.yaml b/network/ports/vip.yaml
index 38322907..d996d03d 100644
--- a/network/ports/vip.yaml
+++ b/network/ports/vip.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port for a VIP on the isolated network NetworkName.
diff --git a/network/ports/vip_v6.yaml b/network/ports/vip_v6.yaml
index 498e5d69..7a45756c 100644
--- a/network/ports/vip_v6.yaml
+++ b/network/ports/vip_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: >
Creates a port for a VIP on the isolated network NetworkName.
diff --git a/network/scripts/run-os-net-config.sh b/network/scripts/run-os-net-config.sh
index fc1e6d54..8fe2d270 100755
--- a/network/scripts/run-os-net-config.sh
+++ b/network/scripts/run-os-net-config.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-# Note this script expects the following environment variables to be set
-# normally these are provided by the calling SoftwareConfig resource, but
-# they may also be set manually for testing
+# The following environment variables may be set to substitute in a
+# custom bridge or interface name. Normally these are provided by the calling
+# SoftwareConfig resource, but they may also be set manually for testing.
# $bridge_name : The bridge device name to apply
# $interface_name : The interface name to apply
#
@@ -10,7 +10,7 @@
# a deployment input via input_values
# $network_config : the json serialized os-net-config config to apply
#
-set -ux
+set -eux
function get_metadata_ip() {
@@ -98,8 +98,10 @@ EOF_CAT
fi
fi
done
+ set +e
os-net-config -c /etc/os-net-config/dhcp_all_interfaces.yaml -v --detailed-exit-codes --cleanup
RETVAL=$?
+ set -e
if [[ $RETVAL == 2 ]]; then
ping_metadata_ip
elif [[ $RETVAL != 0 ]]; then
@@ -108,16 +110,26 @@ EOF_CAT
}
if [ -n '$network_config' ]; then
- trap configure_safe_defaults EXIT
+ if [ -z "${disable_configure_safe_defaults:-''}" ]; then
+ trap configure_safe_defaults EXIT
+ fi
mkdir -p /etc/os-net-config
# Note these variables come from the calling heat SoftwareConfig
echo '$network_config' > /etc/os-net-config/config.json
- sed -i "s/bridge_name/$bridge_name/" /etc/os-net-config/config.json
- sed -i "s/interface_name/$interface_name/" /etc/os-net-config/config.json
+ if [ "$(type -t network_config_hook)" = "function" ]; then
+ network_config_hook
+ fi
+
+ sed -i "s/bridge_name/${bridge_name:-''}/" /etc/os-net-config/config.json
+ sed -i "s/interface_name/${interface_name:-''}/" /etc/os-net-config/config.json
+
+ set +e
os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
RETVAL=$?
+ set -e
+
if [[ $RETVAL == 2 ]]; then
ping_metadata_ip
diff --git a/network/service_net_map.j2.yaml b/network/service_net_map.j2.yaml
index 0cb6571f..a1042ebb 100644
--- a/network/service_net_map.j2.yaml
+++ b/network/service_net_map.j2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Mapping of service_name_network -> network name
@@ -21,9 +21,19 @@ parameters:
# snake_case - the names must still match when converted
ServiceNetMapDefaults:
default:
+ # Note the values in this map are replaced by *NetName
+ # to allow for sane defaults when the network names are
+ # overridden.
ApacheNetwork: internal_api
NeutronTenantNetwork: tenant
CeilometerApiNetwork: internal_api
+ ContrailAnalyticsNetwork: internal_api
+ ContrailAnalyticsDatabaseNetwork: internal_api
+ ContrailConfigNetwork: internal_api
+ ContrailControlNetwork: internal_api
+ ContrailDatabaseNetwork: internal_api
+ ContrailWebuiNetwork: internal_api
+ ContrailTsnNetwork: internal_api
AodhApiNetwork: internal_api
PankoApiNetwork: internal_api
BarbicanApiNetwork: internal_api
@@ -31,20 +41,26 @@ parameters:
MongodbNetwork: internal_api
CinderApiNetwork: internal_api
CinderIscsiNetwork: storage
+ CongressApiNetwork: internal_api
GlanceApiNetwork: storage
- GlanceRegistryNetwork: internal_api
IronicApiNetwork: ctlplane
IronicNetwork: ctlplane
KeystoneAdminApiNetwork: ctlplane # allows undercloud to config endpoints
KeystonePublicApiNetwork: internal_api
ManilaApiNetwork: internal_api
NeutronApiNetwork: internal_api
+ OctaviaApiNetwork: internal_api
HeatApiNetwork: internal_api
HeatApiCfnNetwork: internal_api
HeatApiCloudwatchNetwork: internal_api
NovaApiNetwork: internal_api
+ NovaPlacementNetwork: internal_api
NovaMetadataNetwork: internal_api
NovaVncProxyNetwork: internal_api
+ NovaLibvirtNetwork: internal_api
+ Ec2ApiNetwork: internal_api
+ Ec2ApiMetadataNetwork: internal_api
+ TackerApiNetwork: internal_api
SwiftStorageNetwork: storage_mgmt
SwiftProxyNetwork: storage
SaharaApiNetwork: internal_api
@@ -58,10 +74,14 @@ parameters:
CephRgwNetwork: storage
PublicNetwork: external
OpendaylightApiNetwork: internal_api
+ OvnDbsNetwork: internal_api
MistralApiNetwork: internal_api
+ ZaqarApiNetwork: internal_api
+ PacemakerRemoteNetwork: internal_api
# We special-case the default ResolveNetwork for the CephStorage role
# for backwards compatibility, all other roles default to internal_api
CephStorageHostnameResolveNetwork: storage
+ EtcdNetwork: internal_api
{% for role in roles if role.name != 'CephStorage' %}
{{role.name}}HostnameResolveNetwork: internal_api
{% endfor %}
@@ -81,20 +101,62 @@ parameters:
internal use only, this will be removed in future.
type: json
+ InternalApiNetName:
+ default: internal_api
+ description: The name of the internal API network.
+ type: string
+ ExternalNetName:
+ default: external
+ description: The name of the external network.
+ type: string
+ ManagementNetName:
+ default: management
+ description: The name of the management network.
+ type: string
+ StorageNetName:
+ default: storage
+ description: The name of the storage network.
+ type: string
+ StorageMgmtNetName:
+ default: storage_mgmt
+ description: The name of the storage management network.
+ type: string
+ TenantNetName:
+ default: tenant
+ description: The name of the tenant network.
+ type: string
+
+
parameter_groups:
- label: deprecated
description: Do not use deprecated params, they will be removed.
parameters:
- ServiceNetMapDeprecatedMapping
+resources:
+ ServiceNetMapValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_merge:
+ - map_replace:
+ - {get_param: ServiceNetMapDefaults}
+ - values:
+ external: {get_param: ExternalNetName}
+ internal_api: {get_param: InternalApiNetName}
+ storage: {get_param: StorageNetName}
+ storage_mgmt: {get_param: StorageMgmtNetName}
+ tenant: {get_param: TenantNetName}
+ management: {get_param: ManagementNetName}
+ - map_replace:
+ - {get_param: ServiceNetMap}
+ - keys: {get_param: ServiceNetMapDeprecatedMapping}
+
+
outputs:
service_net_map:
- value:
- map_merge:
- - {get_param: ServiceNetMapDefaults}
- - map_replace:
- - {get_param: ServiceNetMap}
- - keys: {get_param: ServiceNetMapDeprecatedMapping}
+ value: {get_attr: [ServiceNetMapValue, value]}
service_net_map_lower:
value:
@@ -104,9 +166,4 @@ outputs:
yaql:
expression: dict($.data.map.items().select([ regex(`([a-z0-9])([A-Z])`).replace($[0], '\\1_\\2').toLower(), $[1]]))
data:
- map:
- map_merge:
- - {get_param: ServiceNetMapDefaults}
- - map_replace:
- - {get_param: ServiceNetMap}
- - keys: {get_param: ServiceNetMapDeprecatedMapping}
+ map: {get_attr: [ServiceNetMapValue, value]}
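The new ServiceNetMapValue resource resolves the map in two passes: map_replace rewrites the default network names to any *NetName override, then map_merge layers the operator-supplied ServiceNetMap (after translating deprecated keys) on top. The lowercased output applies the yaql regex to turn CamelCase keys into snake_case. A rough Python model with invented sample maps:

    # Rough Python model of the map_replace/map_merge pipeline and the yaql
    # snake_case conversion; the sample maps below are invented.
    import re

    service_net_map_defaults = {"ApacheNetwork": "internal_api", "GlanceApiNetwork": "storage"}
    net_name_overrides = {"internal_api": "internal_api_cloud1", "storage": "storage_cloud1"}
    operator_service_net_map = {"GlanceApiNetwork": "storage_mgmt"}  # operator override

    # map_replace (values): rewrite default network names to their overridden names
    replaced = {k: net_name_overrides.get(v, v) for k, v in service_net_map_defaults.items()}

    # map_merge: operator-supplied entries win over the (replaced) defaults
    service_net_map = {**replaced, **operator_service_net_map}

    # service_net_map_lower: CamelCase -> snake_case, as in the yaql expression above
    lower = {re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", k).lower(): v
             for k, v in service_net_map.items()}
    print(lower)  # {'apache_network': 'internal_api_cloud1', 'glance_api_network': 'storage_mgmt'}
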
diff --git a/network/storage.yaml b/network/storage.yaml
index 35dae17a..0a704ea3 100644
--- a/network/storage.yaml
+++ b/network/storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Storage network.
diff --git a/network/storage_mgmt.yaml b/network/storage_mgmt.yaml
index 03cfd139..c7117165 100644
--- a/network/storage_mgmt.yaml
+++ b/network/storage_mgmt.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Storage management network. Storage replication, etc.
diff --git a/network/storage_mgmt_v6.yaml b/network/storage_mgmt_v6.yaml
index 39c456db..2b065195 100644
--- a/network/storage_mgmt_v6.yaml
+++ b/network/storage_mgmt_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Storage management network. Storage replication, etc.
diff --git a/network/storage_v6.yaml b/network/storage_v6.yaml
index 5c8af9e5..777e6167 100644
--- a/network/storage_v6.yaml
+++ b/network/storage_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Storage network.
diff --git a/network/tenant.yaml b/network/tenant.yaml
index 1045b81b..33055fe8 100644
--- a/network/tenant.yaml
+++ b/network/tenant.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Tenant network.
diff --git a/network/tenant_v6.yaml b/network/tenant_v6.yaml
index bf758a50..0bf5d2f0 100644
--- a/network/tenant_v6.yaml
+++ b/network/tenant_v6.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Tenant IPv6 network.
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 77a48658..1360d0be 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -2,6 +2,7 @@ resource_registry:
OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment
OS::TripleO::PostDeploySteps: puppet/post.yaml
+ OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
OS::TripleO::DefaultPasswords: default_passwords.yaml
@@ -10,7 +11,11 @@ resource_registry:
OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
+ OS::TripleO::Tasks::SwiftRingDeploy: extraconfig/tasks/swift-ring-deploy.yaml
+ OS::TripleO::Tasks::SwiftRingUpdate: extraconfig/tasks/swift-ring-update.yaml
+
{% for role in roles %}
+ OS::TripleO::{{role.name}}::PreNetworkConfig: OS::Heat::None
OS::TripleO::{{role.name}}PostDeploySteps: puppet/post.yaml
OS::TripleO::{{role.name}}: puppet/{{role.name.lower()}}-role.yaml
OS::TripleO::{{role.name}}Config: puppet/{{role.name.lower()}}-config.yaml
@@ -41,6 +46,8 @@ resource_registry:
# in the jinja loop
OS::TripleO::Controller::Net::SoftwareConfig: net-config-bridge.yaml
+ OS::TripleO::ServiceServerMetadataHook: OS::Heat::None
+
OS::TripleO::Server: OS::Nova::Server
# This creates the "heat-admin" user for all OS images by default
@@ -49,17 +56,23 @@ resource_registry:
# Hooks for operator extra config
# NodeUserData == Cloud-init additional user-data, e.g cloud-config
+ # role::NodeUserData == Role specific cloud-init additional user-data
# ControllerExtraConfigPre == Controller configuration pre service deployment
# NodeExtraConfig == All nodes configuration pre service deployment
# NodeExtraConfigPost == All nodes configuration post service deployment
OS::TripleO::NodeUserData: firstboot/userdata_default.yaml
+{% for role in roles %}
+ OS::TripleO::{{role.name}}::NodeUserData: firstboot/userdata_default.yaml
+{% endfor %}
OS::TripleO::NodeTLSCAData: OS::Heat::None
OS::TripleO::NodeTLSData: OS::Heat::None
OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml
OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
- OS::TripleO::Tasks::ControllerPrePuppet: OS::Heat::None
- OS::TripleO::Tasks::ControllerPostPuppet: OS::Heat::None
+{% for role in roles %}
+ OS::TripleO::Tasks::{{role.name}}PrePuppet: OS::Heat::None
+ OS::TripleO::Tasks::{{role.name}}PostPuppet: OS::Heat::None
+{% endfor %}
# "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
# phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
@@ -90,6 +103,7 @@ resource_registry:
OS::TripleO::Network::Ports::StorageVipPort: network/ports/noop.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: network/ports/noop.yaml
OS::TripleO::Network::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
+ OS::TripleO::Network::Ports::ControlPlaneVipPort: OS::Neutron::Port
# Service to network Mappings
OS::TripleO::ServiceNetMap: network/service_net_map.yaml
@@ -102,14 +116,15 @@ resource_registry:
# Upgrade resources
OS::TripleO::UpgradeConfig: puppet/upgrade_config.yaml
- OS::TripleO::UpgradeSteps: OS::Heat::None
# services
OS::TripleO::Services: puppet/services/services.yaml
OS::TripleO::Services::Apache: puppet/services/apache.yaml
OS::TripleO::Services::ApacheTLS: OS::Heat::None
OS::TripleO::Services::CACerts: puppet/services/ca-certs.yaml
+ OS::TripleO::Services::CephMds: OS::Heat::None
OS::TripleO::Services::CephMon: OS::Heat::None
+ OS::TripleO::Services::CephRbdMirror: OS::Heat::None
OS::TripleO::Services::CephRgw: OS::Heat::None
OS::TripleO::Services::CephOSD: OS::Heat::None
OS::TripleO::Services::CephClient: OS::Heat::None
@@ -119,10 +134,10 @@ resource_registry:
OS::TripleO::Services::CinderScheduler: puppet/services/cinder-scheduler.yaml
OS::TripleO::Services::CinderVolume: puppet/services/cinder-volume.yaml
OS::TripleO::Services::BlockStorageCinderVolume: puppet/services/cinder-volume.yaml
- OS::TripleO::Services::Core: OS::Heat::None
+ OS::TripleO::Services::Congress: OS::Heat::None
OS::TripleO::Services::Keystone: puppet/services/keystone.yaml
OS::TripleO::Services::GlanceApi: puppet/services/glance-api.yaml
- OS::TripleO::Services::GlanceRegistry: puppet/services/glance-registry.yaml
+ OS::TripleO::Services::GlanceRegistry: puppet/services/disabled/glance-registry.yaml
OS::TripleO::Services::HeatApi: puppet/services/heat-api.yaml
OS::TripleO::Services::HeatApiCfn: puppet/services/heat-api-cfn.yaml
OS::TripleO::Services::HeatApiCloudwatch: puppet/services/heat-api-cloudwatch.yaml
@@ -145,11 +160,13 @@ resource_registry:
OS::TripleO::Services::NeutronCorePluginML2OVN: puppet/services/neutron-plugin-ml2-ovn.yaml
OS::TripleO::Services::NeutronCorePluginPlumgrid: puppet/services/neutron-plugin-plumgrid.yaml
OS::TripleO::Services::NeutronCorePluginNuage: puppet/services/neutron-plugin-nuage.yaml
- OS::TripleO::Services::NeutronCorePluginOpencontrail: puppet/services/neutron-plugin-opencontrail.yaml
+ OS::TripleO::Services::OVNDBs: OS::Heat::None
+
OS::TripleO::Services::NeutronCorePluginMidonet: puppet/services/neutron-midonet.yaml
OS::TripleO::Services::NeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml
OS::TripleO::Services::ComputeNeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml
OS::TripleO::Services::Pacemaker: OS::Heat::None
+ OS::TripleO::Services::PacemakerRemote: OS::Heat::None
OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None
OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml
OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml
@@ -159,10 +176,12 @@ resource_registry:
OS::TripleO::Services::Memcached: puppet/services/memcached.yaml
OS::TripleO::Services::SaharaApi: OS::Heat::None
OS::TripleO::Services::SaharaEngine: OS::Heat::None
+ OS::TripleO::Services::Sshd: OS::Heat::None
OS::TripleO::Services::Redis: puppet/services/database/redis.yaml
OS::TripleO::Services::NovaConductor: puppet/services/nova-conductor.yaml
OS::TripleO::Services::MongoDb: puppet/services/database/mongodb.yaml
OS::TripleO::Services::NovaApi: puppet/services/nova-api.yaml
+ OS::TripleO::Services::NovaPlacement: puppet/services/nova-placement.yaml
OS::TripleO::Services::NovaMetadata: puppet/services/nova-metadata.yaml
OS::TripleO::Services::NovaScheduler: puppet/services/nova-scheduler.yaml
OS::TripleO::Services::NovaConsoleauth: puppet/services/nova-consoleauth.yaml
@@ -174,6 +193,7 @@ resource_registry:
OS::TripleO::Services::SwiftStorage: puppet/services/swift-storage.yaml
OS::TripleO::Services::SwiftRingBuilder: puppet/services/swift-ringbuilder.yaml
OS::TripleO::Services::Snmp: puppet/services/snmp.yaml
+ OS::TripleO::Services::Tacker: OS::Heat::None
OS::TripleO::Services::Timezone: puppet/services/time/timezone.yaml
OS::TripleO::Services::CeilometerApi: puppet/services/ceilometer-api.yaml
OS::TripleO::Services::CeilometerCollector: puppet/services/ceilometer-collector.yaml
@@ -188,6 +208,7 @@ resource_registry:
OS::TripleO::Services::GnocchiStatsd: puppet/services/gnocchi-statsd.yaml
# Services that are disabled by default (use relevant environment files):
OS::TripleO::Services::FluentdClient: OS::Heat::None
+ OS::TripleO::Services::Collectd: OS::Heat::None
OS::TripleO::LoggingConfiguration: puppet/services/logging/fluentd-config.yaml
OS::TripleO::Services::ManilaApi: OS::Heat::None
OS::TripleO::Services::ManilaScheduler: OS::Heat::None
@@ -202,7 +223,7 @@ resource_registry:
OS::TripleO::Services::AodhEvaluator: puppet/services/aodh-evaluator.yaml
OS::TripleO::Services::AodhNotifier: puppet/services/aodh-notifier.yaml
OS::TripleO::Services::AodhListener: puppet/services/aodh-listener.yaml
- OS::TripleO::Services::PankoApi: OS::Heat::None
+ OS::TripleO::Services::PankoApi: puppet/services/panko-api.yaml
OS::TripleO::Services::MistralEngine: OS::Heat::None
OS::TripleO::Services::MistralApi: OS::Heat::None
OS::TripleO::Services::MistralExecutor: OS::Heat::None
@@ -214,12 +235,19 @@ resource_registry:
OS::TripleO::Services::OpenDaylightApi: OS::Heat::None
OS::TripleO::Services::OpenDaylightOvs: OS::Heat::None
OS::TripleO::Services::SensuClient: OS::Heat::None
- OS::TripleO::Services::ContrailAnalytics: puppet/services/network/contrail-analytics.yaml
- OS::TripleO::Services::ContrailConfig: puppet/services/network/contrail-config.yaml
- OS::TripleO::Services::ContrailControl: puppet/services/network/contrail-control.yaml
- OS::TripleO::Services::ContrailDatabase: puppet/services/network/contrail-database.yaml
- OS::TripleO::Services::ContrailWebui: puppet/services/network/contrail-webui.yaml
+ OS::TripleO::Services::TLSProxyBase: OS::Heat::None
OS::TripleO::Services::Zaqar: OS::Heat::None
+ OS::TripleO::Services::NeutronML2FujitsuCfab: OS::Heat::None
+ OS::TripleO::Services::NeutronML2FujitsuFossw: OS::Heat::None
+ OS::TripleO::Services::CinderHPELeftHandISCSI: OS::Heat::None
+ OS::TripleO::Services::Etcd: OS::Heat::None
+ OS::TripleO::Services::Ec2Api: OS::Heat::None
+ OS::TripleO::Services::AuditD: OS::Heat::None
+ OS::TripleO::Services::OctaviaApi: OS::Heat::None
+ OS::TripleO::Services::OctaviaHealthManager: OS::Heat::None
+ OS::TripleO::Services::OctaviaHousekeeping: OS::Heat::None
+ OS::TripleO::Services::OctaviaWorker: OS::Heat::None
+ OS::TripleO::Services::MySQLClient: puppet/services/database/mysql-client.yaml
parameter_defaults:
EnablePackageInstall: false
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index 39a092b1..e9447b94 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -1,4 +1,5 @@
-heat_template_version: 2016-10-14
+{% set primary_role_name = roles[0].name -%}
+heat_template_version: ocata
description: >
Deploy an OpenStack environment, consisting of several node types (roles),
@@ -254,6 +255,18 @@ resources:
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
+ # Filter any null/None service_names which may be present due to mapping
+ # of services to OS::Heat::None
+ {{role.name}}ServiceNames:
+ type: OS::Heat::Value
+ depends_on: {{role.name}}ServiceChain
+ properties:
+ type: comma_delimited_list
+ value:
+ yaql:
+ expression: coalesce($.data, []).where($ != null)
+ data: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+
{{role.name}}HostsDeployment:
type: OS::Heat::StructuredDeployments
properties:
@@ -304,7 +317,7 @@ resources:
StorageMgmtIpList: {get_attr: [{{role.name}}, storage_mgmt_ip_address]}
TenantIpList: {get_attr: [{{role.name}}, tenant_ip_address]}
ManagementIpList: {get_attr: [{{role.name}}, management_ip_address]}
- EnabledServices: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ EnabledServices: {get_attr: [{{role.name}}ServiceNames, value]}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
ServiceHostnameList: {get_attr: [{{role.name}}, hostname]}
NetworkHostnameMap:
@@ -360,9 +373,10 @@ resources:
{% for r in roles %}
- get_attr: [{{r.name}}ServiceChain, role_data, service_config_settings]
{% endfor %}
- services: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
- ServiceNames: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ services: {get_attr: [{{role.name}}ServiceNames, value]}
+ ServiceNames: {get_attr: [{{role.name}}ServiceNames, value]}
MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChain, role_data, monitoring_subscriptions]}
+ ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChain, role_data, service_metadata_settings]}
{% endfor %}
hostsConfig:
@@ -394,7 +408,7 @@ resources:
list_join:
- ','
{% for role in roles %}
- - {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ - {get_attr: [{{role.name}}ServiceNames, value]}
{% endfor %}
logging_groups:
yaql:
@@ -414,8 +428,8 @@ resources:
{% for role in roles %}
- {get_attr: [{{role.name}}ServiceChain, role_data, logging_sources]}
{% endfor %}
- controller_ips: {get_attr: [Controller, ip_address]}
- controller_names: {get_attr: [Controller, hostname]}
+ controller_ips: {get_attr: [{{primary_role_name}}, ip_address]}
+ controller_names: {get_attr: [{{primary_role_name}}, hostname]}
service_ips:
# Note (shardy) this somewhat complex yaql may be replaced
# with a map_deep_merge function in ocata. It merges the
@@ -444,8 +458,16 @@ resources:
{% for role in roles %}
- {get_attr: [{{role.name}}IpListMap, short_service_hostnames]}
{% endfor %}
+ short_service_bootstrap_node:
+ yaql:
+ expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten().first()]))
+ data:
+ l:
+{% for role in roles %}
+ - {get_attr: [{{role.name}}IpListMap, short_service_bootstrap_hostnames]}
+{% endfor %}
# FIXME(shardy): These require further work to move into service_ips
- memcache_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]}
+ memcache_node_ips: {get_attr: [{{primary_role_name}}IpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]}
NetVipMap: {get_attr: [VipMap, net_ip_map]}
RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
@@ -477,7 +499,7 @@ resources:
type: OS::TripleO::Network
ControlVirtualIP:
- type: OS::Neutron::Port
+ type: OS::TripleO::Network::Ports::ControlPlaneVipPort
depends_on: Networks
properties:
name: control_virtual_ip
@@ -551,12 +573,12 @@ resources:
PingTestIps:
list_join:
- ' '
- - - {get_attr: [Controller, resource.0.external_ip_address]}
- - {get_attr: [Controller, resource.0.internal_api_ip_address]}
- - {get_attr: [Controller, resource.0.storage_ip_address]}
- - {get_attr: [Controller, resource.0.storage_mgmt_ip_address]}
- - {get_attr: [Controller, resource.0.tenant_ip_address]}
- - {get_attr: [Controller, resource.0.management_ip_address]}
+ - - {get_attr: [{{primary_role_name}}, resource.0.external_ip_address]}
+ - {get_attr: [{{primary_role_name}}, resource.0.internal_api_ip_address]}
+ - {get_attr: [{{primary_role_name}}, resource.0.storage_ip_address]}
+ - {get_attr: [{{primary_role_name}}, resource.0.storage_mgmt_ip_address]}
+ - {get_attr: [{{primary_role_name}}, resource.0.tenant_ip_address]}
+ - {get_attr: [{{primary_role_name}}, resource.0.management_ip_address]}
UpdateWorkflow:
type: OS::TripleO::Tasks::UpdateWorkflow
@@ -583,8 +605,9 @@ resources:
- {{role.name}}AllNodesValidationDeployment
{% endfor %}
properties:
+ servers:
{% for role in roles %}
- servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}
# Post deployment steps for all roles
@@ -599,26 +622,12 @@ resources:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}
+ EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
role_data:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
{% endfor %}
- # Upgrade steps for all roles
- AllNodesUpgradeSteps:
- type: OS::TripleO::UpgradeSteps
- depends_on: AllNodesDeploySteps
- properties:
- servers:
-{% for role in roles %}
- {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
-{% endfor %}
- role_data:
-{% for role in roles %}
- {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
-{% endfor %}
-
-
outputs:
ManagedEndpoints:
description: Asserts that the keystone endpoints have been provisioned.
@@ -649,7 +658,7 @@ outputs:
description: The services enabled on each role
value:
{% for role in roles %}
- {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ {{role.name}}: {get_attr: [{{role.name}}ServiceNames, value]}
{% endfor %}
RoleData:
description: The configuration data associated with each role
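Two of the yaql expressions introduced above are easy to misread. The per-role {{role.name}}ServiceNames value strips the null entries that appear when a service is mapped to OS::Heat::None, and short_service_bootstrap_node reduces the per-role bootstrap maps to a single hostname per service. A rough Python rendering with invented role data:

    # Rough Python rendering of the two yaql expressions above; the role and
    # service data is invented for illustration.

    # coalesce($.data, []).where($ != null): drop nulls from service_names
    role_service_names = ["keystone", None, "haproxy", None]
    service_names = [name for name in (role_service_names or []) if name is not None]

    # groupBy(...): merge the per-role maps, keeping the first hostname listed
    per_role_maps = [
        {"keystone_short_bootstrap_node_name": ["controller-0", "controller-1"]},
        {"haproxy_short_bootstrap_node_name": ["controller-0"]},
        None,  # a role that contributes nothing
    ]
    bootstrap_node = {}
    for role_map in per_role_maps:
        for service, hostnames in (role_map or {}).items():
            bootstrap_node.setdefault(service, hostnames[0])

    print(service_names)   # ['keystone', 'haproxy']
    print(bootstrap_node)  # first listed node per service key
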
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 0a8bec6e..ee43c3a5 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'All Nodes Config for Puppet'
parameters:
@@ -28,6 +28,8 @@ parameters:
type: json
short_service_node_names:
type: json
+ short_service_bootstrap_node:
+ type: json
controller_names:
type: comma_delimited_list
memcache_node_ips:
@@ -125,6 +127,7 @@ resources:
- {get_param: service_ips}
- {get_param: service_node_names}
- {get_param: short_service_node_names}
+ - {get_param: short_service_bootstrap_node}
- controller_node_ips:
list_join:
- ','
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index c9bf894f..a5218dbe 100644
--- a/puppet/blockstorage-role.yaml
+++ b/puppet/blockstorage-role.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'OpenStack cinder storage configured by Puppet'
parameters:
BlockStorageImage:
@@ -71,11 +71,20 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ BlockStorageServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
BlockStorageSchedulerHints:
type: json
@@ -93,10 +102,27 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
+ UpgradeInitCommonCommand:
+ type: string
+ description: |
+ Common commands required by the upgrade process. This should not
+ normally be modified by the operator and is set and unset in the
+ major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
+ environment files.
+ default: ''
resources:
BlockStorage:
@@ -118,7 +144,11 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: BlockStorageServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: BlockStorageSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -130,6 +160,8 @@ resources:
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
+ - config: {get_resource: RoleUserData}
+ type: multipart
# Creates the "heat-admin" user if configured via the environment
# Should return a OS::Heat::MultipartMime reference via OS::stack_id
@@ -141,6 +173,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ # For optional operator role-specific userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ RoleUserData:
+ type: OS::TripleO::BlockStorage::NodeUserData
+
ExternalPort:
type: OS::TripleO::BlockStorage::Ports::ExternalPort
properties:
@@ -217,17 +254,135 @@ resources:
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+ NetHostMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ external:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - external
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - external
+ internal_api:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - internalapi
+ storage:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - storage
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - storage
+ storage_mgmt:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - storagemgmt
+ tenant:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - tenant
+ management:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - management
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - management
+ ctlplane:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [BlockStorage, name]}
+ - ctlplane
+
+ PreNetworkConfig:
+ type: OS::TripleO::BlockStorage::PreNetworkConfig
+ properties:
+ server: {get_resource: BlockStorage}
+
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: BlockStorage}
actions: {get_param: NetworkDeploymentActions}
+ BlockStorageUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+ - get_param: UpgradeInitCommonCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ BlockStorageUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: BlockStorageUpgradeInitDeployment
+ server: {get_resource: BlockStorage}
+ config: {get_resource: BlockStorageUpgradeInitConfig}
+
BlockStorageDeployment:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: BlockStorageUpgradeInitDeployment
properties:
name: BlockStorageDeployment
server: {get_resource: BlockStorage}
@@ -266,42 +421,12 @@ resources:
extraconfig: {get_param: ExtraConfig}
volume:
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
- fqdn_internal_api:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - internalapi
- - {get_param: CloudDomain}
- fqdn_storage:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - storage
- - {get_param: CloudDomain}
- fqdn_storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- fqdn_tenant:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - tenant
- - {get_param: CloudDomain}
- fqdn_management:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - management
- - {get_param: CloudDomain}
- fqdn_ctlplane:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ fqdn_internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ fqdn_storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ fqdn_storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
+ fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -341,48 +466,13 @@ outputs:
hostname_map:
description: Mapping of network names to hostnames
value:
- external:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - external
- - {get_param: CloudDomain}
- internal_api:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - internalapi
- - {get_param: CloudDomain}
- storage:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - storage
- - {get_param: CloudDomain}
- storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- tenant:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - tenant
- - {get_param: CloudDomain}
- management:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - management
- - {get_param: CloudDomain}
- ctlplane:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ external: {get_attr: [NetHostMap, value, external, fqdn]}
+ internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ management: {get_attr: [NetHostMap, value, management, fqdn]}
+ ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
hosts_entry:
value:
str_replace:
@@ -400,47 +490,19 @@ outputs:
DOMAIN: {get_param: CloudDomain}
PRIMARYHOST: {get_attr: [BlockStorage, name]}
EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
- EXTERNALHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - external
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
- INTERNAL_APIHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - internalapi
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
STORAGEIP: {get_attr: [StoragePort, ip_address]}
- STORAGEHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - storage
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
- STORAGE_MGMTHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - storagemgmt
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
TENANTIP: {get_attr: [TenantPort, ip_address]}
- TENANTHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - tenant
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
- MANAGEMENTHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - management
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
- CTLPLANEHOST:
- list_join:
- - '.'
- - - {get_attr: [BlockStorage, name]}
- - ctlplane
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
nova_server_resource:
description: Heat resource handle for the block storage server
value:
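The role template now assembles the Nova server metadata with map_merge, so role-specific metadata and any ServiceMetadataSettings are layered over the global ServerMetadata, and all per-network host names are computed once in the NetHostMap value. A small Python sketch of both, with invented metadata and host names:

    # Sketch of the metadata map_merge above (later maps win) and of the
    # fqdn/short pairs held in NetHostMap; all values below are invented.
    server_metadata = {"owner": "cloud-team", "tier": "default"}
    block_storage_server_metadata = {"tier": "block-storage"}
    service_metadata_settings = {"managed_by": "tripleo"}

    merged = {**server_metadata,
              **block_storage_server_metadata,
              **service_metadata_settings}
    print(merged)  # {'owner': 'cloud-team', 'tier': 'block-storage', 'managed_by': 'tripleo'}

    name, domain = "overcloud-blockstorage-0", "localdomain"
    net_host_map = {net: {"fqdn": "%s.%s.%s" % (name, suffix, domain),
                          "short": "%s.%s" % (name, suffix)}
                    for net, suffix in [("internal_api", "internalapi"),
                                        ("storage", "storage"),
                                        ("ctlplane", "ctlplane")]}
    print(net_host_map["internal_api"]["fqdn"])  # overcloud-blockstorage-0.internalapi.localdomain
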
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index 18787a21..0867e17f 100644
--- a/puppet/cephstorage-role.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'OpenStack ceph storage node configured by Puppet'
parameters:
OvercloudCephStorageFlavor:
@@ -77,11 +77,20 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ CephStorageServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
CephStorageSchedulerHints:
type: json
@@ -99,10 +108,27 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
+ UpgradeInitCommonCommand:
+ type: string
+ description: |
+ Common commands required by the upgrade process. This should not
+ normally be modified by the operator and is set and unset in the
+ major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
+ environment files.
+ default: ''
resources:
CephStorage:
@@ -124,7 +150,11 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: CephStorageServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: CephStorageSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -136,6 +166,8 @@ resources:
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
+ - config: {get_resource: RoleUserData}
+ type: multipart
# Creates the "heat-admin" user if configured via the environment
# Should return a OS::Heat::MultipartMime reference via OS::stack_id
@@ -147,6 +179,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ # For optional operator role-specific userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ RoleUserData:
+ type: OS::TripleO::CephStorage::NodeUserData
+
ExternalPort:
type: OS::TripleO::CephStorage::Ports::ExternalPort
properties:
@@ -223,17 +260,135 @@ resources:
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+ NetHostMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ external:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - external
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - external
+ internal_api:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - internalapi
+ storage:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - storage
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - storage
+ storage_mgmt:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - storagemgmt
+ tenant:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - tenant
+ management:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - management
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - management
+ ctlplane:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [CephStorage, name]}
+ - ctlplane
+
+ PreNetworkConfig:
+ type: OS::TripleO::CephStorage::PreNetworkConfig
+ properties:
+ server: {get_resource: CephStorage}
+
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: CephStorage}
actions: {get_param: NetworkDeploymentActions}
+ CephStorageUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+ - get_param: UpgradeInitCommonCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ CephStorageUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: CephStorageUpgradeInitDeployment
+ server: {get_resource: CephStorage}
+ config: {get_resource: CephStorageUpgradeInitConfig}
+
CephStorageDeployment:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: CephStorageUpgradeInitDeployment
properties:
name: CephStorageDeployment
config: {get_resource: CephStorageConfig}
@@ -271,42 +426,12 @@ resources:
extraconfig: {get_param: ExtraConfig}
ceph:
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
- fqdn_internal_api:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - internalapi
- - {get_param: CloudDomain}
- fqdn_storage:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - storage
- - {get_param: CloudDomain}
- fqdn_storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- fqdn_tenant:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - tenant
- - {get_param: CloudDomain}
- fqdn_management:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - management
- - {get_param: CloudDomain}
- fqdn_ctlplane:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ fqdn_internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ fqdn_storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ fqdn_storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
+ fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -352,48 +477,13 @@ outputs:
hostname_map:
description: Mapping of network names to hostnames
value:
- external:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - external
- - {get_param: CloudDomain}
- internal_api:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - internalapi
- - {get_param: CloudDomain}
- storage:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - storage
- - {get_param: CloudDomain}
- storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- tenant:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - tenant
- - {get_param: CloudDomain}
- management:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - management
- - {get_param: CloudDomain}
- ctlplane:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ external: {get_attr: [NetHostMap, value, external, fqdn]}
+ internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ management: {get_attr: [NetHostMap, value, management, fqdn]}
+ ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
hosts_entry:
value:
str_replace:
@@ -411,47 +501,19 @@ outputs:
DOMAIN: {get_param: CloudDomain}
PRIMARYHOST: {get_attr: [CephStorage, name]}
EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
- EXTERNALHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - external
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
- INTERNAL_APIHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - internalapi
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
STORAGEIP: {get_attr: [StoragePort, ip_address]}
- STORAGEHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - storage
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
- STORAGE_MGMTHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - storagemgmt
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
TENANTIP: {get_attr: [TenantPort, ip_address]}
- TENANTHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - tenant
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
- MANAGEMENTHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - management
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
- CTLPLANEHOST:
- list_join:
- - '.'
- - - {get_attr: [CephStorage, name]}
- - ctlplane
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
nova_server_resource:
description: Heat resource handle for the ceph storage server
value:
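
The metadata map_merge above lets a role-specific parameter (here CephStorageServerMetadata) and the service-derived ServiceMetadataSettings be layered on top of the global ServerMetadata. A minimal environment-file sketch using the new parameters, assuming ServiceMetadataSettings is left to the service configuration; the key names and values are illustrative only:

  # hypothetical environment file
  parameter_defaults:
    ServerMetadata:
      owner: cloud-ops            # applied to every overcloud node
    CephStorageServerMetadata:
      rack: ceph-rack-1           # applied only to CephStorage nodes
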
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index f359bf70..1a0294af 100644
--- a/puppet/compute-role.yaml
+++ b/puppet/compute-role.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack hypervisor node configured via Puppet.
@@ -92,11 +92,20 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ NovaComputeServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
NovaComputeSchedulerHints:
type: json
@@ -111,10 +120,27 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
+ UpgradeInitCommonCommand:
+ type: string
+ description: |
+ Common commands required by the upgrade process. This should not
+ normally be modified by the operator and is set and unset in the
+ major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
+ environment files.
+ default: ''
resources:
@@ -138,7 +164,11 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: NovaComputeServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: NovaComputeSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -150,6 +180,8 @@ resources:
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
+ - config: {get_resource: RoleUserData}
+ type: multipart
# Creates the "heat-admin" user if configured via the environment
# Should return a OS::Heat::MultipartMime reference via OS::stack_id
@@ -161,6 +193,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ # For optional operator role-specific userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ RoleUserData:
+ type: OS::TripleO::Compute::NodeUserData
+
ExternalPort:
type: OS::TripleO::Compute::Ports::ExternalPort
properties:
@@ -226,6 +263,101 @@ resources:
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+ NetHostMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ external:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - external
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - external
+ internal_api:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - internalapi
+ storage:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - storage
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - storage
+ storage_mgmt:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - storagemgmt
+ tenant:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - tenant
+ management:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - management
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - management
+ ctlplane:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [NovaCompute, name]}
+ - ctlplane
+
+ PreNetworkConfig:
+ type: OS::TripleO::Compute::PreNetworkConfig
+ properties:
+ server: {get_resource: NovaCompute}
+
NetworkConfig:
type: OS::TripleO::Compute::Net::SoftwareConfig
properties:
@@ -239,6 +371,7 @@ resources:
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
@@ -248,6 +381,28 @@ resources:
bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
+ NovaComputeUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+ - get_param: UpgradeInitCommonCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ NovaComputeUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: NovaComputeUpgradeInitDeployment
+ server: {get_resource: NovaCompute}
+ config: {get_resource: NovaComputeUpgradeInitConfig}
+
NovaComputeConfig:
type: OS::Heat::StructuredConfig
properties:
@@ -284,46 +439,16 @@ resources:
extraconfig: {get_param: ExtraConfig}
compute:
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
- fqdn_internal_api:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - internalapi
- - {get_param: CloudDomain}
- fqdn_storage:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - storage
- - {get_param: CloudDomain}
- fqdn_storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- fqdn_tenant:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - tenant
- - {get_param: CloudDomain}
- fqdn_management:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - management
- - {get_param: CloudDomain}
- fqdn_ctlplane:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ fqdn_internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ fqdn_storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ fqdn_storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
+ fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
NovaComputeDeployment:
type: OS::TripleO::SoftwareDeployment
- depends_on: NetworkDeployment
+ depends_on: NovaComputeUpgradeInitDeployment
properties:
name: NovaComputeDeployment
config: {get_resource: NovaComputeConfig}
@@ -394,48 +519,13 @@ outputs:
hostname_map:
description: Mapping of network names to hostnames
value:
- external:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - external
- - {get_param: CloudDomain}
- internal_api:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - internalapi
- - {get_param: CloudDomain}
- storage:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - storage
- - {get_param: CloudDomain}
- storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- tenant:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - tenant
- - {get_param: CloudDomain}
- management:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - management
- - {get_param: CloudDomain}
- ctlplane:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ external: {get_attr: [NetHostMap, value, external, fqdn]}
+ internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ management: {get_attr: [NetHostMap, value, management, fqdn]}
+ ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
hosts_entry:
description: >
Server's IP address and hostname in the /etc/hosts format
@@ -455,47 +545,19 @@ outputs:
DOMAIN: {get_param: CloudDomain}
PRIMARYHOST: {get_attr: [NovaCompute, name]}
EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
- EXTERNALHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - external
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
- INTERNAL_APIHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - internalapi
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
STORAGEIP: {get_attr: [StoragePort, ip_address]}
- STORAGEHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - storage
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
- STORAGE_MGMTHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - storagemgmt
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
TENANTIP: {get_attr: [TenantPort, ip_address]}
- TENANTHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - tenant
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
- MANAGEMENTHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - management
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
- CTLPLANEHOST:
- list_join:
- - '.'
- - - {get_attr: [NovaCompute, name]}
- - ctlplane
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
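
The new RoleUserData hook gives each role an optional, operator-supplied firstboot userdata slot alongside the global NodeUserData; whatever template it points at must return an OS::Heat::MultipartMime reference via OS::stack_id, as noted in the comments above. A rough sketch of wiring it up for Compute nodes, where the template path is an assumption:

  # hypothetical environment file
  resource_registry:
    OS::TripleO::Compute::NodeUserData: firstboot/compute-extra-userdata.yaml
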
diff --git a/puppet/config.role.j2.yaml b/puppet/config.role.j2.yaml
index 552c59b2..7337d062 100644
--- a/puppet/config.role.j2.yaml
+++ b/puppet/config.role.j2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
A software config which runs puppet on the {{role}} role
diff --git a/puppet/controller-config-pacemaker.yaml b/puppet/controller-config-pacemaker.yaml
deleted file mode 100644
index 24f31dc8..00000000
--- a/puppet/controller-config-pacemaker.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
- A software config which runs manifests/overcloud_controller_pacemaker.pp
-
-parameters:
- ConfigDebug:
- default: false
- description: Whether to run config management (e.g. Puppet) in debug mode.
- type: boolean
- StepConfig:
- type: string
- description: Config manifests that will be used to step through the deployment.
- default: ''
-
-resources:
-
- ControllerPuppetConfigImpl:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- options:
- enable_debug: {get_param: ConfigDebug}
- enable_hiera: True
- enable_facter: False
- modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
- outputs:
- - name: result
- inputs:
- - name: step
- type: Number
- config:
- list_join:
- - ''
- - - get_file: manifests/overcloud_controller_pacemaker.pp
- - {get_param: StepConfig}
-
-outputs:
- OS::stack_id:
- description: The software config which runs overcloud_controller_pacemaker.pp
- value: {get_resource: ControllerPuppetConfigImpl}
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index 77b54ff3..825006ba 100644
--- a/puppet/controller-role.yaml
+++ b/puppet/controller-role.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack controller node configured by Puppet.
@@ -106,11 +106,20 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ ControllerServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
ControllerSchedulerHints:
type: json
@@ -125,10 +134,27 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
+ UpgradeInitCommonCommand:
+ type: string
+ description: |
+ Common commands required by the upgrade process. This should not
+ normally be modified by the operator and is set and unset in the
+ major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
+ environment files.
+ default: ''
parameter_groups:
- label: deprecated
@@ -157,7 +183,11 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: ControllerServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ControllerSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -169,6 +199,8 @@ resources:
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
+ - config: {get_resource: RoleUserData}
+ type: multipart
# Creates the "heat-admin" user if configured via the environment
# Should return a OS::Heat::MultipartMime reference via OS::stack_id
@@ -180,6 +212,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ # For optional operator role-specific userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ RoleUserData:
+ type: OS::TripleO::Controller::NodeUserData
+
ExternalPort:
type: OS::TripleO::Controller::Ports::ExternalPort
properties:
@@ -245,6 +282,101 @@ resources:
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+ NetHostMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ external:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - external
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - external
+ internal_api:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - internalapi
+ storage:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - storage
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - storage
+ storage_mgmt:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - storagemgmt
+ tenant:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - tenant
+ management:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - management
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - management
+ ctlplane:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [Controller, name]}
+ - ctlplane
+
+ PreNetworkConfig:
+ type: OS::TripleO::Controller::PreNetworkConfig
+ properties:
+ server: {get_resource: Controller}
+
NetworkConfig:
type: OS::TripleO::Controller::Net::SoftwareConfig
properties:
@@ -258,6 +390,7 @@ resources:
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
@@ -282,10 +415,31 @@ resources:
server: {get_resource: Controller}
NodeIndex: {get_param: NodeIndex}
+ ControllerUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+ - get_param: UpgradeInitCommonCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ ControllerUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: ControllerUpgradeInitDeployment
+ server: {get_resource: Controller}
+ config: {get_resource: ControllerUpgradeInitConfig}
ControllerDeployment:
type: OS::TripleO::SoftwareDeployment
- depends_on: NetworkDeployment
+ depends_on: ControllerUpgradeInitDeployment
properties:
name: ControllerDeployment
config: {get_resource: ControllerConfig}
@@ -313,9 +467,7 @@ resources:
- all_nodes # provided by allNodesConfig
- vip_data # provided by allNodesConfig
- '"%{::osfamily}"'
- - cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre
- cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
- - cinder_eqlx_data # Optionally provided by ControllerExtraConfigPre
- neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
- neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
- cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
@@ -344,42 +496,12 @@ resources:
# Misc
tripleo::haproxy::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]}
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
- fqdn_internal_api:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - internalapi
- - {get_param: CloudDomain}
- fqdn_storage:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - storage
- - {get_param: CloudDomain}
- fqdn_storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- fqdn_tenant:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - tenant
- - {get_param: CloudDomain}
- fqdn_management:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - management
- - {get_param: CloudDomain}
- fqdn_ctlplane:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ fqdn_internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ fqdn_storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ fqdn_storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
+ fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
# Hook for site-specific additional pre-deployment config, e.g. extra hieradata
ControllerExtraConfigPre:
@@ -437,48 +559,13 @@ outputs:
hostname_map:
description: Mapping of network names to hostnames
value:
- external:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - external
- - {get_param: CloudDomain}
- internal_api:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - internalapi
- - {get_param: CloudDomain}
- storage:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - storage
- - {get_param: CloudDomain}
- storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- tenant:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - tenant
- - {get_param: CloudDomain}
- management:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - management
- - {get_param: CloudDomain}
- ctlplane:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ external: {get_attr: [NetHostMap, value, external, fqdn]}
+ internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ management: {get_attr: [NetHostMap, value, management, fqdn]}
+ ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
hosts_entry:
description: >
Server's IP address and hostname in the /etc/hosts format
@@ -498,47 +585,19 @@ outputs:
DOMAIN: {get_param: CloudDomain}
PRIMARYHOST: {get_attr: [Controller, name]}
EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
- EXTERNALHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - external
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
- INTERNAL_APIHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - internalapi
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
STORAGEIP: {get_attr: [StoragePort, ip_address]}
- STORAGEHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - storage
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
- STORAGE_MGMTHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - storagemgmt
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
TENANTIP: {get_attr: [TenantPort, ip_address]}
- TENANTHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - tenant
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
- MANAGEMENTHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - management
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
- CTLPLANEHOST:
- list_join:
- - '.'
- - - {get_attr: [Controller, name]}
- - ctlplane
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
nova_server_resource:
description: Heat resource handle for the controller server
value:
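
Each role template now prepends UpgradeInitCommand and UpgradeInitCommonCommand to a short bash script that runs after the node's network configuration and before the rest of its configuration. A sketch of how an operator might supply the repository switch from an environment file; the commands are placeholders, and UpgradeInitCommonCommand is normally left to the major-upgrade environment files:

  # hypothetical environment file
  parameter_defaults:
    UpgradeInitCommand: |
      set -e
      yum clean all
      # enable the target release repositories here, e.g. by switching
      # to a pre-staged .repo file
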
diff --git a/puppet/deploy-artifacts.sh b/puppet/deploy-artifacts.sh
index 8bcbbf4c..4e1ad89f 100644
--- a/puppet/deploy-artifacts.sh
+++ b/puppet/deploy-artifacts.sh
@@ -8,7 +8,7 @@ trap cleanup EXIT
if [ -n "$artifact_urls" ]; then
for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
- curl --globoff -o $TMP_DATA/file_data "$artifact_urls"
+ curl --globoff -o $TMP_DATA/file_data "$URL"
if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
yum install -y $TMP_DATA/file_data
elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
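
The fix above matters as soon as more than one artifact URL is configured: the old command passed the whole space-separated $artifact_urls string to curl on every iteration instead of the current $URL, so only a single artifact ever downloaded correctly. A quick shell illustration of the corrected loop, with made-up URLs:

  artifact_urls="http://host/a.tar.gz http://host/b.rpm"   # made-up URLs
  for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
    echo "would fetch $URL"   # the fixed script now curls each URL in turn
  done
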
diff --git a/puppet/deploy-artifacts.yaml b/puppet/deploy-artifacts.yaml
index 17f84163..5e89405b 100644
--- a/puppet/deploy-artifacts.yaml
+++ b/puppet/deploy-artifacts.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
Software Config to install deployment artifacts (tarballs and/or
diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
index 6a2ea4d5..3daf3fd3 100644
--- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-10-15
+heat_template_version: ocata
description: Configure hieradata for all MidoNet nodes
diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
index 7bda0cd5..9b900bc4 100644
--- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata for Network Cisco configuration
@@ -245,7 +245,9 @@ resources:
for map_name in mappings:
f_name = '/root/' + map_name
map_data = os.getenv(map_name, "Nada")
- with open(f_name, 'a') as f:
+ with os.fdopen(os.open(f_name,
+ os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o644),
+ 'w') as f:
f.write(map_data)
if map_data != "Nada":
if map_name != 'nexus_config':
@@ -260,7 +262,9 @@ resources:
for mac in vals[1:]:
mac2host[mac.lower()] = vals[0]
- with open('/root/mac2host', 'a') as f:
+ with os.fdopen(os.open('/root/mac2host',
+ os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o644),
+ 'w') as f:
f.write(str(mac2host))
# now we have mac to host, map host to switchport in hieradata
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
index 49c77190..7fe2a842 100644
--- a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata for Big Switch agents on compute node
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
deleted file mode 100644
index f5b1f0e6..00000000
--- a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: Compute node hieradata for Neutron OpenContrail configuration
-
-parameters:
- server:
- description: ID of the compute node to apply this config to
- type: string
- ContrailApiServerIp:
- description: IP address of the OpenContrail API server
- type: string
- ContrailApiServerPort:
- description: Port of the OpenContrail API
- type: string
- default: 8082
-
-resources:
- ComputeContrailConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- hiera:
- datafiles:
- neutron_opencontrail_data:
- mapped_data:
- nova::network::neutron::network_api_class: nova.network.neutronv2.api.API
-
- contrail::vrouter::provision_vrouter::api_address: {get_input: contrail_api_server_ip}
- contrail::vrouter::provision_vrouter::api_port: {get_input: contrail_api_server_port}
- contrail::vrouter::provision_vrouter::keystone_admin_user: admin
- contrail::vrouter::provision_vrouter::keystone_admin_tenant_name: admin
- contrail::vrouter::provision_vrouter::keystone_admin_password: '"%{::admin_password}"'
-
- contrail::vnc_api::vnc_api_config:
- 'auth/AUTHN_TYPE':
- value: keystone
- 'auth/AUTHN_PROTOCOL':
- value: http
- 'auth/AUTHN_SERVER':
- value: "%{hiera('keystone_admin_api_vip')}"
- 'auth/AUTHN_PORT':
- value: 35357
- 'auth/AUTHN_URL':
- value: '/v2.0/tokens'
-
- ComputeContrailDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: ComputeContrailConfig}
- server: {get_param: server}
- input_values:
- contrail_api_server_ip: {get_param: ContrailApiServerIp}
- contrail_api_server_port: {get_param: ContrailApiServerPort}
-
-outputs:
- deploy_stdout:
- description: Output of the extra hiera data deployment
- value: {get_attr: [ComputeContrailDeployment, deploy_stdout]}
diff --git a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
index 5561c74a..47c782c7 100644
--- a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata for Nuage configuration on the Compute
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
deleted file mode 100644
index 9423208e..00000000
--- a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-heat_template_version: 2015-10-15
-
-description: Configure hieradata for Cinder Dell Storage Center configuration
-
-parameters:
- server:
- description: ID of the controller node to apply this config to
- type: string
-
- # Config specific parameters, to be provided via parameter_defaults
- CinderEnableDellScBackend:
- type: boolean
- default: true
- CinderDellScBackendName:
- type: string
- default: 'tripleo_dellsc'
- CinderDellScSanIp:
- type: string
- CinderDellScSanLogin:
- type: string
- default: 'Admin'
- CinderDellScSanPassword:
- type: string
- hidden: true
- CinderDellScSsn:
- type: string
- default: '64702'
- CinderDellScIscsiIpAddress:
- type: string
- default: ''
- CinderDellScIscsiPort:
- type: string
- default: '3260'
- CinderDellScApiPort:
- type: string
- default: '3033'
- CinderDellScServerFolder:
- type: string
- default: 'dellsc_server'
- CinderDellScVolumeFolder:
- type: string
- default: 'dellsc_volume'
-
-resources:
- CinderDellScConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- hiera:
- datafiles:
- cinder_dellsc_data:
- mapped_data:
- tripleo::profile::base::cinder::volume::cinder_enable_dellsc_backend: {get_input: EnableDellScBackend}
- cinder::backend::dellsc_iscsi::volume_backend_name: {get_input: DellScBackendName}
- cinder::backend::dellsc_iscsi::san_ip: {get_input: DellScSanIp}
- cinder::backend::dellsc_iscsi::san_login: {get_input: DellScSanLogin}
- cinder::backend::dellsc_iscsi::san_password: {get_input: DellScSanPassword}
- cinder::backend::dellsc_iscsi::dell_sc_ssn: {get_input: DellScSsn}
- cinder::backend::dellsc_iscsi::iscsi_ip_address: {get_input: DellScIscsiIpAddress}
- cinder::backend::dellsc_iscsi::iscsi_port: {get_input: DellScIscsiPort}
- cinder::backend::dellsc_iscsi::dell_sc_api_port: {get_input: DellScApiPort}
- cinder::backend::dellsc_iscsi::dell_sc_server_folder: {get_input: DellScServerFolder}
- cinder::backend::dellsc_iscsi::dell_sc_volume_folder: {get_input: DellScVolumeFolder}
-
- CinderDellScDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: CinderDellScConfig}
- server: {get_param: server}
- input_values:
- EnableDellScBackend: {get_param: CinderEnableDellScBackend}
- DellScBackendName: {get_param: CinderDellScBackendName}
- DellScSanIp: {get_param: CinderDellScSanIp}
- DellScSanLogin: {get_param: CinderDellScSanLogin}
- DellScSanPassword: {get_param: CinderDellScSanPassword}
- DellScSsn: {get_param: CinderDellScSsn}
- DellScIscsiIpAddress: {get_param: CinderDellScIscsiIpAddress}
- DellScIscsiPort: {get_param: CinderDellScIscsiPort}
- DellScApiPort: {get_param: CinderDellScApiPort}
- DellScServerFolder: {get_param: CinderDellScServerFolder}
- DellScVolumeFolder: {get_param: CinderDellScVolumeFolder}
-
-outputs:
- deploy_stdout:
- description: Deployment reference, used to trigger puppet apply on changes
- value: {get_attr: [CinderDellScDeployment, deploy_stdout]}
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
deleted file mode 100644
index c7af6f22..00000000
--- a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-heat_template_version: 2015-10-15
-
-description: Configure hieradata for Cinder Eqlx configuration
-
-parameters:
- server:
- description: ID of the controller node to apply this config to
- type: string
-
- # Config specific parameters, to be provided via parameter_defaults
- CinderEnableEqlxBackend:
- type: boolean
- default: true
- CinderEqlxBackendName:
- type: string
- default: 'tripleo_eqlx'
- CinderEqlxSanIp:
- type: string
- CinderEqlxSanLogin:
- type: string
- CinderEqlxSanPassword:
- type: string
- hidden: true
- CinderEqlxSanThinProvision:
- type: boolean
- default: true
- CinderEqlxGroupname:
- type: string
- default: 'group-0'
- CinderEqlxPool:
- type: string
- default: 'default'
- CinderEqlxChapLogin:
- type: string
- default: ''
- CinderEqlxChapPassword:
- type: string
- default: ''
- CinderEqlxUseChap:
- type: boolean
- default: false
-
-resources:
- CinderEqlxConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- hiera:
- datafiles:
- cinder_eqlx_data:
- mapped_data:
- tripleo::profile::base::cinder::volume::cinder_enable_eqlx_backend: {get_input: EnableEqlxBackend}
- cinder::backend::eqlx::volume_backend_name: {get_input: EqlxBackendName}
- cinder::backend::eqlx::san_ip: {get_input: EqlxSanIp}
- cinder::backend::eqlx::san_login: {get_input: EqlxSanLogin}
- cinder::backend::eqlx::san_password: {get_input: EqlxSanPassword}
- cinder::backend::eqlx::san_thin_provision: {get_input: EqlxSanThinProvision}
- cinder::backend::eqlx::eqlx_group_name: {get_input: EqlxGroupname}
- cinder::backend::eqlx::eqlx_pool: {get_input: EqlxPool}
- cinder::backend::eqlx::eqlx_use_chap: {get_input: EqlxUseChap}
- cinder::backend::eqlx::eqlx_chap_login: {get_input: EqlxChapLogin}
- cinder::backend::eqlx::eqlx_chap_password: {get_input: EqlxChapPassword}
-
- CinderEqlxDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: CinderEqlxConfig}
- server: {get_param: server}
- input_values:
- EnableEqlxBackend: {get_param: CinderEnableEqlxBackend}
- EqlxBackendName: {get_param: CinderEqlxBackendName}
- EqlxSanIp: {get_param: CinderEqlxSanIp}
- EqlxSanLogin: {get_param: CinderEqlxSanLogin}
- EqlxSanPassword: {get_param: CinderEqlxSanPassword}
- EqlxSanThinProvision: {get_param: CinderEqlxSanThinProvision}
- EqlxGroupname: {get_param: CinderEqlxGroupname}
- EqlxPool: {get_param: CinderEqlxPool}
- EqlxUseChap: {get_param: CinderEqlxUseChap}
- EqlxChapLogin: {get_param: CinderEqlxChapLogin}
- EqlxChapPassword: {get_param: CinderEqlxChapPassword}
-
-outputs:
- deploy_stdout:
- description: Deployment reference, used to trigger puppet apply on changes
- value: {get_attr: [CinderEqlxDeployment, deploy_stdout]}
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
index 48446e5a..763ae39a 100644
--- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata for Cinder Netapp configuration
diff --git a/puppet/extraconfig/pre_deploy/controller/multiple.yaml b/puppet/extraconfig/pre_deploy/controller/multiple.yaml
index f949a397..d3d546dd 100644
--- a/puppet/extraconfig/pre_deploy/controller/multiple.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/multiple.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Extra Pre-Deployment Config, multiple'
parameters:
server:
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
index 467f57cc..0f4806db 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata for Neutron Big Switch configuration
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
index cec885cd..6eae812f 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata for Cisco N1KV configuration
diff --git a/puppet/extraconfig/pre_deploy/default.yaml b/puppet/extraconfig/pre_deploy/default.yaml
index dcbc6811..5da07f87 100644
--- a/puppet/extraconfig/pre_deploy/default.yaml
+++ b/puppet/extraconfig/pre_deploy/default.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2014-10-16
+heat_template_version: ocata
description: 'Noop Extra Pre-Deployment Config'
parameters:
server:
diff --git a/puppet/extraconfig/pre_deploy/per_node.yaml b/puppet/extraconfig/pre_deploy/per_node.yaml
index e236e336..65113f6a 100644
--- a/puppet/extraconfig/pre_deploy/per_node.yaml
+++ b/puppet/extraconfig/pre_deploy/per_node.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: Configure hieradata overrides for specific nodes
diff --git a/puppet/extraconfig/tls/ca-inject.yaml b/puppet/extraconfig/tls/ca-inject.yaml
index f955034d..04b5ccf6 100644
--- a/puppet/extraconfig/tls/ca-inject.yaml
+++ b/puppet/extraconfig/tls/ca-inject.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
This is a template which will inject the trusted anchor.
diff --git a/puppet/extraconfig/tls/freeipa-enroll.yaml b/puppet/extraconfig/tls/freeipa-enroll.yaml
new file mode 100644
index 00000000..7ce15069
--- /dev/null
+++ b/puppet/extraconfig/tls/freeipa-enroll.yaml
@@ -0,0 +1,83 @@
+heat_template_version: ocata
+
+description: Enroll nodes to FreeIPA
+
+parameters:
+ server:
+ description: ID of the controller node to apply this config to
+ type: string
+
+ CloudDomain:
+ description: >
+ The configured cloud domain; this will also be used as the kerberos realm
+ type: string
+
+ FreeIPAOTP:
+ default: ''
+ description: 'OTP that will be used for FreeIPA enrollment'
+ type: string
+ hidden: true
+ FreeIPAServer:
+ default: ''
+ description: 'FreeIPA server DNS name'
+ type: string
+ FreeIPAIPAddress:
+ default: ''
+ description: 'FreeIPA server IP Address'
+ type: string
+
+resources:
+ FreeIPAEnrollmentConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: otp
+ - name: ipa_server
+ - name: ipa_domain
+ - name: ipa_ip
+ config: |
+ #!/bin/sh
+ # If no IPA server was given as a parameter, it will be assumed from
+ # DNS.
+ if [ -n "${ipa_server}" ]; then
+ sed -i "/${ipa_server}/d" /etc/hosts
+ # Optionally add the FreeIPA server IP to /etc/hosts
+ if [ -n "${ipa_ip}" ]; then
+ echo "${ipa_ip} ${ipa_server}" >> /etc/hosts
+ fi
+ fi
+ # Set the node's domain if needed
+ if [ ! $(hostname -f | grep "${ipa_domain}$") ]; then
+ hostnamectl set-hostname "$(hostname).${ipa_domain}"
+ fi
+ yum install -y ipa-client
+ # Enroll. If there is already a keytab, we have already done this. If
+ # this node hasn't enrolled and the OTP is missing, fail.
+ if [ ! -f /etc/krb5.keytab ]; then
+ if [ -z "${otp}" ]; then
+ echo "OTP is missing"
+ exit 1
+ fi
+ ipa-client-install --server ${ipa_server} -w ${otp} \
+ --domain=${ipa_domain} -U
+ fi
+ # Get a TGT
+ kinit -k -t /etc/krb5.keytab
+
+ FreeIPAControllerEnrollmentDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ name: FreeIPAEnrollmentDeployment
+ config: {get_resource: FreeIPAEnrollmentConfig}
+ server: {get_param: server}
+ input_values:
+ otp: {get_param: FreeIPAOTP}
+ ipa_server: {get_param: FreeIPAServer}
+ ipa_domain: {get_param: CloudDomain}
+ ipa_ip: {get_param: FreeIPAIPAddress}
+
+outputs:
+ deploy_stdout:
+ description: Output of the FreeIPA enrollment deployment
+ value: {get_attr: [FreeIPAControllerEnrollmentDeployment, deploy_stdout]}
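
A rough sketch of how this new template could be pulled in for controller nodes. The resource_registry hook below is an assumption (any per-node extraconfig hook that passes a server ID would do), and all parameter values and paths are placeholders:

  # hypothetical environment file; the hook name is an assumption
  resource_registry:
    OS::TripleO::ControllerExtraConfigPre: puppet/extraconfig/tls/freeipa-enroll.yaml
  parameter_defaults:
    FreeIPAOTP: a-one-time-password
    FreeIPAServer: ipa.example.com
    FreeIPAIPAddress: 192.0.2.10
    CloudDomain: example.com
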
diff --git a/puppet/extraconfig/tls/tls-cert-inject.yaml b/puppet/extraconfig/tls/tls-cert-inject.yaml
index 49d84574..2a61afc0 100644
--- a/puppet/extraconfig/tls/tls-cert-inject.yaml
+++ b/puppet/extraconfig/tls/tls-cert-inject.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: ocata
description: >
This is a template which will build the TLS Certificates necessary
diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml
index f8dad433..6f2dd684 100644
--- a/puppet/major_upgrade_steps.j2.yaml
+++ b/puppet/major_upgrade_steps.j2.yaml
@@ -1,4 +1,8 @@
-heat_template_version: 2016-10-14
+{% set enabled_roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% set batch_upgrade_steps_max = 3 -%}
+{% set upgrade_steps_max = 6 -%}
+{% set deliver_script = {'deliver': False} -%}
+heat_template_version: ocata
description: 'Upgrade steps for all roles'
parameters:
@@ -14,18 +18,39 @@ parameters:
description: >
Setting to a previously unused value during stack-update will trigger
the Upgrade resources to re-run on all roles.
-
- UpgradeInitCommand:
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ NovaPassword:
+ description: The password for the nova service and db account, used by nova-api.
type: string
- description: |
- Command or script snippet to run on all overcloud nodes to
- initialize the upgrade process. E.g. a repository switch.
- default: ''
+ hidden: true
+
+conditions:
+ # Conditions to disable any steps where the task list is empty
+{%- for role in roles %}
+ {{role.name}}UpgradeBatchConfigEnabled:
+ not:
+ equals:
+ - {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
+ - []
+ {{role.name}}UpgradeConfigEnabled:
+ not:
+ equals:
+ - {get_param: [role_data, {{role.name}}, upgrade_tasks]}
+ - []
+{%- endfor %}
resources:
- # For the UpgradeInit also rename /etc/resolv.conf.save for +bug/1567004
- UpgradeInitConfig:
+{% for role in roles if role.disable_upgrade_deployment|default(false) %}
+ {{role.name}}DeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
@@ -33,49 +58,153 @@ resources:
list_join:
- ''
- - "#!/bin/bash\n\n"
- - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- - get_param: UpgradeInitCommand
+ - "set -eu\n\n"
+ - "if hiera -c /etc/puppet/hiera.yaml service_names | grep nova_compute ; then\n\n"
+ - " crudini --set /etc/nova/nova.conf placement auth_type password\n\n"
+ - " crudini --set /etc/nova/nova.conf placement username placement\n\n"
+ - " crudini --set /etc/nova/nova.conf placement project_domain_name Default\n\n"
+ - " crudini --set /etc/nova/nova.conf placement user_domain_name Default\n\n"
+ - " crudini --set /etc/nova/nova.conf placement project_name service\n\n"
+ - " systemctl restart openstack-nova-compute\n\n"
+ - "fi\n\n"
+ - str_replace:
+ template: |
+ crudini --set /etc/nova/nova.conf placement password 'SERVICE_PASSWORD'
+ crudini --set /etc/nova/nova.conf placement region_name 'REGION_NAME'
+ crudini --set /etc/nova/nova.conf placement auth_url 'AUTH_URL'
+ ROLE='ROLE_NAME'
+ params:
+ SERVICE_PASSWORD: { get_param: NovaPassword }
+ REGION_NAME: { get_param: KeystoneRegion }
+ AUTH_URL: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ ROLE_NAME: {{role.name}}
+ - get_file: ../extraconfig/tasks/pacemaker_common_functions.sh
+ - get_file: ../extraconfig/tasks/run_puppet.sh
+ - get_file: ../extraconfig/tasks/tripleo_upgrade_node.sh
-{% for role in roles %}
- {{role.name}}Upgrade_Init:
- type: OS::Heat::StructuredDeploymentGroup
+ {{role.name}}DeliverUpgradeScriptDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
properties:
- name: {{role.name}}Upgrade_Init
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}DeliverUpgradeScriptConfig}
+{% endfor %}
+
+# Upgrade Steps for all roles, batched updates
+# The UpgradeConfig resources could actually be created without
+# serialization, but the event output is easier to follow if we
+# do, and there should be minimal performance hit (creating the
+# config is cheap compared to the time to apply the deployment).
+{% for step in range(0, batch_upgrade_steps_max) %}
+ # Batch config resources step {{step}}
+ {%- for role in roles %}
+ {{role.name}}UpgradeBatchConfig_Step{{step}}:
+ type: OS::TripleO::UpgradeConfig
+ {%- if step > 0 %}
+ condition: {{role.name}}UpgradeBatchConfigEnabled
+ {% if role.name in enabled_roles %}
+ depends_on:
+ - {{role.name}}UpgradeBatch_Step{{step -1}}
+ {%- endif %}
+ {% else %}
+ {% for role in roles if role.disable_upgrade_deployment|default(false) %}
+ {% if deliver_script.update({'deliver': True}) %} {% endif %}
+ {% endfor %}
+ {% if deliver_script.deliver %}
+ depends_on:
+ {% endif %}
+ {% for dep in roles if dep.disable_upgrade_deployment|default(false) %}
+ - {{dep.name}}DeliverUpgradeScriptDeployment
+ {% endfor %}
+ {% endif %}
+ properties:
+ UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
+ step: {{step}}
+ {%- endfor %}
+
+ # Batch deployment resources for step {{step}} (only for enabled roles)
+ {%- for role in enabled_roles %}
+ {{role.name}}UpgradeBatch_Step{{step}}:
+ type: OS::Heat::SoftwareDeploymentGroup
+ condition: {{role.name}}UpgradeBatchConfigEnabled
+ {%- if step > 0 %}
+ depends_on:
+ - {{role.name}}UpgradeBatch_Step{{step -1}}
+ {% else %}
+ depends_on:
+ - {{role.name}}UpgradeBatchConfig_Step{{step}}
+ {%- endif %}
+ update_policy:
+ batch_create:
+ max_batch_size: {{role.upgrade_batch_size|default(1)}}
+ rolling_update:
+ max_batch_size: {{role.upgrade_batch_size|default(1)}}
+ properties:
+ name: {{role.name}}UpgradeBatch_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: UpgradeInitConfig}
+ config: {get_resource: {{role.name}}UpgradeBatchConfig_Step{{step}}}
+ input_values:
+ role: {{role.name}}
+ update_identifier: {get_param: UpdateIdentifier}
+ {%- endfor %}
+{%- endfor %}
+
+# Dump the puppet manifests to be applied later when disable_upgrade_deployment
+# is set to true
+{% for role in roles if role.disable_upgrade_deployment|default(false) %}
+ {{role.name}}DeliverPuppetConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - str_replace:
+ template: |
+ #!/bin/bash
+ cat > /root/{{role.name}}_puppet_config.pp << ENDOFCAT
+ PUPPET_CLASSES
+ ENDOFCAT
+ params:
+ PUPPET_CLASSES: {get_param: [role_data, {{role.name}}, step_config]}
+
+ {{role.name}}DeliverPuppetDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}DeliverPuppetConfig}
{% endfor %}
# Upgrade Steps for all roles
-# FIXME(shardy): would be nice to make the number of steps configurable
-{% for step in range(1, 8) %}
- {% for role in roles %}
- # Step {{step}} resources
+{%- for step in range(0, upgrade_steps_max) %}
+ # Config resources for step {{step}}
+ {%- for role in roles %}
{{role.name}}UpgradeConfig_Step{{step}}:
type: OS::TripleO::UpgradeConfig
# The UpgradeConfig resources could actually be created without
# serialization, but the event output is easier to follow if we
# do, and there should be minimal performance hit (creating the
# config is cheap compared to the time to apply the deployment).
+ {%- if step > 0 %}
+ condition: {{role.name}}UpgradeConfigEnabled
+ {% if role.name in enabled_roles %}
depends_on:
- {% if step == 1 %}
- - {{role.name}}Upgrade_Init
- {% else %}
- {% for dep in roles %}
- - {{dep.name}}Upgrade_Step{{step -1}}
- {% endfor %}
- {% endif %}
+ - {{role.name}}Upgrade_Step{{step -1}}
+ {% endif %}
+ {%- endif %}
properties:
UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]}
step: {{step}}
+ {%- endfor %}
+ # Deployment resources for step {{step}} (only for enabled roles)
+ {%- for role in enabled_roles %}
{{role.name}}Upgrade_Step{{step}}:
- type: OS::Heat::StructuredDeploymentGroup
- {% if step > 1 %}
+ type: OS::Heat::SoftwareDeploymentGroup
+ {%- if step > 0 %}
+ condition: {{role.name}}UpgradeConfigEnabled
depends_on:
- {% for dep in roles %}
- - {{dep.name}}Upgrade_Step{{step -1}}
- {% endfor %}
- {% endif %}
+ - {{role.name}}Upgrade_Step{{step -1}}
+ {%- endif %}
properties:
name: {{role.name}}Upgrade_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
@@ -83,8 +212,21 @@ resources:
input_values:
role: {{role.name}}
update_identifier: {get_param: UpdateIdentifier}
- {% endfor %}
-{% endfor %}
+ {%- endfor %}
+{%- endfor %}
+
+ # Post upgrade deployment steps for all roles
+ # This runs the normal configuration (e.g. puppet) steps unless upgrade
+ # is disabled for the role
+ AllNodesPostUpgradeSteps:
+ type: OS::TripleO::PostUpgradeSteps
+ depends_on:
+{%- for dep in enabled_roles %}
+ - {{dep.name}}Upgrade_Step{{upgrade_steps_max - 1}}
+{%- endfor %}
+ properties:
+ servers: {get_param: servers}
+ role_data: {get_param: role_data}
outputs:
# Output the config for each role, just use Step1 as the config should be
@@ -95,4 +237,3 @@ outputs:
{% for role in roles %}
{{role.name.lower()}}: {get_attr: [{{role.name}}UpgradeConfig_Step1, upgrade_config]}
{% endfor %}
-
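
The batching and delivery logic above keys off per-role settings in roles_data.yaml: upgrade_batch_size controls how many nodes a batch step touches, while disable_upgrade_deployment makes a role receive only the delivered upgrade script and dumped puppet manifest instead of the automated steps. A minimal sketch of both cases; the service lists are illustrative only:

  # hypothetical roles_data.yaml fragment
  - name: Controller
    upgrade_batch_size: 1              # nodes updated per batch step
    ServicesDefault:
      - OS::TripleO::Services::Keystone
  - name: Compute
    disable_upgrade_deployment: True   # only deliver the script and manifest
    ServicesDefault:
      - OS::TripleO::Services::NovaCompute
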
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
deleted file mode 100644
index d329d5fc..00000000
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-if hiera('step') >= 4 {
- hiera_include('controller_classes', [])
-}
-
-$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
-package_manifest{$package_manifest_name: ensure => present}
diff --git a/puppet/manifests/overcloud_role.pp b/puppet/manifests/overcloud_role.pp
index 1a59620c..e2bf5146 100644
--- a/puppet/manifests/overcloud_role.pp
+++ b/puppet/manifests/overcloud_role.pp
@@ -24,3 +24,7 @@ if hiera('step') >= 4 {
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud___ROLE__', hiera('step')])
package_manifest{$package_manifest_name: ensure => present}
+
+# NOTE(gfidente): ensure deprecated package manifest is absent, can be removed after Pike
+$absent_package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
+package_manifest{$absent_package_manifest_name: ensure => absent}
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 60c12c3b..172484dc 100644
--- a/puppet/objectstorage-role.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'OpenStack swift storage node configured by Puppet'
parameters:
OvercloudSwiftStorageFlavor:
@@ -71,11 +71,20 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ SwiftStorageServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
ObjectStorageSchedulerHints:
type: json
@@ -93,10 +102,27 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
+ UpgradeInitCommonCommand:
+ type: string
+ description: |
+ Common commands required by the upgrades process. This should not
+ normally be modified by the operator and is set and unset in the
+ major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
+ environment files.
+ default: ''
resources:
@@ -118,7 +144,11 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: SwiftStorageServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ObjectStorageSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -130,6 +160,8 @@ resources:
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
+ - config: {get_resource: RoleUserData}
+ type: multipart
# Creates the "heat-admin" user if configured via the environment
# Should return a OS::Heat::MultipartMime reference via OS::stack_id
@@ -141,6 +173,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ # For optional operator role-specific userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ RoleUserData:
+ type: OS::TripleO::ObjectStorage::NodeUserData
+
ExternalPort:
type: OS::TripleO::SwiftStorage::Ports::ExternalPort
properties:
@@ -217,14 +254,132 @@ resources:
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+ NetHostMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ external:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - external
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - external
+ internal_api:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - internalapi
+ storage:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - storage
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - storage
+ storage_mgmt:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - storagemgmt
+ tenant:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - tenant
+ management:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - management
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - management
+ ctlplane:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [SwiftStorage, name]}
+ - ctlplane
+
+ PreNetworkConfig:
+ type: OS::TripleO::ObjectStorage::PreNetworkConfig
+ properties:
+ server: {get_resource: SwiftStorage}
+
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: SwiftStorage}
actions: {get_param: NetworkDeploymentActions}
+ SwiftStorageUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+ - get_param: UpgradeInitCommonCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ SwiftStorageUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: SwiftStorageUpgradeInitDeployment
+ server: {get_resource: SwiftStorage}
+ config: {get_resource: SwiftStorageUpgradeInitConfig}
+
SwiftStorageHieraConfig:
type: OS::Heat::StructuredConfig
properties:
@@ -255,47 +410,16 @@ resources:
extraconfig: {get_param: ExtraConfig}
object:
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
- fqdn_internal_api:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - internalapi
- - {get_param: CloudDomain}
- fqdn_storage:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - storage
- - {get_param: CloudDomain}
- fqdn_storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- fqdn_tenant:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - tenant
- - {get_param: CloudDomain}
- fqdn_management:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - management
- - {get_param: CloudDomain}
- fqdn_ctlplane:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - ctlplane
- - {get_param: CloudDomain}
-
+ fqdn_internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ fqdn_storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ fqdn_storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
+ fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
SwiftStorageHieraDeploy:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: SwiftStorageUpgradeInitDeployment
properties:
name: SwiftStorageHieraDeploy
server: {get_resource: SwiftStorage}
@@ -340,48 +464,13 @@ outputs:
hostname_map:
description: Mapping of network names to hostnames
value:
- external:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - external
- - {get_param: CloudDomain}
- internal_api:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - internalapi
- - {get_param: CloudDomain}
- storage:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - storage
- - {get_param: CloudDomain}
- storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- tenant:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - tenant
- - {get_param: CloudDomain}
- management:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - management
- - {get_param: CloudDomain}
- ctlplane:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ external: {get_attr: [NetHostMap, value, external, fqdn]}
+ internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ management: {get_attr: [NetHostMap, value, management, fqdn]}
+ ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
hosts_entry:
value:
str_replace:
@@ -399,47 +488,19 @@ outputs:
DOMAIN: {get_param: CloudDomain}
PRIMARYHOST: {get_attr: [SwiftStorage, name]}
EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
- EXTERNALHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - external
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
- INTERNAL_APIHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - internalapi
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
STORAGEIP: {get_attr: [StoragePort, ip_address]}
- STORAGEHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - storage
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
- STORAGE_MGMTHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - storagemgmt
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
TENANTIP: {get_attr: [TenantPort, ip_address]}
- TENANTHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - tenant
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
- MANAGEMENTHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - management
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
- CTLPLANEHOST:
- list_join:
- - '.'
- - - {get_attr: [SwiftStorage, name]}
- - ctlplane
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
nova_server_resource:
description: Heat resource handle for the swift storage server
value:
diff --git a/puppet/post-upgrade.j2.yaml b/puppet/post-upgrade.j2.yaml
new file mode 100644
index 00000000..b84039de
--- /dev/null
+++ b/puppet/post-upgrade.j2.yaml
@@ -0,0 +1,27 @@
+heat_template_version: ocata
+
+description: >
+ Post-upgrade configuration steps via puppet for all roles
+ where upgrade is not disabled as defined in ../roles_data.yaml
+
+parameters:
+ servers:
+ type: json
+ description: Mapping of Role name e.g Controller to a list of servers
+
+ role_data:
+ type: json
+ description: Mapping of Role name e.g Controller to the per-role data
+
+ DeployIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting this to a unique value will re-run any deployment tasks which
+ perform configuration on a Heat stack-update.
+
+resources:
+# Note the include here is the same as post.j2.yaml but the data used at
+# the time of rendering is different if any roles disable upgrades
+{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% include 'puppet-steps.j2' %}
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
index 582eb28d..21202775 100644
--- a/puppet/post.j2.yaml
+++ b/puppet/post.j2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Post-deploy configuration steps via puppet for all roles,
@@ -12,7 +12,11 @@ parameters:
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
-
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
DeployIdentifier:
default: ''
type: string
@@ -21,97 +25,4 @@ parameters:
perform configuration on a Heat stack-update.
resources:
-
-{% for role in roles %}
- # Post deployment steps for all roles
- # A single config is re-applied with an incrementing step number
- # {{role.name}} Role steps
- {{role.name}}ArtifactsConfig:
- type: deploy-artifacts.yaml
-
- {{role.name}}ArtifactsDeploy:
- type: OS::Heat::StructuredDeployments
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}ArtifactsConfig}
-
- {{role.name}}PreConfig:
- type: OS::TripleO::Tasks::{{role.name}}PreConfig
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}Config:
- type: OS::TripleO::{{role.name}}Config
- properties:
- StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
-
- {% if role.name == 'Controller' %}
- ControllerPrePuppet:
- type: OS::TripleO::Tasks::ControllerPrePuppet
- properties:
- servers: {get_param: [servers, Controller]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
- {% endif %}
-
- # Step through a series of configuration steps
-{% for step in range(1, 6) %}
- {% for role in roles %}
-
- {{role.name}}Deployment_Step{{step}}:
- type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step{{step -1}}
- {% endfor %}
- {% endif %}
- properties:
- name: {{role.name}}Deployment_Step{{step}}
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: {{step}}
- update_identifier: {get_param: DeployIdentifier}
-
- {% endfor %}
-{% endfor %}
-
- {{role.name}}PostConfig:
- type: OS::TripleO::Tasks::{{role.name}}PostConfig
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step5
- {% endfor %}
- properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- {{role.name}}ExtraConfigPost:
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}PostConfig
- {% endfor %}
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: [servers, {{role.name}}]}
-
- {% if role.name == 'Controller' %}
- ControllerPostPuppet:
- depends_on:
- - ControllerExtraConfigPost
- type: OS::TripleO::Tasks::ControllerPostPuppet
- properties:
- servers: {get_param: [servers, Controller]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
- {% endif %}
-
-{% endfor %}
+{% include 'puppet-steps.j2' %}
diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2
new file mode 100644
index 00000000..581c4f0d
--- /dev/null
+++ b/puppet/puppet-steps.j2
@@ -0,0 +1,102 @@
+ # Post deployment steps for all roles
+ # A single config is re-applied with an incrementing step number
+{% for role in roles %}
+ # {{role.name}} Role post-deploy steps
+ {{role.name}}ArtifactsConfig:
+ type: deploy-artifacts.yaml
+
+ {{role.name}}ArtifactsDeploy:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ArtifactsConfig}
+
+ {{role.name}}PreConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PreConfig
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Config:
+ type: OS::TripleO::{{role.name}}Config
+ properties:
+ StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
+
+ {{role.name}}PrePuppet:
+ type: OS::TripleO::Tasks::{{role.name}}PrePuppet
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ {% if role.name in ['Controller', 'ObjectStorage'] %}
+ {{role.name}}SwiftRingDeploy:
+ type: OS::TripleO::Tasks::SwiftRingDeploy
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ {% endif %}
+
+ # Step through a series of configuration steps
+{% for step in range(1, 6) %}
+ {{role.name}}Deployment_Step{{step}}:
+ type: OS::Heat::StructuredDeploymentGroup
+ {% if step == 1 %}
+ depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ {% else %}
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
+ properties:
+ name: {{role.name}}Deployment_Step{{step}}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: {{step}}
+ update_identifier: {get_param: DeployIdentifier}
+{% endfor %}
+
+ {{role.name}}PostConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PostConfig
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ {% endfor %}
+ properties:
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ # Note, this should come last, so use depends_on to ensure
+ # this is created after any other resources.
+ {{role.name}}ExtraConfigPost:
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}PostConfig
+ {% endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+
+ {{role.name}}PostPuppet:
+ depends_on:
+ - {{role.name}}ExtraConfigPost
+ type: OS::TripleO::Tasks::{{role.name}}PostPuppet
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ {% if role.name in ['Controller', 'ObjectStorage'] %}
+ {{role.name}}SwiftRingUpdate:
+ type: OS::TripleO::Tasks::SwiftRingUpdate
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ {% endfor %}
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ {% endif %}
+{% endfor %}
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 587ff58d..2e1bd6f1 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'OpenStack {{role}} node configured by Puppet'
parameters:
Overcloud{{role}}Flavor:
@@ -28,6 +28,10 @@ parameters:
constraints:
- custom_constraint: nova.keypair
{% endif %}
+ NeutronPublicInterface:
+ default: nic1
+ description: What interface to bridge onto br-ex for network nodes.
+ type: string
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -83,11 +87,20 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ {{role}}ServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
{{role}}SchedulerHints:
type: json
@@ -105,6 +118,9 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
@@ -115,6 +131,20 @@ parameters:
LoggingGroups:
type: comma_delimited_list
default: []
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
+ UpgradeInitCommonCommand:
+ type: string
+ description: |
+ Common commands required by the upgrades process. This should not
+ normally be modified by the operator and is set and unset in the
+ major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
+ environment files.
+ default: ''
resources:
{{role}}:
@@ -136,7 +166,11 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: {{role}}ServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: {{role}}SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -148,6 +182,8 @@ resources:
type: multipart
- config: {get_resource: NodeUserData}
type: multipart
+ - config: {get_resource: RoleUserData}
+ type: multipart
# Creates the "heat-admin" user if configured via the environment
# Should return a OS::Heat::MultipartMime reference via OS::stack_id
@@ -159,6 +195,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ # For optional operator role-specific userdata
+ # Should return a OS::Heat::MultipartMime reference via OS::stack_id
+ RoleUserData:
+ type: OS::TripleO::{{role}}::NodeUserData
+
ExternalPort:
type: OS::TripleO::{{role}}::Ports::ExternalPort
properties:
@@ -235,17 +276,138 @@ resources:
ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+ NetHostMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ external:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - external
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - external
+ internal_api:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - internalapi
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - internalapi
+ storage:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - storage
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - storage
+ storage_mgmt:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - storagemgmt
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - storagemgmt
+ tenant:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - tenant
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - tenant
+ management:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - management
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - management
+ ctlplane:
+ fqdn:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - ctlplane
+ - {get_param: CloudDomain}
+ short:
+ list_join:
+ - '.'
+ - - {get_attr: [{{role}}, name]}
+ - ctlplane
+
+ PreNetworkConfig:
+ type: OS::TripleO::{{role}}::PreNetworkConfig
+ properties:
+ server: {get_resource: {{role}}}
+
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
server: {get_resource: {{role}}}
actions: {get_param: NetworkDeploymentActions}
+ input_values:
+ bridge_name: br-ex
+ interface_name: {get_param: NeutronPublicInterface}
+
+ {{role}}UpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+ - get_param: UpgradeInitCommonCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ {{role}}UpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: {{role}}UpgradeInitDeployment
+ server: {get_resource: {{role}}}
+ config: {get_resource: {{role}}UpgradeInitConfig}
{{role}}Deployment:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: {{role}}UpgradeInitDeployment
properties:
name: {{role}}Deployment
config: {get_resource: {{role}}Config}
@@ -285,42 +447,12 @@ resources:
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: LoggingSources}
tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: LoggingGroups}
- fqdn_internal_api:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - internalapi
- - {get_param: CloudDomain}
- fqdn_storage:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - storage
- - {get_param: CloudDomain}
- fqdn_storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- fqdn_tenant:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - tenant
- - {get_param: CloudDomain}
- fqdn_management:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - management
- - {get_param: CloudDomain}
- fqdn_ctlplane:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ fqdn_internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ fqdn_storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ fqdn_storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
+ fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
@@ -366,48 +498,13 @@ outputs:
hostname_map:
description: Mapping of network names to hostnames
value:
- external:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - external
- - {get_param: CloudDomain}
- internal_api:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - internalapi
- - {get_param: CloudDomain}
- storage:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - storage
- - {get_param: CloudDomain}
- storage_mgmt:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - storagemgmt
- - {get_param: CloudDomain}
- tenant:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - tenant
- - {get_param: CloudDomain}
- management:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - management
- - {get_param: CloudDomain}
- ctlplane:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - ctlplane
- - {get_param: CloudDomain}
+ external: {get_attr: [NetHostMap, value, external, fqdn]}
+ internal_api: {get_attr: [NetHostMap, value, internal_api, fqdn]}
+ storage: {get_attr: [NetHostMap, value, storage, fqdn]}
+ storage_mgmt: {get_attr: [NetHostMap, value, storage_mgmt, fqdn]}
+ tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
+ management: {get_attr: [NetHostMap, value, management, fqdn]}
+ ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
hosts_entry:
value:
str_replace:
@@ -425,47 +522,19 @@ outputs:
DOMAIN: {get_param: CloudDomain}
PRIMARYHOST: {get_attr: [{{role}}, name]}
EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
- EXTERNALHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - external
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
- INTERNAL_APIHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - internalapi
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
STORAGEIP: {get_attr: [StoragePort, ip_address]}
- STORAGEHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - storage
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
- STORAGE_MGMTHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - storagemgmt
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
TENANTIP: {get_attr: [TenantPort, ip_address]}
- TENANTHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - tenant
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
- MANAGEMENTHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - management
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
- CTLPLANEHOST:
- list_join:
- - '.'
- - - {get_attr: [{{role}}, name]}
- - ctlplane
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
nova_server_resource:
description: Heat resource handle for {{role}} server
value:
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index 856b306e..e5c11535 100644
--- a/puppet/services/README.rst
+++ b/puppet/services/README.rst
@@ -19,8 +19,21 @@ environment to set per service parameters.
Config Settings
---------------
-Each service may define a config_settings output variable which returns
-Hiera settings to be configured.
+Each service may output variables that configure Hiera settings on the nodes,
+in three ways:
+
+ * config_settings: the hiera keys will be applied to all roles that include
+   the service.
+
+ * global_config_settings: the hiera keys will be distributed to all roles.
+
+ * service_config_settings: takes an extra key to wire in values that are
+   defined for one service but need to be consumed by some other service.
+   For example:
+   service_config_settings:
+     haproxy:
+       foo: bar
+   This will set the hiera key 'foo' on all roles where haproxy is included
+   (see the sketch below).
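+
+As a minimal sketch (the service name, parameter and hiera keys below are
+illustrative only), a service template could expose all three keys in its
+role_data output:
+
+  outputs:
+    role_data:
+      value:
+        service_name: example_api
+        config_settings:
+          example::api::bind_host: {get_param: [ServiceNetMap, ExampleApiNetwork]}
+        global_config_settings:
+          example_api_enabled: true
+        service_config_settings:
+          haproxy:
+            tripleo::haproxy::example_api: true
+        step_config: |
+          include ::tripleo::profile::base::example::api
+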
Deployment Steps
----------------
@@ -49,6 +62,32 @@ are re-asserted when applying latter ones.
5) Service activation (Pacemaker)
+Batch Upgrade Steps
+-------------------
+
+Each service template may optionally define an `upgrade_batch_tasks` key, which
+is a list of ansible tasks to be performed during the upgrade process.
+
+Similar to step_config, we allow a series of steps for the per-service upgrade
+sequence, defined as ansible tasks tagged e.g "step1" for the first step,
+"step2" for the second, and so on (currently only two steps are supported, but
+more may be added as additional services are converted to batched upgrades).
+
+Note that each step is performed in batches before we move on to the next step,
+which is also performed in batches (we do not perform all steps on one node and
+then move on to the next node, which means you can sequence rolling upgrades of
+dependent services via the step value).
+
+The tasks performed at each step are service-specific, but note that all batch
+upgrade steps are performed before the `upgrade_tasks` described below. This
+means that all services that support rolling upgrades can be upgraded without
+downtime during `upgrade_batch_tasks`, and any remaining services are then
+stopped and upgraded during `upgrade_tasks`.
+
+The default batch size is 1, but this can be overridden for each role via the
+`upgrade_batch_size` option in roles_data.yaml.
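+
+The ceph-mon and ceph-osd templates give concrete examples; as a minimal
+sketch (the service and package names are illustrative only), a template
+could define:
+
+  upgrade_batch_tasks:
+    - name: Stop the example service
+      tags: step1
+      service: name=openstack-example state=stopped
+    - name: Update the example package
+      tags: step1
+      yum: name=openstack-example state=latest
+    - name: Start the example service again
+      tags: step1
+      service: name=openstack-example state=started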
+
Upgrade Steps
-------------
@@ -71,6 +110,20 @@ step, "step2" for the second, etc.
5) Perform any migration tasks, e.g DB sync commands
- 6) Start control-plane services
+Note that the services are not started in the upgrade tasks; instead we re-run
+puppet, which performs any reconfiguration required for the new version and
+then starts the services.
+
+Nova Server Metadata Settings
+-----------------------------
+
+One can use the hook of type `OS::TripleO::ServiceServerMetadataHook` to pass
+entries to the nova instances' metadata. It is, however, disabled by default.
+In order to override it one needs to define it in the resource registry. An
+implementation of this hook needs to conform to the following:
+
+* It needs to define an input called `RoleData` of json type. This receives
+  the contents of the `role_data` for each role's ServiceChain as its input.
- 7) Any additional online migration tasks (e.g data migrations)
+* It needs to define an output called `metadata`, which will be given to the
+  Nova Server resource as the instance's metadata (see the sketch below).
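+
+A minimal sketch of such a hook (the output value shown is illustrative only)
+could look like:
+
+  heat_template_version: ocata
+  parameters:
+    RoleData:
+      type: json
+      default: {}
+  outputs:
+    metadata:
+      description: Extra metadata entries for the role's Nova servers
+      value:
+        example_entry: {get_param: [RoleData, example_service, example_value]}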
diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml
index 0cc7ad8b..4bd9fc47 100644
--- a/puppet/services/aodh-api.yaml
+++ b/puppet/services/aodh-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Aodh API service configured with Puppet
@@ -58,6 +58,7 @@ outputs:
"%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, AodhApiNetwork]}
+ aodh::wsgi::apache::wsgi_process_display_name: 'aodh_wsgi'
aodh::api::service_name: 'httpd'
aodh::api::enable_proxy_headers_parsing: true
tripleo.aodh_api.firewall_rules:
@@ -82,3 +83,9 @@ outputs:
get_attr: [AodhBase, role_data, service_config_settings]
step_config: |
include tripleo::profile::base::aodh::api
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: Stop aodh_api service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped
diff --git a/puppet/services/aodh-base.yaml b/puppet/services/aodh-base.yaml
index 0e2410f7..c2c2d023 100644
--- a/puppet/services/aodh-base.yaml
+++ b/puppet/services/aodh-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Aodh service configured with Puppet
@@ -69,6 +69,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/aodh'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
aodh::debug: {get_param: Debug}
aodh::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::rabbit_userid: {get_param: RabbitUserName}
@@ -78,7 +79,7 @@ outputs:
aodh::keystone::authtoken::project_name: 'service'
aodh::keystone::authtoken::password: {get_param: AodhPassword}
aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::auth::auth_password: {get_param: AodhPassword}
aodh::auth::auth_region: 'regionOne'
aodh::auth::auth_tenant_name: 'service'
diff --git a/puppet/services/aodh-evaluator.yaml b/puppet/services/aodh-evaluator.yaml
index 405c500e..56dbb558 100644
--- a/puppet/services/aodh-evaluator.yaml
+++ b/puppet/services/aodh-evaluator.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Aodh Evaluator service configured with Puppet
@@ -40,3 +40,10 @@ outputs:
get_attr: [AodhBase, role_data, config_settings]
step_config: |
include tripleo::profile::base::aodh::evaluator
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-aodh-evaluator is running"
+ shell: /usr/bin/systemctl show 'openstack-aodh-evaluator' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop aodh_evaluator service
+ tags: step2
+ service: name=openstack-aodh-evaluator state=stopped
diff --git a/puppet/services/aodh-listener.yaml b/puppet/services/aodh-listener.yaml
index fc4e8b39..76db0ca8 100644
--- a/puppet/services/aodh-listener.yaml
+++ b/puppet/services/aodh-listener.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Aodh Listener service configured with Puppet
@@ -40,3 +40,10 @@ outputs:
get_attr: [AodhBase, role_data, config_settings]
step_config: |
include tripleo::profile::base::aodh::listener
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-aodh-listener is running"
+ shell: /usr/bin/systemctl show 'openstack-aodh-listener' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop aodh_listener service
+ tags: step2
+ service: name=openstack-aodh-listener state=stopped
diff --git a/puppet/services/aodh-notifier.yaml b/puppet/services/aodh-notifier.yaml
index 2e51c639..30c67635 100644
--- a/puppet/services/aodh-notifier.yaml
+++ b/puppet/services/aodh-notifier.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Aodh Notifier service configured with Puppet
@@ -40,3 +40,10 @@ outputs:
get_attr: [AodhBase, role_data, config_settings]
step_config: |
include tripleo::profile::base::aodh::notifier
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-aodh-notifier is running"
+ shell: /usr/bin/systemctl show 'openstack-aodh-notifier' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop aodh_notifier service
+ tags: step2
+ service: name=openstack-aodh-notifier state=stopped
diff --git a/puppet/services/apache-internal-tls-certmonger.yaml b/puppet/services/apache-internal-tls-certmonger.yaml
index 1d76b9a3..4c21e02a 100644
--- a/puppet/services/apache-internal-tls-certmonger.yaml
+++ b/puppet/services/apache-internal-tls-certmonger.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Apache service TLS configurations.
@@ -21,6 +21,22 @@ parameters:
via parameter_defaults in the resource registry.
type: json
+resources:
+
+ ApacheNetworks:
+ type: OS::Heat::Value
+ properties:
+ value:
+ # NOTE(jaosorior) Get unique network names to create
+ # certificates for those. We skip the tenant network since
+ # we don't need a certificate for that, and the external
+ # network will be handled in another template.
+ yaql:
+ expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
+ data:
+ map:
+ get_param: ServiceNetMap
+
outputs:
role_data:
description: Role data for the Apache role.
@@ -38,13 +54,16 @@ outputs:
hostname: "%{hiera('fqdn_NETWORK')}"
principal: "HTTP/%{hiera('fqdn_NETWORK')}"
for_each:
- NETWORK:
- # NOTE(jaosorior) Get unique network names to create
- # certificates for those. We skip the tenant network since
- # we don't need a certificate for that, and the external
- # network will be handled in another template.
- yaql:
- expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
- data:
- map:
- get_param: ServiceNetMap
+ NETWORK: {get_attr: [ApacheNetworks, value]}
+ metadata_settings:
+ repeat:
+ template:
+ - service: HTTP
+ network: $NETWORK
+ type: node
+ for_each:
+ $NETWORK: {get_attr: [ApacheNetworks, value]}
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service httpd is running"
+ shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
diff --git a/puppet/services/apache.yaml b/puppet/services/apache.yaml
index 382e0ff9..74ddbde8 100644
--- a/puppet/services/apache.yaml
+++ b/puppet/services/apache.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Apache service configured with Puppet. Note this is typically included
@@ -64,3 +64,9 @@ outputs:
apache::mod::prefork::serverlimit: { get_param: ApacheServerLimit }
apache::mod::remoteip::proxy_ips:
- "%{hiera('apache_remote_proxy_ips_network')}"
+ metadata_settings:
+ get_attr: [ApacheTLS, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service httpd is running"
+ shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
diff --git a/puppet/services/pacemaker/core.yaml b/puppet/services/auditd.yaml
index 9eca1de3..639631e1 100644
--- a/puppet/services/pacemaker/core.yaml
+++ b/puppet/services/auditd.yaml
@@ -1,7 +1,7 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
- OpenStack Core (fake) service with Pacemaker configured with Puppet.
+ AuditD configured with Puppet
parameters:
ServiceNetMap:
@@ -18,12 +18,17 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ AuditdRules:
+ description: Mapping of auditd rules
+ type: json
+ default: {}
outputs:
role_data:
- description: Role data for the Core role.
+ description: Role data for the auditd service
value:
- service_name: core
- config_settings: {}
+ service_name: auditd
+ config_settings:
+ auditd::rules: {get_param: AuditdRules}
step_config: |
- include ::tripleo::profile::pacemaker::core
\ No newline at end of file
+ include ::tripleo::profile::base::auditd
diff --git a/puppet/services/barbican-api.yaml b/puppet/services/barbican-api.yaml
index 24687d03..ffc4c83a 100644
--- a/puppet/services/barbican-api.yaml
+++ b/puppet/services/barbican-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Barbican API service configured with Puppet
@@ -75,7 +75,7 @@ outputs:
- get_attr: [ApacheServiceBase, role_data, config_settings]
- barbican::keystone::authtoken::password: {get_param: BarbicanPassword}
barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
barbican::keystone::authtoken::project_name: 'service'
barbican::api::host_href: {get_param: [EndpointMap, BarbicanPublic, uri]}
barbican::api::db_auto_create: false
@@ -105,6 +105,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/barbican'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
tripleo.barbican_api.firewall_rules:
'117 barbican':
dport:
@@ -134,11 +135,17 @@ outputs:
nova::compute::barbican_endpoint:
get_param: [EndpointMap, BarbicanInternal, uri]
nova::compute::barbican_auth_endpoint:
- get_param: [EndpointMap, KeystoneV3Internal, uri]
+ get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix]
cinder_api:
cinder::api::keymgr_api_class: >
castellan.key_manager.barbican_key_manager.BarbicanKeyManager
cinder::api::keymgr_encryption_api_url:
get_param: [EndpointMap, BarbicanInternal, uri]
cinder::api::keymgr_encryption_auth_url:
- get_param: [EndpointMap, KeystoneV3Internal, uri]
+ get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix]
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running"
+ shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
diff --git a/puppet/services/ca-certs.yaml b/puppet/services/ca-certs.yaml
index 1a534156..735e6dde 100644
--- a/puppet/services/ca-certs.yaml
+++ b/puppet/services/ca-certs.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
HAproxy service configured with Puppet
diff --git a/puppet/services/ceilometer-agent-central.yaml b/puppet/services/ceilometer-agent-central.yaml
index c4abc307..cf8a8a8e 100644
--- a/puppet/services/ceilometer-agent-central.yaml
+++ b/puppet/services/ceilometer-agent-central.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer Central Agent service configured with Puppet
@@ -54,3 +54,10 @@ outputs:
- ceilometer_redis_password: {get_param: RedisPassword}
step_config: |
include ::tripleo::profile::base::ceilometer::agent::central
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-central is running"
+ shell: /usr/bin/systemctl show 'openstack-ceilometer-central' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop ceilometer_agent_central service
+ tags: step2
+ service: name=openstack-ceilometer-central state=stopped
diff --git a/puppet/services/ceilometer-agent-compute.yaml b/puppet/services/ceilometer-agent-compute.yaml
index 5457539c..00042914 100644
--- a/puppet/services/ceilometer-agent-compute.yaml
+++ b/puppet/services/ceilometer-agent-compute.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer Compute Agent service configured with Puppet
@@ -21,6 +21,12 @@ parameters:
MonitoringSubscriptionCeilometerCompute:
default: 'overcloud-ceilometer-agent-compute'
type: string
+ InstanceDiscoveryMethod:
+ default: 'libvirt_metadata'
+ description: Method used to discover instances running on compute node
+ type: string
+ constraints:
+ - allowed_values: ['naive', 'libvirt_metadata', 'workload_partitioning']
resources:
CeilometerServiceBase:
@@ -37,6 +43,15 @@ outputs:
service_name: ceilometer_agent_compute
monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCompute}
config_settings:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
+ map_merge:
+ - get_attr: [CeilometerServiceBase, role_data, config_settings]
+ - ceilometer::agent::compute::instance_discovery_method: {get_param: InstanceDiscoveryMethod}
step_config: |
include ::tripleo::profile::base::ceilometer::agent::compute
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-compute is running"
+ shell: /usr/bin/systemctl show 'openstack-ceilometer-compute' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop ceilometer_agent_compute service
+ tags: step2
+ service: name=openstack-ceilometer-compute state=stopped
diff --git a/puppet/services/ceilometer-agent-notification.yaml b/puppet/services/ceilometer-agent-notification.yaml
index ea403aa1..760acd65 100644
--- a/puppet/services/ceilometer-agent-notification.yaml
+++ b/puppet/services/ceilometer-agent-notification.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer Notification Agent service configured with Puppet
@@ -49,3 +49,10 @@ outputs:
get_attr: [CeilometerServiceBase, role_data, config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::agent::notification
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-notification is running"
+ shell: /usr/bin/systemctl show 'openstack-ceilometer-notification' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop ceilometer_agent_notification service
+ tags: step2
+ service: name=openstack-ceilometer-notification state=stopped
diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml
index 2e2d3f2d..741f8da1 100644
--- a/puppet/services/ceilometer-api.yaml
+++ b/puppet/services/ceilometer-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer API service configured with Puppet
@@ -90,3 +90,9 @@ outputs:
get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::api
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: Stop ceilometer_api service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped
diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml
index ded1bc03..874c6893 100644
--- a/puppet/services/ceilometer-base.yaml
+++ b/puppet/services/ceilometer-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer service configured with Puppet
@@ -31,9 +31,9 @@ parameters:
type: string
hidden: true
CeilometerMeterDispatcher:
- default: 'gnocchi'
- description: Dispatcher to process meter data
- type: string
+ default: ['gnocchi']
+    description: Comma-separated list of dispatchers to process meter data
+ type: comma_delimited_list
constraints:
- allowed_values: ['gnocchi', 'database']
CeilometerEventDispatcher:
@@ -46,10 +46,10 @@ parameters:
default: 0
description: Number of workers for Ceilometer service.
type: number
- CeilometerStoreEvents:
- default: false
- description: Whether to store events in ceilometer.
- type: boolean
+ EventPipelinePublishers:
+ default: ['notifier://?topic=alarm.all']
+ description: A list of publishers to put in event_pipeline.yaml.
+ type: comma_delimited_list
Debug:
default: ''
description: Set to True to enable debugging on all services.
@@ -93,17 +93,17 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ceilometer'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
ceilometer_backend: {get_param: CeilometerBackend}
- ceilometer::metering_secret: {get_param: CeilometerMeteringSecret}
# we include db_sync class in puppet-tripleo
ceilometer::db::sync_db: false
ceilometer::keystone::authtoken::project_name: 'service'
ceilometer::keystone::authtoken::password: {get_param: CeilometerPassword}
ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- ceilometer::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ ceilometer::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
- ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents}
+ ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers}
ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion}
ceilometer::agent::auth::auth_tenant_name: 'service'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
diff --git a/puppet/services/ceilometer-collector.yaml b/puppet/services/ceilometer-collector.yaml
index e3f1ef4e..a219f9eb 100644
--- a/puppet/services/ceilometer-collector.yaml
+++ b/puppet/services/ceilometer-collector.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer Collector service configured with Puppet
@@ -59,3 +59,10 @@ outputs:
get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::collector
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-collector is running"
+ shell: /usr/bin/systemctl show 'openstack-ceilometer-collector' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop ceilometer_collector service
+ tags: step2
+ service: name=openstack-ceilometer-collector state=stopped
diff --git a/puppet/services/ceilometer-expirer.yaml b/puppet/services/ceilometer-expirer.yaml
index 3b811c4d..714434b1 100644
--- a/puppet/services/ceilometer-expirer.yaml
+++ b/puppet/services/ceilometer-expirer.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ceilometer Expirer service configured with Puppet
diff --git a/puppet/services/ceph-base.yaml b/puppet/services/ceph-base.yaml
index 8faf5640..033d3f77 100644
--- a/puppet/services/ceph-base.yaml
+++ b/puppet/services/ceph-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Ceph base service. Shared by all Ceph services.
diff --git a/puppet/services/ceph-client.yaml b/puppet/services/ceph-client.yaml
index b482dd2e..f972e21b 100644
--- a/puppet/services/ceph-client.yaml
+++ b/puppet/services/ceph-client.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Ceph Client service.
diff --git a/puppet/services/ceph-external.yaml b/puppet/services/ceph-external.yaml
index b708665f..134f47c4 100644
--- a/puppet/services/ceph-external.yaml
+++ b/puppet/services/ceph-external.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Ceph External service.
@@ -61,6 +61,12 @@ parameters:
MonitoringSubscriptionCephExternal:
default: 'overcloud-ceph-external'
type: string
+ RbdDefaultFeatures:
+ default: ''
+ description: The default features enabled when creating a block device
+ image. Only applies to format 2 images. Set to '1' for Jewel
+ clients using older Ceph servers.
+ type: string
conditions:
glance_multiple_locations:
@@ -81,24 +87,25 @@ outputs:
config_settings:
tripleo::profile::base::ceph::ceph_mon_host: {get_param: CephExternalMonHost}
ceph::profile::params::fsid: {get_param: CephClusterFSID}
+ ceph::profile::params::rbd_default_features: {get_param: RbdDefaultFeatures}
ceph::profile::params::client_keys:
- str_replace:
- template: "{
- client.CLIENT_USER: {
- secret: 'CLIENT_KEY',
- mode: '0644',
- cap_mon: 'allow r',
- cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
- }
- }"
- params:
- CLIENT_USER: {get_param: CephClientUserName}
- CLIENT_KEY: {get_param: CephClientKey}
- NOVA_POOL: {get_param: NovaRbdPoolName}
- CINDER_POOL: {get_param: CinderRbdPoolName}
- CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
- GLANCE_POOL: {get_param: GlanceRbdPoolName}
- GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
+ map_replace:
+ - CEPH_CLIENT_KEY:
+ secret: {get_param: CephClientKey}
+ mode: '0644'
+ cap_mon: 'allow r'
+ cap_osd:
+ str_replace:
+ template: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL'
+ params:
+ NOVA_POOL: {get_param: NovaRbdPoolName}
+ CINDER_POOL: {get_param: CinderRbdPoolName}
+ CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName}
+ GLANCE_POOL: {get_param: GlanceRbdPoolName}
+ GNOCCHI_POOL: {get_param: GnocchiRbdPoolName}
+ - keys:
+ CEPH_CLIENT_KEY:
+ list_join: ['.', ['client', {get_param: CephClientUserName}]]
ceph::profile::params::manage_repo: false
# FIXME(gfidente): we should not have to list the packages explicitly in
# the templates, but this should stay until the following is fixed:
diff --git a/puppet/services/pacemaker/ceilometer-api.yaml b/puppet/services/ceph-mds.yaml
index 4b6c18f6..b68567fb 100644
--- a/puppet/services/pacemaker/ceilometer-api.yaml
+++ b/puppet/services/ceph-mds.yaml
@@ -1,7 +1,7 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
- OpenStack Ceilometer API service with Pacemaker configured with Puppet
+ Ceph MDS service.
parameters:
ServiceNetMap:
@@ -18,13 +18,15 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- MonitoringSubscriptionCeilometerApi:
- default: 'overcloud-ceilometer-api'
+ CephMdsKey:
+ description: The cephx key for the MDS service. Can be created
+ with ceph-authtool --gen-print-key.
type: string
+ hidden: true
resources:
- CeilometerServiceBase:
- type: ../ceilometer-api.yaml
+ CephBase:
+ type: ./ceph-base.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
@@ -32,14 +34,16 @@ resources:
outputs:
role_data:
- description: Role data for the Ceilometer API pacemaker role.
+ description: Role data for the Ceph MDS service.
value:
- service_name: ceilometer_api
- monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerApi}
+ service_name: ceph_mds
config_settings:
map_merge:
- - get_attr: [CeilometerServiceBase, role_data, config_settings]
- - ceilometer::api::manage_service: false
- ceilometer::api::enabled: false
+ - get_attr: [CephBase, role_data, config_settings]
+ - ceph::profile::params::mds_key: {get_param: CephMdsKey}
+ tripleo.ceph_mds.firewall_rules:
+ '112 ceph_mds':
+ dport:
+ - '6800-7300'
step_config: |
- include ::tripleo::profile::pacemaker::ceilometer::api
+ include ::tripleo::profile::base::ceph::mds
diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml
index 3471f16c..1ce58335 100644
--- a/puppet/services/ceph-mon.yaml
+++ b/puppet/services/ceph-mon.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Ceph Monitor service.
@@ -28,6 +28,12 @@ parameters:
CinderRbdPoolName:
default: volumes
type: string
+ ManilaCephFSDataPoolName:
+ default: manila_data
+ type: string
+ ManilaCephFSMetadataPoolName:
+ default: manila_metadata
+ type: string
CinderBackupRbdPoolName:
default: backups
type: string
@@ -87,6 +93,8 @@ outputs:
for_each:
<%pool%>:
- {get_param: CinderRbdPoolName}
+ - {get_param: ManilaCephFSDataPoolName}
+ - {get_param: ManilaCephFSMetadataPoolName}
- {get_param: CinderBackupRbdPoolName}
- {get_param: NovaRbdPoolName}
- {get_param: GlanceRbdPoolName}
@@ -105,3 +113,27 @@ outputs:
get_attr: [CephBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceph::mon
+ upgrade_batch_tasks:
+ # Note we perform these tasks in list order, but they are all step0 so
+ # we can perform a rolling upgrade of all mon nodes in step0, then a
+ # rolling upgrade of all osd nodes in step1
+ - name: Check status
+ tags: step0,validation
+ shell: ceph health | grep -qv HEALTH_ERR
+ # FIXME(shardy) I suspect we can use heat or ansible facts here instead?
+ - name: Get hostname
+ tags: step0
+ shell: hostname -s
+ register: mon_id
+ - name: Stop Ceph Mon
+ tags: step0
+ service: name=ceph-mon@{{mon_id.stdout}} pattern=ceph-mon state=stopped
+ - name: Update ceph packages
+ tags: step0
+ yum: name=ceph-mon state=latest
+ - name: Start ceph-mon service
+ tags: step0
+ service: name=ceph-mon@{{mon_id.stdout}} state=started
+ - name: ceph osd crush tunables default
+ tags: step0
+ shell: ceph osd crush tunables default
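Taken together, the step0 batch tasks above perform a rolling monitor upgrade. A rough standalone Ansible equivalent for one monitor at a time might look like the sketch below; the ceph_mon group name and the serial strategy are assumptions, not part of the templates:

- hosts: ceph_mon
  serial: 1                                     # one monitor at a time
  tasks:
    - shell: ceph health | grep -qv HEALTH_ERR  # bail out on HEALTH_ERR
    - shell: hostname -s
      register: mon_id
    - service: name=ceph-mon@{{ mon_id.stdout }} state=stopped
    - yum: name=ceph-mon state=latest
    - service: name=ceph-mon@{{ mon_id.stdout }} state=started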
diff --git a/puppet/services/ceph-osd.yaml b/puppet/services/ceph-osd.yaml
index f6378720..9bd83aab 100644
--- a/puppet/services/ceph-osd.yaml
+++ b/puppet/services/ceph-osd.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Ceph OSD service.
@@ -45,3 +45,47 @@ outputs:
- '6800-7300'
step_config: |
include ::tripleo::profile::base::ceph::osd
+ upgrade_batch_tasks:
+ - name: Check status
+ tags: step1,validation
+ shell: ceph health | grep -qv HEALTH_ERR
+ - name: Get OSD IDs
+ tags: step1
+ shell: ls /var/lib/ceph/osd | awk 'BEGIN { FS = "-" } ; { print $2 }'
+ register: osd_ids
+ # "so that mirrors aren't rebalanced as if the OSD died" - gfidente / leseb
+ - name: ceph osd set noout
+ tags: step1
+ command: ceph osd set noout
+ - name: ceph osd set norebalance
+ tags: step1
+ command: ceph osd set norebalance
+ - name: ceph osd set nodeep-scrub
+ tags: step1
+ command: ceph osd set nodeep-scrub
+ - name: ceph osd set noscrub
+ tags: step1
+ command: ceph osd set noscrub
+ - name: Stop Ceph OSD
+ tags: step1
+ service: name=ceph-osd@{{ item }} state=stopped
+ with_items: "{{osd_ids.stdout.strip().split()}}"
+ - name: Update ceph OSD packages
+ tags: step1
+ yum: name=ceph-osd state=latest
+ - name: Start ceph-osd service
+ tags: step1
+ service: name=ceph-osd@{{ item }} state=started
+ with_items: "{{osd_ids.stdout.strip().split()}}"
+ - name: ceph osd unset noout
+ tags: step1
+ command: ceph osd unset noout
+ - name: ceph osd unset norebalance
+ tags: step1
+ command: ceph osd unset norebalance
+ - name: ceph osd unset nodeep-scrub
+ tags: step1
+ command: ceph osd unset nodeep-scrub
+ - name: ceph osd unset noscrub
+ tags: step1
+ command: ceph osd unset noscrub
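The OSD tasks rely on the directory layout under /var/lib/ceph/osd (ceph-0, ceph-1, ...) to discover instance IDs. A small sketch of how the registered output feeds the with_items loops; the debug task and the example IDs are illustrative only:

- shell: ls /var/lib/ceph/osd | awk 'BEGIN { FS = "-" } ; { print $2 }'
  register: osd_ids                 # e.g. stdout is "0\n3"
- debug:
    msg: "would restart ceph-osd@{{ item }}"
  with_items: "{{ osd_ids.stdout.strip().split() }}"   # iterates over ["0", "3"]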
diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml
index 4b85d28f..d7014e54 100644
--- a/puppet/services/ceph-rgw.yaml
+++ b/puppet/services/ceph-rgw.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Ceph RadosGW service.
@@ -54,10 +54,14 @@ outputs:
- get_attr: [CephBase, role_data, config_settings]
- tripleo::profile::base::ceph::rgw::rgw_key: {get_param: CephRgwKey}
tripleo::profile::base::ceph::rgw::keystone_admin_token: {get_param: AdminToken}
- tripleo::profile::base::ceph::rgw::keystone_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ tripleo::profile::base::ceph::rgw::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
tripleo::profile::base::ceph::rgw::civetweb_bind_ip: {get_param: [ServiceNetMap, CephRgwNetwork]}
tripleo::profile::base::ceph::rgw::civetweb_bind_port: {get_param: [EndpointMap, CephRgwInternal, port]}
- ceph::params::user_radosgw: ceph
+ tripleo::profile::base::ceph::rgw::rgw_keystone_version: v3
+ ceph::profile::params::rgw_keystone_admin_domain: default
+ ceph::profile::params::rgw_keystone_admin_project: service
+ ceph::profile::params::rgw_keystone_admin_user: swift
+ ceph::profile::params::rgw_keystone_admin_password: {get_param: SwiftPassword}
tripleo.ceph_rgw.firewall_rules:
'122 ceph rgw':
dport: {get_param: [EndpointMap, CephRgwInternal, port]}
@@ -68,7 +72,19 @@ outputs:
ceph::rgw::keystone::auth::public_url: {get_param: [EndpointMap, CephRgwPublic, uri]}
ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]}
ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]}
- ceph::rgw::keystone::auth::user: 'swift'
- ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion}
- ceph::rgw::keystone::auth::tenant: 'service'
+ ceph::rgw::keystone::auth::roles: [ 'admin', 'member', '_member_' ]
+ ceph::rgw::keystone::auth::tenant: service
+ ceph::rgw::keystone::auth::user: swift
+ ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
+ upgrade_tasks:
+ - name: Gather RGW instance ID
+ tags: step0
+ shell: hiera -c /etc/puppet/hiera.yaml ceph::profile::params::rgw_name radosgw.gateway
+ register: rgw_id
+ - name: Check status
+ shell: /usr/bin/systemctl show ceph-radosgw@{{rgw_id.stdout}} --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop RGW instance
+ tags: step1
+ service: name=ceph-radosgw@{{rgw_id.stdout}} state=stopped
diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml
index 6cb2b194..8c5a07ac 100644
--- a/puppet/services/cinder-api.yaml
+++ b/puppet/services/cinder-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Cinder API service configured with Puppet
@@ -81,7 +81,7 @@ outputs:
- get_attr: [CinderBase, role_data, config_settings]
- get_attr: [ApacheServiceBase, role_data, config_settings]
- cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- cinder::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ cinder::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
cinder::keystone::authtoken::password: {get_param: CinderPassword}
cinder::keystone::authtoken::project_name: 'service'
cinder::api::enable_proxy_headers_parsing: true
@@ -91,7 +91,6 @@ outputs:
cinder::config:
DEFAULT/swift_catalog_info:
value: 'object-store:swift:internalURL'
- cinder::glance::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
tripleo::profile::base::cinder::cinder_enable_db_purge: {get_param: CinderEnableDBPurge}
tripleo.cinder_api.firewall_rules:
'119 cinder':
@@ -147,3 +146,21 @@ outputs:
cinder::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running"
+ shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: check for cinder running under apache (post upgrade)
+ tags: step2
+ shell: "apachectl -t -D DUMP_VHOSTS | grep -q cinder"
+ register: cinder_apache
+ ignore_errors: true
+ - name: Stop cinder_api service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped
+ when: "cinder_apache.rc == 0"
+ - name: Stop and disable cinder_api service (pre-upgrade not under httpd)
+ tags: step2
+ service: name=openstack-cinder-api state=stopped enabled=no
diff --git a/puppet/services/cinder-backend-dellps.yaml b/puppet/services/cinder-backend-dellps.yaml
new file mode 100644
index 00000000..1f15c53e
--- /dev/null
+++ b/puppet/services/cinder-backend-dellps.yaml
@@ -0,0 +1,85 @@
+# Copyright (c) 2017 Dell Inc. or its subsidiaries.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+heat_template_version: ocata
+
+description: >
+ Openstack Cinder Dell EMC PS Series backend
+
+parameters:
+ CinderEnableDellPsBackend:
+ type: boolean
+ default: true
+ CinderDellPsBackendName:
+ type: string
+ default: 'tripleo_dellps'
+ CinderDellPsSanIp:
+ type: string
+ CinderDellPsSanLogin:
+ type: string
+ CinderDellPsSanPassword:
+ type: string
+ hidden: true
+ CinderDellPsSanThinProvision:
+ type: boolean
+ default: true
+ CinderDellPsGroupname:
+ type: string
+ default: 'group-0'
+ CinderDellPsPool:
+ type: string
+ default: 'default'
+ CinderDellPsChapLogin:
+ type: string
+ default: ''
+ CinderDellPsChapPassword:
+ type: string
+ default: ''
+ CinderDellPsUseChap:
+ type: boolean
+ default: false
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Dell EMC PS Series backend.
+ value:
+ service_name: cinder_backend_dellps
+ config_settings:
+ tripleo::profile::base::cinder::volume::cinder_enable_dellps_backend: {get_param: CinderEnableDellPsBackend}
+ cinder::backend::eqlx::volume_backend_name: {get_param: CinderDellPsBackendName}
+ cinder::backend::eqlx::san_ip: {get_param: CinderDellPsSanIp}
+ cinder::backend::eqlx::san_login: {get_param: CinderDellPsSanLogin}
+ cinder::backend::eqlx::san_password: {get_param: CinderDellPsSanPassword}
+ cinder::backend::eqlx::san_thin_provision: {get_param: CinderDellPsSanThinProvision}
+ cinder::backend::eqlx::eqlx_group_name: {get_param: CinderDellPsGroupname}
+ cinder::backend::eqlx::eqlx_pool: {get_param: CinderDellPsPool}
+ cinder::backend::eqlx::eqlx_use_chap: {get_param: CinderDellPsUseChap}
+ cinder::backend::eqlx::eqlx_chap_login: {get_param: CinderDellPsChapLogin}
+ cinder::backend::eqlx::eqlx_chap_password: {get_param: CinderDellPsChapPassword}
+ step_config: |
+ include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/cinder-backend-dellsc.yaml b/puppet/services/cinder-backend-dellsc.yaml
new file mode 100644
index 00000000..6a6196ac
--- /dev/null
+++ b/puppet/services/cinder-backend-dellsc.yaml
@@ -0,0 +1,85 @@
+# Copyright (c) 2016-2017 Dell Inc, or its subsidiaries.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+heat_template_version: ocata
+
+description: >
+ Openstack Cinder Dell EMC Storage Center backend
+
+parameters:
+ CinderEnableDellScBackend:
+ type: boolean
+ default: true
+ CinderDellScBackendName:
+ type: string
+ default: 'tripleo_dellsc'
+ CinderDellScSanIp:
+ type: string
+ CinderDellScSanLogin:
+ type: string
+ default: 'Admin'
+ CinderDellScSanPassword:
+ type: string
+ hidden: true
+ CinderDellScSsn:
+ type: number
+ default: 64702
+ CinderDellScIscsiIpAddress:
+ type: string
+ default: ''
+ CinderDellScIscsiPort:
+ type: number
+ default: 3260
+ CinderDellScApiPort:
+ type: number
+ default: 3033
+ CinderDellScServerFolder:
+ type: string
+ default: 'dellsc_server'
+ CinderDellScVolumeFolder:
+ type: string
+ default: 'dellsc_volume'
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Dell EMC Storage Center backend.
+ value:
+ service_name: cinder_backend_dellsc
+ config_settings:
+ tripleo::profile::base::cinder::volume::cinder_enable_dellsc_backend: {get_param: CinderEnableDellScBackend}
+ cinder::backend::dellsc_iscsi::volume_backend_name: {get_param: CinderDellScBackendName}
+ cinder::backend::dellsc_iscsi::san_ip: {get_param: CinderDellScSanIp}
+ cinder::backend::dellsc_iscsi::san_login: {get_param: CinderDellScSanLogin}
+ cinder::backend::dellsc_iscsi::san_password: {get_param: CinderDellScSanPassword}
+ cinder::backend::dellsc_iscsi::dell_sc_ssn: {get_param: CinderDellScSsn}
+ cinder::backend::dellsc_iscsi::iscsi_ip_address: {get_param: CinderDellScIscsiIpAddress}
+ cinder::backend::dellsc_iscsi::iscsi_port: {get_param: CinderDellScIscsiPort}
+ cinder::backend::dellsc_iscsi::dell_sc_api_port: {get_param: CinderDellScApiPort}
+ cinder::backend::dellsc_iscsi::dell_sc_server_folder: {get_param: CinderDellScServerFolder}
+ cinder::backend::dellsc_iscsi::dell_sc_volume_folder: {get_param: CinderDellScVolumeFolder}
+ step_config: |
+ include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/cinder-backend-scaleio.yaml b/puppet/services/cinder-backend-scaleio.yaml
new file mode 100644
index 00000000..eb709cd5
--- /dev/null
+++ b/puppet/services/cinder-backend-scaleio.yaml
@@ -0,0 +1,111 @@
+# Copyright (c) 2016-2017 Dell Inc, or its subsidiaries.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+heat_template_version: ocata
+
+description: >
+ Openstack Cinder Dell EMC ScaleIO backend
+
+parameters:
+ CinderEnableScaleIOBackend:
+ type: boolean
+ default: true
+ CinderScaleIOBackendName:
+ type: string
+ default: 'tripleo_scaleio'
+ CinderScaleIOSanIp:
+ type: string
+ default: ''
+ CinderScaleIOSanLogin:
+ type: string
+ default: ''
+ CinderScaleIOSanPassword:
+ type: string
+ default: ''
+ hidden: true
+ CinderScaleIORestServerPort:
+ type: number
+ default: 443
+ CinderScaleIOVerifyServerCertificate:
+ type: boolean
+ default: false
+ CinderScaleIOServerCertificatePath:
+ type: string
+ default: ''
+ CinderScaleIOProtectionDomainId:
+ type: string
+ default: ''
+ CinderScaleIOProtectionDomainName:
+ type: string
+ default: ''
+ CinderScaleIOStoragePoolId:
+ type: string
+ default: ''
+ CinderScaleIOStoragePoolName:
+ type: string
+ default: ''
+ CinderScaleIOStoragePools:
+ type: string
+ default: ''
+ CinderScaleIORoundVolumeCapacity:
+ type: boolean
+ default: true
+ CinderScaleIOUnmapVolumeBeforeDeletion:
+ type: boolean
+ default: false
+ CinderScaleIOMaxOverSubscriptionRatio:
+ type: string
+ default: ''
+ CinderScaleIOSanThinProvision:
+ type: boolean
+ default: true
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Dell EMC ScaleIO backend.
+ value:
+ service_name: cinder_backend_scaleio
+ config_settings:
+ tripleo::profile::base::cinder::volume::cinder_enable_scaleio_backend: {get_param: CinderEnableScaleIOBackend}
+ cinder::backend::scaleio::volume_backend_name: {get_param: CinderScaleIOBackendName}
+ cinder::backend::scaleio::sio_login: {get_param: CinderScaleIOSanLogin}
+ cinder::backend::scaleio::sio_password: {get_param: CinderScaleIOSanPassword}
+ cinder::backend::scaleio::sio_server_hostname: {get_param: CinderScaleIOSanIp}
+ cinder::backend::scaleio::sio_server_port: {get_param: CinderScaleIORestServerPort}
+ cinder::backend::scaleio::sio_verify_server_certificate: {get_param: CinderScaleIOVerifyServerCertificate}
+ cinder::backend::scaleio::sio_server_certificate_path: {get_param: CinderScaleIOServerCertificatePath}
+ cinder::backend::scaleio::sio_protection_domain_name: {get_param: CinderScaleIOProtectionDomainName}
+ cinder::backend::scaleio::sio_protection_domain_id: {get_param: CinderScaleIOProtectionDomainId}
+ cinder::backend::scaleio::sio_storage_pool_id: {get_param: CinderScaleIOStoragePoolId}
+ cinder::backend::scaleio::sio_storage_pool_name: {get_param: CinderScaleIOStoragePoolName}
+ cinder::backend::scaleio::sio_storage_pools: {get_param: CinderScaleIOStoragePools}
+ cinder::backend::scaleio::sio_round_volume_capacity: {get_param: CinderScaleIORoundVolumeCapacity}
+ cinder::backend::scaleio::sio_unmap_volume_before_deletion: {get_param: CinderScaleIOUnmapVolumeBeforeDeletion}
+ cinder::backend::scaleio::sio_max_over_subscription_ratio: {get_param: CinderScaleIOMaxOverSubscriptionRatio}
+      cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOSanThinProvision}
+ step_config: |
+ include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/cinder-backup.yaml b/puppet/services/cinder-backup.yaml
index 80795457..14be07af 100644
--- a/puppet/services/cinder-backup.yaml
+++ b/puppet/services/cinder-backup.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Cinder Backup service configured with Puppet
diff --git a/puppet/services/cinder-base.yaml b/puppet/services/cinder-base.yaml
index 59c9b844..88e7edb7 100644
--- a/puppet/services/cinder-base.yaml
+++ b/puppet/services/cinder-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Cinder base service. Shared by all Cinder services.
@@ -44,6 +44,46 @@ parameters:
default: guest
description: The username for RabbitMQ
type: string
+ CinderCronDbPurgeMinute:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Minute
+ default: '1'
+ CinderCronDbPurgeHour:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Hour
+ default: '0'
+ CinderCronDbPurgeMonthday:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Month Day
+ default: '*'
+ CinderCronDbPurgeMonth:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Month
+ default: '*'
+ CinderCronDbPurgeWeekday:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Week Day
+ default: '*'
+ CinderCronDbPurgeUser:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - User
+ default: 'keystone'
+ CinderCronDbPurgeAge:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Age
+ default: '0'
+ CinderCronDbPurgeDestination:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Log destination
+ default: '/var/log/cinder/cinder-rowsflush.log'
outputs:
role_data:
@@ -60,6 +100,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/cinder'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
cinder::debug: {get_param: Debug}
cinder::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
cinder::rabbit_userid: {get_param: RabbitUserName}
@@ -69,3 +110,12 @@ outputs:
cinder::cron::db_purge::destination: '/dev/null'
cinder::db::database_db_max_retries: -1
cinder::db::database_max_retries: -1
+ cinder::cron::db_purge::minute: {get_param: CinderCronDbPurgeMinute}
+ cinder::cron::db_purge::hour: {get_param: CinderCronDbPurgeHour}
+ cinder::cron::db_purge::monthday: {get_param: CinderCronDbPurgeMonthday}
+ cinder::cron::db_purge::month: {get_param: CinderCronDbPurgeMonth}
+ cinder::cron::db_purge::weekday: {get_param: CinderCronDbPurgeWeekday}
+ cinder::cron::db_purge::user: {get_param: CinderCronDbPurgeUser}
+ cinder::cron::db_purge::age: {get_param: CinderCronDbPurgeAge}
+ cinder::cron::db_purge::destination: {get_param: CinderCronDbPurgeDestination}
+ cinder::glance::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
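The extra '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' element is appended to every database_connection list_join in this series. With a placeholder host and password, and assuming the usual mysql+pymysql protocol from EndpointMap, the assembled DSN looks like this illustration:

# illustration only; host, password and protocol are placeholders
cinder::database_connection: mysql+pymysql://cinder:SECRET@192.0.2.10/cinder?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo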
diff --git a/puppet/services/cinder-hpelefthand-iscsi.yaml b/puppet/services/cinder-hpelefthand-iscsi.yaml
new file mode 100644
index 00000000..ca7d2838
--- /dev/null
+++ b/puppet/services/cinder-hpelefthand-iscsi.yaml
@@ -0,0 +1,56 @@
+heat_template_version: ocata
+
+description: >
+ Configure Cinder HPELeftHandISCSIDriver
+
+parameters:
+ # Config specific parameters, to be provided via parameter_defaults
+ CinderHPELeftHandISCSIApiUrl:
+ type: string
+ CinderHPELeftHandISCSIUserName:
+ type: string
+ CinderHPELeftHandISCSIPassword:
+ type: string
+ hidden: true
+ CinderHPELeftHandISCSIBackendName:
+ type: string
+ default: 'tripleo_hpelefthand'
+ CinderHPELeftHandISCSIChapEnabled:
+ type: boolean
+ default: false
+ CinderHPELeftHandClusterName:
+ type: string
+ CinderHPELeftHandDebug:
+ type: boolean
+ default: false
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+
+outputs:
+ role_data:
+ description: Role data for Cinder HPELeftHandISCSIDriver
+ value:
+ service_name: cinder_hpelefthand_iscsi
+ config_settings:
+ tripleo::profile::base::cinder::volume::cinder_enable_hpelefthand_backend: true
+ cinder::backend::hpelefthand_iscsi::hpelefthand_api_url: {get_param: CinderHPELeftHandISCSIApiUrl}
+ cinder::backend::hpelefthand_iscsi::hpelefthand_username: {get_param: CinderHPELeftHandISCSIUserName}
+ cinder::backend::hpelefthand_iscsi::hpelefthand_password: {get_param: CinderHPELeftHandISCSIPassword}
+ cinder::backend::hpelefthand_iscsi::volume_backend_name: {get_param: CinderHPELeftHandISCSIBackendName}
+ cinder::backend::hpelefthand_iscsi::hpelefthand_iscsi_chap_enabled: {get_param: CinderHPELeftHandISCSIChapEnabled}
+ cinder::backend::hpelefthand_iscsi::hpelefthand_clustername: {get_param: CinderHPELeftHandClusterName}
+ cinder::backend::hpelefthand_iscsi::hpelefthand_debug: {get_param: CinderHPELeftHandDebug}
+ step_config: |
+ include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/cinder-scheduler.yaml b/puppet/services/cinder-scheduler.yaml
index 94c263ea..f102810e 100644
--- a/puppet/services/cinder-scheduler.yaml
+++ b/puppet/services/cinder-scheduler.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Cinder Scheduler service configured with Puppet
@@ -51,3 +51,10 @@ outputs:
- cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler
step_config: |
include ::tripleo::profile::base::cinder::scheduler
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-cinder-scheduler is running"
+ shell: /usr/bin/systemctl show 'openstack-cinder-scheduler' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop cinder_scheduler service
+ tags: step2
+ service: name=openstack-cinder-scheduler state=stopped
diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml
index 82e16f39..3a06afb8 100644
--- a/puppet/services/cinder-volume.yaml
+++ b/puppet/services/cinder-volume.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Cinder Volume service configured with Puppet
@@ -20,6 +20,10 @@ parameters:
default: lioadm
description: The iSCSI helper to use with cinder.
type: string
+ CinderISCSIProtocol:
+ default: iscsi
+ description: Whether to use TCP ('iscsi') or iSER RDMA ('iser') for iSCSI
+ type: string
CinderLVMLoopDeviceSize:
default: 10280
description: The size of the loopback file used by the cinder LVM driver.
@@ -97,6 +101,7 @@ outputs:
SERVERS: {get_param: CinderNfsServers}
tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
+ tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
tripleo::profile::base::cinder::volume::rbd::cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name: {get_param: CephClientUserName}
tripleo.cinder_volume.firewall_rules:
@@ -110,3 +115,10 @@ outputs:
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_address: {get_param: [ServiceNetMap, CinderIscsiNetwork]}
step_config: |
include ::tripleo::profile::base::cinder::volume
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-cinder-volume is running"
+ shell: /usr/bin/systemctl show 'openstack-cinder-volume' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop cinder_volume service
+ tags: step2
+ service: name=openstack-cinder-volume state=stopped
diff --git a/puppet/services/congress.yaml b/puppet/services/congress.yaml
new file mode 100644
index 00000000..6855a838
--- /dev/null
+++ b/puppet/services/congress.yaml
@@ -0,0 +1,97 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Congress service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ CongressPassword:
+ description: The password for the congress service account.
+ type: string
+ hidden: true
+ Debug:
+ type: string
+ default: ''
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+
+outputs:
+ role_data:
+ description: Role data for the Congress role.
+ value:
+ service_name: congress
+ config_settings:
+ congress_password: {get_param: CongressPassword}
+ congress::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://congress:'
+ - {get_param: CongressPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/congress'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ congress::keystone::auth::tenant: 'service'
+ congress::keystone::auth::password: {get_param: CongressPassword}
+ congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ congress::debug: {get_param: Debug}
+ congress::rpc_backend: rabbit
+ congress::rabbit_userid: {get_param: RabbitUserName}
+ congress::rabbit_password: {get_param: RabbitPassword}
+ congress::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ congress::rabbit_port: {get_param: RabbitClientPort}
+ congress::server::bind_host: {get_param: [ServiceNetMap, CongressApiNetwork]}
+
+ congress::db::mysql::password: {get_param: CongressPassword}
+ congress::db::mysql::user: congress
+ congress::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ congress::db::mysql::dbname: congress
+ congress::db::mysql::allowed_hosts:
+ - '%'
+ - {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+
+
+ step_config: |
+ include ::tripleo::profile::base::congress
+
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-congress-server is running"
+ shell: /usr/bin/systemctl show 'openstack-congress-server' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop congress service
+ tags: step2
+ service: name=openstack-congress-server state=stopped
diff --git a/puppet/services/database/mongodb-base.yaml b/puppet/services/database/mongodb-base.yaml
index 3f4f106d..c27fcb7f 100644
--- a/puppet/services/database/mongodb-base.yaml
+++ b/puppet/services/database/mongodb-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Configuration details for MongoDB service using composable roles
diff --git a/puppet/services/database/mongodb.yaml b/puppet/services/database/mongodb.yaml
index 01daeafe..63ec4446 100644
--- a/puppet/services/database/mongodb.yaml
+++ b/puppet/services/database/mongodb.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
MongoDb service deployment using puppet
@@ -66,3 +66,10 @@ outputs:
mongodb::server::bind_ip: {get_param: [ServiceNetMap, MongodbNetwork]}
step_config: |
include ::tripleo::profile::base::database::mongodb
+ upgrade_tasks:
+ - name: Stop mongodb service
+ tags: step2
+ service: name=mongod state=stopped
+ - name: Start mongodb service
+ tags: step4
+ service: name=mongod state=started
diff --git a/puppet/services/database/mysql-client.yaml b/puppet/services/database/mysql-client.yaml
new file mode 100644
index 00000000..1415391c
--- /dev/null
+++ b/puppet/services/database/mysql-client.yaml
@@ -0,0 +1,30 @@
+heat_template_version: ocata
+
+description: >
+ Mysql client settings
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role for setting mysql client parameters
+ value:
+ service_name: mysql_client
+ config_settings:
+      tripleo::profile::base::database::mysql::client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]}
+ step_config: |
+ include ::tripleo::profile::base::database::mysql::client
diff --git a/puppet/services/database/mysql-internal-tls-certmonger.yaml b/puppet/services/database/mysql-internal-tls-certmonger.yaml
index 3ba51fb6..9f7eaf57 100644
--- a/puppet/services/database/mysql-internal-tls-certmonger.yaml
+++ b/puppet/services/database/mysql-internal-tls-certmonger.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
MySQL configurations for using TLS via certmonger.
@@ -41,3 +41,7 @@ outputs:
template: "mysql/%{hiera('cloud_name_NETWORK')}"
params:
NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ metadata_settings:
+ - service: mysql
+ network: {get_param: [ServiceNetMap, MysqlNetwork]}
+ type: vip
diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml
index abe752e2..808f1353 100644
--- a/puppet/services/database/mysql.yaml
+++ b/puppet/services/database/mysql.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
MySQL service deployment using puppet
@@ -34,6 +34,10 @@ parameters:
default: true
description: Whether to use Galera instead of regular MariaDB.
type: boolean
+ NovaPassword:
+ description: The password for the nova db account
+ type: string
+ hidden: true
resources:
@@ -90,13 +94,31 @@ outputs:
"%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ tripleo::profile::base::database::mysql::client_bind_address:
+ {get_param: [ServiceNetMap, MysqlNetwork]}
step_config: |
include ::tripleo::profile::base::database::mysql
+ metadata_settings:
+ get_attr: [MySQLTLS, role_data, metadata_settings]
upgrade_tasks:
+ - name: Check for galera root password
+ tags: step0
+ file: path=/root/.my.cnf state=file
- name: Stop service
tags: step2
service: name=mariadb state=stopped
- name: Start service
tags: step4
service: name=mariadb state=started
-
+ - name: Setup cell_v2 (create cell0 database)
+ tags: step4
+ mysql_db:
+ name: nova_cell0
+ state: present
+ - name: Setup cell_v2 (grant access to the nova DB user)
+ tags: step4
+ mysql_user:
+ str_replace:
+ template: "name=nova password=PASSWORD host=\"%\" priv=\"nova.*:ALL/nova_cell0.*:ALL,GRANT\" state=present"
+ params:
+ PASSWORD: {get_param: NovaPassword}
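Once str_replace substitutes PASSWORD, the task above is equivalent to a plain mysql_user invocation along these lines (the password value is a placeholder):

- name: Setup cell_v2 (grant access to the nova DB user)
  mysql_user: name=nova password=SECRET host="%" priv="nova.*:ALL/nova_cell0.*:ALL,GRANT" state=present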
diff --git a/puppet/services/database/redis-base.yaml b/puppet/services/database/redis-base.yaml
index 2fab0eb6..2b7dd430 100644
--- a/puppet/services/database/redis-base.yaml
+++ b/puppet/services/database/redis-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Redis service configured with Puppet
diff --git a/puppet/services/database/redis.yaml b/puppet/services/database/redis.yaml
index 1c333b97..5ea25ca8 100644
--- a/puppet/services/database/redis.yaml
+++ b/puppet/services/database/redis.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Redis service configured with Puppet
diff --git a/puppet/services/disabled/glance-registry.yaml b/puppet/services/disabled/glance-registry.yaml
new file mode 100644
index 00000000..4d22bddc
--- /dev/null
+++ b/puppet/services/disabled/glance-registry.yaml
@@ -0,0 +1,30 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Glance Registry service, disabled since ocata
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the disabled Glance Registry role.
+ value:
+ service_name: glance_registry
+ upgrade_tasks:
+ - name: Stop and disable glance_registry service on upgrade
+ tags: step2
+ service: name=openstack-glance-registry state=stopped enabled=no
diff --git a/puppet/services/ec2-api.yaml b/puppet/services/ec2-api.yaml
new file mode 100644
index 00000000..bb10140e
--- /dev/null
+++ b/puppet/services/ec2-api.yaml
@@ -0,0 +1,117 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack EC2-API service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ Ec2ApiWorkers:
+ default: 0
+ description: Number of workers for EC2-API service.
+ type: number
+ Ec2ApiPassword:
+ description: The password for the nova service and db account, used by nova-api.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ MonitoringSubscriptionEc2Api:
+ default: 'overcloud-ec2-api'
+ type: string
+ Ec2ApiLoggingSource:
+ type: json
+ default:
+ tag: openstack.ec2.api
+ path: /var/log/ec2api/ec2api.log
+ EnablePackageInstall:
+ default: 'false'
+ description: Set to true to enable package installation via Puppet
+ type: boolean
+
+
+conditions:
+ nova_workers_zero: {equals : [{get_param: Ec2ApiWorkers}, 0]}
+
+outputs:
+ role_data:
+ description: Role data for the EC2-API service.
+ value:
+ service_name: ec2_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionEc2Api}
+ logging_source: {get_param: Ec2ApiLoggingSource}
+ logging_groups:
+ - nova
+ config_settings:
+ map_merge:
+ - tripleo.ec2_api.firewall_rules:
+ '113 ec2_api':
+ dport:
+ - 8788
+ - 13788
+ ec2api::keystone::authtoken::project_name: 'service'
+ ec2api::keystone::authtoken::password: {get_param: Ec2ApiPassword}
+ ec2api::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ec2api::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ec2api::api::enabled: true
+ ec2api::package_manage: {get_param: EnablePackageInstall}
+ ec2api::api::ec2api_listen:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, Ec2ApiNetwork]}
+ ec2api::metadata::metadata_listen:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, Ec2ApiMetadataNetwork]}
+ ec2api::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://ec2_api:'
+ - {get_param: Ec2ApiPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/ec2_api'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ -
+ if:
+ - nova_workers_zero
+ - {}
+ - ec2api::api::ec2api_workers: {get_param: Ec2ApiWorkers}
+ ec2api::metadata::metadata_workers: {get_param: Ec2ApiWorkers}
+ step_config: |
+ include tripleo::profile::base::nova::ec2api
+ service_config_settings:
+ keystone:
+ ec2api::keystone::auth::tenant: 'service'
+ ec2api::keystone::auth::public_url: {get_param: [EndpointMap, Ec2ApiPublic, uri]}
+ ec2api::keystone::auth::internal_url: {get_param: [EndpointMap, Ec2ApiInternal, uri]}
+ ec2api::keystone::auth::admin_url: {get_param: [EndpointMap, Ec2ApiAdmin, uri]}
+ ec2api::keystone::auth::password: {get_param: Ec2ApiPassword}
+ ec2api::keystone::auth::region: {get_param: KeystoneRegion}
+ mysql:
+ ec2api::db::mysql::password: {get_param: Ec2ApiPassword}
+ ec2api::db::mysql::user: ec2_api
+ ec2api::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ ec2api::db::mysql::dbname: ec2_api
+ ec2api::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/etcd.yaml b/puppet/services/etcd.yaml
new file mode 100644
index 00000000..f96fa723
--- /dev/null
+++ b/puppet/services/etcd.yaml
@@ -0,0 +1,58 @@
+heat_template_version: ocata
+
+description: >
+ Etcd service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ EtcdInitialClusterToken:
+ default: 'etcd-tripleo'
+ description: Initial cluster token for the etcd cluster during bootstrap.
+ type: string
+ MonitoringSubscriptionEtcd:
+ default: 'overcloud-etcd'
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the Etcd role.
+ value:
+ service_name: etcd
+ monitoring_subscription: {get_param: MonitoringSubscriptionEtcd}
+ config_settings:
+ etcd::etcd_name:
+ str_replace:
+ template:
+ '"%{::fqdn_$NETWORK}"'
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet - > IP/CIDR
+ tripleo::profile::base::etcd::bind_ip: {get_param: [ServiceNetMap, EtcdNetwork]}
+ tripleo::profile::base::etcd::client_port: '2379'
+ tripleo::profile::base::etcd::peer_port: '2380'
+ etcd::initial_cluster_token: {get_param: EtcdInitialClusterToken}
+ etcd::manage_package: false
+ tripleo.etcd.firewall_rules:
+ '141 etcd':
+ dport:
+ - 2379
+ - 2380
+ step_config: |
+ include ::tripleo::profile::base::etcd
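The quoted '"%{::fqdn_$NETWORK}"' template deliberately yields a hiera interpolation rather than a concrete address, so the FQDN is only resolved on the node when Puppet runs. Assuming EtcdNetwork maps to internal_api, the rendered setting would be:

etcd::etcd_name: '"%{::fqdn_internal_api}"'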
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index 33abdbf9..d26d96aa 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Glance API service configured with Puppet
@@ -45,8 +45,23 @@ parameters:
default:
tag: openstack.glance.api
path: /var/log/glance/api.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
resources:
+
+ TLSProxyBase:
+ type: OS::TripleO::Services::TLSProxyBase
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
GlanceBase:
type: ./glance-base.yaml
properties:
@@ -66,6 +81,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [GlanceBase, role_data, config_settings]
+ - get_attr: [TLSProxyBase, role_data, config_settings]
- glance::api::database_connection:
list_join:
- ''
@@ -75,15 +91,12 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/glance'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
glance::api::bind_port: {get_param: [EndpointMap, GlanceInternal, port]}
glance::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- glance::api::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
- glance::api::registry_host:
- str_replace:
- template: "'REGISTRY_HOST'"
- params:
- REGISTRY_HOST: {get_param: [EndpointMap, GlanceRegistryInternal, host]}
- glance::api::registry_client_protocol: {get_param: [EndpointMap, GlanceRegistryInternal, protocol] }
+ glance::api::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
+ glance::api::enable_v1_api: false
+ glance::api::enable_v2_api: true
glance::api::authtoken::password: {get_param: GlancePassword}
glance::api::enable_proxy_headers_parsing: true
glance::api::debug: {get_param: Debug}
@@ -102,8 +115,34 @@ outputs:
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- glance::api::bind_host: {get_param: [ServiceNetMap, GlanceApiNetwork]}
+ tripleo::profile::base::glance::api::tls_proxy_bind_ip:
+ get_param: [ServiceNetMap, GlanceApiNetwork]
+ tripleo::profile::base::glance::api::tls_proxy_fqdn:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, GlanceApiNetwork]}
+ tripleo::profile::base::glance::api::tls_proxy_port:
+ get_param: [EndpointMap, GlanceInternal, port]
+ # Bind to localhost if internal TLS is enabled, since we put a TLs
+ # proxy in front.
+ glance::api::bind_host:
+ if:
+ - use_tls_proxy
+ - 'localhost'
+ - {get_param: [ServiceNetMap, GlanceApiNetwork]}
step_config: |
include ::tripleo::profile::base::glance::api
service_config_settings:
get_attr: [GlanceBase, role_data, service_config_settings]
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-glance-api is running"
+ shell: /usr/bin/systemctl show 'openstack-glance-api' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop glance_api service
+ tags: step2
+ service: name=openstack-glance-api state=stopped
+ - name: Stop and disable glance registry (removed for Ocata)
+ tags: step2
+ service: name=openstack-glance-registry state=stopped enabled=no
diff --git a/puppet/services/glance-base.yaml b/puppet/services/glance-base.yaml
index cc979af9..f5548982 100644
--- a/puppet/services/glance-base.yaml
+++ b/puppet/services/glance-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Glance Common settings with Puppet
@@ -105,8 +105,6 @@ outputs:
glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
glance::notify::rabbitmq::notification_driver: messagingv2
- glance::registry::db::database_db_max_retries: -1
- glance::registry::db::database_max_retries: -1
tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
diff --git a/puppet/services/glance-registry.yaml b/puppet/services/glance-registry.yaml
deleted file mode 100644
index c45582d4..00000000
--- a/puppet/services/glance-registry.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Glance Registry service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- Debug:
- default: ''
- description: Set to True to enable debugging on all services.
- type: string
- GlancePassword:
- description: The password for the glance service and db account, used by the glance services.
- type: string
- hidden: true
- GlanceWorkers:
- default: ''
- description: |
- Number of worker processes for glance registry. If left unset (empty
- string), the default value will result in the configuration being left
- unset and a system-dependent default value will be chosen (e.g.: number of
- processors). Please note that this will create a large number of processes
- on systems with a large number of CPUs resulting in excess memory
- consumption. It is recommended that a suitable non-default value be
- selected on such systems.
- type: string
- MonitoringSubscriptionGlanceRegistry:
- default: 'overcloud-glance-registry'
- type: string
- GlanceRegistryLoggingSource:
- type: json
- default:
- tag: openstack.glance.registry
- path: /var/log/glance/registry.log
-
-resources:
- GlanceBase:
- type: ./glance-base.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Glance Registry role.
- value:
- service_name: glance_registry
- monitoring_subscription: {get_param: MonitoringSubscriptionGlanceRegistry}
- logging_source: {get_param: GlanceRegistryLoggingSource}
- logging_groups:
- - glance
- config_settings:
- map_merge:
- - get_attr: [GlanceBase, role_data, config_settings]
-
- - glance::registry::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://glance:'
- - {get_param: GlancePassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/glance'
- glance::registry::authtoken::password: {get_param: GlancePassword}
- glance::registry::authtoken::project_name: 'service'
- glance::registry::pipeline: 'keystone'
- glance::registry::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- glance::registry::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
- glance::registry::debug: {get_param: Debug}
- glance::registry::workers: {get_param: GlanceWorkers}
- tripleo.glance_registry.firewall_rules:
- '112 glance_registry':
- dport:
- - 9191
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
- # internal_api -> IP
- # internal_api_uri -> [IP]
- # internal_api_subnet - > IP/CIDR
- glance::registry::bind_host: {get_param: [ServiceNetMap, GlanceRegistryNetwork]}
- step_config: |
- include ::tripleo::profile::base::glance::registry
- service_config_settings:
- get_attr: [GlanceBase, role_data, config_settings]
diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml
index b3d39e0f..22c0967e 100644
--- a/puppet/services/gnocchi-api.yaml
+++ b/puppet/services/gnocchi-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Gnocchi service configured with Puppet
@@ -84,7 +84,7 @@ outputs:
gnocchi::api::enable_proxy_headers_parsing: true
gnocchi::api::service_name: 'httpd'
gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword}
gnocchi::keystone::authtoken::project_name: 'service'
gnocchi::wsgi::apache::ssl: {get_param: EnableInternalTLS}
@@ -102,15 +102,10 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
- gnocchi::api::host:
- str_replace:
- template:
- "%{hiera('fqdn_$NETWORK')}"
- params:
- $NETWORK: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
+ gnocchi::wsgi::apache::wsgi_process_display_name: 'gnocchi_wsgi'
gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri]}
step_config: |
include ::tripleo::profile::base::gnocchi::api
@@ -130,3 +125,9 @@ outputs:
gnocchi::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: Stop gnocchi_api service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped
diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml
index 556baae0..c6310056 100644
--- a/puppet/services/gnocchi-base.yaml
+++ b/puppet/services/gnocchi-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Gnocchi service configured with Puppet
@@ -67,7 +67,8 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/gnocchi'
- gnocchi::db::sync::extra_opts: '--skip-storage --create-legacy-resource-types'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ gnocchi::db::sync::extra_opts: '--skip-storage'
gnocchi::storage::swift::swift_user: 'service:gnocchi'
gnocchi::storage::swift::swift_auth_version: 2
gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword}
diff --git a/puppet/services/gnocchi-metricd.yaml b/puppet/services/gnocchi-metricd.yaml
index 1400bc98..1337b0cb 100644
--- a/puppet/services/gnocchi-metricd.yaml
+++ b/puppet/services/gnocchi-metricd.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Gnocchi service configured with Puppet
@@ -22,7 +22,7 @@ parameters:
default: 'overcloud-gnocchi-metricd'
type: string
GnocchiMetricdWorkers:
- default: ''
+ default: '%{::os_workers}'
description: Number of workers for Gnocchi MetricD
type: string
@@ -46,3 +46,10 @@ outputs:
- gnocchi::metricd::workers: {get_param: GnocchiMetricdWorkers}
step_config: |
include ::tripleo::profile::base::gnocchi::metricd
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-gnocchi-metricd is running"
+ shell: /usr/bin/systemctl show 'openstack-gnocchi-metricd' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop gnocchi_metricd service
+ tags: step2
+ service: name=openstack-gnocchi-metricd state=stopped
diff --git a/puppet/services/gnocchi-statsd.yaml b/puppet/services/gnocchi-statsd.yaml
index 983d6c91..41222a79 100644
--- a/puppet/services/gnocchi-statsd.yaml
+++ b/puppet/services/gnocchi-statsd.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Gnocchi service configured with Puppet
@@ -45,3 +45,10 @@ outputs:
proto: 'udp'
step_config: |
include ::tripleo::profile::base::gnocchi::statsd
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-gnocchi-statsd is running"
+ shell: /usr/bin/systemctl show 'openstack-gnocchi-statsd' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop gnocchi_statsd service
+ tags: step2
+ service: name=openstack-gnocchi-statsd state=stopped
diff --git a/puppet/services/haproxy-internal-tls-certmonger.yaml b/puppet/services/haproxy-internal-tls-certmonger.yaml
index c6d53542..ae226163 100644
--- a/puppet/services/haproxy-internal-tls-certmonger.yaml
+++ b/puppet/services/haproxy-internal-tls-certmonger.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
HAProxy deployment with TLS enabled, powered by certmonger
@@ -19,6 +19,22 @@ parameters:
via parameter_defaults in the resource registry.
type: json
+resources:
+
+ HAProxyNetworks:
+ type: OS::Heat::Value
+ properties:
+ value:
+ # NOTE(jaosorior) Get unique network names to create
+ # certificates for those. We skip the tenant network since
+ # we don't need a certificate for that, and the external
+ # network will be handled in another template.
+ yaql:
+ expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
+ data:
+ map:
+ get_param: ServiceNetMap
+
outputs:
role_data:
description: Role data for the HAProxy internal TLS via certmonger role.
@@ -39,13 +55,12 @@ outputs:
postsave_cmd: "" # TODO
principal: "haproxy/%{hiera('cloud_name_NETWORK')}"
for_each:
- NETWORK:
- # NOTE(jaosorior) Get unique network names to create
- # certificates for those. We skip the tenant network since
- # we don't need a certificate for that, and the external
- # network will be handled in another template.
- yaql:
- expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
- data:
- map:
- get_param: ServiceNetMap
+ NETWORK: {get_attr: [HAProxyNetworks, value]}
+ metadata_settings:
+ repeat:
+ template:
+ - service: haproxy
+ network: $NETWORK
+ type: vip
+ for_each:
+ $NETWORK: {get_attr: [HAProxyNetworks, value]}
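The new metadata_settings output feeds the same network list computed by HAProxyNetworks into repeat/for_each. A simplified sketch of the expansion, with example network names standing in for the get_attr result:

heat_template_version: ocata
outputs:
  metadata_settings:
    value:
      repeat:
        template:
          service: haproxy
          network: $NETWORK
          type: vip
        for_each:
          $NETWORK: [internal_api, storage]
# resolves to:
#   - {service: haproxy, network: internal_api, type: vip}
#   - {service: haproxy, network: storage, type: vip}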
diff --git a/puppet/services/haproxy-public-tls-certmonger.yaml b/puppet/services/haproxy-public-tls-certmonger.yaml
index 1551d16a..6013b026 100644
--- a/puppet/services/haproxy-public-tls-certmonger.yaml
+++ b/puppet/services/haproxy-public-tls-certmonger.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
HAProxy deployment with TLS enabled, powered by certmonger
@@ -35,3 +35,7 @@ outputs:
hostname: "%{hiera('cloud_name_external')}"
postsave_cmd: "" # TODO
principal: "haproxy/%{hiera('cloud_name_external')}"
+ metadata_settings:
+ - service: haproxy
+ network: external
+ type: vip
diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml
index 675a79ec..358698dd 100644
--- a/puppet/services/haproxy.yaml
+++ b/puppet/services/haproxy.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
HAproxy service configured with Puppet
@@ -78,9 +78,18 @@ outputs:
step_config: |
include ::tripleo::profile::base::haproxy
upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service haproxy is running"
+ shell: /usr/bin/systemctl show 'haproxy' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
- name: Stop haproxy service
tags: step1
service: name=haproxy state=stopped
- name: Start haproxy service
tags: step4 # Needed at step 4 for mysql
service: name=haproxy state=started
+ metadata_settings:
+ yaql:
+ expression: '[].concat(coalesce($.data.internal, []), coalesce($.data.public, []))'
+ data:
+ public: {get_attr: [HAProxyPublicTLS, role_data, metadata_settings]}
+ internal: {get_attr: [HAProxyInternalTLS, role_data, metadata_settings]}
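The metadata_settings output on the HAProxy role merges whatever the optional TLS sub-services expose; coalesce() turns an absent (null) attribute into an empty list, so the concat still works when only one of the two TLS templates is enabled. A small worked example with hypothetical values:

    # internal TLS enabled, public TLS disabled
    data:
      internal:
        - {service: haproxy, network: internal_api, type: vip}
        - {service: haproxy, network: storage, type: vip}
      public: null

    # '[].concat(coalesce($.data.internal, []), coalesce($.data.public, []))'
    # -> [{service: haproxy, network: internal_api, type: vip},
    #     {service: haproxy, network: storage, type: vip}]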
diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml
index 12d4a6a1..3ae4cc70 100644
--- a/puppet/services/heat-api-cfn.yaml
+++ b/puppet/services/heat-api-cfn.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Heat CloudFormation API service configured with Puppet
@@ -84,3 +84,10 @@ outputs:
heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
heat::keystone::auth_cfn::password: {get_param: HeatPassword}
heat::keystone::auth_cfn::region: {get_param: KeystoneRegion}
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-heat-api-cfn is running"
+ shell: /usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop heat_api_cfn service
+ tags: step2
+ service: name=openstack-heat-api-cfn state=stopped
diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml
index 6dfeaaf3..56183535 100644
--- a/puppet/services/heat-api-cloudwatch.yaml
+++ b/puppet/services/heat-api-cloudwatch.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Heat CloudWatch API service configured with Puppet
@@ -66,3 +66,10 @@ outputs:
heat::api_cloudwatch::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
step_config: |
include ::tripleo::profile::base::heat::api_cloudwatch
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-heat-api-cloudwatch is running"
+ shell: /usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop heat_api_cloudwatch service
+ tags: step2
+ service: name=openstack-heat-api-cloudwatch state=stopped
diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml
index b0cd16dd..38c5b479 100644
--- a/puppet/services/heat-api.yaml
+++ b/puppet/services/heat-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Heat API service configured with Puppet
@@ -84,3 +84,10 @@ outputs:
heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
heat::keystone::auth::password: {get_param: HeatPassword}
heat::keystone::auth::region: {get_param: KeystoneRegion}
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-heat-api is running"
+ shell: /usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop heat_api service
+ tags: step2
+ service: name=openstack-heat-api state=stopped
diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml
index a2a65d7d..b4d314f4 100644
--- a/puppet/services/heat-base.yaml
+++ b/puppet/services/heat-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Heat base service. Shared for all Heat services.
@@ -44,6 +44,61 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ HeatCronPurgeDeletedEnsure:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Ensure
+ default: 'present'
+ HeatCronPurgeDeletedMinute:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Minute
+ default: '1'
+ HeatCronPurgeDeletedHour:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Hour
+ default: '0'
+ HeatCronPurgeDeletedMonthday:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Month Day
+ default: '*'
+ HeatCronPurgeDeletedMonth:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Month
+ default: '*'
+ HeatCronPurgeDeletedWeekday:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Week Day
+ default: '*'
+ HeatCronPurgeDeletedMaxDelay:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Max Delay
+ default: '3600'
+ HeatCronPurgeDeletedUser:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - User
+ default: 'heat'
+ HeatCronPurgeDeletedAge:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Age
+ default: '30'
+ HeatCronPurgeDeletedAgeType:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Age type
+ default: 'days'
+ HeatCronPurgeDeletedDestination:
+ type: string
+ description: >
+ Cron to purge db entries marked as deleted and older than $age - Log destination
+ default: '/dev/null'
outputs:
role_data:
@@ -57,6 +112,7 @@ outputs:
heat::rabbit_port: {get_param: RabbitClientPort}
heat::debug: {get_param: Debug}
heat::enable_proxy_headers_parsing: true
+ heat::rpc_response_timeout: 600
# We need this because the default heat policy.json no longer works on TripleO
# https://git.openstack.org/cgit/openstack/heat/commit/?id=ac86702172ddf01f5bdc3f3cd99d2e32ad9b7024
heat::policy::policies:
@@ -66,17 +122,26 @@ outputs:
heat::rabbit_heartbeat_timeout_threshold: 60
heat::keystone::authtoken::project_name: 'service'
heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- heat::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ heat::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
heat::keystone::authtoken::password: {get_param: HeatPassword}
heat::keystone::domain::domain_name: 'heat_stack'
heat::keystone::domain::domain_admin: 'heat_stack_domain_admin'
heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost'
- heat::cron::purge_deleted::age: 30
- heat::cron::purge_deleted::age_type: 'days'
- heat::cron::purge_deleted::maxdelay: 3600
- heat::cron::purge_deleted::destination: '/dev/null'
heat::db::database_db_max_retries: -1
heat::db::database_max_retries: -1
+ heat::yaql_memory_quota: 100000
+ heat::yaql_limit_iterators: 1000
+ heat::cron::purge_deleted::ensure: {get_param: HeatCronPurgeDeletedEnsure}
+ heat::cron::purge_deleted::minute: {get_param: HeatCronPurgeDeletedMinute}
+ heat::cron::purge_deleted::hour: {get_param: HeatCronPurgeDeletedHour}
+ heat::cron::purge_deleted::monthday: {get_param: HeatCronPurgeDeletedMonthday}
+ heat::cron::purge_deleted::month: {get_param: HeatCronPurgeDeletedMonth}
+ heat::cron::purge_deleted::weekday: {get_param: HeatCronPurgeDeletedWeekday}
+ heat::cron::purge_deleted::maxdelay: {get_param: HeatCronPurgeDeletedMaxDelay}
+ heat::cron::purge_deleted::user: {get_param: HeatCronPurgeDeletedUser}
+ heat::cron::purge_deleted::age: {get_param: HeatCronPurgeDeletedAge}
+ heat::cron::purge_deleted::age_type: {get_param: HeatCronPurgeDeletedAgeType}
+ heat::cron::purge_deleted::destination: {get_param: HeatCronPurgeDeletedDestination}
service_config_settings:
keystone:
tripleo::profile::base::keystone::heat_admin_domain: 'heat_stack'
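With the purge_deleted settings turned into HeatCron* parameters, the crontab can now be tuned from an environment file instead of by editing the template. A sketch of such an override (file name and values are illustrative, not recommendations):

    # heat-cron-tuning.yaml
    parameter_defaults:
      HeatCronPurgeDeletedAge: '14'
      HeatCronPurgeDeletedAgeType: 'days'
      HeatCronPurgeDeletedHour: '3'
      HeatCronPurgeDeletedMaxDelay: '1800'
      HeatCronPurgeDeletedDestination: '/var/log/heat/purge_deleted.log'

The file is passed to the deploy command with -e like any other environment file; unset parameters keep the defaults shown above.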
diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml
index 3f0e4105..7787d0a7 100644
--- a/puppet/services/heat-engine.yaml
+++ b/puppet/services/heat-engine.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Openstack Heat Engine service configured with Puppet
@@ -48,6 +48,15 @@ parameters:
default:
tag: openstack.heat.engine
path: /var/log/heat/heat-engine.log
+ HeatConvergenceEngine:
+ type: boolean
+ default: true
+ description: Enables the heat engine with the convergence architecture.
+ HeatMaxResourcesPerStack:
+ type: number
+ default: 1000
+ description: Maximum resources allowed per top-level stack. -1 stands for unlimited.
+
resources:
HeatBase:
@@ -72,6 +81,26 @@ outputs:
- heat::engine::num_engine_workers: {get_param: HeatWorkers}
heat::engine::configure_delegated_roles: false
heat::engine::trusts_delegated_roles: []
+ heat::engine::max_nested_stack_depth: 6
+ heat::engine::max_resources_per_stack: {get_param: HeatMaxResourcesPerStack}
+ heat::engine::heat_metadata_server_url:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, HeatCfnPublic, protocol]}
+ - '://'
+ - {get_param: [EndpointMap, HeatCfnPublic, host]}
+ - ':'
+ - {get_param: [EndpointMap, HeatCfnPublic, port]}
+ heat::engine::heat_waitcondition_server_url:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, HeatCfnPublic, protocol]}
+ - '://'
+ - {get_param: [EndpointMap, HeatCfnPublic, host]}
+ - ':'
+ - {get_param: [EndpointMap, HeatCfnPublic, port]}
+ - '/v1/waitcondition'
+ heat::engine::convergence_engine: {get_param: HeatConvergenceEngine}
tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge}
heat::database_connection:
list_join:
@@ -82,6 +111,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/heat'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
heat::engine::auth_encryption_key:
@@ -106,3 +136,10 @@ outputs:
keystone:
# This is needed because the keystone profile handles creating the domain
tripleo::profile::base::keystone::heat_admin_password: {get_param: HeatStackDomainAdminPassword}
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-heat-engine is running"
+ shell: /usr/bin/systemctl show 'openstack-heat-engine' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop heat_engine service
+ tags: step2
+ service: name=openstack-heat-engine state=stopped
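The two new heat::engine URL settings are assembled with list_join from the HeatCfnPublic endpoint pieces. Assuming, purely for illustration, a public CFN endpoint with protocol https, host overcloud.example.com and port 13005, the expressions resolve to:

    heat::engine::heat_metadata_server_url:      https://overcloud.example.com:13005
    heat::engine::heat_waitcondition_server_url: https://overcloud.example.com:13005/v1/waitcondition

HeatConvergenceEngine and HeatMaxResourcesPerStack are plain parameters and can be overridden per deployment in the same way as the cron settings above.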
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index 8eaf4044..2111021b 100644
--- a/puppet/services/horizon.yaml
+++ b/puppet/services/horizon.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Horizon service configured with Puppet
@@ -27,6 +27,14 @@ parameters:
description: A list of IP/Hostname for the server Horizon is running on.
Used for header checks.
type: comma_delimited_list
+ HorizonPasswordValidator:
+ description: Regex for password validation
+ type: string
+ default: ''
+ HorizonPasswordValidatorHelp:
+ description: Help text for password validation
+ type: string
+ default: ''
HorizonSecret:
description: Secret key for Django
type: string
@@ -58,8 +66,10 @@ outputs:
dport:
- 80
- 443
+ horizon::enable_secure_proxy_ssl_header: true
horizon::disable_password_reveal: true
horizon::enforce_password_check: true
+ horizon::disallow_iframe_embed: true
horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
horizon::vhost_extra_params:
@@ -69,6 +79,8 @@ outputs:
options: ['FollowSymLinks','MultiViews']
horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]}
horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ horizon::password_validator: {get_param: [HorizonPasswordValidator]}
+ horizon::password_validator_help: {get_param: [HorizonPasswordValidatorHelp]}
horizon::secret_key:
yaql:
expression: $.data.passwords.where($ != '').first()
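The new Horizon parameters take a regular expression plus help text; both default to empty strings, which leaves password validation disabled. An illustrative environment snippet (the regex and wording are examples only):

    parameter_defaults:
      HorizonPasswordValidator: '^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{8,}$'
      HorizonPasswordValidatorHelp: 'Use at least 8 characters, mixing upper case, lower case and digits.'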
diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml
index c8a2e833..a84df538 100644
--- a/puppet/services/ironic-api.yaml
+++ b/puppet/services/ironic-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ironic API configured with Puppet
@@ -25,6 +25,10 @@ parameters:
MonitoringSubscriptionIronicApi:
default: 'overcloud-ironic-api'
type: string
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
resources:
IronicBase:
@@ -47,7 +51,7 @@ outputs:
ironic::api::authtoken::project_name: 'service'
ironic::api::authtoken::username: 'ironic'
ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- ironic::api::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ ironic::api::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
# (eg. for internal_api):
@@ -73,6 +77,7 @@ outputs:
ironic::keystone::auth::auth_name: 'ironic'
ironic::keystone::auth::password: {get_param: IronicPassword }
ironic::keystone::auth::tenant: 'service'
+ ironic::keystone::auth::region: {get_param: KeystoneRegion}
mysql:
ironic::db::mysql::password: {get_param: IronicPassword}
ironic::db::mysql::user: ironic
@@ -81,3 +86,7 @@ outputs:
ironic::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ upgrade_tasks:
+ - name: Stop ironic_api service
+ tags: step2
+ service: name=openstack-ironic-api state=stopped
diff --git a/puppet/services/ironic-base.yaml b/puppet/services/ironic-base.yaml
index 0ff393c6..d186b047 100644
--- a/puppet/services/ironic-base.yaml
+++ b/puppet/services/ironic-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ironic services configured with Puppet
@@ -60,6 +60,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ironic'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
ironic::debug: {get_param: Debug}
ironic::rabbit_userid: {get_param: RabbitUserName}
ironic::rabbit_password: {get_param: RabbitPassword}
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index f173aa63..739db13c 100644
--- a/puppet/services/ironic-conductor.yaml
+++ b/puppet/services/ironic-conductor.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Ironic conductor configured with Puppet
@@ -24,6 +24,14 @@ parameters:
"full" for full cleaning, "metadata" to clean only disk
metadata (partition table).
type: string
+ IronicCleaningNetwork:
+ default: 'provisioning'
+ description: Name or UUID of the *overcloud* network used for cleaning
+ bare metal nodes. The default value of "provisioning" can be
+ left during the initial deployment (when no networks are
+ created yet) and should be changed to an actual UUID in
+ a post-deployment stack update.
+ type: string
IronicEnabledDrivers:
default: ['pxe_ipmitool', 'pxe_drac', 'pxe_ilo']
description: Enabled Ironic drivers
@@ -61,6 +69,7 @@ outputs:
- ironic::api::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase}
+ ironic::conductor::cleaning_network: {get_param: IronicCleaningNetwork}
ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers}
# We need an endpoint containing a real IP, not a VIP here
ironic_conductor_http_host: {get_param: [ServiceNetMap, IronicNetwork]}
@@ -98,3 +107,7 @@ outputs:
step_config: |
include ::tripleo::profile::base::ironic::conductor
+ upgrade_tasks:
+ - name: Stop ironic_conductor service
+ tags: step2
+ service: name=openstack-ironic-conductor state=stopped
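IronicCleaningNetwork defaults to the network name 'provisioning', which does not exist at initial deployment time; once the cleaning network has been created in the overcloud, the parameter is expected to be switched to the real UUID via a stack update. A sketch of that follow-up environment entry (the UUID is a placeholder):

    parameter_defaults:
      IronicCleaningNetwork: 6d092c8e-0000-0000-0000-000000000000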
diff --git a/puppet/services/keepalived.yaml b/puppet/services/keepalived.yaml
index b4f1a100..38f9f3be 100644
--- a/puppet/services/keepalived.yaml
+++ b/puppet/services/keepalived.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Keepalived service configured with Puppet
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
index 69898718..29157959 100644
--- a/puppet/services/kernel.yaml
+++ b/puppet/services/kernel.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Load kernel modules with kmod and configure kernel options with sysctl.
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index e48d7037..9c4cc60f 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Keystone service configured with Puppet
@@ -113,6 +113,51 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ KeystoneCronTokenFlushEnsure:
+ type: string
+ description: >
+ Cron to purge expired tokens - Ensure
+ default: 'present'
+ KeystoneCronTokenFlushMinute:
+ type: string
+ description: >
+ Cron to purge expired tokens - Minute
+ default: '1'
+ KeystoneCronTokenFlushHour:
+ type: string
+ description: >
+ Cron to purge expired tokens - Hour
+ default: '0'
+ KeystoneCronTokenFlushMonthday:
+ type: string
+ description: >
+ Cron to purge expired tokens - Month Day
+ default: '*'
+ KeystoneCronTokenFlushMonth:
+ type: string
+ description: >
+ Cron to purge expired tokens - Month
+ default: '*'
+ KeystoneCronTokenFlushWeekday:
+ type: string
+ description: >
+ Cron to purge expired tokens - Week Day
+ default: '*'
+ KeystoneCronTokenFlushMaxDelay:
+ type: string
+ description: >
+ Cron to purge expired tokens - Max Delay
+ default: '0'
+ KeystoneCronTokenFlushDestination:
+ type: string
+ description: >
+ Cron to purge expired tokens - Log destination
+ default: '/var/log/keystone/keystone-tokenflush.log'
+ KeystoneCronTokenFlushUser:
+ type: string
+ description: >
+ Cron to purge expired tokens - User
+ default: 'keystone'
resources:
@@ -148,6 +193,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/keystone'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
keystone::admin_token: {get_param: AdminToken}
keystone::admin_password: {get_param: AdminPassword}
keystone::roles::admin::password: {get_param: AdminPassword}
@@ -237,6 +283,16 @@ outputs:
# NOTE: this applies to all 2 bind IP settings below...
keystone::wsgi::apache::bind_host: {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}
keystone::wsgi::apache::admin_bind_host: {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}
+ keystone::cron::token_flush::ensure: {get_param: KeystoneCronTokenFlushEnsure}
+ keystone::cron::token_flush::minute: {get_param: KeystoneCronTokenFlushMinute}
+ keystone::cron::token_flush::hour: {get_param: KeystoneCronTokenFlushHour}
+ keystone::cron::token_flush::monthday: {get_param: KeystoneCronTokenFlushMonthday}
+ keystone::cron::token_flush::month: {get_param: KeystoneCronTokenFlushMonth}
+ keystone::cron::token_flush::weekday: {get_param: KeystoneCronTokenFlushWeekday}
+ keystone::cron::token_flush::maxdelay: {get_param: KeystoneCronTokenFlushMaxDelay}
+ keystone::cron::token_flush::destination: {get_param: KeystoneCronTokenFlushDestination}
+ keystone::cron::token_flush::user: {get_param: KeystoneCronTokenFlushUser}
+
step_config: |
include ::tripleo::profile::base::keystone
service_config_settings:
@@ -253,9 +309,5 @@ outputs:
- name: Stop keystone service (running under httpd)
tags: step2
service: name=httpd state=stopped
- - name: Sync keystone DB
- tags: step5
- command: keystone-manage db_sync
- - name: Start keystone service (running under httpd)
- tags: step6
- service: name=httpd state=started
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
diff --git a/puppet/services/logging/fluentd-base.yaml b/puppet/services/logging/fluentd-base.yaml
index c8f67556..65ad80ed 100644
--- a/puppet/services/logging/fluentd-base.yaml
+++ b/puppet/services/logging/fluentd-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: Fluentd base service
diff --git a/puppet/services/logging/fluentd-client.yaml b/puppet/services/logging/fluentd-client.yaml
index 3ae7110f..94c63d33 100644
--- a/puppet/services/logging/fluentd-client.yaml
+++ b/puppet/services/logging/fluentd-client.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: Fluentd client configured with Puppet
@@ -62,3 +62,12 @@ outputs:
get_attr: [LoggingConfiguration, LoggingSharedKey]
step_config: |
include ::tripleo::profile::base::logging::fluentd
+ upgrade_tasks:
+ - name: Check status of fluentd service
+ shell: >
+ /usr/bin/systemctl show fluentd --property ActiveState |
+ grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop fluentd service
+ tags: step2
+ service: name=fluentd state=stopped
diff --git a/puppet/services/logging/fluentd-config.yaml b/puppet/services/logging/fluentd-config.yaml
index 58b423fd..68f98aff 100644
--- a/puppet/services/logging/fluentd-config.yaml
+++ b/puppet/services/logging/fluentd-config.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: Fluentd logging configuration
diff --git a/puppet/services/manila-api.yaml b/puppet/services/manila-api.yaml
index b4b3d480..7b78c82e 100644
--- a/puppet/services/manila-api.yaml
+++ b/puppet/services/manila-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Manila-api service configured with Puppet
@@ -49,7 +49,7 @@ outputs:
- get_attr: [ManilaBase, role_data, config_settings]
- manila::keystone::authtoken::password: {get_param: ManilaPassword}
manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
manila::keystone::authtoken::project_name: 'service'
tripleo.manila_api.firewall_rules:
'150 manila':
@@ -64,6 +64,7 @@ outputs:
# internal_api_subnet - > IP/CIDR
manila::api::bind_host: {get_param: [ServiceNetMap, ManilaApiNetwork]}
manila::api::enable_proxy_headers_parsing: true
+ manila::api::default_share_type: 'default'
step_config: |
include ::tripleo::profile::base::manila::api
service_config_settings:
diff --git a/puppet/services/manila-backend-cephfs.yaml b/puppet/services/manila-backend-cephfs.yaml
index 0fc39e2a..36ef1ea9 100644
--- a/puppet/services/manila-backend-cephfs.yaml
+++ b/puppet/services/manila-backend-cephfs.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Manila Cephfs backend
@@ -40,6 +40,20 @@ parameters:
ManilaCephFSNativeCephFSEnableSnapshots:
type: boolean
default: true
+ ManilaCephFSDataPoolName:
+ default: manila_data
+ type: string
+ ManilaCephFSMetadataPoolName:
+ default: manila_metadata
+ type: string
+ # (jprovazn) default value is set to assure this templates works with an
+ # external ceph too (user/key is created only when ceph is deployed by
+ # TripleO)
+ CephManilaClientKey:
+ default: ''
+ description: The Ceph client key. Can be created with ceph-authtool --gen-print-key.
+ type: string
+ hidden: true
outputs:
role_data:
@@ -54,4 +68,8 @@ outputs:
manila::backend::cephfsnative::cephfs_auth_id: {get_param: ManilaCephFSNativeCephFSAuthId}
manila::backend::cephfsnative::cephfs_cluster_name: {get_param: ManilaCephFSNativeCephFSClusterName}
manila::backend::cephfsnative::cephfs_enable_snapshots: {get_param: ManilaCephFSNativeCephFSEnableSnapshots}
+ manila::backend::cephfsnative::ceph_client_key: {get_param: CephManilaClientKey}
+ ceph::profile::params::fs_data_pool: {get_param: ManilaCephFSDataPoolName}
+ ceph::profile::params::fs_metadata_pool: {get_param: ManilaCephFSMetadataPoolName}
+ ceph::profile::params::fs_name: {get_param: ManilaCephFSNativeShareBackendName}
step_config:
diff --git a/puppet/services/manila-backend-generic.yaml b/puppet/services/manila-backend-generic.yaml
index c527666e..23831a6a 100644
--- a/puppet/services/manila-backend-generic.yaml
+++ b/puppet/services/manila-backend-generic.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Manila generic backend.
diff --git a/puppet/services/manila-backend-netapp.yaml b/puppet/services/manila-backend-netapp.yaml
index e6d2f250..1f6fcf4f 100644
--- a/puppet/services/manila-backend-netapp.yaml
+++ b/puppet/services/manila-backend-netapp.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Manila netapp backend.
diff --git a/puppet/services/manila-base.yaml b/puppet/services/manila-base.yaml
index 844bd3a3..c183bc08 100644
--- a/puppet/services/manila-base.yaml
+++ b/puppet/services/manila-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Manila base service. Shared by manila-api/scheduler/share services
@@ -67,6 +67,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/manila'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
service_config_settings:
mysql:
manila::db::mysql::password: {get_param: ManilaPassword}
diff --git a/puppet/services/manila-scheduler.yaml b/puppet/services/manila-scheduler.yaml
index d96b677b..c8114f2b 100644
--- a/puppet/services/manila-scheduler.yaml
+++ b/puppet/services/manila-scheduler.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Manila-scheduler service configured with Puppet
diff --git a/puppet/services/manila-share.yaml b/puppet/services/manila-share.yaml
index 49c69fc1..6ac0d2cf 100644
--- a/puppet/services/manila-share.yaml
+++ b/puppet/services/manila-share.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Manila-share service configured with Puppet
@@ -46,7 +46,7 @@ outputs:
- manila::volume::cinder::cinder_admin_tenant_name: 'service'
manila::keystone::authtoken::password: {get_param: ManilaPassword}
manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
manila::keystone::authtoken::project_name: 'service'
service_config_settings:
get_attr: [ManilaBase, role_data, service_config_settings]
diff --git a/puppet/services/memcached.yaml b/puppet/services/memcached.yaml
index 9e3f6375..146cc306 100644
--- a/puppet/services/memcached.yaml
+++ b/puppet/services/memcached.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Memcached service configured with Puppet
@@ -18,6 +18,12 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MemcachedMaxMemory:
+ default: '50%'
+    description: The maximum amount of memory that memcached will be configured
+                 to use. This can be either a percentage of total memory ('50%')
+                 or a fixed value in megabytes ('2048').
+ type: string
MonitoringSubscriptionMemcached:
default: 'overcloud-memcached'
type: string
@@ -35,8 +41,17 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
memcached::listen_ip: {get_param: [ServiceNetMap, MemcachedNetwork]}
+ memcached::max_memory: {get_param: MemcachedMaxMemory}
tripleo.memcached.firewall_rules:
'121 memcached':
dport: 11211
step_config: |
include ::tripleo::profile::base::memcached
+ service_config_settings:
+ collectd:
+ tripleo.collectd.plugins.memcached:
+ - memcached
+ collectd::plugin::memcached::instances:
+ local:
+ host: "%{hiera('memcached::listen_ip')}"
+ port: 11211
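MemcachedMaxMemory accepts either a percentage of node memory or an absolute size, and the new service_config_settings block registers a collectd memcached plugin instance pointed at the hiera-interpolated listen IP, so metrics collection follows whatever address memcached binds to. An illustrative override pinning the cache to a fixed size (value assumed to be in megabytes, per the puppet-memcached module):

    parameter_defaults:
      MemcachedMaxMemory: '4096'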
diff --git a/puppet/services/metrics/collectd.yaml b/puppet/services/metrics/collectd.yaml
new file mode 100644
index 00000000..a3e3b842
--- /dev/null
+++ b/puppet/services/metrics/collectd.yaml
@@ -0,0 +1,120 @@
+heat_template_version: ocata
+
+description: Collectd client service
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ CollectdDefaultPlugins:
+ default:
+ - disk
+ - interface
+ - load
+ - memory
+ - processes
+ - tcpconns
+ type: comma_delimited_list
+ description: >
+ List of collectd plugins to activate on all overcloud hosts. See
+    the documentation for the puppet-collectd module for a list of plugins
+ supported by the module (https://github.com/voxpupuli/puppet-collectd).
+ Set this key to override the default list of plugins. Use
+ CollectdExtraPlugins if you want to load additional plugins without
+ overriding the defaults.
+ CollectdExtraPlugins:
+ default: []
+ type: comma_delimited_list
+ description: >
+ List of collectd plugins to activate on all overcloud hosts. See
+    the documentation for the puppet-collectd module for a list of plugins
+ supported by the module (https://github.com/voxpupuli/puppet-collectd).
+ Set this key to load plugins in addition to those in
+ CollectdDefaultPlugins.
+ CollectdServer:
+ type: string
+ description: >
+ Address of remote collectd server to which we will send
+ metrics.
+ default: ''
+ CollectdServerPort:
+ type: number
+ default: 25826
+ description: >
+ Port on remote collectd server to which we will send
+ metrics.
+ CollectdUsername:
+ type: string
+ description: >
+ Username for authenticating to the remote collectd server. The default
+ is to not configure any authentication.
+ default: ''
+ CollectdPassword:
+ type: string
+ hidden: true
+ description: >
+ Password for authenticating to the remote collectd server. The
+ default is to not configure any authentication.
+ default: ''
+ CollectdSecurityLevel:
+ type: string
+ description: >
+ Security level setting for remote collectd connection.
+ default: 'None'
+ constraints:
+ - allowed_values:
+ - None
+ - Sign
+ - Encrypt
+
+outputs:
+ role_data:
+ description: Role data for the Collectd client role.
+ value:
+ service_name: collectd
+ config_settings:
+ collectd::manage_repo: false
+ collectd::purge: true
+ collectd::recurse: true
+ collectd::purge_config: true
+ collectd::minimum_version: "5.7"
+ tripleo::profile::base::metrics::collectd::collectd_server:
+ get_param: CollectdServer
+ tripleo::profile::base::metrics::collectd::collectd_port:
+ get_param: CollectdServerPort
+ tripleo::profile::base::metrics::collectd::collectd_username:
+ get_param: CollectdUsername
+ tripleo::profile::base::metrics::collectd::collectd_password:
+ get_param: CollectdPassword
+ tripleo::profile::base::metrics::collectd::collectd_securitylevel:
+ get_param: CollectdSecurityLevel
+ tripleo.collectd.plugins.collectd:
+ yaql:
+ data:
+ default_plugins: {get_param: CollectdDefaultPlugins}
+ extra_plugins: {get_param: CollectdExtraPlugins}
+ expression: >
+ ($.data.default_plugins + $.data.extra_plugins)
+ .flatten().distinct()
+ step_config: |
+ include ::tripleo::profile::base::metrics::collectd
+ upgrade_tasks:
+ - name: Check status of collectd service
+ shell: >
+ /usr/bin/systemctl show collectd --property ActiveState |
+ grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop collectd service
+ tags: step2
+ service: name=collectd state=stopped
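The plugin list handed to hiera is the yaql union of CollectdDefaultPlugins and CollectdExtraPlugins, flattened and de-duplicated, so extra plugins can be enabled without restating the defaults. For example, with hypothetical extras:

    parameter_defaults:
      CollectdExtraPlugins:
        - disk        # duplicate of a default entry, removed by distinct()
        - ntpd
        - virt

    # ($.data.default_plugins + $.data.extra_plugins).flatten().distinct()
    # -> [disk, interface, load, memory, processes, tcpconns, ntpd, virt]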
diff --git a/puppet/services/mistral-api.yaml b/puppet/services/mistral-api.yaml
index 44d30358..daa1dc7c 100644
--- a/puppet/services/mistral-api.yaml
+++ b/puppet/services/mistral-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Mistral API service configured with Puppet
diff --git a/puppet/services/mistral-base.yaml b/puppet/services/mistral-base.yaml
index a11624c0..e1030346 100644
--- a/puppet/services/mistral-base.yaml
+++ b/puppet/services/mistral-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Mistral base service. Shared for all Mistral services.
@@ -65,6 +65,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/mistral'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
mistral::rabbit_userid: {get_param: RabbitUserName}
mistral::rabbit_password: {get_param: RabbitPassword}
mistral::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
@@ -74,7 +75,7 @@ outputs:
mistral::keystone_tenant: 'service'
mistral::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
mistral::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
- mistral::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ mistral::identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
service_config_settings:
keystone:
mistral::keystone::auth::tenant: 'service'
diff --git a/puppet/services/mistral-engine.yaml b/puppet/services/mistral-engine.yaml
index 10af670d..4a92b863 100644
--- a/puppet/services/mistral-engine.yaml
+++ b/puppet/services/mistral-engine.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Mistral Engine service configured with Puppet
diff --git a/puppet/services/mistral-executor.yaml b/puppet/services/mistral-executor.yaml
index 7afaf0db..6e273b92 100644
--- a/puppet/services/mistral-executor.yaml
+++ b/puppet/services/mistral-executor.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Mistral API service configured with Puppet
diff --git a/puppet/services/monitoring/sensu-base.yaml b/puppet/services/monitoring/sensu-base.yaml
index ea23b8b6..a8303a59 100644
--- a/puppet/services/monitoring/sensu-base.yaml
+++ b/puppet/services/monitoring/sensu-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: Sensu base service
diff --git a/puppet/services/monitoring/sensu-client.yaml b/puppet/services/monitoring/sensu-client.yaml
index a26c7458..d74a68a2 100644
--- a/puppet/services/monitoring/sensu-client.yaml
+++ b/puppet/services/monitoring/sensu-client.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: Sensu client configured with Puppet
@@ -62,3 +62,12 @@ outputs:
region: {get_param: KeystoneRegion}
step_config: |
include ::tripleo::profile::base::monitoring::sensu
+ upgrade_tasks:
+ - name: Check status of sensu-client service
+ shell: >
+ /usr/bin/systemctl show sensu-client --property ActiveState |
+ grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop sensu-client service
+ tags: step2
+ service: name=sensu-client state=stopped
diff --git a/puppet/services/pacemaker/neutron-midonet.yaml b/puppet/services/network/contrail-analytics-database.yaml
index fdd5dafb..67341ed3 100644
--- a/puppet/services/pacemaker/neutron-midonet.yaml
+++ b/puppet/services/network/contrail-analytics-database.yaml
@@ -1,7 +1,10 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
- OpenStack Neutron Midonet with Pacemaker configured with Puppet
+ Contrail Analytics Database service deployment using puppet, this YAML file
+ creates the interface between the HOT template
+ and the puppet manifest that actually installs
+ and configures Contrail Analytics Database.
parameters:
ServiceNetMap:
@@ -20,9 +23,8 @@ parameters:
type: json
resources:
-
- NeutronMidonetBase:
- type: ../neutron-midonet.yaml
+ ContrailBase:
+ type: ./contrail-base.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
@@ -30,12 +32,12 @@ resources:
outputs:
role_data:
- description: Role data for the Neutron Midonet plugin.
+ description: Role Contrail Analytics Database using composable services.
value:
- service_name: neutron_midonet
- monitoring_subscription: {get_attr: [NeutronMidonetBase, role_data, monitoring_subscription]}
+ service_name: contrail_analytics_database
config_settings:
map_merge:
- - get_attr: [NeutronMidonetBase, role_data, config_settings]
+ - get_attr: [ContrailBase, role_data, config_settings]
+ - contrail::analytics::database::host_ip: {get_param: [ServiceNetMap, ContrailAnalyticsDatabaseNetwork]}
step_config: |
- include ::tripleo::profile::pacemaker::neutron::plugins::midonet
+ include ::tripleo::network::contrail::analyticsdatabase
diff --git a/puppet/services/network/contrail-analytics.yaml b/puppet/services/network/contrail-analytics.yaml
index 1c2331fa..e3e0ec4b 100644
--- a/puppet/services/network/contrail-analytics.yaml
+++ b/puppet/services/network/contrail-analytics.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Contrail Analytics service deployment using puppet, this YAML file
@@ -21,44 +21,6 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- ContrailAnalyticsHostIP:
- description: host IP address of Analytics
- type: string
- ContrailAnalyticsRedisServerIp:
- description: Redis server ip address
- type: string
- ContrailAnalyticsCollectorServerHttpPort:
- description: Collector http port
- type: number
- default: 8089
- ContrailAnalyticsCollectorSandeshPort:
- description: Collector sandesh port
- type: number
- default: 8086
- ContrailAnalyticsHttpServerPort:
- description: Analytics http port
- type: number
- default: 8090
- ContrailAnalyticsListenAddress:
- default: '0.0.0.0'
- description: IP address Config API is listening on
- type: string
- ContrailAnalyticsListenPort:
- default: 8082
- description: Port Config API is listening on
- type: number
- ContrailAnalyticsRedisServerPort:
- description: Redis server port
- type: number
- default: 6379
- ContrailAnalyticsRestApiIp:
- description: IP address Analytics rest interface listens on
- type: string
- default: '0.0.0.0'
- ContrailAnalyticsRestApiPort:
- description: Analytics rest port
- type: number
- default: 8081
resources:
ContrailBase:
@@ -76,15 +38,14 @@ outputs:
config_settings:
map_merge:
- get_attr: [ContrailBase, role_data, config_settings]
- - contrail::analytics::collector_http_server_port: {get_param: ContrailAnalyticsCollectorServerHttpPort}
- contrail::analytics::collector_sandesh_port: {get_param: ContrailAnalyticsCollectorSandeshPort}
- contrail::analytics::host_ip: {get_param: ContrailAnalyticsHostIP}
- contrail::analytics::http_server_port: {get_param: ContrailAnalyticsHttpServerPort}
- contrail::analytics::listen_ip_address: {get_param: ContrailAnalyticsListenAddress}
- contrail::analytics::listen_port: {get_param: ContrailAnalyticsListenPort}
- contrail::analytics::redis_server: {get_param: ContrailAnalyticsRedisServerIp}
- contrail::analytics::redis_server_port: {get_param: ContrailAnalyticsRedisServerPort}
- contrail::analytics::rest_api_ip: {get_param: ContrailAnalyticsRestApiIp}
- contrail::analytics::rest_api_port: {get_param: ContrailAnalyticsRestApiPort}
+ - contrail::analytics::collector_http_server_port: {get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal, port]}
+ contrail::analytics::collector_sandesh_port: {get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal, port]}
+ contrail::analytics::host_ip: {get_param: [ServiceNetMap, ContrailAnalyticsNetwork]}
+ contrail::analytics::http_server_port: {get_param: [EndpointMap, ContrailAnalyticsHttpInternal, port]}
+ contrail::analytics::listen_ip_address: {get_param: [ServiceNetMap, ContrailAnalyticsNetwork]}
+ contrail::analytics::redis_server: '127.0.0.1'
+ contrail::analytics::redis_server_port: {get_param: [EndpointMap, ContrailAnalyticsRedisInternal, port]}
+ contrail::analytics::rest_api_ip: {get_param: [ServiceNetMap, ContrailAnalyticsNetwork]}
+ contrail::analytics::rest_api_port: {get_param: [EndpointMap, ContrailAnalyticsApiInternal, port]}
step_config: |
include ::tripleo::network::contrail::analytics
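Dropping the per-service ContrailAnalytics* parameters in favour of ServiceNetMap and EndpointMap lookups means the analytics addresses and ports follow the deployment's network layout instead of being set by hand. Assuming, for illustration, that ContrailAnalyticsNetwork maps to internal_api and that the EndpointMap keeps the same port numbers as the removed defaults, the resulting hiera is roughly:

    contrail::analytics::host_ip:                 <internal_api IP of the node>
    contrail::analytics::rest_api_port:           8081
    contrail::analytics::http_server_port:        8090
    contrail::analytics::collector_sandesh_port:  8086
    contrail::analytics::redis_server:            127.0.0.1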
diff --git a/puppet/services/network/contrail-base.yaml b/puppet/services/network/contrail-base.yaml
index 03dbea5b..bc56a3ca 100644
--- a/puppet/services/network/contrail-base.yaml
+++ b/puppet/services/network/contrail-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Base parameters for all Contrail Services.
@@ -18,47 +18,42 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ ContrailAAAMode:
+ description: AAAmode can be no-auth, cloud-admin or rbac
+ type: string
+ default: 'rbac'
+ ContrailAAAModeAnalytics:
+ description: AAAmode for analytics can be no-auth, cloud-admin or rbac
+ type: string
+ default: 'no-auth'
AdminPassword:
description: Keystone admin user password
type: string
+ hidden: true
AdminTenantName:
description: Keystone admin tenant name
type: string
+ default: 'admin'
AdminToken:
description: Keystone admin token
type: string
+ hidden: true
AdminUser:
description: Keystone admin user name
type: string
- AuthHost:
- description: Keystone host IP address
- type: string
- AuthPort:
- default: 35357
- description: Keystone port
+ default: 'admin'
+ AuthPortSSL:
+ default: 13357
+ description: Keystone SSL port
+ type: number
+ AuthPortSSLPublic:
+ default: 13000
+ description: Keystone Public SSL port
type: number
- AuthProtocol:
- default: 'http'
- description: Keystone authentication protocol
- type: string
- ContrailDiscoveryServerIp:
- description: Discovery server ip address
- type: string
- ContrailKafkaBrokerList:
- description: List of kafka servers
- type: comma_delimited_list
ContrailAuth:
default: 'keystone'
description: Keystone authentication method
type: string
- ContrailCassandraServerList:
- default: []
- description: List of cassandra servers
- type: comma_delimited_list
- ContrailDiscoveryServerPort:
- description: Discovery server port
- type: number
- default: 5998
ContrailInsecure:
default: false
description: Keystone insecure mode
@@ -67,14 +62,18 @@ parameters:
default: '127.0.0.1:12111'
description: Memcached server
type: string
- ContrailMultiTenancy:
- default: true
- description: Turn on/off multi-tenancy
- type: boolean
- ContrailZkServerIp:
- default: []
- description: List of zookeeper servers
- type: comma_delimited_list
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
outputs:
role_data:
@@ -82,19 +81,23 @@ outputs:
value:
service_name: contrail_base
config_settings:
+ contrail::aaa_mode: {get_param: ContrailAAAMode}
+ contrail::analytics_aaa_mode: {get_param: ContrailAAAModeAnalytics}
contrail::admin_password: {get_param: AdminPassword}
contrail::admin_tenant_name: {get_param: AdminTenantName}
contrail::admin_token: {get_param: AdminToken}
contrail::admin_user: {get_param: AdminUser}
- contrail::auth_host: {get_param: [EndpointMap, KeystoneInternal, host] }
- contrail::auth_port: {get_param: [EndpointMap, KeystoneInternal, port] }
- contrail::auth_protocol: {get_param: [EndpointMap, KeystoneInternal, protocol] }
- contrail::disc_server_ip: {get_param: ContrailDiscoveryServerIp}
- contrail::kafka_broker_list: {get_param: ContrailKafkaBrokerList}
contrail::auth: {get_param: ContrailAuth}
- contrail::cassandra_server_list: {get_param: ContrailCassandraServerList}
- contrail::disc_server_port: {get_param: ContrailDiscoveryServerPort}
+ contrail::auth_host: {get_param: [EndpointMap, KeystonePublic, host] }
+ contrail::auth_port: {get_param: [EndpointMap, KeystoneAdmin, port] }
+ contrail::auth_port_ssl: {get_param: AuthPortSSL }
+ contrail::auth_port_public: {get_param: [EndpointMap, KeystonePublic, port] }
+ contrail::auth_port_ssl_public: {get_param: AuthPortSSLPublic }
+ contrail::auth_protocol: {get_param: [EndpointMap, KeystoneInternal, protocol] }
+ contrail::api_port: {get_param: [EndpointMap, ContrailConfigInternal, port] }
+ contrail::disc_server_port: {get_param: [EndpointMap, ContrailDiscoveryInternal, port] }
contrail::insecure: {get_param: ContrailInsecure}
contrail::memcached_server: {get_param: ContrailMemcachedServer}
- contrail::multi_tenancy: {get_param: ContrailMultiTenancy}
- contrail::zk_server_ip: {get_param: ContrailZkServerIp}
+ contrail::rabbit_password: {get_param: RabbitPassword}
+ contrail::rabbit_user: {get_param: RabbitUserName}
+ contrail::rabbit_port: {get_param: RabbitClientPort}
diff --git a/puppet/services/network/contrail-config.yaml b/puppet/services/network/contrail-config.yaml
index 0987fc75..185b6094 100644
--- a/puppet/services/network/contrail-config.yaml
+++ b/puppet/services/network/contrail-config.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Contrail Config service deployment using puppet, this YAML file
@@ -21,29 +21,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- ContrailConfigIfmapServerIp:
- description: Ifmap server ip address
- type: string
ContrailConfigIfmapUserName:
description: Ifmap user name
type: string
+ default: 'api-server'
ContrailConfigIfmapUserPassword:
description: Ifmap user password
type: string
- ContrailConfigRabbitServerIp:
- description: RabbitMq server ip address
- type: string
- ContrailConfigRedisServerIp:
- description: Redis server ip address
- type: string
- ContrailConfigListenAddress:
- default: '0.0.0.0'
- description: IP address Config API is listening on
- type: string
- ContrailConfigListenPort:
- default: 8082
- description: Port Config API is listening on
- type: number
+ default: 'api-server'
resources:
ContrailBase:
@@ -62,11 +47,10 @@ outputs:
map_merge:
- get_attr: [ContrailBase, role_data, config_settings]
- contrail::config::ifmap_password: {get_param: ContrailConfigIfmapUserPassword}
- contrail::config::ifmap_server_ip: {get_param: ContrailConfigIfmapServerIp}
contrail::config::ifmap_username: {get_param: ContrailConfigIfmapUserName}
- contrail::config::listen_ip_address: {get_param: ContrailConfigListenAddress}
- contrail::config::listen_port: {get_param: ContrailConfigListenPort}
- contrail::config::rabbit_server: {get_param: ContrailConfigRabbitServerIp}
- contrail::config::redis_server: {get_param: ContrailConfigRedisServerIp}
+ contrail::config::listen_ip_address: {get_param: [ServiceNetMap, ContrailConfigNetwork]}
+ contrail::config::listen_port: {get_param: [EndpointMap, ContrailConfigInternal, port] }
+ contrail::config::redis_server: '127.0.0.1'
+ contrail::config::host_ip: {get_param: [ServiceNetMap, ContrailConfigNetwork] }
step_config: |
include ::tripleo::network::contrail::config
diff --git a/puppet/services/network/contrail-control.yaml b/puppet/services/network/contrail-control.yaml
index 9356e9e9..0964989b 100644
--- a/puppet/services/network/contrail-control.yaml
+++ b/puppet/services/network/contrail-control.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Contrail Control service deployment using puppet, this YAML file
@@ -21,15 +21,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- ContrailControlHostIP:
- description: host IP address of Analytics
- type: string
- ContrailControlIfmapUserName:
- description: Ifmap user name
- type: string
- ContrailControlIfmapUserPassword:
- description: Ifmap user password
+ ContrailControlASN:
+ description: Autonomous System Number
+ type: number
+ default: 64512
+ ContrailControlRNDCSecret:
+    description: sha256 HMAC key, e.g. echo -n "values" | openssl dgst -sha256 -hmac key -binary | base64
type: string
+ hidden: true
resources:
ContrailBase:
@@ -47,8 +46,8 @@ outputs:
config_settings:
map_merge:
- get_attr: [ContrailBase, role_data, config_settings]
- - contrail::control::host_ip: {get_param: ContrailControlHostIP}
- contrail::control::ifmap_username: {get_param: ContrailControlIfmapUserName}
- contrail::control::ifmap_password: {get_param: ContrailControlIfmapUserPassword}
+ - contrail::control::asn: {get_param: ContrailControlASN }
+ contrail::control::host_ip: {get_param: [ServiceNetMap, ContrailControlNetwork]}
+ contrail::control::rndc_secret: {get_param: ContrailControlRNDCSecret}
step_config: |
include ::tripleo::network::contrail::control
diff --git a/puppet/services/network/contrail-database.yaml b/puppet/services/network/contrail-database.yaml
index e5712618..b47c2c36 100644
--- a/puppet/services/network/contrail-database.yaml
+++ b/puppet/services/network/contrail-database.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Contrail Database service deployment using puppet, this YAML file
@@ -21,13 +21,6 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- ContrailDatabaseHostIP:
- description: host IP address of Database node
- type: string
- ContrailDatabaseMinDisk:
- description: Minimum disk size for database
- type: number
- default: 64
resources:
ContrailBase:
@@ -45,7 +38,6 @@ outputs:
config_settings:
map_merge:
- get_attr: [ContrailBase, role_data, config_settings]
- - contrail::database::host_ip: {get_param: ContrailDatabaseHostIP}
- contrail::database::minimum_diskGB: {get_param: ContrailDatabaseMinDisk}
+ - contrail::database::host_ip: {get_param: [ServiceNetMap, ContrailDatabaseNetwork]}
step_config: |
- include ::tripleo::profile::contrail::database
+ include ::tripleo::network::contrail::database
diff --git a/puppet/services/pacemaker/neutron-plugin-nuage.yaml b/puppet/services/network/contrail-heat.yaml
index 9fca2cc3..4dfc6579 100644
--- a/puppet/services/pacemaker/neutron-plugin-nuage.yaml
+++ b/puppet/services/network/contrail-heat.yaml
@@ -1,7 +1,8 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
- OpenStack Neutron Nuage Plugin with Pacemaker configured with Puppet
+  The Contrail Heat plugin adds Contrail-specific Heat resources, enabling Heat
+  to orchestrate Contrail
parameters:
ServiceNetMap:
@@ -20,9 +21,8 @@ parameters:
type: json
resources:
-
- NeutronPluginNuageBase:
- type: ../neutron-plugin-nuage.yaml
+ ContrailBase:
+ type: ./contrail-base.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
@@ -30,11 +30,11 @@ resources:
outputs:
role_data:
- description: Role data for the Neutron Nuage plugin.
+ description: Contrail Heat plugin
value:
- service_name: neutron_plugin_nuage
+ service_name: contrail_heat
config_settings:
map_merge:
- - get_attr: [NeutronPluginNuageBase, role_data, config_settings]
+ - get_attr: [ContrailBase, role_data, config_settings]
step_config: |
- include ::tripleo::profile::pacemaker::neutron::plugins::nuage
+ include ::tripleo::network::contrail::heat
diff --git a/puppet/services/pacemaker/database/mongodb.yaml b/puppet/services/network/contrail-neutron-plugin.yaml
index 982b6064..2f2ceb37 100644
--- a/puppet/services/pacemaker/database/mongodb.yaml
+++ b/puppet/services/network/contrail-neutron-plugin.yaml
@@ -1,10 +1,9 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
- MongoDb service deployment using puppet
+ OpenStack Neutron Opencontrail plugin
parameters:
- #Parameters not used EndpointMap
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -19,10 +18,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ ContrailExtensions:
+ description: List of OpenContrail extensions to be enabled
+ type: comma_delimited_list
+ default: ''
resources:
- MongoDbBase:
- type: ../../database/mongodb.yaml
+ ContrailBase:
+ type: ./contrail-base.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
@@ -30,13 +33,13 @@ resources:
outputs:
role_data:
- description: Service mongodb using composable services.
+ description: Role data for the Neutron Opencontrail plugin
value:
- service_name: mongodb
+ service_name: contrail_neutron_plugin
config_settings:
map_merge:
- - get_attr: [MongoDbBase, role_data, config_settings]
- - tripleo::profile::pacemaker::database::mongodb::mongodb_replset: {get_attr: [MongoDbBase, aux_parameters, rplset_name]}
- mongodb::server::service_manage: False
+ - get_attr: [ContrailBase, role_data, config_settings]
+ - neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions
+ contrail::vrouter::contrail_extensions: {get_param: ContrailExtensions}
step_config: |
- include ::tripleo::profile::pacemaker::database::mongodb
+ include tripleo::network::contrail::neutron_plugin
diff --git a/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml b/puppet/services/network/contrail-provision.yaml
index 5dd4e588..765be9a9 100644
--- a/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml
+++ b/puppet/services/network/contrail-provision.yaml
@@ -1,7 +1,7 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
- OpenStack Neutron PLUMgrid Plugin with Pacemaker configured with Puppet
+ Provision Contrail services after deployment
parameters:
ServiceNetMap:
@@ -20,9 +20,8 @@ parameters:
type: json
resources:
-
- NeutronPluginPlumgridBase:
- type: ../neutron-plugin-ml2.yaml
+ ContrailBase:
+ type: ./contrail-base.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
@@ -30,11 +29,11 @@ resources:
outputs:
role_data:
- description: Role data for the Neutron PLUMgrid plugin.
+ description: Contrail provisioning role
value:
- service_name: neutron_plugin_plumgrid
+ service_name: contrail_provision
config_settings:
map_merge:
- - get_attr: [NeutronPluginPlumgridBase, role_data, config_settings]
+ - get_attr: [ContrailBase, role_data, config_settings]
step_config: |
- include ::tripleo::profile::pacemaker::neutron::plugins::plumgrid
+ include ::tripleo::network::contrail::provision
diff --git a/puppet/services/network/contrail-tsn.yaml b/puppet/services/network/contrail-tsn.yaml
new file mode 100644
index 00000000..88adc4a5
--- /dev/null
+++ b/puppet/services/network/contrail-tsn.yaml
@@ -0,0 +1,64 @@
+heat_template_version: ocata
+
+description: >
+ Contrail TSN Service
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NeutronMetadataProxySharedSecret:
+ description: Metadata Secret
+ type: string
+ VrouterPhysicalInterface:
+ default: 'eth0'
+ description: vRouter physical interface
+ type: string
+ VrouterGateway:
+ default: '192.168.24.1'
+ description: vRouter default gateway
+ type: string
+ VrouterNetmask:
+ default: '255.255.255.0'
+ description: vRouter netmask
+ type: string
+
+resources:
+ ContrailBase:
+ type: ./contrail-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Contrail TSN Service
+ value:
+ service_name: contrail_tsn
+ config_settings:
+ map_merge:
+ - get_attr: [ContrailBase, role_data, config_settings]
+ - contrail::vrouter::host_ip: {get_param: [ServiceNetMap, NeutronCorePluginOpencontrailNetwork]}
+ contrail::vrouter::physical_interface: {get_param: VrouterPhysicalInterface}
+ contrail::vrouter::gateway: {get_param: VrouterGateway}
+ contrail::vrouter::netmask: {get_param: VrouterNetmask}
+ contrail::vrouter::metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
+ contrail::vrouter::is_tsn: 'true'
+ tripleo.neutron_compute_plugin_opencontrail.firewall_rules:
+ '111 neutron_compute_plugin_opencontrail proxy':
+ dport: 8097
+ proto: tcp
+ step_config: |
+ include ::tripleo::network::contrail::vrouter
diff --git a/puppet/services/network/contrail-vrouter.yaml b/puppet/services/network/contrail-vrouter.yaml
new file mode 100644
index 00000000..db9f0836
--- /dev/null
+++ b/puppet/services/network/contrail-vrouter.yaml
@@ -0,0 +1,64 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Neutron Compute OpenContrail plugin
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NeutronMetadataProxySharedSecret:
+ description: Metadata Secret
+ type: string
+ hidden: true
+ ContrailVrouterPhysicalInterface:
+ default: 'eth0'
+ description: vRouter physical interface
+ type: string
+ ContrailVrouterGateway:
+ default: '192.0.2.1'
+ description: vRouter default gateway
+ type: string
+ ContrailVrouterNetmask:
+ default: '255.255.255.0'
+ description: vRouter netmask
+ type: string
+
+resources:
+ ContrailBase:
+ type: ./contrail-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Compute OpenContrail plugin
+ value:
+ service_name: contrail_vrouter
+ config_settings:
+ map_merge:
+ - get_attr: [ContrailBase, role_data, config_settings]
+ - contrail::vrouter::host_ip: {get_param: [ServiceNetMap, NeutronCorePluginOpencontrailNetwork]}
+ contrail::vrouter::physical_interface: {get_param: ContrailVrouterPhysicalInterface}
+ contrail::vrouter::gateway: {get_param: ContrailVrouterGateway}
+ contrail::vrouter::netmask: {get_param: ContrailVrouterNetmask}
+ contrail::vrouter::metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
+ tripleo.neutron_compute_plugin_opencontrail.firewall_rules:
+ '111 neutron_compute_plugin_opencontrail proxy':
+ dport: 8097
+ proto: tcp
+ step_config: |
+ include ::tripleo::network::contrail::vrouter
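The new contrail-vrouter service layers only the vRouter-specific hiera on top of contrail-base.yaml; host IP and endpoints come from ServiceNetMap and EndpointMap. As a usage sketch (the interface name below is illustrative, not taken from the patch), the vRouter parameters would normally be overridden from an environment file:

    parameter_defaults:
      ContrailVrouterPhysicalInterface: eth1      # illustrative NIC name
      ContrailVrouterGateway: 192.0.2.1           # default shown above
      ContrailVrouterNetmask: 255.255.255.0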
diff --git a/puppet/services/network/contrail-webui.yaml b/puppet/services/network/contrail-webui.yaml
index 72b9e1c0..3786cdd1 100644
--- a/puppet/services/network/contrail-webui.yaml
+++ b/puppet/services/network/contrail-webui.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Contrail WebUI service deployment using puppet, this YAML file
@@ -21,27 +21,6 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- ContrailWebUiAnalyticsVip:
- description: Contrail Analytics VIP
- type: string
- ContrailWebUiConfigVip:
- description: Contrail Config VIP
- type: string
- ContrailWebUiNeutronVip:
- description: Neutron VIP
- type: string
- ContrailWebuiHttpPort:
- default: 8080
- description: HTTP Port of Webui
- type: number
- ContrailWebuiHttpsPort:
- default: 8143
- description: HTTPS Port of Webui
- type: number
- ContrailWebUiRedisIp:
- description: Redis IP
- type: string
- default: '127.0.0.1'
resources:
ContrailBase:
@@ -59,11 +38,8 @@ outputs:
config_settings:
map_merge:
- get_attr: [ContrailBase, role_data, config_settings]
- - contrail::webui::contrail_analytics_vip: {get_param: ContrailWebUiAnalyticsVip}
- contrail::webui::contrail_config_vip: {get_param: ContrailWebUiConfigVip}
- contrail::webui::contrail_webui_http_port: {get_param: ContrailWebuiHttpPort}
- contrail::webui::contrail_webui_https_port: {get_param: ContrailWebuiHttpsPort}
- contrail::webui::neutron_vip: {get_param: ContrailWebUiNeutronVip}
- contrail::webui::redis_ip: {get_param: ContrailWebUiRedisIp}
+ - contrail::webui::http_port: {get_param: [EndpointMap, ContrailWebuiHttpInternal, port] }
+ contrail::webui::https_port: {get_param: [EndpointMap, ContrailWebuiHttpsInternal, port] }
+ contrail::webui::redis_ip: '127.0.0.1'
step_config: |
include ::tripleo::network::contrail::webui
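Note that the WebUI ports are no longer standalone parameters: they are now looked up in EndpointMap, so they stay consistent with the generated endpoints. A minimal sketch of the fragment being read, using the former defaults of 8080/8143 purely for illustration:

    # hypothetical endpoint map fragment resolved by the get_param lookups above
    ContrailWebuiHttpInternal:
      port: 8080
    ContrailWebuiHttpsInternal:
      port: 8143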
diff --git a/puppet/services/neutron-api.yaml b/puppet/services/neutron-api.yaml
index 5fd9d7a2..4d671e15 100644
--- a/puppet/services/neutron-api.yaml
+++ b/puppet/services/neutron-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Neutron Server configured with Puppet
@@ -71,6 +71,9 @@ parameters:
removed in Ocata. Future releases will enable L3 HA by default if it is
appropriate for the deployment type. Alternate mechanisms will be
available to override.
+ EnableInternalTLS:
+ type: boolean
+ default: false
parameter_groups:
- label: deprecated
@@ -82,8 +85,19 @@ parameter_groups:
parameters:
- NeutronL3HA
+conditions:
+ use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
+
resources:
+ TLSProxyBase:
+ type: OS::TripleO::Services::TLSProxyBase
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
NeutronBase:
type: ./neutron-base.yaml
properties:
@@ -103,6 +117,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
+ - get_attr: [TLSProxyBase, role_data, config_settings]
- neutron::server::database_connection:
list_join:
- ''
@@ -112,15 +127,14 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ovs_neutron'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
neutron::server::api_workers: {get_param: NeutronWorkers}
neutron::server::rpc_workers: {get_param: NeutronWorkers}
neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
neutron::server::enable_proxy_headers_parsing: true
neutron::keystone::authtoken::password: {get_param: NeutronPassword}
-
- neutron::server::notifications::nova_url: { get_param: [ EndpointMap, NovaInternal, uri ] }
neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] }
neutron::server::notifications::tenant_name: 'service'
neutron::server::notifications::project_name: 'service'
@@ -138,7 +152,23 @@ outputs:
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- neutron::bind_host: {get_param: [ServiceNetMap, NeutronApiNetwork]}
+ tripleo::profile::base::neutron::server::tls_proxy_bind_ip:
+ get_param: [ServiceNetMap, NeutronApiNetwork]
+ tripleo::profile::base::neutron::server::tls_proxy_fqdn:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NeutronApiNetwork]}
+ tripleo::profile::base::neutron::server::tls_proxy_port:
+ get_param: [EndpointMap, NeutronInternal, port]
+ # Bind to localhost if internal TLS is enabled, since we put a TLS
+ # proxy in front.
+ neutron::bind_host:
+ if:
+ - use_tls_proxy
+ - 'localhost'
+ - {get_param: [ServiceNetMap, NeutronApiNetwork]}
tripleo::profile::base::neutron::server::l3_ha_override: {get_param: NeutronL3HA}
step_config: |
include tripleo::profile::base::neutron::server
@@ -158,3 +188,10 @@ outputs:
neutron::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service neutron-server is running"
+ shell: /usr/bin/systemctl show 'neutron-server' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop neutron_api service
+ tags: step2
+ service: name=neutron-server state=stopped
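neutron-api now carries a Heat condition: when internal TLS is enabled, neutron-server binds to localhost and the proxy defined by OS::TripleO::Services::TLSProxyBase listens on the NeutronApiNetwork address instead. A minimal sketch of turning this on, assuming an environment that also maps the TLSProxyBase service:

    parameter_defaults:
      EnableInternalTLS: true

With the default of false, the `if` intrinsic falls through to the usual ServiceNetMap lookup, so existing deployments are unaffected.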
diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml
index 3d03c313..43657bd9 100644
--- a/puppet/services/neutron-base.yaml
+++ b/puppet/services/neutron-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron base service. Shared for all Neutron agents.
diff --git a/puppet/services/neutron-compute-plugin-midonet.yaml b/puppet/services/neutron-compute-plugin-midonet.yaml
index 26b6fa6b..5b6fcca6 100644
--- a/puppet/services/neutron-compute-plugin-midonet.yaml
+++ b/puppet/services/neutron-compute-plugin-midonet.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Compute Midonet plugin
diff --git a/puppet/services/neutron-compute-plugin-nuage.yaml b/puppet/services/neutron-compute-plugin-nuage.yaml
index c4f8ad12..04431e28 100644
--- a/puppet/services/neutron-compute-plugin-nuage.yaml
+++ b/puppet/services/neutron-compute-plugin-nuage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Compute Nuage plugin
diff --git a/puppet/services/neutron-compute-plugin-ovn.yaml b/puppet/services/neutron-compute-plugin-ovn.yaml
index 95e05dd4..e3a4da99 100644
--- a/puppet/services/neutron-compute-plugin-ovn.yaml
+++ b/puppet/services/neutron-compute-plugin-ovn.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Compute OVN agent
@@ -18,9 +18,6 @@ parameters:
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
type: json
- OVNDbHost:
- description: IP address on which the OVN DB servers are listening
- type: string
OVNSouthboundServerPort:
description: Port of the Southbound DB Server
type: number
@@ -29,6 +26,16 @@ parameters:
description: Tunnel encapsulation type
type: string
default: geneve
+ NeutronBridgeMappings:
+ description: >
+ The OVS logical->physical bridge mappings to use. See the Neutron
+ documentation for details. Defaults to mapping br-ex - the external
+ bridge on hosts - to a physical name 'datacentre' which can be used
+ to create provider networks (and we use this for the default floating
+ network) - if changing this either use different post-install network
+ scripts or be sure to keep 'datacentre' as a mapping network name
+ type: comma_delimited_list
+ default: "datacentre:br-ex"
outputs:
@@ -37,9 +44,16 @@ outputs:
value:
service_name: neutron_compute_plugin_ovn
config_settings:
- tripleo::profile::base::neutron::agents::ovn::ovn_db_host: {get_param: OVNDbHost}
ovn::southbound::port: {get_param: OVNSouthboundServerPort}
- ovn::southbound::encap_type: {get_param: OVNTunnelEncapType}
+ ovn::controller::ovn_encap_type: {get_param: OVNTunnelEncapType}
ovn::controller::ovn_encap_ip: {get_param: [ServiceNetMap, NeutronApiNetwork]}
+ ovn::controller::ovn_bridge_mappings: {get_param: NeutronBridgeMappings}
+ tripleo.neutron_compute_plugin_ovn.firewall_rules:
+ '118 neutron vxlan networks':
+ proto: 'udp'
+ dport: 4789
+ '119 neutron geneve networks':
+ proto: 'udp'
+ dport: 6081
step_config: |
include ::tripleo::profile::base::neutron::agents::ovn
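Besides dropping OVNDbHost, the compute-side OVN agent now opens its tunnel ports (VXLAN 4789, Geneve 6081) through the tripleo firewall hiera and gains bridge mappings. A sketch of overriding the mappings (the second entry is illustrative, not from the patch):

    parameter_defaults:
      NeutronBridgeMappings: datacentre:br-ex,tenant:br-tenant   # second mapping illustrative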
diff --git a/puppet/services/neutron-compute-plugin-plumgrid.yaml b/puppet/services/neutron-compute-plugin-plumgrid.yaml
index 31a0a08b..09aa6191 100644
--- a/puppet/services/neutron-compute-plugin-plumgrid.yaml
+++ b/puppet/services/neutron-compute-plugin-plumgrid.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Compute Plumgrid plugin
diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml
index 2cd08f98..062edaa4 100644
--- a/puppet/services/neutron-dhcp.yaml
+++ b/puppet/services/neutron-dhcp.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron DHCP agent configured with Puppet
@@ -39,6 +39,10 @@ parameters:
default:
tag: openstack.neutron.agent.dhcp
path: /var/log/neutron/dhcp-agent.log
+ NeutronDhcpAgentDnsmasqDnsServers:
+ default: []
+ description: List of servers to use as dnsmasq forwarders
+ type: comma_delimited_list
resources:
@@ -64,6 +68,7 @@ outputs:
- neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
neutron::agents::dhcp::enable_force_metadata: {get_param: NeutronEnableForceMetadata}
neutron::agents::dhcp::enable_metadata_network: {get_param: NeutronEnableMetadataNetwork}
+ neutron::agents::dhcp::dnsmasq_dns_servers: {get_param: NeutronDhcpAgentDnsmasqDnsServers}
tripleo.neutron_dhcp.firewall_rules:
'115 neutron dhcp input':
proto: 'udp'
@@ -74,3 +79,10 @@ outputs:
dport: 68
step_config: |
include tripleo::profile::base::neutron::dhcp
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service neutron-dhcp-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-dhcp-agent' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop neutron_dhcp service
+ tags: step2
+ service: name=neutron-dhcp-agent state=stopped
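The new NeutronDhcpAgentDnsmasqDnsServers list is passed straight through to the DHCP agent's dnsmasq forwarders. A usage sketch with placeholder resolver addresses:

    parameter_defaults:
      NeutronDhcpAgentDnsmasqDnsServers: ['192.0.2.53', '198.51.100.53']   # placeholder DNS servers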
diff --git a/puppet/services/neutron-l3-compute-dvr.yaml b/puppet/services/neutron-l3-compute-dvr.yaml
index b6c29116..1d6a2371 100644
--- a/puppet/services/neutron-l3-compute-dvr.yaml
+++ b/puppet/services/neutron-l3-compute-dvr.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron L3 agent for DVR enabled compute nodes
@@ -22,10 +22,6 @@ parameters:
Debug:
type: string
default: ''
- NeutronExternalNetworkBridge:
- description: Name of bridge used for external network traffic.
- type: string
- default: 'br-ex'
MonitoringSubscriptionNeutronL3Dvr:
default: 'overcloud-neutron-l3-dvr'
type: string
@@ -35,6 +31,19 @@ parameters:
tag: openstack.neutron.agent.l3-compute
path: /var/log/neutron/l3-agent.log
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Pike cycle.
+ NeutronExternalNetworkBridge:
+ description: Name of bridge used for external network traffic. Usually L2
+ agent handles port wiring into external bridge, and hence the
+ parameter should be unset.
+ type: string
+ default: ''
+
+conditions:
+
+ external_network_bridge_empty: {equals : [{get_param: NeutronExternalNetworkBridge}, "''"]}
+
resources:
NeutronBase:
@@ -56,7 +65,11 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
- neutron::agents::l3::agent_mode : 'dvr'
+ - neutron::agents::l3::agent_mode : 'dvr'
+ -
+ if:
+ - external_network_bridge_empty
+ - {}
+ - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
step_config: |
include tripleo::profile::base::neutron::l3
diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml
index a2157555..4fa49275 100644
--- a/puppet/services/neutron-l3.yaml
+++ b/puppet/services/neutron-l3.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Neutron L3 agent configured with Puppet
@@ -21,10 +21,6 @@ parameters:
Debug:
type: string
default: ''
- NeutronExternalNetworkBridge:
- description: Name of bridge used for external network traffic.
- type: string
- default: 'br-ex'
NeutronL3AgentMode:
description: |
Agent mode for L3 agent. Must be one of legacy or dvr_snat.
@@ -43,6 +39,15 @@ parameters:
tag: openstack.neutron.agent.l3
path: /var/log/neutron/l3-agent.log
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Pike cycle.
+ NeutronExternalNetworkBridge:
+ description: Name of bridge used for external network traffic. Usually L2
+ agent handles port wiring into external bridge, and hence the
+ parameter should be unset.
+ type: string
+ default: ''
+
conditions:
external_network_bridge_empty: {equals : [{get_param: NeutronExternalNetworkBridge}, "''"]}
@@ -68,15 +73,21 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- - neutron::agents::l3::router_delete_namespaces: True
- neutron::agents::l3::agent_mode: {get_param: NeutronL3AgentMode}
+ - neutron::agents::l3::agent_mode: {get_param: NeutronL3AgentMode}
tripleo.neutron_l3.firewall_rules:
'106 neutron_l3 vrrp':
proto: vrrp
- -
+ -
if:
- external_network_bridge_empty
- {}
- neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
step_config: |
include tripleo::profile::base::neutron::l3
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service neutron-l3-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-l3-agent' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop neutron_l3 service
+ tags: step2
+ service: name=neutron-l3-agent state=stopped
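NeutronExternalNetworkBridge is now deprecated: its default is empty and, via the external_network_bridge_empty condition, the hiera key is omitted entirely (the `if` yields {} into map_merge) so the L2 agent wires the external port. Deployments that still want the old dedicated bridge would keep setting it explicitly, e.g.:

    parameter_defaults:
      NeutronExternalNetworkBridge: 'br-ex'   # only needed to retain the deprecated behaviour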
diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml
index c87de285..6f5debdd 100644
--- a/puppet/services/neutron-metadata.yaml
+++ b/puppet/services/neutron-metadata.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Metadata agent configured with Puppet
@@ -70,8 +70,15 @@ outputs:
- neutron::agents::metadata::shared_secret: {get_param: NeutronMetadataProxySharedSecret}
neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers}
neutron::agents::metadata::auth_password: {get_param: NeutronPassword}
- neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
neutron::agents::metadata::auth_tenant: 'service'
neutron::agents::metadata::metadata_ip: "%{hiera('nova_metadata_vip')}"
step_config: |
include tripleo::profile::base::neutron::metadata
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service neutron-metadata-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-metadata-agent' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop neutron_metadata service
+ tags: step2
+ service: name=neutron-metadata-agent state=stopped
diff --git a/puppet/services/neutron-midonet.yaml b/puppet/services/neutron-midonet.yaml
index 0de256c0..9198f352 100644
--- a/puppet/services/neutron-midonet.yaml
+++ b/puppet/services/neutron-midonet.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Midonet plugin and services
diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml
index e2b90b7b..c27bb909 100644
--- a/puppet/services/neutron-ovs-agent.yaml
+++ b/puppet/services/neutron-ovs-agent.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron OVS agent configured with Puppet
@@ -70,6 +70,9 @@ parameters:
tag: openstack.neutron.agent.openvswitch
path: /var/log/neutron/openvswitch-agent.log
+conditions:
+ no_firewall_driver: {equals : [{get_param: NeutronOVSFirewallDriver}, '']}
+
resources:
NeutronBase:
@@ -104,12 +107,23 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
- neutron::agents::ml2::ovs::firewall_driver: {get_param: NeutronOVSFirewallDriver}
tripleo.neutron_ovs_agent.firewall_rules:
'118 neutron vxlan networks':
proto: 'udp'
dport: 4789
'136 neutron gre networks':
proto: 'gre'
+ -
+ if:
+ - no_firewall_driver
+ - {}
+ - neutron::agents::ml2::ovs::firewall_driver: {get_param: NeutronOVSFirewallDriver}
step_config: |
include ::tripleo::profile::base::neutron::ovs
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop neutron_ovs_agent service
+ tags: step2
+ service: name=neutron-openvswitch-agent state=stopped
diff --git a/puppet/services/neutron-ovs-dpdk-agent.yaml b/puppet/services/neutron-ovs-dpdk-agent.yaml
index fdfa1c03..e25bc495 100644
--- a/puppet/services/neutron-ovs-dpdk-agent.yaml
+++ b/puppet/services/neutron-ovs-dpdk-agent.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron OVS DPDK configured with Puppet for Compute Role
@@ -18,6 +18,11 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ HostCpusList:
+ description: List of cores to be used for host process
+ type: string
+ constraints:
+ - allowed_pattern: "'[0-9,-]+'"
NeutronDpdkCoreList:
description: List of cores to be used for DPDK Poll Mode Driver
type: string
@@ -68,7 +73,8 @@ outputs:
- neutron::agents::ml2::ovs::enable_dpdk: true
neutron::agents::ml2::ovs::datapath_type: {get_param: NeutronDatapathType}
neutron::agents::ml2::ovs::vhostuser_socket_dir: {get_param: NeutronVhostuserSocketDir}
- vswitch::dpdk::core_list: {get_param: NeutronDpdkCoreList}
+ vswitch::dpdk::host_core_list: {get_param: HostCpusList}
+ vswitch::dpdk::pmd_core_list: {get_param: NeutronDpdkCoreList}
vswitch::dpdk::memory_channels: {get_param: NeutronDpdkMemoryChannels}
vswitch::dpdk::socket_mem: {get_param: NeutronDpdkSocketMemory}
vswitch::dpdk::driver_type: {get_param: NeutronDpdkDriverType}
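The new HostCpusList parameter is a string whose allowed_pattern expects the CPU list to be wrapped in single quotes inside the value. A sketch with illustrative core numbers (the same quoting style is assumed for the PMD core list):

    parameter_defaults:
      HostCpusList: "'0,1'"          # embedded quotes required by the allowed_pattern
      NeutronDpdkCoreList: "'2,3'"   # illustrative PMD cores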
diff --git a/puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml b/puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml
new file mode 100644
index 00000000..becd25c9
--- /dev/null
+++ b/puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml
@@ -0,0 +1,73 @@
+heat_template_version: ocata
+
+description: >
+ Configure hieradata for Fujitsu C-Fabric plugin configuration
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NeutronFujitsuCfabAddress:
+ description: 'The address of the C-Fabric to telnet to.'
+ type: string
+ NeutronFujitsuCfabUserName:
+ description: 'The C-Fabric username to use.'
+ type: string
+ NeutronFujitsuCfabPassword:
+ description: 'The C-Fabric password to use.'
+ type: string
+ hidden: true
+ NeutronFujitsuCfabPhysicalNetworks:
+ description: 'List of <physical_network>:<vfab_id> tuples specifying physical_network names and corresponding vfab ids.'
+ type: comma_delimited_list
+ default: ''
+ NeutronFujitsuCfabSharePprofile:
+ description: '"Whether to share a C-Fabric pprofile among Neutron ports using the same VLAN ID.'
+ type: boolean
+ default: false
+ NeutronFujitsuCfabPprofilePrefix:
+ description: 'The prefix string for pprofile name.'
+ type: string
+ default: ''
+ NeutronFujitsuCfabSaveConfig:
+ description: 'Whether to save configuration.'
+ type: boolean
+ default: true
+
+resources:
+
+ NeutronMl2Base:
+ type: ./neutron-plugin-ml2.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for Fujitsu Cfab ML2 Driver
+ value:
+ service_name: neutron_plugin_ml2_fujitsu_cfab
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronMl2Base, role_data, config_settings]
+ - neutron::plugins::ml2::fujitsu::cfab::address: {get_param: NeutronFujitsuCfabAddress}
+ neutron::plugins::ml2::fujitsu::cfab::username: {get_param: NeutronFujitsuCfabUserName}
+ neutron::plugins::ml2::fujitsu::cfab::password: {get_param: NeutronFujitsuCfabPassword}
+ neutron::plugins::ml2::fujitsu::cfab::physical_networks: {get_param: NeutronFujitsuCfabPhysicalNetworks}
+ neutron::plugins::ml2::fujitsu::cfab::share_pprofile: {get_param: NeutronFujitsuCfabSharePprofile}
+ neutron::plugins::ml2::fujitsu::cfab::pprofile_prefix: {get_param: NeutronFujitsuCfabPprofilePrefix}
+ neutron::plugins::ml2::fujitsu::cfab::save_config: {get_param: NeutronFujitsuCfabSaveConfig}
+ step_config: |
+ include ::tripleo::profile::base::neutron::plugins::ml2
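The C-Fabric parameters without defaults (switch address and credentials) must be supplied by the deployer. A hypothetical environment snippet, all values being placeholders:

    parameter_defaults:
      NeutronFujitsuCfabAddress: 192.0.2.50
      NeutronFujitsuCfabUserName: admin
      NeutronFujitsuCfabPassword: ChangeMe
      NeutronFujitsuCfabPhysicalNetworks: ['physnet1:1', 'physnet2:2']   # <physical_network>:<vfab_id>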
diff --git a/puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml b/puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml
new file mode 100644
index 00000000..85971f17
--- /dev/null
+++ b/puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml
@@ -0,0 +1,78 @@
+heat_template_version: ocata
+
+description: Configure hieradata for Fujitsu fossw plugin configuration
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NeutronFujitsuFosswIps:
+ description: 'The list of IP addresses of all fos switches.'
+ type: comma_delimited_list
+ NeutronFujitsuFosswUserName:
+ description: 'The username of the fos switches.'
+ type: string
+ NeutronFujitsuFosswPassword:
+ description: 'The password of the fos switches.'
+ type: string
+ hidden: true
+ NeutronFujitsuFosswPort:
+ description: 'The port number used for SSH connection.'
+ type: number
+ default: 22
+ NeutronFujitsuFosswTimeout:
+ description: 'The timeout of the SSH connection.'
+ type: number
+ default: 30
+ NeutronFujitsuFosswUdpDestPort:
+ description: 'The port number of VXLAN UDP destination on the fos switches.'
+ type: number
+ default: 4789
+ NeutronFujitsuFosswOvsdbVlanidRangeMin:
+ description: 'The minimum VLAN ID in the range that is used for binding VNI and physical port.'
+ type: number
+ default: 2
+ NeutronFujitsuFosswOvsdbPort:
+ description: 'The port number on which the OVSDB server on the fos switches listens.'
+ type: number
+ default: 6640
+
+resources:
+
+ NeutronMl2Base:
+ type: ./neutron-plugin-ml2.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for Fujitsu Fossw ML2 Driver
+ value:
+ service_name: neutron_plugin_ml2_fujitsu_fossw
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronMl2Base, role_data, config_settings]
+ - neutron::plugins::ml2::fujitsu::fossw::fossw_ips: {get_param: NeutronFujitsuFosswIps}
+ neutron::plugins::ml2::fujitsu::fossw::username: {get_param: NeutronFujitsuFosswUserName}
+ neutron::plugins::ml2::fujitsu::fossw::password: {get_param: NeutronFujitsuFosswPassword}
+ neutron::plugins::ml2::fujitsu::fossw::port: {get_param: NeutronFujitsuFosswPort}
+ neutron::plugins::ml2::fujitsu::fossw::timeout: {get_param: NeutronFujitsuFosswTimeout}
+ neutron::plugins::ml2::fujitsu::fossw::udp_dest_port: {get_param: NeutronFujitsuFosswUdpDestPort}
+ neutron::plugins::ml2::fujitsu::fossw::ovsdb_vlanid_range_min: {get_param: NeutronFujitsuFosswOvsdbVlanidRangeMin}
+ neutron::plugins::ml2::fujitsu::fossw::ovsdb_port: {get_param: NeutronFujitsuFosswOvsdbPort}
+ step_config: |
+ include ::tripleo::profile::base::neutron::plugins::ml2
+
diff --git a/puppet/services/neutron-plugin-ml2-ovn.yaml b/puppet/services/neutron-plugin-ml2-ovn.yaml
index e98ed497..4d4c3900 100644
--- a/puppet/services/neutron-plugin-ml2-ovn.yaml
+++ b/puppet/services/neutron-plugin-ml2-ovn.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron ML2/OVN plugin configured with Puppet
@@ -18,17 +18,14 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- OVNDbHost:
- description: IP address on which the OVN DB servers are listening
- type: string
- OVNNorthboundServerPort:
- description: Port of the OVN Northbound DB server
+ OVNSouthboundServerPort:
+ description: Port of the OVN Southbound DB server
type: number
- default: 6641
+ default: 6642
OVNDbConnectionTimeout:
description: Timeout in seconds for the OVSDB connection transaction
type: number
- default: 60
+ default: 180
OVNVifType:
description: Type of VIF to be used for ports
type: string
@@ -50,6 +47,10 @@ parameters:
description: OVN notification driver for Neutron QOS service plugin
type: string
default: NULL
+ NeutronGeneveMaxHeaderSize:
+ description: Geneve encapsulation header size
+ type: number
+ default: 38
resources:
@@ -68,12 +69,12 @@ outputs:
config_settings:
map_merge:
- get_attr: [NeutronMl2Base, role_data, config_settings]
- - ovn::northbound::port: {get_param: OVNNorthboundServerPort}
- tripleo::profile::base::neutron::plugins::ml2::ovn::ovn_db_host: {get_param: OVNDbHost}
- neutron::plugins::ovn::ovsdb_connection_timeout: {get_param: OVNDbConnectionTimeout}
- neutron::plugins::ovn::neutron_sync_mode: {get_param: OVNNeutronSyncMode}
- neutron::plugins::ovn::ovn_l3_mode: true
- neutron::plugins::ovn::vif_type: {get_param: OVNVifType}
+ - ovn::southbound::port: {get_param: OVNSouthboundServerPort}
+ neutron::plugins::ml2::ovn::ovsdb_connection_timeout: {get_param: OVNDbConnectionTimeout}
+ neutron::plugins::ml2::ovn::neutron_sync_mode: {get_param: OVNNeutronSyncMode}
+ neutron::plugins::ml2::ovn::ovn_l3_mode: true
+ neutron::plugins::ml2::ovn::vif_type: {get_param: OVNVifType}
neutron::server::qos_notification_drivers: {get_param: OVNQosDriver}
+ neutron::plugins::ml2::max_header_size: {get_param: NeutronGeneveMaxHeaderSize}
step_config: |
include ::tripleo::profile::base::neutron::plugins::ml2
diff --git a/puppet/services/neutron-plugin-ml2.yaml b/puppet/services/neutron-plugin-ml2.yaml
index 88b5064c..3abd04f3 100644
--- a/puppet/services/neutron-plugin-ml2.yaml
+++ b/puppet/services/neutron-plugin-ml2.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron ML2 Plugin configured with Puppet
@@ -60,12 +60,6 @@ parameters:
default: 'vxlan'
description: The tenant network type for Neutron.
type: comma_delimited_list
- NeutronSupportedPCIVendorDevs:
- description: |
- List of supported pci vendor devices in the format VendorID:ProductID.
- By default Intel & Mellanox SR-IOV capable NICs are supported.
- type: comma_delimited_list
- default: ['15b3:1004','8086:10ca']
resources:
NeutronBase:
@@ -91,7 +85,9 @@ outputs:
neutron::plugins::ml2::tunnel_id_ranges: {get_param: NeutronTunnelIdRanges}
neutron::plugins::ml2::vni_ranges: {get_param: NeutronVniRanges}
neutron::plugins::ml2::tenant_network_types: {get_param: NeutronNetworkType}
- neutron::plugins::ml2::supported_pci_vendor_devs: {get_param: NeutronSupportedPCIVendorDevs}
step_config: |
include ::tripleo::profile::base::neutron::plugins::ml2
+ service_config_settings:
+ horizon:
+ neutron::plugins::ml2::mechanism_drivers: {get_param: NeutronMechanismDrivers}
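The new service_config_settings block is worth noting: hiera placed under a service name is applied to every node that runs that service, so Horizon nodes learn the enabled ML2 mechanism drivers even though the ML2 profile itself does not run there. The shape, as used above:

    service_config_settings:
      horizon:   # applied to any role hosting the horizon service
        neutron::plugins::ml2::mechanism_drivers: {get_param: NeutronMechanismDrivers}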
diff --git a/puppet/services/neutron-plugin-nuage.yaml b/puppet/services/neutron-plugin-nuage.yaml
index 838ec5ea..e09cd704 100644
--- a/puppet/services/neutron-plugin-nuage.yaml
+++ b/puppet/services/neutron-plugin-nuage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Nuage plugin
diff --git a/puppet/services/neutron-plugin-opencontrail.yaml b/puppet/services/neutron-plugin-opencontrail.yaml
deleted file mode 100644
index 098c9d05..00000000
--- a/puppet/services/neutron-plugin-opencontrail.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron Opencontrail plugin
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- AdminPassword:
- description: The password for the keystone admin account, used for monitoring, querying neutron etc.
- type: string
- hidden: true
- AdminToken:
- description: The keystone auth secret and db password.
- type: string
- hidden: true
- ContrailApiServerIp:
- description: IP address of the OpenContrail API server
- type: string
- ContrailApiServerPort:
- description: Port of the OpenContrail API
- type: string
- default: 8082
- ContrailMultiTenancy:
- description: Whether to enable multi tenancy
- type: boolean
- default: false
- ContrailExtensions:
- description: List of OpenContrail extensions to be enabled
- type: comma_delimited_list
- default: ''
-
-resources:
-
- NeutronBase:
- type: ./neutron-base.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron Opencontrail plugin
- value:
- service_name: neutron_plugin_opencontrail
- config_settings:
- map_merge:
- - get_attr: [NeutronBase, role_data, config_settings]
- - neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions,/usr/lib/python2.7/site-packages/neutron_lbaas/extensions
-
- neutron::plugins::opencontrail::api_server_ip: {get_param: ContrailApiServerIp}
- neutron::plugins::opencontrail::api_server_port: {get_param: ContrailApiServerPort}
- neutron::plugins::opencontrail::multi_tenancy: {get_param: ContrailMultiTenancy}
- neutron::plugins::opencontrail::contrail_extensions: {get_param: ContrailExtensions}
- neutron::plugins::opencontrail::keystone_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri] }
- neutron::plugins::opencontrail::keystone_admin_user: admin
- neutron::plugins::opencontrail::keystone_admin_tenant_name: admin
- neutron::plugins::opencontrail::keystone_admin_password: {get_param: AdminPassword}
- neutron::plugins::opencontrail::keystone_admin_token: {get_param: AdminToken}
- step_config: |
- include tripleo::profile::base::neutron::plugins::opencontrail
diff --git a/puppet/services/neutron-plugin-plumgrid.yaml b/puppet/services/neutron-plugin-plumgrid.yaml
index 30af8a3f..f948dd07 100644
--- a/puppet/services/neutron-plugin-plumgrid.yaml
+++ b/puppet/services/neutron-plugin-plumgrid.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron Plumgrid plugin
@@ -100,7 +100,8 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ovs_neutron'
- neutron::plugins::plumgrid::controller_priv_host: {get_param: [EndpointMap, KeystoneAdmin, host]}
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ neutron::plugins::plumgrid::controller_priv_host: {get_param: [EndpointMap, KeystoneInternal, host]}
neutron::plugins::plumgrid::admin_password: {get_param: AdminPassword}
neutron::plugins::plumgrid::metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
neutron::plugins::plumgrid::director_server: {get_param: PLUMgridDirectorServer}
diff --git a/puppet/services/neutron-sriov-agent.yaml b/puppet/services/neutron-sriov-agent.yaml
index 44f7f242..d3c82d88 100644
--- a/puppet/services/neutron-sriov-agent.yaml
+++ b/puppet/services/neutron-sriov-agent.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Neutron SR-IOV nic agent configured with Puppet
@@ -25,6 +25,7 @@ parameters:
All physical networks listed in network_vlan_ranges
on the server should have mappings to appropriate
interfaces on each agent.
+ Example "tenant0:ens2f0,tenant1:ens2f1"
type: comma_delimited_list
default: ""
NeutronExcludeDevices:
@@ -40,8 +41,8 @@ parameters:
NeutronSriovNumVFs:
description: >
Provide the list of VFs to be reserved for each SR-IOV interface.
- Format "<interface_name1>:<numvfs1>","<interface_name2>:<numvfs2>"
- Example "eth1:4096","eth2:128"
+ Format "<interface_name1>:<numvfs1>,<interface_name2>:<numvfs2>"
+ Example "eth1:4096,eth2:128"
type: comma_delimited_list
default: ""
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index 50e4c996..0adefecd 100644
--- a/puppet/services/nova-api.yaml
+++ b/puppet/services/nova-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Nova API service configured with Puppet
@@ -54,18 +54,28 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ NovaDefaultFloatingPool:
+ default: 'public'
+ description: Default pool for floating IP addresses
+ type: string
+ NovaDbSyncTimeout:
+ default: 300
+ description: Timeout for Nova db sync
+ type: number
conditions:
nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
resources:
- ApacheServiceBase:
- type: ./apache.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
- EnableInternalTLS: {get_param: EnableInternalTLS}
+ # Temporarily disable Nova API deployed in WSGI
+ # https://bugs.launchpad.net/nova/+bug/1661360
+ # ApacheServiceBase:
+ # type: ./apache.yaml
+ # properties:
+ # ServiceNetMap: {get_param: ServiceNetMap}
+ # DefaultPasswords: {get_param: DefaultPasswords}
+ # EndpointMap: {get_param: EndpointMap}
+ # EnableInternalTLS: {get_param: EnableInternalTLS}
NovaBase:
type: ./nova-base.yaml
@@ -86,7 +96,9 @@ outputs:
config_settings:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- - get_attr: [ApacheServiceBase, role_data, config_settings]
+ # Temporarily disable Nova API deployed in WSGI
+ # https://bugs.launchpad.net/nova/+bug/1661360
+ # - get_attr: [ApacheServiceBase, role_data, config_settings]
- nova::cron::archive_deleted_rows::hour: '*/12'
nova::cron::archive_deleted_rows::destination: '/dev/null'
tripleo.nova_api.firewall_rules:
@@ -100,9 +112,9 @@ outputs:
nova::keystone::authtoken::project_name: 'service'
nova::keystone::authtoken::password: {get_param: NovaPassword}
nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
nova::api::enabled: true
- nova::api::default_floating_pool: 'public'
+ nova::api::default_floating_pool: {get_param: NovaDefaultFloatingPool}
nova::api::sync_db_api: true
nova::api::enable_proxy_headers_parsing: true
nova::api::api_bind_address:
@@ -111,20 +123,23 @@ outputs:
"%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
- nova::api::service_name: 'httpd'
- nova::wsgi::apache::ssl: {get_param: EnableInternalTLS}
+ # Temporarily disable Nova API deployed in WSGI
+ # https://bugs.launchpad.net/nova/+bug/1661360
+ nova_wsgi_enabled: false
+ # nova::api::service_name: 'httpd'
+ # nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- nova::wsgi::apache::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
- nova::wsgi::apache::servername:
- str_replace:
- template:
- "%{hiera('fqdn_$NETWORK')}"
- params:
- $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ # nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ # nova::wsgi::apache_api::servername:
+ # str_replace:
+ # template:
+ # "%{hiera('fqdn_$NETWORK')}"
+ # params:
+ # $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
nova::api::instance_name_template: {get_param: InstanceNameTemplate}
nova_enable_db_purge: {get_param: NovaEnableDBPurge}
@@ -133,10 +148,29 @@ outputs:
- nova_workers_zero
- {}
- nova::api::osapi_compute_workers: {get_param: NovaWorkers}
- nova::wsgi::apache::workers: {get_param: NovaWorkers}
+ # Temporarily disable Nova API deployed in WSGI
+ # https://bugs.launchpad.net/nova/+bug/1661360
+ # nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
step_config: |
include tripleo::profile::base::nova::api
service_config_settings:
+ mysql:
+ map_merge:
+ - {get_attr: [NovaBase, role_data, service_config_settings, mysql]}
+ - nova::db::mysql::password: {get_param: NovaPassword}
+ nova::db::mysql::user: nova
+ nova::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ nova::db::mysql::dbname: nova
+ nova::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+ nova::db::mysql_api::password: {get_param: NovaPassword}
+ nova::db::mysql_api::user: nova_api
+ nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ nova::db::mysql_api::dbname: nova_api
+ nova::db::mysql_api::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
keystone:
nova::keystone::auth::tenant: 'service'
nova::keystone::auth::public_url: {get_param: [EndpointMap, NovaPublic, uri]}
@@ -144,18 +178,90 @@ outputs:
nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
nova::keystone::auth::password: {get_param: NovaPassword}
nova::keystone::auth::region: {get_param: KeystoneRegion}
- mysql:
- nova::db::mysql::password: {get_param: NovaPassword}
- nova::db::mysql::user: nova
- nova::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- nova::db::mysql::dbname: nova
- nova::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
- nova::db::mysql_api::password: {get_param: NovaPassword}
- nova::db::mysql_api::user: nova_api
- nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- nova::db::mysql_api::dbname: nova_api
- nova::db::mysql_api::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
+ # Temporarily disable Nova API deployed in WSGI
+ # https://bugs.launchpad.net/nova/+bug/1661360
+ # metadata_settings:
+ # get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: get bootstrap nodeid
+ tags: common
+ command: hiera bootstrap_nodeid
+ register: bootstrap_node
+ - name: set is_bootstrap_node fact
+ tags: common
+ set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}}
+ - name: Extra migration for nova tripleo/+bug/1656791
+ tags: step0,pre-upgrade
+ when: is_bootstrap_node
+ command: nova-manage db online_data_migrations
+ - name: Stop and disable nova_api service (pre-upgrade not under httpd)
+ tags: step2
+ service: name=openstack-nova-api state=stopped enabled=no
+ - name: update nova api
+ tags: step2
+ yum: name=openstack-nova-api state=latest
+ - name: Create puppet manifest to set transport_url in nova.conf
+ tags: step5
+ when: is_bootstrap_node
+ copy:
+ dest: /root/nova-api_upgrade_manifest.pp
+ mode: 0600
+ content: >
+ $transport_url = os_transport_url({
+ 'transport' => hiera('messaging_service_name', 'rabbit'),
+ 'hosts' => any2array(hiera('rabbitmq_node_names', undef)),
+ 'port' => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
+ 'username' => hiera('nova::rabbit_userid', 'guest'),
+ 'password' => hiera('nova::rabbit_password'),
+ 'ssl' => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
+ })
+ oslo::messaging::default { 'nova_config':
+ transport_url => $transport_url
+ }
+ - name: Run puppet apply to set transport_url in nova.conf
+ tags: step5
+ when: is_bootstrap_node
+ command: puppet apply --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+ register: puppet_apply_nova_api_upgrade
+ failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
+ changed_when: puppet_apply_nova_api_upgrade.rc == 2
+ - name: Setup cell_v2 (map cell0)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 map_cell0
+ - name: Setup cell_v2 (create default cell)
+ tags: step5
+ when: is_bootstrap_node
+ # (owalsh) puppet-nova expects the cell name 'default'
+ # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
+ shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
+ register: nova_api_create_cell
+ failed_when: nova_api_create_cell.rc not in [0,2]
+ changed_when: nova_api_create_cell.rc == 0
+ - name: Setup cell_v2 (sync nova/cell DB)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage db sync
+ async: {get_param: NovaDbSyncTimeout}
+ poll: 10
+ - name: Setup cell_v2 (migrate hosts)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 map_cell_and_hosts
+ - name: Setup cell_v2 (get cell uuid)
+ tags: step5
+ when: is_bootstrap_node
+ shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
+ register: nova_api_cell_uuid
+ - name: Setup cell_v2 (migrate instances)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
+ - name: Sync nova_api DB
+ tags: step5
+ command: nova-manage api_db sync
+ when: is_bootstrap_node
+ - name: Online data migration for nova
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage db online_data_migrations
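The nova-api upgrade tasks drive the cells v2 setup on the bootstrap node (map_cell0, create_cell, db sync, host and instance mapping), with the db sync run asynchronously and bounded by NovaDbSyncTimeout. For large nova databases the 300-second default can be raised, e.g.:

    parameter_defaults:
      NovaDbSyncTimeout: 900   # seconds; illustrative value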
diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml
index 20bf2e42..ceacb0b2 100644
--- a/puppet/services/nova-base.yaml
+++ b/puppet/services/nova-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Nova base service. Shared for all Nova services.
@@ -18,6 +18,10 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
NovaPassword:
description: The password for the nova service and db account, used by nova-api.
type: string
@@ -66,6 +70,57 @@ parameters:
type: string
description: Nova Compute upgrade level
default: ''
+ NovaCronArchiveDeleteRowsMinute:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Minute
+ default: '1'
+ NovaCronArchiveDeleteRowsHour:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Hour
+ default: '0'
+ NovaCronArchiveDeleteRowsMonthday:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Month Day
+ default: '*'
+ NovaCronArchiveDeleteRowsMonth:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Month
+ default: '*'
+ NovaCronArchiveDeleteRowsWeekday:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Week Day
+ default: '*'
+ NovaCronArchiveDeleteRowsMaxRows:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Max Rows
+ default: '100'
+ NovaCronArchiveDeleteRowsUser:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - User
+ default: 'nova'
+ NovaCronArchiveDeleteRowsDestination:
+ type: string
+ description: >
+ Cron to move deleted instances to another table - Log destination
+ default: '/var/log/nova/nova-rowsflush.log'
+ NovaCronArchiveDeleteRowsUntilComplete:
+ type: boolean
+ description: >
+ Cron to move deleted instances to another table - Until complete
+ default: false
+ NovaPlacementAPIInterface:
+ type: string
+ description: >
+ Endpoint interface to be used for the placement API.
+ default: 'internal'
+
conditions:
compute_upgrade_level_empty: {equals : [{get_param: UpgradeLevelNovaCompute}, '']}
@@ -81,6 +136,11 @@ outputs:
nova::rabbit_userid: {get_param: RabbitUserName}
nova::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
nova::rabbit_port: {get_param: RabbitClientPort}
+ nova::placement::project_name: 'service'
+ nova::placement::password: {get_param: NovaPassword}
+ nova::placement::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ nova::placement::os_region_name: {get_param: KeystoneRegion}
+ nova::placement::os_interface: {get_param: NovaPlacementAPIInterface}
nova::database_connection:
list_join:
- ''
@@ -90,6 +150,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/nova'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
nova::api_database_connection:
list_join:
- ''
@@ -99,10 +160,22 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/nova_api'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ nova::placement_database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://nova_placement:'
+ - {get_param: NovaPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/nova_placement'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
nova::debug: {get_param: Debug}
nova::purge_config: {get_param: EnableConfigPurge}
nova::network::neutron::neutron_project_name: 'service'
nova::network::neutron::neutron_username: 'neutron'
+ nova::network::neutron::neutron_region_name: {get_param: KeystoneRegion}
nova::network::neutron::dhcp_domain: ''
nova::network::neutron::neutron_password: {get_param: NeutronPassword}
nova::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
@@ -118,8 +191,27 @@ outputs:
nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
nova::use_ipv6: {get_param: NovaIPv6}
nova::network::neutron::neutron_ovs_bridge: {get_param: NovaOVSBridge}
- -
+ nova::cron::archive_deleted_rows::minute: {get_param: NovaCronArchiveDeleteRowsMinute}
+ nova::cron::archive_deleted_rows::hour: {get_param: NovaCronArchiveDeleteRowsHour}
+ nova::cron::archive_deleted_rows::monthday: {get_param: NovaCronArchiveDeleteRowsMonthday}
+ nova::cron::archive_deleted_rows::month: {get_param: NovaCronArchiveDeleteRowsMonth}
+ nova::cron::archive_deleted_rows::weekday: {get_param: NovaCronArchiveDeleteRowsWeekday}
+ nova::cron::archive_deleted_rows::max_rows: {get_param: NovaCronArchiveDeleteRowsMaxRows}
+ nova::cron::archive_deleted_rows::user: {get_param: NovaCronArchiveDeleteRowsUser}
+ nova::cron::archive_deleted_rows::destination: {get_param: NovaCronArchiveDeleteRowsDestination}
+ nova::cron::archive_deleted_rows::until_complete: {get_param: NovaCronArchiveDeleteRowsUntilComplete}
+ -
if:
- compute_upgrade_level_empty
- {}
- nova::upgrade_level_compute: {get_param: UpgradeLevelNovaCompute}
+ service_config_settings:
+ mysql:
+ # NOTE(aschultz): this should be configurable if/when we support more
+ # complex cell v2 configurations. For now, this is the default cell
+ # created for the cell v2 configuration
+ nova::db::mysql_api::setup_cell0: true
+ nova::rabbit_password: {get_param: RabbitPassword}
+ nova::rabbit_userid: {get_param: RabbitUserName}
+ nova::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ nova::rabbit_port: {get_param: RabbitClientPort}
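All of the assembled database_connection strings now append the read_default_file/read_default_group query arguments so the clients read /etc/my.cnf.d/tripleo.cnf. Assuming the usual mysql+pymysql protocol from the endpoint map, the resulting value looks roughly like (password and host are placeholders):

    nova::database_connection: mysql+pymysql://nova:PASSWORD@MYSQL_VIP/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo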
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index 908b676e..9923e833 100644
--- a/puppet/services/nova-compute.yaml
+++ b/puppet/services/nova-compute.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Nova Compute service configured with Puppet
@@ -75,6 +75,10 @@ parameters:
default:
tag: openstack.nova.compute
path: /var/log/nova/nova-compute.log
+ UpgradeLevelNovaCompute:
+ type: string
+ description: Nova Compute upgrade level
+ default: auto
resources:
NovaBase:
@@ -141,3 +145,24 @@ outputs:
# We'll probably treat it like we do with Neutron plugins.
# Until then, just include it in the default nova-compute role.
include tripleo::profile::base::nova::compute::libvirt
+ service_config_settings:
+ collectd:
+ tripleo.collectd.plugins.nova_compute:
+ - virt
+ collectd::plugins::virt::connection: "qemu:///system"
+ upgrade_tasks:
+ - name: Stop nova-compute service
+ tags: step2
+ service: name=openstack-nova-compute state=stopped
+ # If not already set by puppet (e.g a pre-ocata version), set the
+ # upgrade_level for compute to "auto"
+ - name: Set compute upgrade level to auto
+ tags: step3
+ ini_file:
+ str_replace:
+ template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
+ params:
+ LEVEL: {get_param: UpgradeLevelNovaCompute}
+ - name: Start nova-compute service
+ tags: step6
+ service: name=openstack-nova-compute state=started
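The str_replace wrapped around ini_file only injects the upgrade level into the module-argument string; with the default of 'auto' the rendered task is equivalent to:

    - name: Set compute upgrade level to auto
      tags: step3
      ini_file: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=auto"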
diff --git a/puppet/services/nova-conductor.yaml b/puppet/services/nova-conductor.yaml
index a10d9560..7b086536 100644
--- a/puppet/services/nova-conductor.yaml
+++ b/puppet/services/nova-conductor.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Nova Conductor service configured with Puppet
@@ -30,6 +30,10 @@ parameters:
default:
tag: openstack.nova.scheduler
path: /var/log/nova/nova-scheduler.log
+ UpgradeLevelNovaCompute:
+ type: string
+ description: Nova Compute upgrade level
+ default: auto
conditions:
nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
@@ -61,3 +65,19 @@ outputs:
- nova::conductor::workers: {get_param: NovaWorkers}
step_config: |
include tripleo::profile::base::nova::conductor
+ upgrade_tasks:
+ - name: Stop nova_conductor service
+ tags: step2
+ service: name=openstack-nova-conductor state=stopped
+ - name: update nova conductor
+ tags: step2
+ yum: name=openstack-nova-conductor state=latest
+ # If not already set by puppet (e.g a pre-ocata version), set the
+ # upgrade_level for compute to "auto"
+ - name: Set compute upgrade level to auto
+ tags: step3
+ ini_file:
+ str_replace:
+ template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
+ params:
+ LEVEL: {get_param: UpgradeLevelNovaCompute}
diff --git a/puppet/services/nova-consoleauth.yaml b/puppet/services/nova-consoleauth.yaml
index 85e60420..b5a1312a 100644
--- a/puppet/services/nova-consoleauth.yaml
+++ b/puppet/services/nova-consoleauth.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Nova Consoleauth service configured with Puppet
@@ -48,3 +48,7 @@ outputs:
get_attr: [NovaBase, role_data, config_settings]
step_config: |
include tripleo::profile::base::nova::consoleauth
+ upgrade_tasks:
+ - name: Stop nova_consoleauth service
+ tags: step2
+ service: name=openstack-nova-consoleauth state=stopped
diff --git a/puppet/services/nova-ironic.yaml b/puppet/services/nova-ironic.yaml
index bf7639dd..5eb2170a 100644
--- a/puppet/services/nova-ironic.yaml
+++ b/puppet/services/nova-ironic.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Nova Compute service configured with Puppet and using Ironic
@@ -42,10 +42,10 @@ outputs:
- nova::compute::force_config_drive: true
nova::compute::reserved_host_memory: '0'
nova::compute::vnc_enabled: false
- nova::ironic::common::admin_password: {get_param: IronicPassword}
- nova::ironic::common::admin_tenant_name: 'service'
- nova::ironic::common::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri]}
- nova::ironic::common::admin_username: 'ironic'
+ nova::ironic::common::password: {get_param: IronicPassword}
+ nova::ironic::common::project_name: 'service'
+ nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ nova::ironic::common::username: 'ironic'
nova::ironic::common::api_endpoint: {get_param: [EndpointMap, IronicInternal, uri]}
nova::network::neutron::dhcp_domain: ''
nova::scheduler::filter::scheduler_host_manager: 'ironic_host_manager'
diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml
index 70774bac..faf1ae48 100644
--- a/puppet/services/nova-libvirt.yaml
+++ b/puppet/services/nova-libvirt.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Libvirt service configured with Puppet
@@ -21,6 +21,14 @@ parameters:
NovaComputeLibvirtType:
type: string
default: kvm
+ LibvirtEnabledPerfEvents:
+ type: comma_delimited_list
+ default: []
+ description: This is a list of performance events which can be monitored.
+ For example - ``enabled_perf_events = cmt, mbml, mbmt``
+ The list of supported events can be found at
+ https://libvirt.org/html/libvirt-libvirt-domain.html ,
+ where you may need to search for the key words ``VIR_PERF_PARAM_*``
MonitoringSubscriptionNovaLibvirt:
default: 'overcloud-nova-libvirt'
type: string
@@ -50,6 +58,11 @@ outputs:
tripleo::profile::base::nova::libvirt_enabled: true
nova::compute::libvirt::services::libvirt_virt_type: {get_param: NovaComputeLibvirtType}
nova::compute::libvirt::libvirt_virt_type: {get_param: NovaComputeLibvirtType}
+ nova::compute::libvirt::libvirt_enabled_perf_events: {get_param: LibvirtEnabledPerfEvents}
+ nova::compute::libvirt::qemu::configure_qemu: true
+ nova::compute::libvirt::qemu::max_files: 32768
+ nova::compute::libvirt::qemu::max_processes: 131072
+ nova::compute::libvirt::vncserver_listen: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
tripleo.nova_libvirt.firewall_rules:
'200 nova_libvirt':
dport:
diff --git a/puppet/services/nova-metadata.yaml b/puppet/services/nova-metadata.yaml
index 40931da6..376f95b1 100644
--- a/puppet/services/nova-metadata.yaml
+++ b/puppet/services/nova-metadata.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Nova API service configured with Puppet
diff --git a/puppet/services/nova-placement.yaml b/puppet/services/nova-placement.yaml
new file mode 100644
index 00000000..3ae19a67
--- /dev/null
+++ b/puppet/services/nova-placement.yaml
@@ -0,0 +1,133 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Nova Placement API service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NovaWorkers:
+ default: 0
+ description: Number of workers for Nova Placement API service.
+ type: number
+ NovaPassword:
+ description: The password for the nova service and db account, used by nova-placement.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ MonitoringSubscriptionNovaPlacement:
+ default: 'overcloud-nova-placement'
+ type: string
+ NovaPlacementLoggingSource:
+ type: json
+ default:
+ tag: openstack.nova.placement
+ path: /var/log/httpd/nova_placement_wsgi_error_ssl.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
+
+resources:
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
+ NovaBase:
+ type: ./nova-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Placement API service.
+ value:
+ service_name: nova_placement
+ monitoring_subscription: {get_param: MonitoringSubscriptionNovaPlacement}
+ logging_source: {get_param: NovaPlacementLoggingSource}
+ logging_groups:
+ - nova
+ config_settings:
+ map_merge:
+ - get_attr: [NovaBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.nova_placement.firewall_rules:
+ '138 nova_placement':
+ dport:
+ - 8778
+ - 13778
+ nova::keystone::authtoken::project_name: 'service'
+ nova::keystone::authtoken::password: {get_param: NovaPassword}
+ nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ nova::wsgi::apache_placement::api_port: '8778'
+ nova::wsgi::apache_placement::ssl: {get_param: EnableInternalTLS}
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet -> IP/CIDR
+ nova::wsgi::apache_placement::bind_host: {get_param: [ServiceNetMap, NovaPlacementNetwork]}
+ nova::wsgi::apache_placement::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NovaPlacementNetwork]}
+ -
+ if:
+ - nova_workers_zero
+ - {}
+ - nova::wsgi::apache_placement::workers: {get_param: NovaWorkers}
+ step_config: |
+ include tripleo::profile::base::nova::placement
+ service_config_settings:
+ keystone:
+ nova::keystone::auth_placement::tenant: 'service'
+ nova::keystone::auth_placement::public_url: {get_param: [EndpointMap, NovaPlacementPublic, uri]}
+ nova::keystone::auth_placement::internal_url: {get_param: [EndpointMap, NovaPlacementInternal, uri]}
+ nova::keystone::auth_placement::admin_url: {get_param: [EndpointMap, NovaPlacementAdmin, uri]}
+ nova::keystone::auth_placement::password: {get_param: NovaPassword}
+ nova::keystone::auth_placement::region: {get_param: KeystoneRegion}
+ mysql:
+ map_merge:
+ - {get_attr: [NovaBase, role_data, service_config_settings, mysql]}
+ - nova::db::mysql_placement::password: {get_param: NovaPassword}
+ nova::db::mysql_placement::user: nova_placement
+ nova::db::mysql_placement::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ nova::db::mysql_placement::dbname: nova_placement
+ nova::db::mysql_placement::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
+ upgrade_tasks:
+ - name: Stop nova_placement service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped
+ # The nova placement API isn't installed in newton images, so install
+ # it on upgrade
+ - name: Install nova-placement packages on upgrade
+ tags: step3
+ yum: name=openstack-nova-placement-api state=latest
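nova-placement.yaml is a new service template, so it only takes effect once it is mapped in the resource registry and listed in a role's services. A minimal environment sketch wiring it up, assuming the usual OS::TripleO::Services::<Name> naming convention (the actual registry entry belongs to another part of this change and is not shown in this hunk); NovaWorkers: 4 is an arbitrary example value that makes the nova_workers_zero condition false so the workers setting above is merged in:

    resource_registry:
      OS::TripleO::Services::NovaPlacement: ../puppet/services/nova-placement.yaml

    parameter_defaults:
      NovaWorkers: 4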
diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml
index d4e5fff6..0e0b9d1e 100644
--- a/puppet/services/nova-scheduler.yaml
+++ b/puppet/services/nova-scheduler.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Nova Scheduler service configured with Puppet
@@ -63,3 +63,10 @@ outputs:
nova::scheduler::filter::scheduler_default_filters: {get_param: NovaSchedulerDefaultFilters}
step_config: |
include tripleo::profile::base::nova::scheduler
+ upgrade_tasks:
+ - name: Stop nova_scheduler service
+ tags: step2
+ service: name=openstack-nova-scheduler state=stopped
+ - name: update nova scheduler
+ tags: step2
+ yum: name=openstack-nova-scheduler state=latest
diff --git a/puppet/services/nova-vnc-proxy.yaml b/puppet/services/nova-vnc-proxy.yaml
index e6b0703f..f6cf9649 100644
--- a/puppet/services/nova-vnc-proxy.yaml
+++ b/puppet/services/nova-vnc-proxy.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Nova Vncproxy service configured with Puppet
@@ -64,3 +64,7 @@ outputs:
- 13080
step_config: |
include tripleo::profile::base::nova::vncproxy
+ upgrade_tasks:
+ - name: Stop nova_vnc_proxy service
+ tags: step2
+ service: name=openstack-nova-novncproxy state=stopped
diff --git a/puppet/services/octavia-api.yaml b/puppet/services/octavia-api.yaml
new file mode 100644
index 00000000..909a3030
--- /dev/null
+++ b/puppet/services/octavia-api.yaml
@@ -0,0 +1,98 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Octavia API service.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OctaviaPassword:
+ description: The password for Octavia's database account.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ MonitoringSubscriptionOctaviaApi:
+ default: 'overcloud-octavia-api'
+ type: string
+ OctaviaApiLoggingSource:
+ type: json
+ default:
+ tag: openstack.octavia.api
+ path: /var/log/octavia/api.log
+
+resources:
+
+ OctaviaBase:
+ type: ./octavia-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Octavia API service.
+ value:
+ service_name: octavia_api
+ monitoring_subscription: {get_param: MonitoringSubscriptionOctaviaApi}
+ logging_source: {get_param: OctaviaApiLoggingSource}
+ logging_groups:
+ - octavia
+ config_settings:
+ map_merge:
+ - get_attr: [OctaviaBase, role_data, config_settings]
+ - octavia::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ octavia::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://octavia:'
+ - {get_param: OctaviaPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/octavia'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ octavia::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ octavia::keystone::authtoken::project_name: 'service'
+ octavia::keystone::authtoken::password: {get_param: OctaviaPassword}
+ octavia::api::sync_db: true
+ tripleo.octavia_api.firewall_rules:
+ '120 octavia api':
+ dport:
+ - 9876
+ - 13876
+ octavia::api::host: {get_param: [ServiceNetMap, OctaviaApiNetwork]}
+ neutron::server::service_providers: ['LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default']
+ step_config: |
+ include tripleo::profile::base::octavia::api
+ service_config_settings:
+ keystone:
+ octavia::keystone::auth::tenant: 'service'
+ octavia::keystone::auth::public_url: {get_param: [EndpointMap, OctaviaPublic, uri]}
+ octavia::keystone::auth::internal_url: { get_param: [ EndpointMap, OctaviaInternal, uri ] }
+ octavia::keystone::auth::admin_url: { get_param: [ EndpointMap, OctaviaAdmin, uri ] }
+ octavia::keystone::auth::password: {get_param: OctaviaPassword}
+ octavia::keystone::auth::region: {get_param: KeystoneRegion}
+ mysql:
+ octavia::db::mysql::password: {get_param: OctaviaPassword}
+ octavia::db::mysql::user: octavia
+ octavia::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ octavia::db::mysql::dbname: octavia
+ octavia::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/octavia-base.yaml b/puppet/services/octavia-base.yaml
new file mode 100644
index 00000000..b537a2bc
--- /dev/null
+++ b/puppet/services/octavia-base.yaml
@@ -0,0 +1,62 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Octavia base service. Shared for all Octavia services
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ Debug:
+ type: string
+ default: ''
+ description: Set to True to enable debugging on all services.
+ EnableConfigPurge:
+ type: boolean
+ default: true
+ description: >
+ Remove configuration that is not generated by TripleO. Setting
+ to false may result in configuration remnants after updates/upgrades.
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+
+outputs:
+ role_data:
+ description: Base role data for Octavia services
+ value:
+ service_name: octavia_base
+ config_settings:
+ octavia::debug: {get_param: Debug}
+ octavia::purge_config: {get_param: EnableConfigPurge}
+ octavia::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ tripleo::profile::base::octavia::rabbit_user: {get_param: RabbitUserName}
+ tripleo::profile::base::octavia::rabbit_password: {get_param: RabbitPassword}
+ tripleo::profile::base::octavia::rabbit_port: {get_param: RabbitClientPort}
+
diff --git a/puppet/services/octavia-health-manager.yaml b/puppet/services/octavia-health-manager.yaml
new file mode 100644
index 00000000..51d32f23
--- /dev/null
+++ b/puppet/services/octavia-health-manager.yaml
@@ -0,0 +1,61 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Octavia Health Manager service.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MonitoringSubscriptionOctaviaHealthManager:
+ default: 'overcloud-octavia-health-manager'
+ type: string
+ OctaviaHealthManagerLoggingSource:
+ type: json
+ default:
+ tag: openstack.octavia.health-manager
+ path: /var/log/octavia/health-manager.log
+ OctaviaHeartbeatKey:
+ type: string
+ description: Key to identify heartbeat messages for amphorae.
+ hidden: true
+
+resources:
+
+ OctaviaBase:
+ type: ./octavia-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Octavia Health Manager service.
+ value:
+ service_name: octavia_health_manager
+ monitoring_subscription: {get_param: MonitoringSubscriptionOctaviaHealthManager}
+ logging_source: {get_param: OctaviaHealthManagerLoggingSource}
+ logging_groups:
+ - octavia
+ config_settings:
+ map_merge:
+ - get_attr: [OctaviaBase, role_data, config_settings]
+ - octavia::health_manager::heartbeat_key: {get_param: OctaviaHeartbeatKey}
+ octavia::health_manager::event_streamer_driver: 'queue_event_streamer'
+ step_config: |
+ include tripleo::profile::base::octavia::health_manager
+
+
+
diff --git a/puppet/services/octavia-housekeeping.yaml b/puppet/services/octavia-housekeeping.yaml
new file mode 100644
index 00000000..84c33433
--- /dev/null
+++ b/puppet/services/octavia-housekeeping.yaml
@@ -0,0 +1,70 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Octavia Housekeeping service.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OctaviaAmphoraExpiryAge:
+ default: 0
+ description: The interval in seconds after which an unused Amphora will
+ be considered expired and cleaned up. If left at 0, the
+ configuration will not be set and the system will use
+ the service defaults.
+ type: number
+ MonitoringSubscriptionOctaviaHousekeeping:
+ default: 'overcloud-octavia-housekeeping'
+ type: string
+ OctaviaHousekeepingLoggingSource:
+ type: json
+ default:
+ tag: openstack.octavia.housekeeping
+ path: /var/log/octavia/housekeeping.log
+
+conditions:
+ amphora_expiry_is_zero: {equals: [{get_param: OctaviaAmphoraExpiryAge}, 0]}
+
+
+resources:
+
+ OctaviaBase:
+ type: ./octavia-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Octavia Housekeeping service.
+ value:
+ service_name: octavia_housekeeping
+ monitoring_subscription: {get_param: MonitoringSubscriptionOctaviaHousekeeping}
+ logging_source: {get_param: OctaviaHousekeepingLoggingSource}
+ logging_groups:
+ - octavia
+ config_settings:
+ map_merge:
+ - get_attr: [OctaviaBase, role_data, config_settings]
+ -
+ if:
+ - amphora_expiry_is_zero
+ - {}
+ - octavia::worker::amphora_expiry_age: {get_param: OctaviaAmphoraExpiryAge}
+ step_config: |
+ include tripleo::profile::base::octavia::housekeeping
+
+
diff --git a/puppet/services/octavia-worker.yaml b/puppet/services/octavia-worker.yaml
new file mode 100644
index 00000000..9212b76b
--- /dev/null
+++ b/puppet/services/octavia-worker.yaml
@@ -0,0 +1,102 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Octavia Worker service.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MonitoringSubscriptionOctaviaWorker:
+ default: 'overcloud-octavia-worker'
+ type: string
+ OctaviaWorkerLoggingSource:
+ type: json
+ default:
+ tag: openstack.octavia.worker
+ path: /var/log/octavia/worker.log
+ OctaviaAmphoraImageTag:
+ default: ''
+ description: Glance image tag for identifying the amphora image.
+ type: string
+ OctaviaAmphoraNetworkList:
+ default: []
+ description: List of networks to attach to amphorae.
+ type: comma_delimited_list
+ OctaviaLoadBalancerTopology:
+ default: ''
+ description: Load balancer topology configuration.
+ type: string
+ OctaviaFlavorId:
+ default: 65
+ description: Nova flavor ID to be used when creating the nova flavor for
+ amphora.
+ type: number
+ OctaviaFlavorProperties:
+ default: {}
+ description: Dictionary describing the nova flavor for amphora.
+ type: json
+ OctaviaManageNovaFlavor:
+ default: false
+ description: Configure the nova flavor for the amphora.
+ type: boolean
+ OctaviaSSHKeyName:
+ default: 'octavia-ssh-key'
+ description: Name of the SSH key to be configured so that the amphorae can
+ be logged into.
+ type: string
+
+conditions:
+ octavia_topology_unset: {equals : [{get_param: OctaviaLoadBalancerTopology}, ""]}
+ octavia_amphora_tag_unset: {equals: [{get_param: OctaviaAmphoraImageTag}, ""]}
+
+resources:
+
+ OctaviaBase:
+ type: ./octavia-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Octavia Worker service.
+ value:
+ service_name: octavia_worker
+ monitoring_subscription: {get_param: MonitoringSubscriptionOctaviaWorker}
+ logging_source: {get_param: OctaviaWorkerLoggingSource}
+ logging_groups:
+ - octavia
+ config_settings:
+ map_merge:
+ - get_attr: [OctaviaBase, role_data, config_settings]
+ - octavia::worker::amp_boot_network_list: {get_param: OctaviaAmphoraNetworkList}
+ octavia::worker::amp_flavor_id: {get_param: OctaviaFlavorId}
+ octavia::worker::nova_flavor_config: {get_param: OctaviaFlavorProperties}
+ octavia::worker::manage_nova_flavor: {get_param: OctaviaManageNovaFlavor}
+ octavia::worker::ssh_key_name: {get_param: OctaviaSSHKeyName}
+ -
+ if:
+ - octavia_amphora_tag_unset
+ - {}
+ - octavia::worker::amp_image_tag: {get_param: OctaviaAmphoraImageTag}
+ -
+ if:
+ - octavia_topology_unset
+ - {}
+ - octavia::worker::loadbalancer_topology: {get_param: OctaviaLoadBalancerTopology}
+ step_config: |
+ include tripleo::profile::base::octavia::worker
+
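The two conditions above make the amphora image tag and the load balancer topology optional: when the parameters keep their empty defaults, the if branches merge {} and the corresponding hiera keys are simply not set. A sketch of a parameter_defaults snippet that flips both conditions (the values are examples, not defaults taken from this change):

    parameter_defaults:
      OctaviaAmphoraImageTag: 'amphora-image'     # octavia_amphora_tag_unset -> false
      OctaviaLoadBalancerTopology: 'SINGLE'       # octavia_topology_unset -> false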
diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml
index 253d63ef..0ed9d206 100644
--- a/puppet/services/opendaylight-api.yaml
+++ b/puppet/services/opendaylight-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenDaylight SDN Controller.
@@ -17,10 +17,6 @@ parameters:
type: string
description: The password for the opendaylight server.
hidden: true
- OpenDaylightEnableL3:
- description: Knob to enable/disable ODL L3
- type: string
- default: 'no'
OpenDaylightEnableDHCP:
description: Knob to enable/disable ODL DHCP Server
type: boolean
@@ -56,9 +52,14 @@ outputs:
opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
opendaylight::username: {get_param: OpenDaylightUsername}
opendaylight::password: {get_param: OpenDaylightPassword}
- opendaylight::enable_l3: {get_param: OpenDaylightEnableL3}
opendaylight::extra_features: {get_param: OpenDaylightFeatures}
opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP}
opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
+ tripleo.opendaylight_api.firewall_rules:
+ '137 opendaylight api':
+ dport:
+ - {get_param: OpenDaylightPort}
+ - 6640
+ - 6653
step_config: |
include tripleo::profile::base::neutron::opendaylight
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index 907ecddc..cfec3c48 100644
--- a/puppet/services/opendaylight-ovs.yaml
+++ b/puppet/services/opendaylight-ovs.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenDaylight OVS Configuration.
@@ -8,6 +8,15 @@ parameters:
default: 8081
description: Set opendaylight service port
type: number
+ OpenDaylightUsername:
+ default: 'admin'
+ description: The username for the opendaylight server.
+ type: string
+ OpenDaylightPassword:
+ default: 'admin'
+ type: string
+ description: The password for the opendaylight server.
+ hidden: true
OpenDaylightConnectionProtocol:
description: L7 protocol used for REST access
type: string
@@ -46,6 +55,8 @@ outputs:
service_name: opendaylight_ovs
config_settings:
opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
+ opendaylight::username: {get_param: OpenDaylightUsername}
+ opendaylight::password: {get_param: OpenDaylightPassword}
opendaylight_check_url: {get_param: OpenDaylightCheckURL}
opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
diff --git a/puppet/services/ovn-dbs.yaml b/puppet/services/ovn-dbs.yaml
new file mode 100644
index 00000000..7f81afde
--- /dev/null
+++ b/puppet/services/ovn-dbs.yaml
@@ -0,0 +1,40 @@
+heat_template_version: ocata
+
+description: >
+ OVN databases configured with puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OVNNorthboundServerPort:
+ description: Port of the OVN Northbound DB server
+ type: number
+ default: 6641
+ OVNSouthboundServerPort:
+ description: Port of the OVN Southbound DB server
+ type: number
+ default: 6642
+
+outputs:
+ role_data:
+ description: Role data for the OVN northd service
+ value:
+ service_name: ovn_dbs
+ config_settings:
+ ovn::northbound::port: {get_param: OVNNorthboundServerPort}
+ ovn::southbound::port: {get_param: OVNSouthboundServerPort}
+ ovn::northd::dbs_listen_ip: {get_param: [ServiceNetMap, OvnDbsNetwork]}
+ step_config: |
+ include ::tripleo::profile::base::neutron::ovn_northd
diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml
index abfb9c80..ca21cfbe 100644
--- a/puppet/services/pacemaker.yaml
+++ b/puppet/services/pacemaker.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Pacemaker service configured with Puppet
@@ -29,11 +29,22 @@ parameters:
default: false
description: Whether to enable fencing in Pacemaker or not.
type: boolean
+ PacemakerRemoteAuthkey:
+ type: string
+ description: The authkey for the pacemaker remote service.
+ hidden: true
+ default: ''
PcsdPassword:
type: string
description: The password for the 'pcsd' user for pacemaker.
hidden: true
default: ''
+ CorosyncSettleTries:
+ type: number
+ description: Number of tries for cluster settling. This has the
+ same default as the pacemaker puppet module. Override
+ to a smaller value when you need to replace a controller node.
+ default: 360
FencingConfig:
default: {}
description: |
@@ -76,6 +87,10 @@ parameters:
\[(?<pid>[^ ]*)\]
(?<host>[^ ]*)
(?<message>.*)$/
+ PacemakerResources:
+ type: comma_delimited_list
+ description: List of resources managed by pacemaker
+ default: ['rabbitmq','haproxy']
outputs:
role_data:
@@ -92,6 +107,7 @@ outputs:
pacemaker::resource_defaults::defaults:
resource-stickiness: { value: INFINITY }
corosync_token_timeout: 10000
+ pacemaker::corosync::settle_tries: {get_param: CorosyncSettleTries}
tripleo.pacemaker.firewall_rules:
'130 pacemaker tcp':
proto: 'tcp'
@@ -112,5 +128,20 @@ outputs:
passwords:
- {get_param: PcsdPassword}
- {get_param: [DefaultPasswords, pcsd_password]}
+ tripleo::profile::base::pacemaker::remote_authkey: {get_param: PacemakerRemoteAuthkey}
step_config: |
include ::tripleo::profile::base::pacemaker
+ upgrade_tasks:
+ - name: Check pacemaker cluster running before upgrade
+ tags: step0,validation
+ pacemaker_cluster: state=online check_and_fail=true
+ - name: Stop pacemaker cluster
+ tags: step1
+ pacemaker_cluster: state=offline
+ - name: Start pacemaker cluster
+ tags: step4
+ pacemaker_cluster: state=online
+ - name: Check pacemaker resource
+ tags: step4
+ pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=200
+ with_items: {get_param: PacemakerResources}
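The new PacemakerResources and CorosyncSettleTries parameters feed the corosync setting and upgrade tasks above; the step4 check iterates over each listed resource via with_items. A sketch of overriding them from an environment file (values are illustrative only):

    parameter_defaults:
      CorosyncSettleTries: 30            # shorter settling, e.g. while replacing a controller
      PacemakerResources: ['rabbitmq', 'haproxy']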
diff --git a/puppet/services/pacemaker/ceilometer-agent-central.yaml b/puppet/services/pacemaker/ceilometer-agent-central.yaml
deleted file mode 100644
index 5dcb62ca..00000000
--- a/puppet/services/pacemaker/ceilometer-agent-central.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Ceilometer Central Agent service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- MonitoringSubscriptionCeilometerCentral:
- default: 'overcloud-ceilometer-agent-central'
- type: string
-
-resources:
- CeilometerServiceBase:
- type: ../ceilometer-agent-central.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Ceilometer Central Agent pacemaker role.
- value:
- service_name: ceilometer_agent_central
- monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCentral}
- config_settings:
- map_merge:
- - get_attr: [CeilometerServiceBase, role_data, config_settings]
- - ceilometer::agent::central::manage_service: false
- ceilometer::agent::central::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::ceilometer::agent::central
diff --git a/puppet/services/pacemaker/ceilometer-agent-notification.yaml b/puppet/services/pacemaker/ceilometer-agent-notification.yaml
deleted file mode 100644
index dbe14499..00000000
--- a/puppet/services/pacemaker/ceilometer-agent-notification.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Ceilometer Notification Agent service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- MonitoringSubscriptionCeilometerNotification:
- default: 'overcloud-ceilometer-agent-notification'
- type: string
-
-resources:
- CeilometerServiceBase:
- type: ../ceilometer-agent-notification.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Ceilometer Notification Agent pacemaker role.
- value:
- service_name: ceilometer_agent_notification
- monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerNotification}
- config_settings:
- map_merge:
- - get_attr: [CeilometerServiceBase, role_data, config_settings]
- - ceilometer::agent::notification::manage_service: false
- ceilometer::agent::notification::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::ceilometer::agent::notification
diff --git a/puppet/services/pacemaker/ceilometer-collector.yaml b/puppet/services/pacemaker/ceilometer-collector.yaml
deleted file mode 100644
index 4c919515..00000000
--- a/puppet/services/pacemaker/ceilometer-collector.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Ceilometer Collector service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- MonitoringSubscriptionCeilometerCollector:
- default: 'overcloud-ceilometer-collector'
- type: string
-
-resources:
- CeilometerServiceBase:
- type: ../ceilometer-collector.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Ceilometer Collector pacemaker role.
- value:
- service_name: ceilometer_collector
- monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCollector}
- config_settings:
- map_merge:
- - get_attr: [CeilometerServiceBase, role_data, config_settings]
- - ceilometer::collector::manage_service: false
- ceilometer::collector::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::ceilometer::collector
diff --git a/puppet/services/pacemaker/gnocchi-api.yaml b/puppet/services/pacemaker/ceph-rbdmirror.yaml
index 6a9161fa..7686028d 100644
--- a/puppet/services/pacemaker/gnocchi-api.yaml
+++ b/puppet/services/pacemaker/ceph-rbdmirror.yaml
@@ -1,7 +1,7 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
- Gnocchi service configured with Puppet
+ Ceph RBD mirror service.
parameters:
ServiceNetMap:
@@ -18,13 +18,13 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- MonitoringSubscriptionGnocchiApi:
- default: 'overcloud-gnocchi-api'
+ CephClientUserName:
+ default: openstack
type: string
resources:
- GnocchiServiceBase:
- type: ../gnocchi-api.yaml
+ CephBase:
+ type: ../ceph-base.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
@@ -32,14 +32,16 @@ resources:
outputs:
role_data:
- description: Role data for the Gnocchi role.
+ description: Role data for the Ceph RBD mirror service.
value:
- service_name: gnocchi_api
- monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiApi}
+ service_name: ceph_rbdmirror
config_settings:
map_merge:
- - get_attr: [GnocchiServiceBase, role_data, config_settings]
- - gnocchi::metricd::manage_service: false
- gnocchi::metricd::enabled: false
+ - get_attr: [CephBase, role_data, config_settings]
+ - tripleo::profile::pacemaker::ceph::rbdmirror::client_name: {get_param: CephClientUserName}
+ tripleo.ceph_rbdmirror.firewall_rules:
+ '113 ceph_rbdmirror':
+ dport:
+ - '6800-7300'
step_config: |
- include ::tripleo::profile::pacemaker::gnocchi::api
+ include ::tripleo::profile::pacemaker::ceph::rbdmirror
\ No newline at end of file
diff --git a/puppet/services/pacemaker/cinder-api.yaml b/puppet/services/pacemaker/cinder-api.yaml
deleted file mode 100644
index 6823789e..00000000
--- a/puppet/services/pacemaker/cinder-api.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Cinder API service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- CinderApiBase:
- type: ../cinder-api.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Cinder API role.
- value:
- service_name: cinder_api
- monitoring_subscription: {get_attr: [CinderApiBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [CinderApiBase, role_data, logging_source]}
- logging_groups: {get_attr: [CinderApiBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [CinderApiBase, role_data, config_settings]
- - cinder::api::manage_service: false
- cinder::api::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::cinder::api
diff --git a/puppet/services/pacemaker/cinder-backup.yaml b/puppet/services/pacemaker/cinder-backup.yaml
index 2ebc7680..e75ac973 100644
--- a/puppet/services/pacemaker/cinder-backup.yaml
+++ b/puppet/services/pacemaker/cinder-backup.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Cinder Backup service with Pacemaker configured with Puppet
diff --git a/puppet/services/pacemaker/cinder-scheduler.yaml b/puppet/services/pacemaker/cinder-scheduler.yaml
deleted file mode 100644
index 15e44be2..00000000
--- a/puppet/services/pacemaker/cinder-scheduler.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Cinder Scheduler service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- CinderSchedulerBase:
- type: ../cinder-scheduler.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Cinder Scheduler role.
- value:
- service_name: cinder_scheduler
- monitoring_subscription: {get_attr: [CinderSchedulerBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [CinderSchedulerBase, role_data, logging_source]}
- logging_groups: {get_attr: [CinderSchedulerBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [CinderSchedulerBase, role_data, config_settings]
- - cinder::scheduler::manage_service: false
- cinder::scheduler::enabled: false
- step_config:
- include ::tripleo::profile::pacemaker::cinder::scheduler
diff --git a/puppet/services/pacemaker/cinder-volume.yaml b/puppet/services/pacemaker/cinder-volume.yaml
index d91a0181..bef47a57 100644
--- a/puppet/services/pacemaker/cinder-volume.yaml
+++ b/puppet/services/pacemaker/cinder-volume.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Cinder Volume service with Pacemaker configured with Puppet
diff --git a/puppet/services/pacemaker/database/mysql.yaml b/puppet/services/pacemaker/database/mysql.yaml
index f2905903..93bf5967 100644
--- a/puppet/services/pacemaker/database/mysql.yaml
+++ b/puppet/services/pacemaker/database/mysql.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
MySQL with Pacemaker service deployment using puppet
@@ -53,3 +53,9 @@ outputs:
get_param: [ServiceNetMap, MysqlNetwork]
step_config: |
include ::tripleo::profile::pacemaker::database::mysql
+ metadata_settings:
+ get_attr: [MysqlBase, role_data, metadata_settings]
+ upgrade_tasks:
+ - name: Check for galera root password
+ tags: step0
+ file: path=/root/.my.cnf state=file
diff --git a/puppet/services/pacemaker/database/redis.yaml b/puppet/services/pacemaker/database/redis.yaml
index 196754eb..e702d28b 100644
--- a/puppet/services/pacemaker/database/redis.yaml
+++ b/puppet/services/pacemaker/database/redis.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Redis service configured with Puppet
diff --git a/puppet/services/pacemaker/glance-api.yaml b/puppet/services/pacemaker/glance-api.yaml
deleted file mode 100644
index 20a439f6..00000000
--- a/puppet/services/pacemaker/glance-api.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Glance API service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- GlanceFilePcmkDevice:
- default: ''
- description: >
- An exported storage device that should be mounted by Pacemaker
- as Glance storage. Effective when GlanceFilePcmkManage is true.
- type: string
- GlanceFilePcmkFstype:
- default: 'nfs'
- description: >
- Filesystem type for Pacemaker mount used as Glance storage.
- Effective when GlanceFilePcmkManage is true.
- type: string
- GlanceFilePcmkManage:
- default: false
- description: >
- Whether to make Glance file backend a mount managed by Pacemaker.
- Effective when GlanceBackend is 'file'.
- type: boolean
- GlanceFilePcmkOptions:
- default: ''
- description: >
- Mount options for Pacemaker mount used as Glance storage.
- Effective when GlanceFilePcmkManage is true.
- type: string
-
-resources:
-
- GlanceApiBase:
- type: ../glance-api.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Glance role.
- value:
- service_name: glance_api
- monitoring_subscription: {get_attr: [GlanceApiBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [GlanceApiBase, role_data, logging_source]}
- logging_groups: {get_attr: [GlanceApiBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [GlanceApiBase, role_data, config_settings]
- - glance_file_pcmk_device: {get_param: GlanceFilePcmkDevice}
- glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype}
- glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage}
- glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions}
- glance_file_pcmk_directory: '/var/lib/glance/images'
- glance::api::manage_service: false
- glance::api::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::glance
diff --git a/puppet/services/pacemaker/glance-registry.yaml b/puppet/services/pacemaker/glance-registry.yaml
deleted file mode 100644
index 41f89fdd..00000000
--- a/puppet/services/pacemaker/glance-registry.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Glance Registry service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- GlanceRegistryBase:
- type: ../glance-registry.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Glance role.
- value:
- service_name: glance_registry
- monitoring_subscription: {get_attr: [GlanceRegistryBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [GlanceRegistryBase, role_data, logging_source]}
- logging_groups: {get_attr: [GlanceRegistryBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [GlanceRegistryBase, role_data, config_settings]
- - glance::registry::manage_service: false
- glance::registry::enabled: false
- # No puppet manifests since glance-registry is included in
- # ::tripleo::profile::pacemaker::glance which is maintained alongside of
- # pacemaker/glance-api.yaml.
- step_config:
diff --git a/puppet/services/pacemaker/gnocchi-metricd.yaml b/puppet/services/pacemaker/gnocchi-metricd.yaml
deleted file mode 100644
index 0f36b5d5..00000000
--- a/puppet/services/pacemaker/gnocchi-metricd.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Gnocchi service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- MonitoringSubscriptionGnocchiMetricd:
- default: 'overcloud-gnocchi-metricd'
- type: string
-
-resources:
- GnocchiServiceBase:
- type: ../gnocchi-metricd.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Gnocchi role.
- value:
- service_name: gnocchi_metricd
- monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiMetricd}
- config_settings:
- map_merge:
- - get_attr: [GnocchiServiceBase, role_data, config_settings]
- - gnocchi::metricd::manage_service: false
- gnocchi::metricd::enabled: false
- tripleo::profile::pacemaker::gnocchi::gnocchi_indexer_backend: {get_attr: [GnocchiServiceBase, aux_parameters, gnocchi_indexer_backend]}
-
- step_config: |
- include ::tripleo::profile::pacemaker::gnocchi::metricd
diff --git a/puppet/services/pacemaker/gnocchi-statsd.yaml b/puppet/services/pacemaker/gnocchi-statsd.yaml
deleted file mode 100644
index b9afc590..00000000
--- a/puppet/services/pacemaker/gnocchi-statsd.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Gnocchi service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- MonitoringSubscriptionGnocchiStatsd:
- default: 'overcloud-gnocchi-statsd'
- type: string
-
-resources:
- GnocchiServiceBase:
- type: ../gnocchi-statsd.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Gnocchi role.
- value:
- service_name: gnocchi_statsd
- monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiStatsd}
- config_settings:
- map_merge:
- - get_attr: [GnocchiServiceBase, role_data, config_settings]
- - gnocchi::statsd::manage_service: false
- gnocchi::statsd::enabled: false
- tripleo::profile::pacemaker::gnocchi::gnocchi_indexer_backend: {get_attr: [GnocchiServiceBase, aux_parameters, gnocchi_indexer_backend]}
- step_config: |
- include ::tripleo::profile::pacemaker::gnocchi::statsd
diff --git a/puppet/services/pacemaker/haproxy.yaml b/puppet/services/pacemaker/haproxy.yaml
index e4115d64..598deaef 100644
--- a/puppet/services/pacemaker/haproxy.yaml
+++ b/puppet/services/pacemaker/haproxy.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
HAproxy service with Pacemaker configured with Puppet
@@ -40,3 +40,5 @@ outputs:
tripleo::haproxy::mysql_clustercheck: true
step_config: |
include ::tripleo::profile::pacemaker::haproxy
+ metadata_settings:
+ get_attr: [LoadbalancerServiceBase, role_data, metadata_settings]
diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml
deleted file mode 100644
index dd25905b..00000000
--- a/puppet/services/pacemaker/heat-api-cfn.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Openstack Heat CloudFormation API service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
- HeatApiCfnBase:
- type: ../heat-api-cfn.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Heat CloudFormation API role.
- value:
- service_name: heat_api_cfn
- monitoring_subscription: {get_attr: [HeatApiCfnBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [HeatApiCfnBase, role_data, logging_source]}
- logging_groups: {get_attr: [HeatApiCfnBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [HeatApiCfnBase, role_data, config_settings]
- - heat::api_cfn::manage_service: false
- heat::api_cfn::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::heat::api_cfn
diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml
deleted file mode 100644
index 18d2a0d5..00000000
--- a/puppet/services/pacemaker/heat-api-cloudwatch.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Openstack Heat CloudWatch API service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
- HeatApiCloudwatchBase:
- type: ../heat-api-cloudwatch.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Heat Cloudwatch API role.
- value:
- service_name: heat_api_cloudwatch
- monitoring_subscription: {get_attr: [HeatApiCloudwatchBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [HeatApiCloudwatchBase, role_data, logging_source]}
- logging_groups: {get_attr: [HeatApiCloudwatchBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [HeatApiCloudwatchBase, role_data, config_settings]
- - heat::api_cloudwatch::manage_service: false
- heat::api_cloudwatch::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::heat::api_cloudwatch
diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml
deleted file mode 100644
index 43122cb0..00000000
--- a/puppet/services/pacemaker/heat-api.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Openstack Heat API service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
- HeatApiBase:
- type: ../heat-api.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Heat API role.
- value:
- service_name: heat_api
- monitoring_subscription: {get_attr: [HeatApiBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [HeatApiBase, role_data, logging_source]}
- logging_groups: {get_attr: [HeatApiBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [HeatApiBase, role_data, config_settings]
- - heat::api::manage_service: false
- heat::api::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::heat::api
diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml
deleted file mode 100644
index 54bfdad2..00000000
--- a/puppet/services/pacemaker/heat-engine.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Openstack Heat Engine service configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
- HeatEngineBase:
- type: ../heat-engine.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-
-outputs:
- role_data:
- description: Role data for the Heat engine role.
- value:
- service_name: heat_engine
- monitoring_subscription: {get_attr: [HeatEngineBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [HeatEngineBase, role_data, logging_source]}
- logging_groups: {get_attr: [HeatEngineBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [HeatEngineBase, role_data, config_settings]
- - heat::engine::manage_service: false
- heat::engine::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::heat::engine
diff --git a/puppet/services/pacemaker/horizon.yaml b/puppet/services/pacemaker/horizon.yaml
deleted file mode 100644
index 18de23ae..00000000
--- a/puppet/services/pacemaker/horizon.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Horizon service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- HorizonBase:
- type: ../horizon.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Horizon role.
- value:
- service_name: horizon
- monitoring_subscription: {get_attr: [HorizonBase, role_data, monitoring_subscription]}
- config_settings:
- get_attr: [HorizonBase, role_data, config_settings]
- step_config: |
- include ::tripleo::profile::base::horizon
- include ::tripleo::profile::pacemaker::apache
diff --git a/puppet/services/pacemaker/keystone.yaml b/puppet/services/pacemaker/keystone.yaml
deleted file mode 100644
index 908b9bbd..00000000
--- a/puppet/services/pacemaker/keystone.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Keystone service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- KeystoneServiceBase:
- type: ../keystone.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Keystone pacemaker role.
- value:
- service_name: keystone
- monitoring_subscription: {get_attr: [KeystoneServiceBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [KeystoneServiceBase, role_data, logging_source]}
- logging_groups: {get_attr: [KeystoneServiceBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [KeystoneServiceBase, role_data, config_settings]
- - keystone::manage_service: false
- keystone::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::keystone
diff --git a/puppet/services/pacemaker/manila-share.yaml b/puppet/services/pacemaker/manila-share.yaml
index cabc31a0..ddc13df3 100644
--- a/puppet/services/pacemaker/manila-share.yaml
+++ b/puppet/services/pacemaker/manila-share.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
The manila-share service with Pacemaker configured with Puppet
diff --git a/puppet/services/pacemaker/memcached.yaml b/puppet/services/pacemaker/memcached.yaml
deleted file mode 100644
index 04b895b6..00000000
--- a/puppet/services/pacemaker/memcached.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- Mecached service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- MemcachedServiceBase:
- type: ../memcached.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Memcached pacemaker role.
- value:
- service_name: memcached
- monitoring_subscription: {get_attr: [MemcachedServiceBase, role_data, monitoring_subscription]}
- config_settings:
- map_merge:
- - get_attr: [MemcachedServiceBase, role_data, config_settings]
- - memcached::service_manage: false
- step_config: |
- include ::tripleo::profile::pacemaker::memcached
diff --git a/puppet/services/pacemaker/neutron-dhcp.yaml b/puppet/services/pacemaker/neutron-dhcp.yaml
deleted file mode 100644
index 7fca73d6..00000000
--- a/puppet/services/pacemaker/neutron-dhcp.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron DHCP service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronDhcpBase:
- type: ../neutron-dhcp.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron DHCP role.
- value:
- service_name: neutron_dhcp
- monitoring_subscription: {get_attr: [NeutronDhcpBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NeutronDhcpBase, role_data, logging_source]}
- logging_groups: {get_attr: [NeutronDhcpBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NeutronDhcpBase, role_data, config_settings]
- - tripleo::profile::pacemaker::neutron::enable_dhcp: True
- neutron::agents::dhcp::enabled: false
- neutron::agents::dhcp::manage_service: false
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::dhcp
diff --git a/puppet/services/pacemaker/neutron-l3.yaml b/puppet/services/pacemaker/neutron-l3.yaml
deleted file mode 100644
index cdb87f50..00000000
--- a/puppet/services/pacemaker/neutron-l3.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron L3 service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronL3Base:
- type: ../neutron-l3.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron L3 role.
- value:
- service_name: neutron_l3
- monitoring_subscription: {get_attr: [NeutronL3Base, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NeutronL3Base, role_data, logging_source]}
- logging_groups: {get_attr: [NeutronL3Base, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NeutronL3Base, role_data, config_settings]
- - tripleo::profile::pacemaker::neutron::enable_l3: True
- neutron::agents::l3::enabled: false
- neutron::agents::l3::manage_service: false
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::l3
diff --git a/puppet/services/pacemaker/neutron-metadata.yaml b/puppet/services/pacemaker/neutron-metadata.yaml
deleted file mode 100644
index 49a31eb5..00000000
--- a/puppet/services/pacemaker/neutron-metadata.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron Metadata service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronMetadataBase:
- type: ../neutron-metadata.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron Metadata role.
- value:
- service_name: neutron_metadata
- monitoring_subscription: {get_attr: [NeutronMetadataBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NeutronMetadataBase, role_data, logging_source]}
- logging_groups: {get_attr: [NeutronMetadataBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NeutronMetadataBase, role_data, config_settings]
- - tripleo::profile::pacemaker::neutron::enable_metadata: True
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::metadata
diff --git a/puppet/services/pacemaker/neutron-ovs-agent.yaml b/puppet/services/pacemaker/neutron-ovs-agent.yaml
deleted file mode 100644
index a2bd7c83..00000000
--- a/puppet/services/pacemaker/neutron-ovs-agent.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron OVS agent with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronOvsBase:
- type: ../neutron-ovs-agent.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron OVS agent service.
- value:
- service_name: neutron_ovs_agent
- monitoring_subscription: {get_attr: [NeutronOvsBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NeutronOvsBase, role_data, logging_source]}
- logging_groups: {get_attr: [NeutronOvsBase, role_data, logging_groups]}
- config_settings:
- get_attr: [NeutronOvsBase, role_data, config_settings]
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::ovs
diff --git a/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml b/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml
deleted file mode 100644
index 80d6ed92..00000000
--- a/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron OpenContrail Plugin with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NeutronPluginOpenContrail:
- type: ../neutron-plugin-nuage.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron OpenContrail plugin.
- value:
- service_name: neutron_plugin_opencontrail
- config_settings:
- map_merge:
- - get_attr: [NeutronPluginOpenContrail, role_data, config_settings]
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::plugins::opencontrail
diff --git a/puppet/services/pacemaker/neutron-server.yaml b/puppet/services/pacemaker/neutron-server.yaml
deleted file mode 100644
index 33bc2d99..00000000
--- a/puppet/services/pacemaker/neutron-server.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Neutron Server with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- NeutronL3HA:
- default: true
- description: Whether to enable HA for virtual routers
- type: boolean
-
-resources:
-
- NeutronServerBase:
- type: ../neutron-server.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Neutron Server.
- value:
- service_name: neutron_server
- monitoring_subscription: {get_attr: [NeutronServerBase, role_data, monitoring_subscription]}
- config_settings:
- map_merge:
- - get_attr: [NeutronServerBase, role_data, config_settings]
- - neutron::server::enabled: false
- neutron::server::manage_service: false
- neutron::server::l3_ha: {get_param: NeutronL3HA}
- step_config: |
- include ::tripleo::profile::pacemaker::neutron::server
diff --git a/puppet/services/pacemaker/nova-api.yaml b/puppet/services/pacemaker/nova-api.yaml
deleted file mode 100644
index b86e438a..00000000
--- a/puppet/services/pacemaker/nova-api.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Nova API service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NovaApiBase:
- type: ../nova-api.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Nova API role.
- value:
- service_name: nova_api
- monitoring_subscription: {get_attr: [NovaApiBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NovaApiBase, role_data, logging_source]}
- logging_groups: {get_attr: [NovaApiBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NovaApiBase, role_data, config_settings]
- - nova::api::manage_service: false
- nova::api::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::nova::api
diff --git a/puppet/services/pacemaker/nova-conductor.yaml b/puppet/services/pacemaker/nova-conductor.yaml
deleted file mode 100644
index a0a766ec..00000000
--- a/puppet/services/pacemaker/nova-conductor.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Nova Conductor service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NovaConductorBase:
- type: ../nova-conductor.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Nova Conductor role.
- value:
- service_name: nova_conductor
- monitoring_subscription: {get_attr: [NovaConductorBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NovaConductorBase, role_data, logging_source]}
- logging_groups: {get_attr: [NovaConductorBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NovaConductorBase, role_data, config_settings]
- - nova::conductor::manage_service: false
- nova::conductor::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::nova::conductor
diff --git a/puppet/services/pacemaker/nova-consoleauth.yaml b/puppet/services/pacemaker/nova-consoleauth.yaml
deleted file mode 100644
index 5d51eb47..00000000
--- a/puppet/services/pacemaker/nova-consoleauth.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Nova Consoleauth service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NovaConsoleauthBase:
- type: ../nova-consoleauth.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Nova Consoleauth role.
- value:
- service_name: nova_consoleauth
- monitoring_subscription: {get_attr: [NovaConsoleauthBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NovaConsoleauthBase, role_data, logging_source]}
- logging_groups: {get_attr: [NovaConsoleauthBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NovaConsoleauthBase, role_data, config_settings]
- - nova::consoleauth::manage_service: false
- nova::consoleauth::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::nova::consoleauth
diff --git a/puppet/services/pacemaker/nova-scheduler.yaml b/puppet/services/pacemaker/nova-scheduler.yaml
deleted file mode 100644
index 8828ee11..00000000
--- a/puppet/services/pacemaker/nova-scheduler.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Nova Scheduler service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NovaSchedulerBase:
- type: ../nova-scheduler.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Nova Scheduler role.
- value:
- service_name: nova_scheduler
- monitoring_subscription: {get_attr: [NovaSchedulerBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NovaSchedulerBase, role_data, logging_source]}
- logging_groups: {get_attr: [NovaSchedulerBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NovaSchedulerBase, role_data, config_settings]
- - nova::scheduler::manage_service: false
- nova::scheduler::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::nova::scheduler
diff --git a/puppet/services/pacemaker/nova-vnc-proxy.yaml b/puppet/services/pacemaker/nova-vnc-proxy.yaml
deleted file mode 100644
index ebe84a03..00000000
--- a/puppet/services/pacemaker/nova-vnc-proxy.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Nova Vncproxy service with Pacemaker configured with Puppet.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- NovaVncproxyBase:
- type: ../nova-vnc-proxy.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Nova Vncproxy role.
- value:
- service_name: nova_vnc_proxy
- monitoring_subscription: {get_attr: [NovaVncproxyBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [NovaVncproxyBase, role_data, logging_source]}
- logging_groups: {get_attr: [NovaVncproxyBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [NovaVncproxyBase, role_data, config_settings]
- - nova::vncproxy::manage_service: false
- nova::vncproxy::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::nova::vncproxy
diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml
index f3fa2d28..03c2c83f 100644
--- a/puppet/services/pacemaker/rabbitmq.yaml
+++ b/puppet/services/pacemaker/rabbitmq.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
RabbitMQ service with Pacemaker configured with Puppet
diff --git a/puppet/services/pacemaker/sahara-api.yaml b/puppet/services/pacemaker/sahara-api.yaml
deleted file mode 100644
index 3dfb7d94..00000000
--- a/puppet/services/pacemaker/sahara-api.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Sahara API service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- SaharaApiBase:
- type: ../sahara-api.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Sahara API role.
- value:
- service_name: sahara_api
- monitoring_subscription: {get_attr: [SaharaApiBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [SaharaApiBase, role_data, logging_source]}
- logging_groups: {get_attr: [SaharaApiBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [SaharaApiBase, role_data, config_settings]
- - sahara::service::api::manage_service: false
- sahara::service::api::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::sahara::api
diff --git a/puppet/services/pacemaker/sahara-engine.yaml b/puppet/services/pacemaker/sahara-engine.yaml
deleted file mode 100644
index a06d11b3..00000000
--- a/puppet/services/pacemaker/sahara-engine.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-heat_template_version: 2016-04-08
-
-description: >
- OpenStack Sahara Engine service with Pacemaker configured with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- SaharaEngineBase:
- type: ../sahara-engine.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
-outputs:
- role_data:
- description: Role data for the Sahara Engine role.
- value:
- service_name: sahara_engine
- monitoring_subscription: {get_attr: [SaharaEngineBase, role_data, monitoring_subscription]}
- logging_source: {get_attr: [SaharaEngineBase, role_data, logging_source]}
- logging_groups: {get_attr: [SaharaEngineBase, role_data, logging_groups]}
- config_settings:
- map_merge:
- - get_attr: [SaharaEngineBase, role_data, config_settings]
- - sahara::service::engine::manage_service: false
- sahara::service::engine::enabled: false
- step_config: |
- include ::tripleo::profile::pacemaker::sahara::engine
diff --git a/puppet/services/pacemaker_remote.yaml b/puppet/services/pacemaker_remote.yaml
new file mode 100644
index 00000000..daee43e6
--- /dev/null
+++ b/puppet/services/pacemaker_remote.yaml
@@ -0,0 +1,57 @@
+heat_template_version: ocata
+
+description: >
+ Pacemaker remote service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ PacemakerRemoteAuthkey:
+ type: string
+ description: The authkey for the pacemaker remote service.
+ hidden: true
+ default: ''
+ MonitoringSubscriptionPacemakerRemote:
+ default: 'overcloud-pacemaker_remote'
+ type: string
+ PacemakerRemoteLoggingSource:
+ type: json
+ default:
+ tag: system.pacemaker_remote
+ path: /var/log/pacemaker.log
+ format: >-
+ /^(?<time>[^ ]*\s*[^ ]* [^ ]*)
+ \[(?<pid>[^ ]*)\]
+ (?<host>[^ ]*)
+ (?<message>.*)$/
+
+outputs:
+ role_data:
+ description: Role data for the Pacemaker remote role.
+ value:
+ service_name: pacemaker_remote
+ monitoring_subscription: {get_param: MonitoringSubscriptionPacemakerRemote}
+ logging_groups:
+ - haclient
+ logging_source: {get_param: PacemakerRemoteLoggingSource}
+ config_settings:
+ tripleo.pacemaker_remote.firewall_rules:
+ '130 pacemaker_remote tcp':
+ proto: 'tcp'
+ dport:
+ - 3121
+ tripleo::profile::base::pacemaker_remote::remote_authkey: {get_param: PacemakerRemoteAuthkey}
+ step_config: |
+ include ::tripleo::profile::base::pacemaker_remote
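Not shown in this diff: wiring the new template into a deployment. A minimal sketch of an environment file that maps the service into a role and supplies the authkey; the OS::TripleO::Services::PacemakerRemote registry key and the key value below are assumptions for illustration only.

    # hypothetical environment file
    resource_registry:
      OS::TripleO::Services::PacemakerRemote: ../puppet/services/pacemaker_remote.yaml
    parameter_defaults:
      # any sufficiently long random string; normally generated by the deployment tooling
      PacemakerRemoteAuthkey: 'cf63aea8c4ff40eb93fcd424e82d7217'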
diff --git a/puppet/services/panko-api.yaml b/puppet/services/panko-api.yaml
index a8bd5e8a..4b74ad45 100644
--- a/puppet/services/panko-api.yaml
+++ b/puppet/services/panko-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Panko API service configured with Puppet
@@ -82,3 +82,5 @@ outputs:
get_attr: [PankoBase, role_data, service_config_settings]
step_config: |
include tripleo::profile::base::panko::api
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
diff --git a/puppet/services/panko-base.yaml b/puppet/services/panko-base.yaml
index 32754a55..998e64ee 100644
--- a/puppet/services/panko-base.yaml
+++ b/puppet/services/panko-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Panko service configured with Puppet
@@ -37,7 +37,6 @@ outputs:
value:
service_name: panko_base
config_settings:
- panko_redis_password: {get_param: RedisPassword}
panko::db::database_connection:
list_join:
- ''
@@ -47,12 +46,13 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/panko'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
panko::debug: {get_param: Debug}
panko::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::keystone::authtoken::project_name: 'service'
panko::keystone::authtoken::password: {get_param: PankoPassword}
panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- panko::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ panko::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::auth::auth_password: {get_param: PankoPassword}
panko::auth::auth_region: 'regionOne'
panko::auth::auth_tenant_name: 'service'
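For reference, with the added query suffix the list_join above renders roughly as follows; the protocol, host and password are assumed example values, not taken from this change.

    # hypothetical rendered hieradata
    panko::db::database_connection: mysql+pymysql://panko:secret@192.0.2.10/panko?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo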
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index b77e0a91..2c4ccbc9 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
RabbitMQ service configured with Puppet
@@ -69,12 +69,16 @@ outputs:
rabbitmq::delete_guest_user: false
rabbitmq::wipe_db_on_cookie_change: true
rabbitmq::port: '5672'
+ rabbitmq::package_provider: yum
rabbitmq::package_source: undef
rabbitmq::repos_ensure: false
rabbitmq::tcp_keepalive: true
rabbitmq_environment:
+ NODE_PORT: ''
+ NODE_IP_ADDRESS: ''
RABBITMQ_NODENAME: "rabbit@%{::hostname}"
RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
+ 'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}"
rabbitmq_kernel_variables:
inet_dist_listen_min: '25672'
inet_dist_listen_max: '25672'
@@ -95,7 +99,7 @@ outputs:
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- rabbitmq::node_ip_address: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
step_config: |
include ::tripleo::profile::base::rabbitmq
@@ -104,6 +108,6 @@ outputs:
tags: step2
service: name=rabbitmq-server state=stopped
- name: Start rabbitmq service
- tags: step6
+ tags: step4
service: name=rabbitmq-server state=started
diff --git a/puppet/services/sahara-api.yaml b/puppet/services/sahara-api.yaml
index 54e63df4..8573ea81 100644
--- a/puppet/services/sahara-api.yaml
+++ b/puppet/services/sahara-api.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Sahara API service configured with Puppet
@@ -90,3 +90,7 @@ outputs:
sahara::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ upgrade_tasks:
+ - name: Stop sahara_api service
+ tags: step2
+ service: name=openstack-sahara-api state=stopped
diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml
index 4072a150..224989be 100644
--- a/puppet/services/sahara-base.yaml
+++ b/puppet/services/sahara-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Sahara base service. Shared for all Sahara services.
@@ -64,6 +64,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/sahara'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
sahara::rabbit_password: {get_param: RabbitPassword}
sahara::rabbit_user: {get_param: RabbitUserName}
sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
@@ -71,7 +72,7 @@ outputs:
sahara::debug: {get_param: Debug}
sahara::admin_password: {get_param: SaharaPassword}
sahara::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- sahara::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ sahara::identity_uri: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
sahara::use_neutron: true
sahara::plugins: {get_param: SaharaPlugins}
sahara::rpc_backend: rabbit
diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml
index 287c1c05..987fe25b 100644
--- a/puppet/services/sahara-engine.yaml
+++ b/puppet/services/sahara-engine.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Sahara Engine service configured with Puppet
@@ -49,3 +49,10 @@ outputs:
- get_attr: [SaharaBase, role_data, config_settings]
step_config: |
include ::tripleo::profile::base::sahara::engine
+ upgrade_tasks:
+ - name: Stop sahara_engine service
+ tags: step2
+ service: name=openstack-sahara-engine state=stopped
+ - name: Sync sahara_engine DB
+ tags: step5
+ command: sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
index 13df5bbe..a2286d16 100644
--- a/puppet/services/services.yaml
+++ b/puppet/services/services.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
Utility stack to convert an array of services into a set of combined
@@ -42,16 +42,17 @@ resources:
LoggingConfiguration:
type: OS::TripleO::LoggingConfiguration
+ ServiceServerMetadataHook:
+ type: OS::TripleO::ServiceServerMetadataHook
+ properties:
+ RoleData: {get_attr: [ServiceChain, role_data]}
+
outputs:
role_data:
description: Combined Role data for this set of services.
value:
service_names:
- # Filter any null/None service_names which may be present due to mapping
- # of services to OS::Heat::None
- yaql:
- expression: list($.data.s_names.where($ != null))
- data: {s_names: {get_attr: [ServiceChain, role_data, service_name]}}
+ {get_attr: [ServiceChain, role_data, service_name]}
monitoring_subscriptions:
yaql:
expression: list($.data.role_data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
@@ -107,9 +108,15 @@ outputs:
yaql:
expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
data: {role_data: {get_attr: [ServiceChain, role_data]}}
- step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]}
+ step_config: {get_attr: [ServiceChain, role_data, step_config]}
upgrade_tasks:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g yum update for all services
expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
+ upgrade_batch_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
+ expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+ service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
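The new upgrade_batch_tasks output aggregates per-service batch tasks the same way upgrade_tasks does, with identical tasks filtered out by distinct(). As a rough sketch, a service template could expose something like the following; the task itself is hypothetical and only mirrors the tag/module pattern used by the upgrade_tasks elsewhere in this series.

    outputs:
      role_data:
        value:
          upgrade_batch_tasks:
            # hypothetical rolling task, applied in batches across nodes
            - name: Update all packages in batches
              tags: step3
              yum: name=* state=latest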
diff --git a/puppet/services/snmp.yaml b/puppet/services/snmp.yaml
index 4d01632d..fd6ed818 100644
--- a/puppet/services/snmp.yaml
+++ b/puppet/services/snmp.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
SNMP client configured with Puppet, to facilitate Ceilometer Hardware
@@ -43,3 +43,7 @@ outputs:
proto: 'udp'
step_config: |
include ::tripleo::profile::base::snmp
+ upgrade_tasks:
+ - name: Stop snmp service
+ tags: step2
+ service: name=snmpd state=stopped
diff --git a/puppet/services/neutron-compute-plugin-opencontrail.yaml b/puppet/services/sshd.yaml
index 9f2fd13c..41e144a0 100644
--- a/puppet/services/neutron-compute-plugin-opencontrail.yaml
+++ b/puppet/services/sshd.yaml
@@ -1,7 +1,7 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
- OpenStack Neutron Compute OpenContrail plugin
+ Configure sshd_config
parameters:
ServiceNetMap:
@@ -18,12 +18,17 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ BannerText:
+ default: ''
+ description: Configures Banner text in sshd_config
+ type: string
outputs:
role_data:
- description: Role data for the Neutron Compute OpenContrail plugin
+ description: Role data for the sshd service
value:
- service_name: neutron_compute_plugin_opencontrail
+ service_name: sshd
config_settings:
+ BannerText: {get_param: BannerText}
step_config: |
- include ::tripleo::profile::base::neutron::opencontrail::vrouter
+ include ::tripleo::profile::base::sshd
diff --git a/puppet/services/swift-base.yaml b/puppet/services/swift-base.yaml
index 741adb4d..6046d5e8 100644
--- a/puppet/services/swift-base.yaml
+++ b/puppet/services/swift-base.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Swift Proxy service configured with Puppet
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index ba184ab0..526fa888 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Swift Proxy service configured with Puppet
@@ -49,6 +49,24 @@ parameters:
default: guest
description: The username for RabbitMQ
type: string
+ SwiftCeilometerPipelineEnabled:
+ description: Set to False to disable the swift proxy ceilometer pipeline.
+ default: True
+ type: boolean
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+
+conditions:
+
+ ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, True]}
resources:
SwiftBase:
@@ -69,7 +87,7 @@ outputs:
- get_attr: [SwiftBase, role_data, config_settings]
- swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- swift::proxy::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ swift::proxy::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
swift::proxy::authtoken::password: {get_param: SwiftPassword}
swift::proxy::authtoken::project_name: 'service'
swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
@@ -78,6 +96,9 @@ outputs:
swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
swift::proxy::ceilometer::nonblocking_notify: true
+ tripleo::profile::base::swift::proxy::rabbit_port: {get_param: RabbitClientPort}
+ tripleo::profile::base::swift::proxy::ceilometer_messaging_use_ssl: {get_param: RabbitClientUseSSL}
+ tripleo::profile::base::swift::proxy::ceilometer_enabled: {get_param: SwiftCeilometerPipelineEnabled}
tripleo.swift_proxy.firewall_rules:
'122 swift proxy':
dport:
@@ -89,26 +110,34 @@ outputs:
- ResellerAdmin
swift::proxy::versioned_writes::allow_versioned_writes: true
swift::proxy::pipeline:
- - 'catch_errors'
- - 'healthcheck'
- - 'proxy-logging'
- - 'cache'
- - 'ratelimit'
- - 'bulk'
- - 'tempurl'
- - 'formpost'
- - 'authtoken'
- - 'keystone'
- - 'staticweb'
- - 'copy'
- - 'container-quotas'
- - 'account-quotas'
- - 'slo'
- - 'dlo'
- - 'versioned_writes'
- - 'ceilometer'
- - 'proxy-logging'
- - 'proxy-server'
+ yaql:
+ expression: $.data.pipeline.where($ != '')
+ data:
+ pipeline:
+ - 'catch_errors'
+ - 'healthcheck'
+ - 'proxy-logging'
+ - 'cache'
+ - 'ratelimit'
+ - 'bulk'
+ - 'tempurl'
+ - 'formpost'
+ - 'authtoken'
+ - 'keystone'
+ - 'staticweb'
+ - 'copy'
+ - 'container_quotas'
+ - 'account_quotas'
+ - 'slo'
+ - 'dlo'
+ - 'versioned_writes'
+ -
+ if:
+ - ceilometer_pipeline_enabled
+ - 'ceilometer'
+ - ''
+ - 'proxy-logging'
+ - 'proxy-server'
swift::proxy::account_autocreate: true
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
@@ -135,3 +164,7 @@ outputs:
- admin
- swiftoperator
- ResellerAdmin
+ upgrade_tasks:
+ - name: Stop swift_proxy service
+ tags: step2
+ service: name=openstack-swift-proxy state=stopped
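A minimal sketch of how an operator might use the new parameter to drop the 'ceilometer' element from the proxy pipeline via the ceilometer_pipeline_enabled condition; the file name and placement of this snippet are up to the deployment.

    parameter_defaults:
      SwiftCeilometerPipelineEnabled: false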
diff --git a/puppet/services/swift-ringbuilder.yaml b/puppet/services/swift-ringbuilder.yaml
index 5c70b6ab..2e3c818f 100644
--- a/puppet/services/swift-ringbuilder.yaml
+++ b/puppet/services/swift-ringbuilder.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: >
OpenStack Swift Ringbuilder
@@ -43,6 +43,16 @@ parameters:
description: 'Use a local directory for Swift storage services when building rings'
type: boolean
+conditions:
+ swift_use_local_dir:
+ and:
+ - equals:
+ - get_param: SwiftUseLocalDir
+ - true
+ - equals:
+ - get_param: SwiftRawDisks
+ - {}
+
outputs:
role_data:
description: Role data for Swift Ringbuilder configuration.
@@ -59,7 +69,7 @@ outputs:
expression: $.data.raw_disk_lists.flatten()
data:
raw_disk_lists:
- - {if: [{get_param: SwiftUseLocalDir}, [':%PORT%/d1'], []]}
+ - {if: [swift_use_local_dir, [':%PORT%/d1'], []]}
- repeat:
template: ':%PORT%/DEVICE'
for_each:
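With the new swift_use_local_dir condition, the ':%PORT%/d1' local-directory device is only added when SwiftUseLocalDir is true and SwiftRawDisks is empty. A sketch of an override that switches to raw disks instead; the exact SwiftRawDisks value format (device names mapped to empty dicts) is an assumption for illustration.

    parameter_defaults:
      SwiftUseLocalDir: true   # ignored once raw disks are declared
      SwiftRawDisks:
        sdb: {}
        sdc: {}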
diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml
index cffe78f5..247b23ff 100644
--- a/puppet/services/swift-storage.yaml
+++ b/puppet/services/swift-storage.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
OpenStack Swift Storage service configured with Puppet
@@ -56,6 +56,17 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+conditions:
+ swift_mount_check:
+ or:
+ - equals:
+ - get_param: SwiftMountCheck
+ - true
+ - not:
+ equals:
+ - get_param: SwiftRawDisks
+ - {}
+
outputs:
role_data:
description: Role data for the Swift Proxy role.
@@ -65,7 +76,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [SwiftBase, role_data, config_settings]
- - swift::storage::all::mount_check: {get_param: SwiftMountCheck}
+ - swift::storage::all::mount_check: {if: [swift_mount_check, true, false]}
tripleo::profile::base::swift::storage::enable_swift_storage: {get_param: ControllerEnableSwiftStorage}
tripleo.swift_storage.firewall_rules:
'123 swift storage':
@@ -90,3 +101,20 @@ outputs:
swift::storage::all::storage_local_net_ip: {get_param: [ServiceNetMap, SwiftStorageNetwork]}
step_config: |
include ::tripleo::profile::base::swift::storage
+ upgrade_tasks:
+ - name: Stop swift storage services
+ tags: step2
+ service: name={{ item }} state=stopped
+ with_items:
+ - openstack-swift-account-auditor
+ - openstack-swift-account-reaper
+ - openstack-swift-account-replicator
+ - openstack-swift-account
+ - openstack-swift-container-auditor
+ - openstack-swift-container-replicator
+ - openstack-swift-container-updater
+ - openstack-swift-container
+ - openstack-swift-object-auditor
+ - openstack-swift-object-replicator
+ - openstack-swift-object-updater
+ - openstack-swift-object
diff --git a/puppet/services/tacker.yaml b/puppet/services/tacker.yaml
new file mode 100644
index 00000000..6ceb9f19
--- /dev/null
+++ b/puppet/services/tacker.yaml
@@ -0,0 +1,97 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Tacker service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ TackerPassword:
+ description: The password for the tacker service account.
+ type: string
+ hidden: true
+ Debug:
+ type: string
+ default: ''
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+
+outputs:
+ role_data:
+ description: Role data for the Tacker role.
+ value:
+ service_name: tacker
+ config_settings:
+ tacker_password: {get_param: TackerPassword}
+ tacker::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://tacker:'
+ - {get_param: TackerPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/tacker'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+
+ tacker::keystone::auth::tenant: 'service'
+ tacker::keystone::auth::password: {get_param: TackerPassword}
+ tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ tacker::debug: {get_param: Debug}
+ tacker::rpc_backend: rabbit
+ tacker::rabbit_userid: {get_param: RabbitUserName}
+ tacker::rabbit_password: {get_param: RabbitPassword}
+ tacker::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ tacker::rabbit_port: {get_param: RabbitClientPort}
+ tacker::server::bind_host: {get_param: [ServiceNetMap, TackerApiNetwork]}
+
+ tacker::db::mysql::password: {get_param: TackerPassword}
+ tacker::db::mysql::user: tacker
+ tacker::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ tacker::db::mysql::dbname: tacker
+ tacker::db::mysql::allowed_hosts:
+ - '%'
+ - {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+
+
+ step_config: |
+ include ::tripleo::profile::base::tacker
+ upgrade_tasks:
+ - name: "PreUpgrade step0,validation: Check service openstack-tacker-server is running"
+ shell: /usr/bin/systemctl show 'openstack-tacker-server' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
+ - name: Stop tacker service
+ tags: step2
+ service: name=openstack-tacker-server state=stopped
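Not part of the template above: enabling it requires a resource registry mapping. A minimal sketch, assuming the usual OS::TripleO::Services::<Name> key convention; the key name and the password value are illustrative only.

    # hypothetical environment file
    resource_registry:
      OS::TripleO::Services::Tacker: ../puppet/services/tacker.yaml
    parameter_defaults:
      TackerPassword: 'example-password'  # placeholder; normally generated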
diff --git a/puppet/services/time/ntp.yaml b/puppet/services/time/ntp.yaml
index eb5237fe..b14d7bcc 100644
--- a/puppet/services/time/ntp.yaml
+++ b/puppet/services/time/ntp.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
NTP service deployment using puppet, this YAML file
@@ -22,8 +22,10 @@ parameters:
via parameter_defaults in the resource registry.
type: json
NtpServer:
- default: []
- description: NTP servers
+ default: ['pool.ntp.org']
+ description: NTP servers list. Defaults to pool.ntp.org in order to
+ have a sane default for Pacemaker deployments when
+ this parameter is not explicitly configured.
type: comma_delimited_list
outputs:
@@ -38,4 +40,4 @@ outputs:
dport: 123
proto: udp
step_config: |
- include ::ntp
+ include ::tripleo::profile::base::time::ntp
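Operators who do not want the new pool.ntp.org default can still override the list; a minimal sketch with example server names.

    parameter_defaults:
      NtpServer:
        - 0.pool.ntp.org
        - 1.pool.ntp.org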
diff --git a/puppet/services/time/timezone.yaml b/puppet/services/time/timezone.yaml
index 384b5191..5d0eeae3 100644
--- a/puppet/services/time/timezone.yaml
+++ b/puppet/services/time/timezone.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Composable Timezone service
diff --git a/puppet/services/tripleo-firewall.yaml b/puppet/services/tripleo-firewall.yaml
index 7eb39905..67e14d9c 100644
--- a/puppet/services/tripleo-firewall.yaml
+++ b/puppet/services/tripleo-firewall.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
TripleO Firewall settings
diff --git a/puppet/services/tripleo-packages.yaml b/puppet/services/tripleo-packages.yaml
index 69912fa5..737be829 100644
--- a/puppet/services/tripleo-packages.yaml
+++ b/puppet/services/tripleo-packages.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
TripleO Package installation settings
@@ -33,6 +33,14 @@ outputs:
step_config: |
include ::tripleo::packages
upgrade_tasks:
+ - name: Check yum for rpm-python present
+ tags: step0
+ yum: "name=rpm-python state=present"
+ register: rpm_python_check
+ - name: Fail when rpm-python wasn't present
+ fail: msg="rpm-python package was not present before this run! Check environment before re-running"
+ when: rpm_python_check.changed != false
+ tags: step0
- name: Update all packages
tags: step3
yum: name=* state=latest
diff --git a/puppet/services/zaqar.yaml b/puppet/services/zaqar.yaml
index 77240c3c..cb860fa8 100644
--- a/puppet/services/zaqar.yaml
+++ b/puppet/services/zaqar.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: ocata
description: >
Openstack Zaqar service. Shared for all Heat services.
@@ -40,7 +40,7 @@ outputs:
config_settings:
zaqar::keystone::authtoken::password: {get_param: ZaqarPassword}
zaqar::keystone::authtoken::project_name: 'service'
- zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
zaqar::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
zaqar::debug: {get_param: Debug}
zaqar::transport::websocket::bind: {get_param: [EndpointMap, ZaqarInternal, host]}
diff --git a/puppet/upgrade_config.yaml b/puppet/upgrade_config.yaml
index c67e10b3..2cfd43f4 100644
--- a/puppet/upgrade_config.yaml
+++ b/puppet/upgrade_config.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
description: 'Upgrade for via ansible by applying a step related tag'
parameters:
@@ -11,6 +11,11 @@ parameters:
type: string
description: Step number of the upgrade
+ SkipUpgradeConfigTags:
+ type: comma_delimited_list
+ description: Ansible tags to skip during upgrade, e.g. validation skips pre-upgrade validations
+ default: []
+
resources:
AnsibleConfig:
@@ -30,11 +35,16 @@ resources:
properties:
group: ansible
options:
+ skip_tags:
+ list_join:
+ - ","
+ - {get_param: SkipUpgradeConfigTags}
tags:
str_replace:
- template: "stepSTEP"
+ template: "common,stepSTEP"
params:
STEP: {get_param: step}
+ modulepath: /usr/share/ansible-modules
inputs:
- name: role
config: {get_attr: [AnsibleConfig, value]}
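Per the SkipUpgradeConfigTags description above, pre-upgrade validations can be skipped by passing the 'validation' tag; a minimal parameter_defaults sketch.

    parameter_defaults:
      SkipUpgradeConfigTags:
        - validation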
diff --git a/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml b/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml
new file mode 100644
index 00000000..f9afb18d
--- /dev/null
+++ b/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml
@@ -0,0 +1,125 @@
+---
+prelude: >
+ 6.0.0 is the final release for Ocata.
+ It's the first release where release notes are added.
+features:
+ - Fujitsu Neutron plugin for FOS support. Users can deploy
+ Neutron with this plugin by using
+ environments/neutron-ml2-fujitsu-fossw.yaml environment file.
+ - Expose InstanceDiscoveryMethod parameter to configure the Ceilometer
+ method used to discover instances running on compute nodes.
+ The default value is 'libvirt_metadata'. Allowed values are 'naive',
+ 'libvirt_metadata' and 'workload_partitioning'.
+ - Make ServiceNetMap support custom network names.
+ Note that operators will still be expected to pass any ServiceNetMap
+ overrides with the "new" network name, e.g. whatever NetName specifies,
+ otherwise environment files could get very confusing.
+ - Nova Placement API support. As this new service is required, deploy it
+ by default in WSGI with Apache, like other API services.
+ - Cinder pass-through iSER backend support.
+ - etcd composable services, used by networking-vpp ML2 driver as the
+ messaging mechanism.
+ - Allow configuring cron parameters for Cinder, Heat, Keystone and Nova
+ crontabs.
+ - Export NovaDefaultFloatingPool parameter to configure the default pool
+ of floating IP addresses available. Defaults to 'public' for backward
+ compatibility.
+ - Bump Heat Templates to 'ocata' version, to match Heat requirements.
+ - Configure OVS agent firewall driver only if NeutronOVSFirewallDriver
+ is set.
+ - Expose RbdDefaultFeatures parameter to configure the default features
+ enabled when creating a block device image.
+ Only applies to format '2' images. Set to '1' for Jewel clients using
+ older Ceph servers.
+ - Cinder HPELeftHandISCSIDriver backend support.
+ - Pacemaker no longer manages Ceilometer, Cinder API,
+ Cinder Scheduler, MongoDB, Glance, Gnocchi, Heat, Apache, Memcached,
+ Neutron, Nova and Sahara.
+ - Ceph MDS service support. The service can be enabled with the
+ environments/services/ceph-mds.yaml environment file.
+ - Expose HeatConvergenceEngine and HeatMaxResourcesPerStack parameters
+ to configure Heat.
+ - Add pre-network hook and example showing config-then-reboot.
+ - Expose LibvirtEnabledPerfEvents parameter in Nova Compute service.
+ Default to an empty array.
+ This is a performance event list which could be used as monitor.
+ - Increase libvirt/qemu.conf max_files to 32768 and max_processes to
+ 131072.
+ - Split OVN northd and ml2 plugin, so we can deploy OVNDBs and Northd
+ services on different nodes.
+ - Add hook to generate metadata from service profiles.
+ This is useful for nova vendordata plugins that can parse said metadata.
+ - Expose EventPipelinePublishers to Ceilometer and set the default to
+ 'notifier://?topic=alarm.all'.
+ - Add Panko service support. This service is not enabled by default. Use
+ environments/services/enable-panko.yaml to include it in your deployment.
+ - Add EC2-API composable service support.
+ - Allow dnsmasq_dns_servers to be configured for Neutron DHCP Agent with a
+ new parameter (NeutronDhcpAgentDnsmasqDnsServers, defaulting to []).
+ - Add support for Ceph RBD mirroring daemon managed by Pacemaker.
+ - Add deployed server bootstrap for RHEL.
+ - Configure VNC Server listen address on internal_api network by default.
+ - Support for Cinder Dell EMC PS Series.
+ - Support for Cinder Dell EMC Storage Center.
+ - Support for Octavia composable services for LBaaS with Neutron.
+ - Support for Collectd composable services for performance monitoring.
+ - Support for Tacker composable service for VNF management.
+upgrade:
+ - Update OpenDaylight deployment to use networking-odl v2 as a mechanism
+ driver.
+ - Update Contrail composable services.
+deprecations:
+ - Glance Registry service has been removed and Glance API v2 is now deployed
+ by default. Glance API v1 is no longer supported in TripleO.
+ - Remove CeilometerStoreEvents parameter, which has been removed
+ in Ceilometer.
+ - Ceilometer API service is deprecated and will be removed in a future
+ release. If you would like to disable it, use the
+ environments/services/disable-ceilometer-api.yaml environment file.
+ - Removes deprecated OpenDaylight L2 only deployments.
+ Deploying ODL without L3 DVR is no longer supported.
+security:
+ - Enable management of 'DISALLOW_IFRAME_EMBED' in Horizon configuration to
+ prevent the dashboard from being embedded within an iframe and exposed to a
+ Cross-Frame Scripting (XFS) vulnerability on legacy browsers.
+ - Enable management of 'ENFORCE_PASSWORD_CHECK' in Horizon's configuration to
+ display an Admin Password field on the Change Password form to verify that
+ it is indeed the logged-in admin who wants to change the password.
+ - Enable management of 'DISABLE_PASSWORD_REVEAL' in Horizon, to remove the
+ password reveal option.
+ - Enable 'SECURE_PROXY_SSL_HEADER' option in Horizon's configuration to take
+ the X-Forwarded-Proto header into account when forming URLs.
+ - Enable management of ENFORCE_PASSWORD_CHECK value. By setting
+ 'ENFORCE_PASSWORD_CHECK' to 'True' within Horizon's local_settings.py, it
+ displays an 'Admin Password' field on the 'Change Password' form to verify
+ that it is the logged-in admin who wants to perform the password change.
+ - Enable management of Horizon's Password Validation. Enables injection of an
+ operator's own password validation regex via a Heat template.
+ - Enable management of '/etc/issue Banner' whereby an operator can populate
+ their own Banner warning text to be displayed upon terminal login.
+ - Enable management of auditd system. '/etc/audit/audit.rules' can now be
+ populated by means of a heat template.
+fixes:
+ - Fixes `bug 1645898
+ <https://bugs.launchpad.net/tripleo/+bug/1645898>`__ so epmd is bound to
+ the right address, where RabbitMQ is also listening.
+ - Fixes `bug 1652184
+ <https://bugs.launchpad.net/tripleo/+bug/1652184>`__ so swap partitions
+ can be handled from an environment file thanks to AllNodesExtraConfig.
+ - Add retry to RHEL registration, useful when network outages occur during
+ registration.
+ - Fixes `bug 1651476
+ <https://bugs.launchpad.net/tripleo/+bug/1651476>`__ so firewall rules
+ are created for the OpenDaylight API service.
+ - Fixes `bug 1643487
+ <https://bugs.launchpad.net/tripleo/+bug/1643487>`__ to prevent source
+ address from binding to a VIP for database connection.
+ - Fixes `bug 1649836
+ <https://bugs.launchpad.net/tripleo/+bug/1649836>`__ to configure
+ DPDK options to isolate PMD cores and ovs process cores.
+ - Fixes `bug 1662344
+ <https://bugs.launchpad.net/tripleo/+bug/1662344>`__ by no longer
+ setting bind_address on the Nova DB URI.
+ This reverts the changes in https://review.openstack.org/414629 for nova as
+ they are incompatible with cell_v2.
+ This is a temporary fix for HA while a long-term solution is developed.
diff --git a/releasenotes/notes/add-default-ntp-server-696b8568e09be497.yaml b/releasenotes/notes/add-default-ntp-server-696b8568e09be497.yaml
new file mode 100644
index 00000000..78fdbb59
--- /dev/null
+++ b/releasenotes/notes/add-default-ntp-server-696b8568e09be497.yaml
@@ -0,0 +1,6 @@
+---
+issues:
+ - A default NTP server is now added to the Overcloud
+ for all Pacemaker and non-Pacemaker deployments;
+ this also helps keep time drift under control for
+ Keystone and Ceph.
diff --git a/releasenotes/notes/composable-ha-37e2d7e1f57f5c10.yaml b/releasenotes/notes/composable-ha-37e2d7e1f57f5c10.yaml
new file mode 100644
index 00000000..e560fe95
--- /dev/null
+++ b/releasenotes/notes/composable-ha-37e2d7e1f57f5c10.yaml
@@ -0,0 +1,12 @@
+---
+features:
+ - With the composable HA work landed it is now possible
+ to split pacemaker-managed services like galera, rabbit,
+ redis, haproxy and any A/P resource off to dedicated
+ nodes. These services can be split off to separate nodes
+ either via the normal Pacemaker service (which has a maximum
+ of 16 nodes) or via the newer PacemakerRemote
+ service (but not both on the same node). Note that until
+ https://bugzilla.redhat.com/show_bug.cgi?id=1417936 is fixed,
+ PacemakerRemote should only be used for Cinder A/P resources
+ and Manila A/P resources.
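
As an illustration of splitting such services onto dedicated nodes, a custom
roles_data.yaml entry might look like the following sketch (the service list
is illustrative and assumes the existing composable service names):

    - name: Database
      ServicesDefault:
        - OS::TripleO::Services::CACerts
        - OS::TripleO::Services::Kernel
        - OS::TripleO::Services::Ntp
        - OS::TripleO::Services::Timezone
        - OS::TripleO::Services::Pacemaker
        - OS::TripleO::Services::MySQL
        - OS::TripleO::Services::TripleoPackages
        - OS::TripleO::Services::TripleoFirewall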
diff --git a/releasenotes/notes/composable-upgrades-d9ec7c634365e8e0.yaml b/releasenotes/notes/composable-upgrades-d9ec7c634365e8e0.yaml
new file mode 100644
index 00000000..55062b04
--- /dev/null
+++ b/releasenotes/notes/composable-upgrades-d9ec7c634365e8e0.yaml
@@ -0,0 +1,14 @@
+---
+features:
+ - |
+ Composable service plugins now support two additional sections,
+ upgrade_tasks and upgrade_batch_tasks. These can be used by service
+ template authors to define the required upgrade behavior as ansible tasks,
+ both for upgrades that require downtime and for rolling upgrades.
+ See puppet/services/README.rst for more details.
+upgrade:
+ - |
+ Please refer to tripleo-docs for full details on the upgrade workflow
+ required for Newton to Ocata upgrades, as it is possible some steps differ
+ from previous releases:
+ http://docs.openstack.org/developer/tripleo-docs/post_deployment/upgrade.html
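
A minimal sketch of what a service template author might add under role_data;
the service name, commands and step tags are illustrative, and the exact
conventions are documented in puppet/services/README.rst:

    outputs:
      role_data:
        value:
          service_name: example
          upgrade_tasks:
            - name: Stop the example service before a disruptive upgrade
              tags: step1
              service: name=openstack-example state=stopped
          upgrade_batch_tasks:
            - name: Rolling task executed in batches across the role's nodes
              tags: step1
              command: /bin/true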
diff --git a/releasenotes/notes/deployed-servers-fd47f18204cea105.yaml b/releasenotes/notes/deployed-servers-fd47f18204cea105.yaml
new file mode 100644
index 00000000..d05b268c
--- /dev/null
+++ b/releasenotes/notes/deployed-servers-fd47f18204cea105.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - It is now possible to deploy with tripleo-heat-templates using servers
+ that are already provisioned with an operating system and not necessarily
+ provisioned by Nova and Ironic. This feature is enabled by including the
+ environments/deployed-server-environment.yaml environment file. For
+ more information, see
+ http://docs.openstack.org/developer/tripleo-docs/advanced_deployment/deployed_server.html
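
The feature is driven by that environment file; a hypothetical sketch of the
kind of resource_registry override it provides (the mapping target is assumed
for illustration, not taken from this note):

    resource_registry:
      OS::TripleO::Server: ../deployed-server/deployed-server.yaml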
diff --git a/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml b/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml
new file mode 100644
index 00000000..edcc1250
--- /dev/null
+++ b/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml
@@ -0,0 +1,5 @@
+---
+deprecations:
+ - The environments/puppet-pacemaker.yaml file is now deprecated and the HA
+ deployment is now the default. In order to get the non-HA deployment use
+ environments/nonha-arch.yaml explicitly.
diff --git a/releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml b/releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml
new file mode 100644
index 00000000..72601f9e
--- /dev/null
+++ b/releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ New parameter "IronicCleaningNetwork" can be used to override the name
+ or UUID of the **overcloud** network Ironic uses for cleaning.
+fixes:
+ - |
+ A default value is now provided for the Ironic ``cleaning_network``
+ configuration option. Leaving it unset on start-up has been deprecated
+ since Newton and will result in a failure in the near future.
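
For example, an environment file could override the new parameter (the network
name used here is illustrative):

    parameter_defaults:
      IronicCleaningNetwork: provisioning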
diff --git a/releasenotes/notes/keystone_internal-53cc7b24ebdd9df4.yaml b/releasenotes/notes/keystone_internal-53cc7b24ebdd9df4.yaml
new file mode 100644
index 00000000..1f41073b
--- /dev/null
+++ b/releasenotes/notes/keystone_internal-53cc7b24ebdd9df4.yaml
@@ -0,0 +1,9 @@
+---
+other:
+ - |
+ Use Keystone internal endpoint instead of admin for services.
+ The admin endpoint is listening on the ctlplane network by default;
+ services should ideally be using the internal api network for this kind
+ of traffic, as the ctlplane network is mostly for provisioning. On the
+ other hand, the admin endpoint shouldn't be as relevant with services
+ switching to keystone v3.
diff --git a/releasenotes/notes/memcached-max-memory-ef6834d17953fca6.yaml b/releasenotes/notes/memcached-max-memory-ef6834d17953fca6.yaml
new file mode 100644
index 00000000..c14cefa0
--- /dev/null
+++ b/releasenotes/notes/memcached-max-memory-ef6834d17953fca6.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Memcached max memory configuration is now exposed via the
+ MemcachedMaxMemory parameter.
+upgrade:
+ - |
+ Reduce the default memory configuration for memcached from 95% to 50%.
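
Operators who need the previous behaviour can raise the limit again from an
environment file; a minimal sketch, assuming the parameter accepts a
percentage value as the default does:

    parameter_defaults:
      MemcachedMaxMemory: '95%'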
diff --git a/releasenotes/notes/octavia-service-integration-03bd3eb6cfe1efaf.yaml b/releasenotes/notes/octavia-service-integration-03bd3eb6cfe1efaf.yaml
new file mode 100644
index 00000000..bd8d3562
--- /dev/null
+++ b/releasenotes/notes/octavia-service-integration-03bd3eb6cfe1efaf.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Added initial support for deploying the Octavia services in the overcloud.
diff --git a/releasenotes/notes/puppet-auditd-6504295e8c6c7a3b.yaml b/releasenotes/notes/puppet-auditd-6504295e8c6c7a3b.yaml
new file mode 100644
index 00000000..1949e4fe
--- /dev/null
+++ b/releasenotes/notes/puppet-auditd-6504295e8c6c7a3b.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+ Adds the ability to manage auditd.service and supply audit.rules via
+ tripleo heat templates. This in turn enforces an audit log of system
+ events such as system time changes, modifications to Discretionary Access
+ Controls, and failed login attempts.
+
+
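
A hedged sketch of how rules might be supplied from an environment file; the
AuditdRules parameter name and rule structure are assumptions for
illustration, not taken from this note:

    parameter_defaults:
      AuditdRules:
        'Record attempts to alter time through adjtimex':
          content: '-a always,exit -F arch=b64 -S adjtimex -k audit_time_rules'
          order: 1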
diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/releasenotes/source/_static/.placeholder
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
new file mode 100644
index 00000000..8da995b0
--- /dev/null
+++ b/releasenotes/source/conf.py
@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'oslosphinx',
+ 'reno.sphinxext',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'tripleo-heat-templates Release Notes'
+copyright = u'2017, TripleO Developers'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = '6.0.0.0b3'
+# The short X.Y version.
+version = '6.0.0'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'tripleo-heat-templatesReleaseNotesdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ ('index', 'tripleo-heat-templatesReleaseNotes.tex', u'tripleo-heat-templates Release Notes Documentation',
+ u'2016, TripleO Developers', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'tripleo-heat-templatesreleasenotes', u'tripleo-heat-templates Release Notes Documentation',
+ [u'2016, TripleO Developers'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'tripleo-heat-templatesReleaseNotes', u'tripleo-heat-templates Release Notes Documentation',
+ u'2016, TripleO Developers', 'tripleo-heat-templatesReleaseNotes', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+# -- Options for Internationalization output ------------------------------
+locale_dirs = ['locale/']
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
new file mode 100644
index 00000000..43c77709
--- /dev/null
+++ b/releasenotes/source/index.rst
@@ -0,0 +1,19 @@
+================================================
+Welcome to tripleo-heat-templates Release Notes!
+================================================
+
+Contents
+========
+
+.. toctree::
+ :maxdepth: 2
+
+ unreleased
+ ocata
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`search`
diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst
new file mode 100644
index 00000000..ebe62f42
--- /dev/null
+++ b/releasenotes/source/ocata.rst
@@ -0,0 +1,6 @@
+===================================
+ Ocata Series Release Notes
+===================================
+
+.. release-notes::
+ :branch: origin/stable/ocata
diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst
new file mode 100644
index 00000000..2334dd5c
--- /dev/null
+++ b/releasenotes/source/unreleased.rst
@@ -0,0 +1,5 @@
+==============================
+ Current Series Release Notes
+==============================
+
+.. release-notes::
diff --git a/requirements.txt b/requirements.txt
index 9c4a708a..057aa287 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,6 @@
-pbr>=0.5.21,<1.0
-Jinja2>=2.8 # BSD License (3 clause)
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+pbr>=1.8 # Apache-2.0
+Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
+six>=1.9.0 # MIT
diff --git a/roles_data.yaml b/roles_data.yaml
index 81ddf9ca..9e3b0a18 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -14,31 +14,44 @@
# defaults to '%stackname%-{{role.name.lower()}}-%index%'
# sets the default for {{role.name}}HostnameFormat parameter in overcloud.yaml
#
+# disable_constraints: (boolean) optional, whether to disable Nova and Glance
+# constraints for each role specified in the templates.
+#
+# disable_upgrade_deployment: (boolean) optional, whether to disable the
+# ansible upgrade steps for all services deployed on the role. If set
+# to True, the operator will drive the upgrade for this role's nodes.
+#
+# upgrade_batch_size: (number) optional, batch size for upgrades where tasks are
+# specified by services to run in batches vs all nodes at once.
+# This defaults to 1, but larger batches may be specified here.
+#
# ServicesDefault: (list) optional default list of services to be deployed
# on the role, defaults to an empty list. Sets the default for the
# {{role.name}}Services parameter in overcloud.yaml
-- name: Controller
+- name: Controller # the 'primary' role goes first
CountDefault: 1
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CephMds
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::CephRbdMirror
- OS::TripleO::Services::CephRgw
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
- - OS::TripleO::Services::Core
+ - OS::TripleO::Services::Congress
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- - OS::TripleO::Services::GlanceRegistry
- OS::TripleO::Services::HeatApi
- OS::TripleO::Services::HeatApiCfn
- OS::TripleO::Services::HeatApiCloudwatch
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
@@ -54,15 +67,18 @@
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::MongoDb
- OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
- OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::NovaConsoleauth
- OS::TripleO::Services::NovaVncProxy
+ - OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftProxy
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::CeilometerApi
- OS::TripleO::Services::CeilometerCollector
@@ -94,13 +110,26 @@
- OS::TripleO::Services::OpenDaylightOvs
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::Collectd
- OS::TripleO::Services::BarbicanApi
- OS::TripleO::Services::PankoApi
+ - OS::TripleO::Services::Tacker
- OS::TripleO::Services::Zaqar
+ - OS::TripleO::Services::OVNDBs
+ - OS::TripleO::Services::NeutronML2FujitsuCfab
+ - OS::TripleO::Services::NeutronML2FujitsuFossw
+ - OS::TripleO::Services::CinderHPELeftHandISCSI
+ - OS::TripleO::Services::Etcd
+ - OS::TripleO::Services::AuditD
+ - OS::TripleO::Services::OctaviaApi
+ - OS::TripleO::Services::OctaviaHealthManager
+ - OS::TripleO::Services::OctaviaHousekeeping
+ - OS::TripleO::Services::OctaviaWorker
- name: Compute
CountDefault: 1
HostnameFormatDefault: '%stackname%-novacompute-%index%'
+ disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephClient
@@ -108,6 +137,7 @@
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::Kernel
@@ -122,6 +152,8 @@
- OS::TripleO::Services::OpenDaylightOvs
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::AuditD
+ - OS::TripleO::Services::Collectd
- name: BlockStorage
ServicesDefault:
@@ -131,12 +163,16 @@
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::AuditD
+ - OS::TripleO::Services::Collectd
- name: ObjectStorage
+ disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::Kernel
@@ -144,11 +180,14 @@
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::AuditD
+ - OS::TripleO::Services::Collectd
- name: CephStorage
ServicesDefault:
@@ -157,8 +196,11 @@
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::AuditD
+ - OS::TripleO::Services::Collectd
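
Putting the new role-level keys together, a custom role entry might look like
the following sketch (values and service list are illustrative; see the
comments at the top of roles_data.yaml for the semantics of each key):

    - name: CustomStorage
      HostnameFormatDefault: '%stackname%-customstorage-%index%'
      disable_constraints: True
      disable_upgrade_deployment: True
      upgrade_batch_size: 3
      ServicesDefault:
        - OS::TripleO::Services::CACerts
        - OS::TripleO::Services::Kernel
        - OS::TripleO::Services::Ntp
        - OS::TripleO::Services::Snmp
        - OS::TripleO::Services::Timezone
        - OS::TripleO::Services::TripleoPackages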
diff --git a/roles_data_undercloud.yaml b/roles_data_undercloud.yaml
new file mode 100644
index 00000000..2759429c
--- /dev/null
+++ b/roles_data_undercloud.yaml
@@ -0,0 +1,35 @@
+- name: Undercloud # the 'primary' role goes first
+ CountDefault: 1
+ disable_constraints: True
+ ServicesDefault:
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MongoDb
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::Apache
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::MistralEngine
+ - OS::TripleO::Services::MistralApi
+ - OS::TripleO::Services::MistralExecutor
+ - OS::TripleO::Services::IronicApi
+ - OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::NovaIronic
+ - OS::TripleO::Services::Zaqar
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronApi
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::NeutronDhcpAgent
diff --git a/scripts/hosts-config.sh b/scripts/hosts-config.sh
index 4826d615..f456b316 100755
--- a/scripts/hosts-config.sh
+++ b/scripts/hosts-config.sh
@@ -30,17 +30,9 @@ write_entries() {
}
if [ ! -z "$hosts" ]; then
- # cloud-init files are /etc/cloud/templates/hosts.OSNAME.tmpl
- DIST=$(lsb_release -is | tr -s [A-Z] [a-z])
- case $DIST in
- fedora|redhatenterpriseserver)
- name="redhat"
- ;;
- *)
- name="$DIST"
- ;;
- esac
- write_entries "/etc/cloud/templates/hosts.${name}.tmpl" "$hosts"
+ for tmpl in /etc/cloud/templates/hosts.*.tmpl ; do
+ write_entries "$tmpl" "$hosts"
+ done
write_entries "/etc/hosts" "$hosts"
else
echo "No hosts in Heat, nothing written."
diff --git a/setup.py b/setup.py
index 70c2b3f3..782bb21f 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,6 +16,14 @@
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+ import multiprocessing # noqa
+except ImportError:
+ pass
+
setuptools.setup(
- setup_requires=['pbr'],
+ setup_requires=['pbr>=1.8'],
pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
index c3726e8b..1c9e3b42 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1 +1,9 @@
-pyyaml
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+PyYAML>=3.10.0 # MIT
+Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
+six>=1.9.0 # MIT
+sphinx>=1.5.1 # BSD
+oslosphinx>=4.7.0 # Apache-2.0
+reno>=1.8.0 # Apache-2.0
diff --git a/tools/process-templates.py b/tools/process-templates.py
index a15b00e2..1c8c4ba6 100755
--- a/tools/process-templates.py
+++ b/tools/process-templates.py
@@ -14,9 +14,13 @@
import argparse
import jinja2
import os
+import shutil
+import six
import sys
import yaml
+__tht_root_dir = os.path.dirname(os.path.dirname(__file__))
+
def parse_opts(argv):
parser = argparse.ArgumentParser(
@@ -32,6 +36,9 @@ def parse_opts(argv):
action='store_true',
help="""Enable safe mode (do not overwrite files).""",
default=False)
+ parser.add_argument('-o', '--output-dir', metavar='OUTPUT_DIR',
+ help="""Output dir for all the templates""",
+ default='')
opts = parser.parse_args(argv[1:])
return opts
@@ -46,9 +53,14 @@ def _j2_render_to_file(j2_template, j2_data, outfile_name=None,
print('ERROR: path already exists for file: %s' % outfile_name)
sys.exit(1)
+ # Search for templates relative to the current template path first
+ template_base = os.path.dirname(yaml_f)
+ j2_loader = jinja2.loaders.FileSystemLoader([template_base, __tht_root_dir])
+
try:
# Render the j2 template
- template = jinja2.Environment().from_string(j2_template)
+ template = jinja2.Environment(loader=j2_loader).from_string(
+ j2_template)
r_template = template.render(**j2_data)
except jinja2.exceptions.TemplateError as ex:
error_msg = ("Error rendering template %s : %s"
@@ -59,7 +71,7 @@ def _j2_render_to_file(j2_template, j2_data, outfile_name=None,
out_f.write(r_template)
-def process_templates(template_path, role_data_path, overwrite):
+def process_templates(template_path, role_data_path, output_dir, overwrite):
with open(role_data_path) as role_data_file:
role_data = yaml.safe_load(role_data_file)
@@ -68,6 +80,11 @@ def process_templates(template_path, role_data_path, overwrite):
with open(j2_excludes_path) as role_data_file:
j2_excludes = yaml.safe_load(role_data_file)
+ if output_dir and not os.path.isdir(output_dir):
+ if os.path.exists(output_dir):
+ raise RuntimeError('Output dir %s is not a directory' % output_dir)
+ os.mkdir(output_dir)
+
role_names = [r.get('name') for r in role_data]
r_map = {}
for r in role_data:
@@ -77,6 +94,29 @@ def process_templates(template_path, role_data_path, overwrite):
if os.path.isdir(template_path):
for subdir, dirs, files in os.walk(template_path):
+
+ # NOTE(flaper87): Ignore hidden dirs as we don't
+ # generate templates for those.
+ # Note the slice assignment for `dirs` is necessary
+ # because we need to modify the *elements* in the
+ # dirs list rather than the reference to the list.
+ # This way we'll make sure os.walk will iterate over
+ # the shrunk list. os.walk doesn't have an API for
+ # filtering dirs at this point.
+ dirs[:] = [d for d in dirs if not d[0] == '.']
+ files = [f for f in files if not f[0] == '.']
+
+ # NOTE(flaper87): We could have used shutil.copytree
+ # but it requires the dst dir to not be present. This
+ # approach is safer as it doesn't require us to delete
+ # the output_dir in advance and it allows for running
+ # the command multiple times with the same output_dir.
+ out_dir = subdir
+ if output_dir:
+ out_dir = os.path.join(output_dir, subdir)
+ if not os.path.exists(out_dir):
+ os.mkdir(out_dir)
+
for f in files:
file_path = os.path.join(subdir, f)
# We do two templating passes here:
@@ -100,7 +140,7 @@ def process_templates(template_path, role_data_path, overwrite):
[role.lower(),
os.path.basename(f).replace('.role.j2.yaml',
'.yaml')])
- out_f_path = os.path.join(subdir, out_f)
+ out_f_path = os.path.join(out_dir, out_f)
if not (out_f_path in excl_templates):
_j2_render_to_file(template_data, j2_data,
out_f_path, overwrite)
@@ -111,9 +151,12 @@ def process_templates(template_path, role_data_path, overwrite):
with open(file_path) as j2_template:
template_data = j2_template.read()
j2_data = {'roles': role_data}
- out_f = file_path.replace('.j2.yaml', '.yaml')
- _j2_render_to_file(template_data, j2_data, out_f,
+ out_f = os.path.basename(f).replace('.j2.yaml', '.yaml')
+ out_f_path = os.path.join(out_dir, out_f)
+ _j2_render_to_file(template_data, j2_data, out_f_path,
overwrite)
+ elif output_dir:
+ shutil.copy(os.path.join(subdir, f), out_dir)
else:
print('Unexpected argument %s' % template_path)
@@ -122,4 +165,4 @@ opts = parse_opts(sys.argv)
role_data_path = os.path.join(opts.base_path, opts.roles_data)
-process_templates(opts.base_path, role_data_path, (not opts.safe))
+process_templates(opts.base_path, role_data_path, opts.output_dir, (not opts.safe))
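
As a sketch of the per-role pass: a hypothetical template named
example.role.j2.yaml is rendered once per role listed in roles_data.yaml,
producing controller-example.yaml, compute-example.yaml and so on under
--output-dir (or alongside the source when no output dir is given), while
plain *.j2.yaml templates are rendered once with the full roles list. The
{{role}} variable name below is assumed for illustration:

    # example.role.j2.yaml
    heat_template_version: ocata
    description: Resources generated for the {{role}} role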
diff --git a/tools/releasenotes_tox.sh b/tools/releasenotes_tox.sh
new file mode 100755
index 00000000..4fecfd92
--- /dev/null
+++ b/tools/releasenotes_tox.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+rm -rf releasenotes/build
+
+sphinx-build -a -E -W \
+ -d releasenotes/build/doctrees \
+ -b html \
+ releasenotes/source releasenotes/build/html
+BUILD_RESULT=$?
+
+UNCOMMITTED_NOTES=$(git status --porcelain | \
+ awk '$1 == "M" && $2 ~ /releasenotes\/notes/ {print $2}')
+
+if [ "${UNCOMMITTED_NOTES}" ]
+then
+ cat <<EOF
+
+REMINDER: The following changes to release notes have not been committed:
+
+${UNCOMMITTED_NOTES}
+
+While that may be intentional, keep in mind that release notes are built from
+committed changes, not the working directory.
+
+EOF
+fi
+
+exit ${BUILD_RESULT}
diff --git a/tools/tox_install.sh b/tools/tox_install.sh
new file mode 100755
index 00000000..e61b63a8
--- /dev/null
+++ b/tools/tox_install.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+# Client constraint file contains this client version pin that is in conflict
+# with installing the client from source. We should remove the version pin in
+# the constraints file before applying it for from-source installation.
+
+CONSTRAINTS_FILE="$1"
+shift 1
+
+set -e
+
+# NOTE(tonyb): Place this in the tox environment's log dir so it will get
+# published to logs.openstack.org for easy debugging.
+localfile="$VIRTUAL_ENV/log/upper-constraints.txt"
+
+if [[ "$CONSTRAINTS_FILE" != http* ]]; then
+ CONSTRAINTS_FILE="file://$CONSTRAINTS_FILE"
+fi
+# NOTE(tonyb): need to add curl to bindep.txt if the project supports bindep
+curl "$CONSTRAINTS_FILE" --insecure --progress-bar --output "$localfile"
+
+pip install -c"$localfile" openstack-requirements
+
+# This is the main purpose of the script: Allow local installation of
+# the current repo. It is listed in constraints file and thus any
+# install will be constrained and we need to unconstrain it.
+edit-constraints "$localfile" -- "$CLIENT_NAME"
+
+pip install -c"$localfile" -U "$@"
+exit $?
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index 95c7d025..1d0dba02 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -19,11 +19,81 @@ import yaml
required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords']
+envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
+ 'tls-endpoints-public-ip.yaml',
+ 'tls-everywhere-endpoints-dns.yaml']
+ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
+
def exit_usage():
print('Usage %s <yaml file or directory>' % sys.argv[0])
sys.exit(1)
+def get_base_endpoint_map(filename):
+ try:
+ tpl = yaml.load(open(filename).read())
+ return tpl['parameters']['EndpointMap']['default']
+ except Exception:
+ print(traceback.format_exc())
+ return None
+
+
+def get_endpoint_map_from_env(filename):
+ try:
+ tpl = yaml.load(open(filename).read())
+ return {
+ 'file': filename,
+ 'map': tpl['parameter_defaults']['EndpointMap']
+ }
+ except Exception:
+ print(traceback.format_exc())
+ return None
+
+
+def validate_endpoint_map(base_map, env_map):
+ return sorted(base_map.keys()) == sorted(env_map.keys())
+
+
+def validate_mysql_connection(settings):
+ no_op = lambda *args: False
+ error_status = [0]
+
+ def mysql_protocol(items):
+ return items == ['EndpointMap', 'MysqlInternal', 'protocol']
+
+ def client_bind_address(item):
+ return 'read_default_file' in item and \
+ 'read_default_group' in item
+
+ def validate_mysql_uri(key, items):
+ # Only consider a connection if it targets mysql
+ if key.endswith('connection') and \
+ search(items, mysql_protocol, no_op):
+ # Assume the "bind_address" option is one of
+ # the token that made up the uri
+ if not search(items, client_bind_address, no_op):
+ error_status[0] = 1
+ return False
+
+ def search(item, check_item, check_key):
+ if check_item(item):
+ return True
+ elif isinstance(item, list):
+ for i in item:
+ if search(i, check_item, check_key):
+ return True
+ elif isinstance(item, dict):
+ for k in item.keys():
+ if check_key(k, item[k]):
+ return True
+ elif search(item[k], check_item, check_key):
+ return True
+ return False
+
+ search(settings, no_op, validate_mysql_uri)
+ return error_status[0]
+
+
def validate_service(filename, tpl):
if 'outputs' in tpl and 'role_data' in tpl['outputs']:
if 'value' not in tpl['outputs']['role_data']:
@@ -41,6 +111,12 @@ def validate_service(filename, tpl):
print('ERROR: service_name should match file name for service: %s.'
% filename)
return 1
+ # if service connects to mysql, the uri should use option
+ # bind_address to avoid issues with VIP failover
+ if 'config_settings' in role_data and \
+ validate_mysql_connection(role_data['config_settings']):
+ print('ERROR: mysql connection uri should use option bind_address')
+ return 1
if 'parameters' in tpl:
for param in required_params:
if param not in tpl['parameters']:
@@ -56,6 +132,13 @@ def validate(filename):
try:
tpl = yaml.load(open(filename).read())
+ # The template alias version should be used instead of a date; this validation
+ # applies to all templates, not just those in the services folder.
+ if 'heat_template_version' in tpl and not str(tpl['heat_template_version']).isalpha():
+ print('ERROR: heat_template_version needs to be the release alias not a date: %s'
+ % filename)
+ return 1
+
if (filename.startswith('./puppet/services/') and
filename != './puppet/services/services.yaml'):
retval = validate_service(filename, tpl)
@@ -83,6 +166,8 @@ if len(sys.argv) < 2:
path_args = sys.argv[1:]
exit_val = 0
failed_files = []
+base_endpoint_map = None
+env_endpoint_maps = list()
for base_path in path_args:
if os.path.isdir(base_path):
@@ -94,6 +179,12 @@ for base_path in path_args:
if failed:
failed_files.append(file_path)
exit_val |= failed
+ if f == ENDPOINT_MAP_FILE:
+ base_endpoint_map = get_base_endpoint_map(file_path)
+ if f in envs_containing_endpoint_map:
+ env_endpoint_map = get_endpoint_map_from_env(file_path)
+ if env_endpoint_map:
+ env_endpoint_maps.append(env_endpoint_map)
elif os.path.isfile(base_path) and base_path.endswith('.yaml'):
failed = validate(base_path)
if failed:
@@ -103,6 +194,30 @@ for base_path in path_args:
print('Unexpected argument %s' % base_path)
exit_usage()
+if base_endpoint_map and \
+ len(env_endpoint_maps) == len(envs_containing_endpoint_map):
+ for env_endpoint_map in env_endpoint_maps:
+ matches = validate_endpoint_map(base_endpoint_map,
+ env_endpoint_map['map'])
+ if not matches:
+ print("ERROR: %s needs to be updated to match changes in base "
+ "endpoint map" % env_endpoint_map['file'])
+ failed_files.append(env_endpoint_map['file'])
+ exit_val |= 1
+ else:
+ print("%s matches base endpoint map" % env_endpoint_map['file'])
+else:
+ print("ERROR: Can't validate endpoint maps since a file is missing. "
+ "If you meant to delete one of these files you should update this "
+ "tool as well.")
+ if not base_endpoint_map:
+ failed_files.append(ENDPOINT_MAP_FILE)
+ if len(env_endpoint_maps) != len(envs_containing_endpoint_map):
+ matched_files = set(os.path.basename(matched_env_file['file'])
+ for matched_env_file in env_endpoint_maps)
+ failed_files.extend(set(envs_containing_endpoint_map) - matched_files)
+ exit_val |= 1
+
if failed_files:
print('Validation failed on:')
for f in failed_files:
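
As a sketch of what the new MySQL check expects: a service's database URI in
config_settings should reference the MysqlInternal protocol endpoint and carry
the client defaults-file options, so the bind_address setting can come from the
referenced defaults file rather than being baked into the URI. The option path
and password parameter below are illustrative:

    config_settings:
      nova::database_connection:
        list_join:
          - ''
          - - {get_param: [EndpointMap, MysqlInternal, protocol]}
            - '://nova:'
            - {get_param: NovaPassword}
            - '@'
            - {get_param: [EndpointMap, MysqlInternal, host]}
            - '/nova'
            - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'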
diff --git a/tox.ini b/tox.ini
index c8a912b8..3796a546 100644
--- a/tox.ini
+++ b/tox.ini
@@ -13,7 +13,11 @@ commands = {posargs}
[testenv:pep8]
commands =
python ./tools/process-templates.py
+ python ./network/endpoints/build_endpoint_map.py --check
python ./tools/yaml-validate.py .
[testenv:templates]
commands = python ./tools/process-templates.py
+
+[testenv:releasenotes]
+commands = bash -c tools/releasenotes_tox.sh