66 files changed, 4323 insertions(+), 785 deletions(-)
diff --git a/capabilities_map.yaml b/capabilities-map.yaml index 30ee211e..30ee211e 100644 --- a/capabilities_map.yaml +++ b/capabilities-map.yaml diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml index 82572e77..8f9e9627 100644 --- a/docker/compute-post.yaml +++ b/docker/compute-post.yaml @@ -191,7 +191,6 @@ resources: list_join: - '/' - [ {get_param: DockerNamespace}, {get_param: DockerOvsVswitchdImage} ] - container_name: ovs-vswitchd net: host privileged: true restart: always @@ -207,7 +206,6 @@ resources: list_join: - '/' - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchDBImage} ] - container_name: ovsdb-server net: host restart: always volumes: @@ -265,7 +263,6 @@ resources: list_join: - '/' - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ] - container_name: libvirt net: host pid: host privileged: true @@ -299,7 +296,6 @@ resources: list_join: - '/' - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ] - container_name: openvswitch net: host privileged: true restart: always @@ -320,7 +316,6 @@ resources: list_join: - '/' - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ] - container_name: neutronagent net: host pid: host privileged: true @@ -345,7 +340,6 @@ resources: list_join: - '/' - [ {get_param: DockerNamespace}, {get_param: DockerComputeImage} ] - container_name: novacompute net: host privileged: true restart: always diff --git a/docker/firstboot/install_docker_agents.yaml b/docker/firstboot/install_docker_agents.yaml index 348c1755..2858552f 100644 --- a/docker/firstboot/install_docker_agents.yaml +++ b/docker/firstboot/install_docker_agents.yaml @@ -3,7 +3,7 @@ heat_template_version: 2014-10-16 parameters: DockerAgentImage: type: string - default: tripleoupstream/heat-docker-agents + default: heat-docker-agents DockerNamespace: type: string default: kollaglue @@ -26,7 +26,10 @@ resources: config: str_replace: params: - $agent_image: {get_param: DockerAgentImage} + $agent_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerAgentImage} ] $docker_registry: {get_param: DockerNamespace} $docker_namespace_is_registry: {get_param: DockerNamespaceIsRegistry} template: {get_file: ./start_docker_agents.sh} diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh index a0e95d11..963c7eee 100644 --- a/docker/firstboot/start_docker_agents.sh +++ b/docker/firstboot/start_docker_agents.sh @@ -39,14 +39,15 @@ EOF # Local docker registry 1.8 if [ $docker_namespace_is_registry ]; then - /bin/sed -i "s/# INSECURE_REGISTRY='--insecure-registry '/INSECURE_REGISTRY='--insecure-registry $docker_registry'/g" /etc/sysconfig/docker + # if namespace is used with local registry, trim all namespacing + trim_var=$docker_registry + registry_host="${trim_var%%/*}" + /bin/sed -i "s/# INSECURE_REGISTRY='--insecure-registry'/INSECURE_REGISTRY='--insecure-registry $registry_host'/g" /etc/sysconfig/docker fi /sbin/setenforce 0 /sbin/modprobe ebtables -echo nameserver 8.8.8.8 > /etc/resolv.conf - # We need hostname -f to return in a centos container for the puppet hook HOSTNAME=$(hostname) echo "127.0.0.1 $HOSTNAME.localdomain $HOSTNAME" >> /etc/hosts diff --git a/environments/docker.yaml b/environments/docker.yaml index 6376b749..7c6dc407 100644 --- a/environments/docker.yaml +++ b/environments/docker.yaml @@ -8,7 +8,7 @@ parameters: parameter_defaults: # Defaults to 'tripleoupstream'. 
Specify a local docker registry - # Example: 192.168.122.131:8787 + # Example: 192.0.2.1:8787/tripleoupstream DockerNamespace: tripleoupstream # Enable local Docker registry DockerNamespaceIsRegistry: false diff --git a/environments/enable-tls.yaml b/environments/enable-tls.yaml index bc4d1bef..5794c6b4 100644 --- a/environments/enable-tls.yaml +++ b/environments/enable-tls.yaml @@ -33,6 +33,9 @@ parameter_defaults: NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'} NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'} NovaEC2Public: {protocol: 'https', port: '13773', host: 'CLOUDNAME'} + NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'} + NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'} + NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'} SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'} diff --git a/environments/major-upgrade-pacemaker-converge.yaml b/environments/major-upgrade-pacemaker-converge.yaml new file mode 100644 index 00000000..f023cb32 --- /dev/null +++ b/environments/major-upgrade-pacemaker-converge.yaml @@ -0,0 +1,2 @@ +parameter_defaults: + UpgradeLevelNovaCompute: '' diff --git a/environments/major-upgrade-pacemaker.yaml b/environments/major-upgrade-pacemaker.yaml new file mode 100644 index 00000000..61186bb0 --- /dev/null +++ b/environments/major-upgrade-pacemaker.yaml @@ -0,0 +1,11 @@ +parameter_defaults: + UpgradeLevelNovaCompute: liberty + +resource_registry: + OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker.yaml + OS::TripleO::Tasks::PackageUpdate: ../extraconfig/tasks/yum_update_noop.yaml + OS::TripleO::ControllerPostDeployment: OS::Heat::None + OS::TripleO::ComputePostDeployment: OS::Heat::None + OS::TripleO::ObjectStoragePostDeployment: OS::Heat::None + OS::TripleO::BlockStoragePostDeployment: OS::Heat::None + OS::TripleO::CephStoragePostDeployment: OS::Heat::None diff --git a/environments/net-single-nic-linux-bridge-with-vlans.yaml b/environments/net-single-nic-linux-bridge-with-vlans.yaml new file mode 100644 index 00000000..d5f2ed62 --- /dev/null +++ b/environments/net-single-nic-linux-bridge-with-vlans.yaml @@ -0,0 +1,23 @@ +# This template configures each role to use Vlans on a single nic for +# each isolated network. +# This template assumes use of network-isolation.yaml. +# +# FIXME: if/when we add functionality to heatclient to include heat +# environment files we should think about using it here to automatically +# include network-isolation.yaml. +resource_registry: + OS::TripleO::BlockStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml + OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/compute.yaml + OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/controller.yaml + OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/swift-storage.yaml + OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml + +# We use parameter_defaults instead of parameters here because Tuskar munges +# the names of top level and role level parameters with the role name and a +# version. 
Using parameter_defaults makes it such that if the parameter name is +# not defined in the template, we don't get an error. +parameter_defaults: + # This sets 'external_network_bridge' in l3_agent.ini to an empty string + # so that external networks act like provider bridge networks (they + # will plug into br-int instead of br-ex) + NeutronExternalNetworkBridge: "''" diff --git a/environments/network-environment.yaml b/environments/network-environment.yaml new file mode 100644 index 00000000..005310c7 --- /dev/null +++ b/environments/network-environment.yaml @@ -0,0 +1,50 @@ +#This file is an example of an environment file for defining the isolated +#networks and related parameters. +resource_registry: + # Network Interface templates to use (these files must exist) + OS::TripleO::BlockStorage::Net::SoftwareConfig: + ../network/config/single-nic-vlans/cinder-storage.yaml + OS::TripleO::Compute::Net::SoftwareConfig: + ../network/config/single-nic-vlans/compute.yaml + OS::TripleO::Controller::Net::SoftwareConfig: + ../network/config/single-nic-vlans/controller.yaml + OS::TripleO::ObjectStorage::Net::SoftwareConfig: + ../network/config/single-nic-vlans/swift-storage.yaml + OS::TripleO::CephStorage::Net::SoftwareConfig: + ../network/config/single-nic-vlans/ceph-storage.yaml + +parameter_defaults: + # This section is where deployment-specific configuration is done + # Customize the IP subnets to match the local environment + InternalApiNetCidr: 172.17.0.0/24 + StorageNetCidr: 172.18.0.0/24 + StorageMgmtNetCidr: 172.19.0.0/24 + TenantNetCidr: 172.16.0.0/24 + ExternalNetCidr: 10.0.0.0/24 + # CIDR subnet mask length for provisioning network + ControlPlaneSubnetCidr: 24 + # Customize the IP ranges on each network to use for static IPs and VIPs + InternalApiAllocationPools: [{'start': '172.17.0.10', 'end': '172.17.0.200'}] + StorageAllocationPools: [{'start': '172.18.0.10', 'end': '172.18.0.200'}] + StorageMgmtAllocationPools: [{'start': '172.19.0.10', 'end': '172.19.0.200'}] + TenantAllocationPools: [{'start': '172.16.0.10', 'end': '172.16.0.200'}] + # Leave room if the external network is also used for floating IPs + ExternalAllocationPools: [{'start': '10.0.0.10', 'end': '10.0.0.50'}] + # Gateway router for the external network + ExternalInterfaceDefaultRoute: 10.0.0.1 + # Gateway router for the provisioning network (or Undercloud IP) + ControlPlaneDefaultRoute: 192.0.2.254 + # Generally the IP of the Undercloud + EC2MetadataIp: 192.0.2.1 + # Define the DNS servers (maximum 2) for the overcloud nodes + DnsServers: ["8.8.8.8","8.8.4.4"] + # Customize the VLAN IDs to match the local environment + InternalApiNetworkVlanID: 10 + StorageNetworkVlanID: 20 + StorageMgmtNetworkVlanID: 30 + TenantNetworkVlanID: 40 + ExternalNetworkVlanID: 50 + # Set to empty string to enable multiple external networks or VLANs + NeutronExternalNetworkBridge: "''" + # Customize bonding options, e.g. 
"mode=4 lacp_rate=1 updelay=1000 miimon=100" + BondInterfaceOvsOptions: "mode=active-backup" diff --git a/environments/neutron-ml2-bigswitch.yaml b/environments/neutron-ml2-bigswitch.yaml index 69c91326..750d3c4e 100644 --- a/environments/neutron-ml2-bigswitch.yaml +++ b/environments/neutron-ml2-bigswitch.yaml @@ -2,11 +2,13 @@ # extensions, configured via puppet resource_registry: OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml + OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml parameter_defaults: # Required to fill in: NeutronBigswitchRestproxyServers: NeutronBigswitchRestproxyServerAuth: + NeutronMechanismDrivers: bsn_ml2 # Optional: # NeutronBigswitchRestproxyAutoSyncOnFailure: @@ -14,4 +16,6 @@ parameter_defaults: # NeutronBigswitchRestproxyNeutronId: # NeutronBigswitchRestproxyServerSsl: # NeutronBigswitchRestproxySslCertDirectory: + # NeutronBigswitchAgentEnabled: + # NeutronBigswitchLLDPEnabled: diff --git a/environments/neutron-ml2-cisco-nexus-ucsm.yaml b/environments/neutron-ml2-cisco-nexus-ucsm.yaml index 5a1a32a3..ad111757 100644 --- a/environments/neutron-ml2-cisco-nexus-ucsm.yaml +++ b/environments/neutron-ml2-cisco-nexus-ucsm.yaml @@ -19,7 +19,7 @@ parameter_defaults: NetworkNexusSwitchReplayCount: 3 NetworkNexusProviderVlanAutoCreate: 'true' NetworkNexusProviderVlanAutoTrunk: 'true' - NetworkNexusVxlanGlobalConfig: 'true' + NetworkNexusVxlanGlobalConfig: 'false' NetworkNexusHostKeyChecks: 'false' NetworkNexusVxlanVniRanges: '0:0' NetworkNexusVxlanMcastRanges: '0.0.0.0:0.0.0.0' diff --git a/environments/updates/README.md b/environments/updates/README.md index 8c03411d..426d7329 100644 --- a/environments/updates/README.md +++ b/environments/updates/README.md @@ -7,3 +7,6 @@ Contents **update-from-keystone-admin-internal-api.yaml** To be used if the Keystone Admin API was originally deployed on the Internal API network. + +**update-from-publicvip-on-ctlplane.yaml** + To be used if the PublicVirtualIP resource was deployed as an additional VIP on the 'ctlplane'. diff --git a/environments/updates/update-from-publicvip-on-ctlplane.yaml b/environments/updates/update-from-publicvip-on-ctlplane.yaml new file mode 100644 index 00000000..c62428b4 --- /dev/null +++ b/environments/updates/update-from-publicvip-on-ctlplane.yaml @@ -0,0 +1,2 @@ +resource_registry: + OS::TripleO::Network::Ports::ExternalVipPort: ../../network/ports/ctlplane_vip.yaml
\ No newline at end of file diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh new file mode 100644 index 00000000..2f19d573 --- /dev/null +++ b/extraconfig/tasks/major_upgrade_compute.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# This delivers the compute upgrade script to be invoked as part of the tripleo +# major upgrade workflow. +# +set -eu + +UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh + +cat > $UPGRADE_SCRIPT << ENDOFCAT +### DO NOT MODIFY THIS FILE +### This file is automatically delivered to the compute nodes as part of the +### tripleo upgrades workflow + +# pin nova to kilo (messaging +-1) for the nova-compute service + +crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute +yum -y update + +ENDOFCAT + +# ensure the permissions are OK +chmod 0755 $UPGRADE_SCRIPT + diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh new file mode 100755 index 00000000..5604bb0d --- /dev/null +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +set -eu + +cluster_sync_timeout=600 + +if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then + echo_error "ERROR: upgrade cannot start with some cluster nodes being offline" + exit 1 +fi + +if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then + pcs resource disable httpd + check_resource httpd stopped 1800 + if pcs status | grep openstack-keystone; then + pcs resource disable openstack-keystone + check_resource openstack-keystone stopped 1800 + fi + pcs resource disable redis + check_resource redis stopped 600 + pcs resource disable mongod + check_resource mongod stopped 600 + pcs resource disable rabbitmq + check_resource rabbitmq stopped 600 + pcs resource disable memcached + check_resource memcached stopped 600 + pcs resource disable galera + check_resource galera stopped 600 + pcs cluster stop --all +fi + +# Swift isn't controled by pacemaker +for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \ +openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \ +openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy; do + systemctl stop $S +done + +tstart=$(date +%s) +while systemctl is-active pacemaker; do + sleep 5 + tnow=$(date +%s) + if (( tnow-tstart > cluster_sync_timeout )) ; then + echo_error "ERROR: cluster shutdown timed out" + exit 1 + fi +done + +yum update -y + +# Pin messages sent to compute nodes to kilo, these will be upgraded later +crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute" +# https://bugzilla.redhat.com/show_bug.cgi?id=1284047 +# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435 +crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit +# https://bugzilla.redhat.com/show_bug.cgi?id=1284058 +# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists +crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server" diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh 
new file mode 100755 index 00000000..0b92a3bb --- /dev/null +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +set -eu + +cluster_form_timeout=600 +cluster_settle_timeout=600 +galera_sync_timeout=600 + +if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then + pcs cluster start --all + + tstart=$(date +%s) + while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do + sleep 5 + tnow=$(date +%s) + if (( tnow-tstart > cluster_form_timeout )) ; then + echo_error "ERROR: timed out forming the cluster" + exit 1 + fi + done + + if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then + echo_error "ERROR: timed out waiting for cluster to finish transition" + exit 1 + fi + + pcs resource enable galera + check_resource galera started 600 + pcs resource enable mongod + check_resource mongod started 600 + + tstart=$(date +%s) + while ! clustercheck; do + sleep 5 + tnow=$(date +%s) + if (( tnow-tstart > galera_sync_timeout )) ; then + echo_error "ERROR galera sync timed out" + exit 1 + fi + done + + # Run all the db syncs + # TODO: check if this can be triggered in puppet and removed from here + ceilometer-dbsync --config-file=/etc/ceilometer/ceilometer.conf + cinder-manage db sync + glance-manage --config-file=/etc/glance/glance-registry.conf db_sync + heat-manage --config-file /etc/heat/heat.conf db_sync + keystone-manage db_sync + neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head + nova-manage db sync + + pcs resource enable memcached + check_resource memcached started 600 + pcs resource enable rabbitmq + check_resource rabbitmq started 600 + pcs resource enable redis + check_resource redis started 600 + if pcs status | grep openstack-keystone; then + pcs resource enable openstack-keystone + check_resource openstack-keystone started 1800 + fi + pcs resource enable httpd + check_resource httpd started 1800 +fi + +# Swift isn't controled by heat +for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \ +openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \ +openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy; do + systemctl start $S +done diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml new file mode 100644 index 00000000..5a11bae9 --- /dev/null +++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml @@ -0,0 +1,94 @@ +heat_template_version: 2014-10-16 +description: 'Upgrade for Pacemaker deployments' + +parameters: + controller_servers: + type: json + compute_servers: + type: json + blockstorage_servers: + type: json + objectstorage_servers: + type: json + cephstorage_servers: + type: json + input_values: + type: json + description: input values for the software deployments + + UpgradeLevelNovaCompute: + type: string + description: Nova Compute upgrade level + default: '' + +resources: + # TODO(jistr): for Mitaka->Newton upgrades and further we can use + # map_merge with input_values instead of feeding params into scripts + # via str_replace on bash snippets + + ControllerPacemakerUpgradeConfig_Step1: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - str_replace: + template: | + 
#!/bin/bash + upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' + params: + UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} + - get_file: pacemaker_common_functions.sh + - get_file: major_upgrade_controller_pacemaker_1.sh + + ControllerPacemakerUpgradeDeployment_Step1: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: controller_servers} + config: {get_resource: ControllerPacemakerUpgradeConfig_Step1} + input_values: {get_param: input_values} + + ControllerPacemakerUpgradeConfig_Step2: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - get_file: pacemaker_common_functions.sh + - get_file: major_upgrade_controller_pacemaker_2.sh + + ControllerPacemakerUpgradeDeployment_Step2: + type: OS::Heat::SoftwareDeploymentGroup + depends_on: ControllerPacemakerUpgradeDeployment_Step1 + properties: + servers: {get_param: controller_servers} + config: {get_resource: ControllerPacemakerUpgradeConfig_Step2} + input_values: {get_param: input_values} + + ComputeDeliverUpgradeConfig_Step3: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - str_replace: + template: | + #!/bin/bash + upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' + params: + UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} + - get_file: pacemaker_common_functions.sh + - get_file: major_upgrade_compute.sh + + ComputeDeliverUpgradeConfigDeployment_Step3: + type: OS::Heat::SoftwareDeploymentGroup + depends_on: ControllerPacemakerUpgradeDeployment_Step2 + properties: + servers: {get_param: compute_servers} + config: {get_resource: ComputeDeliverUpgradeConfig_Step3} + input_values: {get_param: input_values} + + diff --git a/extraconfig/tasks/noop.yaml b/extraconfig/tasks/noop.yaml index 0cff7469..dbb863be 100644 --- a/extraconfig/tasks/noop.yaml +++ b/extraconfig/tasks/noop.yaml @@ -4,6 +4,22 @@ description: 'No-op task' parameters: servers: type: json + default: [] + controller_servers: + type: json + default: [] + compute_servers: + type: json + default: [] + blockstorage_servers: + type: json + default: [] + objectstorage_servers: + type: json + default: [] + cephstorage_servers: + type: json + default: [] input_values: type: json default: {} diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh new file mode 100755 index 00000000..32d06c4a --- /dev/null +++ b/extraconfig/tasks/pacemaker_common_functions.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +set -eu + +function check_resource { + + if [ "$#" -ne 3 ]; then + echo_error "ERROR: check_resource function expects 3 parameters, $# given" + exit 1 + fi + + service=$1 + state=$2 + timeout=$3 + + if [ "$state" = "stopped" ]; then + match_for_incomplete='Started' + else # started + match_for_incomplete='Stopped' + fi + + if timeout -k 10 $timeout crm_resource --wait; then + node_states=$(pcs status --full | grep "$service" | grep -v Clone) + if echo "$node_states" | grep -q "$match_for_incomplete"; then + echo_error "ERROR: cluster finished transition but $service was not in $state state, exiting." + exit 1 + else + echo "$service has $state" + fi + else + echo_error "ERROR: cluster remained unstable for more than $timeout seconds, exiting." 
+ exit 1 + fi + +} + +function echo_error { + echo "$@" | tee /dev/fd2 +} diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh index 12201097..b2bdc55a 100755 --- a/extraconfig/tasks/pacemaker_resource_restart.sh +++ b/extraconfig/tasks/pacemaker_resource_restart.sh @@ -3,38 +3,6 @@ set -eux pacemaker_status=$(systemctl is-active pacemaker) -check_interval=3 - -function check_resource { - - service=$1 - state=$2 - timeout=$3 - tstart=$(date +%s) - tend=$(( $tstart + $timeout )) - - if [ "$state" = "stopped" ]; then - match_for_incomplete='Started' - else # started - match_for_incomplete='Stopped' - fi - - while (( $(date +%s) < $tend )); do - node_states=$(pcs status --full | grep "$service" | grep -v Clone) - if echo "$node_states" | grep -q "$match_for_incomplete"; then - echo "$service not yet $state, sleeping $check_interval seconds." - sleep $check_interval - else - echo "$service has $state" - timeout -k 10 $timeout crm_resource --wait - return - fi - done - - echo "$service never $state after $timeout seconds" | tee /dev/fd/2 - exit 1 - -} # Run if pacemaker is running, we're the bootstrap node, # and we're updating the deployment (not creating). diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.yaml index 7de41d94..fbed9ce5 100644 --- a/extraconfig/tasks/post_puppet_pacemaker.yaml +++ b/extraconfig/tasks/post_puppet_pacemaker.yaml @@ -33,7 +33,11 @@ resources: type: OS::Heat::SoftwareConfig properties: group: script - config: {get_file: pacemaker_resource_restart.sh} + config: + list_join: + - '' + - - get_file: pacemaker_common_functions.sh + - get_file: pacemaker_resource_restart.sh ControllerPostPuppetRestartDeployment: type: OS::Heat::SoftwareDeployments diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh index 39179024..59e4be45 100755 --- a/extraconfig/tasks/yum_update.sh +++ b/extraconfig/tasks/yum_update.sh @@ -128,6 +128,9 @@ openstack-nova-scheduler" # mongod start timeout is higher, setting only stop timeout pcs -f $pacemaker_dumpfile resource update mongod op start timeout=370s op stop timeout=200s + echo "Making sure rabbitmq has the notify=true meta parameter" + pcs -f $pacemaker_dumpfile resource update rabbitmq meta notify=true + echo "Applying new Pacemaker config" if ! 
pcs cluster cib-push $pacemaker_dumpfile; then echo "ERROR failed to apply new pacemaker config" @@ -151,14 +154,14 @@ openstack-nova-scheduler" kill $(ps ax | grep -e "radvd.*\.pid\.radvd" | awk '{print $1}') 2>/dev/null || : else echo "Upgrading openstack-puppet-modules" - yum -y update openstack-puppet-modules + yum -q -y update openstack-puppet-modules echo "Upgrading other packages is handled by config management tooling" echo -n "true" > $heat_outputs_path.update_managed_packages exit 0 fi command=${command:-update} -full_command="yum -y $command $command_arguments" +full_command="yum -q -y $command $command_arguments" echo "Running: $full_command" result=$($full_command) diff --git a/extraconfig/tasks/yum_update_noop.yaml b/extraconfig/tasks/yum_update_noop.yaml new file mode 100644 index 00000000..b759d9c5 --- /dev/null +++ b/extraconfig/tasks/yum_update_noop.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2014-10-16 +description: 'No-op yum update task' + +resources: + + config: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: | + #!/bin/bash + echo -n "false" > $heat_outputs_path.update_managed_packages + inputs: + - name: update_identifier + description: yum will only run for previously unused values of update_identifier + default: '' + - name: command + description: yum sub-command to run, defaults to "update" + default: update + - name: command_arguments + description: yum command arguments, defaults to "" + default: '' + outputs: + - name: update_managed_packages + description: boolean value indicating whether to upgrade managed packages + +outputs: + OS::stack_id: + value: {get_resource: config} diff --git a/network/config/single-nic-linux-bridge-vlans/README.md b/network/config/single-nic-linux-bridge-vlans/README.md new file mode 100644 index 00000000..b7ded049 --- /dev/null +++ b/network/config/single-nic-linux-bridge-vlans/README.md @@ -0,0 +1,19 @@ +This directory contains Heat templates to help configure +Vlans on a single NICs for each Overcloud role. + +Configuration +------------- + +To make use of these templates create a Heat environment that looks +something like this: + + resource\_registry: + OS::TripleO::BlockStorage::Net::SoftwareConfig: network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml + OS::TripleO::Compute::Net::SoftwareConfig: network/config/single-nic-linux-bridge-vlans/compute.yaml + OS::TripleO::Controller::Net::SoftwareConfig: network/config/single-nic-linux-bridge-vlans/controller.yaml + OS::TripleO::ObjectStorage::Net::SoftwareConfig: network/config/single-nic-linux-bridge-vlans/swift-storage.yaml + OS::TripleO::CephStorage::Net::SoftwareConfig: network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml + +Or use this Heat environment file: + + environments/net-single-nic-linux-bridge-with-vlans.yaml diff --git a/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml b/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml new file mode 100644 index 00000000..a46d7e20 --- /dev/null +++ b/network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml @@ -0,0 +1,106 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure VLANs for the + ceph storage role. 
+ +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: linux_bridge + name: br-storage + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + members: + - + type: interface + name: nic1 + # force the MAC address of the bridge to this interface + primary: true + - + type: vlan + vlan_id: {get_param: StorageNetworkVlanID} + device: br-storage + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageMgmtNetworkVlanID} + device: br-storage + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml b/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml new file mode 100644 index 00000000..08613a3b --- /dev/null +++ b/network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml @@ -0,0 +1,117 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure VLANs for the + cinder storage role. 
+ +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: linux_bridge + name: br-storage + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + members: + - + type: interface + name: nic1 + # force the MAC address of the bridge to this interface + primary: true + - + type: vlan + vlan_id: {get_param: InternalApiNetworkVlanID} + device: br-storage + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageNetworkVlanID} + device: br-storage + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageMgmtNetworkVlanID} + device: br-storage + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/single-nic-linux-bridge-vlans/compute.yaml b/network/config/single-nic-linux-bridge-vlans/compute.yaml new file mode 100644 index 00000000..b6522c67 --- /dev/null +++ b/network/config/single-nic-linux-bridge-vlans/compute.yaml @@ -0,0 +1,118 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure VLANs for the + compute role. 
+ +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: linux_bridge + name: {get_input: bridge_name} + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + ip_netmask: 0.0.0.0/0 + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + members: + - + type: interface + name: {get_input: interface_name} + # force the MAC address of the bridge to this interface + primary: true + - + type: vlan + vlan_id: {get_param: InternalApiNetworkVlanID} + device: {get_input: bridge_name} + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageNetworkVlanID} + device: {get_input: bridge_name} + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: vlan + vlan_id: {get_param: TenantNetworkVlanID} + device: {get_input: bridge_name} + addresses: + - + ip_netmask: {get_param: TenantIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/single-nic-linux-bridge-vlans/controller.yaml b/network/config/single-nic-linux-bridge-vlans/controller.yaml new file mode 100644 index 00000000..72105481 --- /dev/null +++ b/network/config/single-nic-linux-bridge-vlans/controller.yaml @@ -0,0 +1,149 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure VLANs for the + controller role. 
+ +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ExternalInterfaceDefaultRoute: + default: '10.0.0.1' + description: default route for the external network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. 
+ type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: linux_bridge + name: {get_input: bridge_name} + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + ip_netmask: 0.0.0.0/0 + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + members: + - + type: interface + name: {get_input: interface_name} + # force the MAC address of the bridge to this interface + primary: true + - + type: vlan + vlan_id: {get_param: ExternalNetworkVlanID} + device: {get_input: bridge_name} + addresses: + - + ip_netmask: {get_param: ExternalIpSubnet} + routes: + - + ip_netmask: 0.0.0.0/0 + default: true + next_hop: {get_param: ExternalInterfaceDefaultRoute} + - + type: vlan + vlan_id: {get_param: InternalApiNetworkVlanID} + device: {get_input: bridge_name} + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageNetworkVlanID} + device: {get_input: bridge_name} + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageMgmtNetworkVlanID} + device: {get_input: bridge_name} + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + - + type: vlan + vlan_id: {get_param: TenantNetworkVlanID} + device: {get_input: bridge_name} + addresses: + - + ip_netmask: {get_param: TenantIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml b/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml new file mode 100644 index 00000000..962b9890 --- /dev/null +++ b/network/config/single-nic-linux-bridge-vlans/swift-storage.yaml @@ -0,0 +1,117 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure VLANs for the + swift storage role. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. 
+ type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: linux_bridge + name: br-storage + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + members: + - + type: interface + name: nic1 + # force the MAC address of the bridge to this interface + primary: true + - + type: vlan + vlan_id: {get_param: InternalApiNetworkVlanID} + device: br-storage + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageNetworkVlanID} + device: br-storage + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageMgmtNetworkVlanID} + device: br-storage + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/endpoints/build_endpoint_map.py b/network/endpoints/build_endpoint_map.py new file mode 100755 index 00000000..056d6889 --- /dev/null +++ b/network/endpoints/build_endpoint_map.py @@ -0,0 +1,274 @@ +#!/usr/bin/env python + +""" +Generate the endpoint_map.yaml template from data in the endpoint_data.yaml +file. + +By default the files in the same directory as this script are operated on, but +different files can be optionally specified on the command line. + +The --check option verifies that the current output file is up-to-date with the +latest data in the input file. The script exits with status code 2 if a +mismatch is detected. 
+""" + +from __future__ import print_function + + +__all__ = ['load_endpoint_data', 'generate_endpoint_map_template', + 'write_template', 'build_endpoint_map', 'check_up_to_date'] + + +import collections +import copy +import itertools +import os +import sys +import yaml + + +(IN_FILE, OUT_FILE) = ('endpoint_data.yaml', 'endpoint_map.yaml') + +SUBST = (SUBST_IP_ADDRESS, SUBST_CLOUDNAME) = ('IP_ADDRESS', 'CLOUDNAME') +PARAMS = (PARAM_CLOUDNAME, PARAM_ENDPOINTMAP) = ('CloudName', 'EndpointMap') +FIELDS = (F_PORT, F_PROTOCOL, F_HOST) = ('port', 'protocol', 'host') + +ENDPOINT_TYPES = frozenset(['Internal', 'Public', 'Admin']) + + +def get_file(default_fn, override=None, writable=False): + if override == '-': + if writable: + return sys.stdout + else: + return sys.stdin + + if override is not None: + filename = override + else: + filename = os.path.join(os.path.dirname(__file__), default_fn) + + return open(filename, 'w' if writable else 'r') + + +def load_endpoint_data(infile=None): + with get_file(IN_FILE, infile) as f: + return yaml.safe_load(f) + + +def vip_param_name(endpoint_type_defn): + return endpoint_type_defn['vip_param'] + 'VirtualIP' + + +def vip_param_names(config): + def ep_types(svc): + return (v for k, v in svc.items() if k in ENDPOINT_TYPES or not k) + + return set(vip_param_name(defn) + for svc in config.values() for defn in ep_types(svc)) + + +def endpoint_map_default(config): + def map_item(ep_name, ep_type, svc): + values = collections.OrderedDict([ + (F_PROTOCOL, svc.get(F_PROTOCOL, 'http')), + (F_PORT, str(svc[ep_type].get(F_PORT, svc[F_PORT]))), + (F_HOST, SUBST_IP_ADDRESS), + ]) + return ep_name + ep_type, values + + return collections.OrderedDict(map_item(ep_name, ep_type, svc) + for ep_name, svc in sorted(config.items()) + for ep_type in sorted(set(svc) & + ENDPOINT_TYPES)) + + +def make_parameter(ptype, default, description=None): + param = collections.OrderedDict([('type', ptype), ('default', default)]) + if description is not None: + param['description'] = description + return param + + +def template_parameters(config): + params = collections.OrderedDict((n, make_parameter('string', '')) + for n in sorted(vip_param_names(config))) + + params[PARAM_ENDPOINTMAP] = make_parameter('json', + endpoint_map_default(config), + 'Mapping of service endpoint ' + '-> protocol. Typically set ' + 'via parameter_defaults in the ' + 'resource registry.') + + params[PARAM_CLOUDNAME] = make_parameter('string', + 'overcloud', + 'The DNS name of this cloud. ' + 'e.g. 
ci-overcloud.tripleo.org') + return params + + +def template_output_definition(endpoint_name, + endpoint_variant, + endpoint_type, + vip_param, + uri_suffix=None, + name_override=None): + def extract_field(field): + assert field in FIELDS + return {'get_param': ['EndpointMap', + endpoint_name + endpoint_type, + copy.copy(field)]} + + port = extract_field(F_PORT) + protocol = extract_field(F_PROTOCOL) + host = { + 'str_replace': collections.OrderedDict([ + ('template', extract_field(F_HOST)), + ('params', { + SUBST_IP_ADDRESS: {'get_param': vip_param}, + SUBST_CLOUDNAME: {'get_param': PARAM_CLOUDNAME}, + }) + ]) + } + uri_fields = [protocol, '://', copy.deepcopy(host), ':', port] + uri_fields_suffix = (copy.deepcopy(uri_fields) + + ([uri_suffix] if uri_suffix is not None else [])) + + name = name_override if name_override is not None else (endpoint_name + + endpoint_variant + + endpoint_type) + + return name, { + 'host': host, + 'port': extract_field('port'), + 'protocol': extract_field('protocol'), + 'uri': { + 'list_join': ['', uri_fields_suffix] + }, + 'uri_no_suffix': { + 'list_join': ['', uri_fields] + }, + } + + +def template_endpoint_items(config): + def get_svc_endpoints(ep_name, svc): + for ep_type in set(svc) & ENDPOINT_TYPES: + defn = svc[ep_type] + for variant, suffix in defn.get('uri_suffixes', + {'': None}).items(): + name_override = defn.get('names', {}).get(variant) + yield template_output_definition(ep_name, variant, ep_type, + vip_param_name(defn), + suffix, + name_override) + + return itertools.chain.from_iterable(sorted(get_svc_endpoints(ep_name, + svc)) + for (ep_name, + svc) in sorted(config.items())) + + +def generate_endpoint_map_template(config): + return collections.OrderedDict([ + ('heat_template_version', '2015-04-30'), + ('description', 'A map of OpenStack endpoints.'), + ('parameters', template_parameters(config)), + ('outputs', { + 'endpoint_map': { + 'value': + collections.OrderedDict(template_endpoint_items(config)) + } + }), + ]) + + +autogen_warning = """### DO NOT MODIFY THIS FILE +### This file is automatically generated from endpoint_data.yaml +### by the script build_endpoint_map.py + +""" + + +class TemplateDumper(yaml.SafeDumper): + def represent_ordered_dict(self, data): + return self.represent_dict(data.items()) + + +TemplateDumper.add_representer(collections.OrderedDict, + TemplateDumper.represent_ordered_dict) + + +def write_template(template, filename=None): + with get_file(OUT_FILE, filename, writable=True) as f: + f.write(autogen_warning) + yaml.dump(template, f, TemplateDumper, width=68) + + +def read_template(template, filename=None): + with get_file(OUT_FILE, filename) as f: + return yaml.safe_load(f) + + +def build_endpoint_map(output_filename=None, input_filename=None): + if output_filename is not None and output_filename == input_filename: + raise Exception('Cannot read from and write to the same file') + config = load_endpoint_data(input_filename) + template = generate_endpoint_map_template(config) + write_template(template, output_filename) + + +def check_up_to_date(output_filename=None, input_filename=None): + if output_filename is not None and output_filename == input_filename: + raise Exception('Input and output filenames must be different') + config = load_endpoint_data(input_filename) + template = generate_endpoint_map_template(config) + existing_template = read_template(output_filename) + return existing_template == template + + +def get_options(): + from optparse import OptionParser + + parser = OptionParser('usage: %prog' + ' 
[-i INPUT_FILE] [-o OUTPUT_FILE] [--check]', + description=__doc__) + parser.add_option('-i', '--input', dest='input_file', action='store', + default=None, + help='Specify a different endpoint data file') + parser.add_option('-o', '--output', dest='output_file', action='store', + default=None, + help='Specify a different endpoint map template file') + parser.add_option('-c', '--check', dest='check', action='store_true', + default=False, help='Check that the output file is ' + 'up to date with the data') + parser.add_option('-d', '--debug', dest='debug', action='store_true', + default=False, help='Print stack traces on error') + + return parser.parse_args() + + +def main(): + options, args = get_options() + if args: + print('Warning: ignoring positional args: %s' % ' '.join(args), + file=sys.stderr) + + try: + if options.check: + if not check_up_to_date(options.output_file, options.input_file): + print('EndpointMap template does not match input data', + file=sys.stderr) + sys.exit(2) + else: + build_endpoint_map(options.output_file, options.input_file) + except Exception as exc: + if options.debug: + raise + print('%s: %s' % (type(exc).__name__, str(exc)), file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/network/endpoints/endpoint.yaml b/network/endpoints/endpoint.yaml deleted file mode 100644 index 6246cfdd..00000000 --- a/network/endpoints/endpoint.yaml +++ /dev/null @@ -1,60 +0,0 @@ -heat_template_version: 2015-04-30 - -description: > - OpenStack Endpoint - -parameters: - EndpointName: - type: string - description: The name of the Endpoint being evaluated - EndpointMap: - type: json - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - IP: - type: string - description: The IP address of the Neutron Port that the endpoint is attached to - UriSuffix: - type: string - default: '' - description: A suffix attached to the URL - CloudName: - type: string - default: '' - description: The DNS name of this cloud. E.g. 
ci-overcloud.tripleo.org - -outputs: - endpoint: - description: > - A Hash containing a mapping of service endpoints to ports, protocols, uris - assigned IPs, and hostnames for a specific endpoint - value: - port: {get_param: [EndpointMap, {get_param: EndpointName }, port] } - protocol: {get_param: [EndpointMap, {get_param: EndpointName }, protocol] } - ip: {get_param: IP} - host: - str_replace: - template: {get_param: [EndpointMap, {get_param: EndpointName }, host]} - params: {IP_ADDRESS: {get_param: IP}, CLOUDNAME: {get_param: CloudName}} - uri: - list_join: - - '' - - - {get_param: [EndpointMap, {get_param: EndpointName }, protocol] } - - '://' - - str_replace: - template: {get_param: [EndpointMap, {get_param: EndpointName }, host]} - params: {IP_ADDRESS: {get_param: IP}, CLOUDNAME: {get_param: CloudName }} - - ':' - - {get_param: [EndpointMap, {get_param: EndpointName }, port] } - - {get_param: UriSuffix } - uri_no_suffix: - list_join: - - '' - - - {get_param: [EndpointMap, {get_param: EndpointName }, protocol] } - - '://' - - str_replace: - template: {get_param: [EndpointMap, {get_param: EndpointName }, host]} - params: {IP_ADDRESS: {get_param: IP}, CLOUDNAME: {get_param: CloudName} } - - ':' - - {get_param: [EndpointMap, {get_param: EndpointName }, port] } diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml new file mode 100644 index 00000000..9d508d18 --- /dev/null +++ b/network/endpoints/endpoint_data.yaml @@ -0,0 +1,194 @@ +# Data in this file is used to generate the endpoint_map.yaml template. +# Run the script build_endpoint_map.py to regenerate the file. + +Ceilometer: + Internal: + vip_param: CeilometerApi + Public: + vip_param: Public + Admin: + vip_param: CeilometerApi + port: 8777 + +Cinder: + Internal: + vip_param: CinderApi + uri_suffixes: + '': /v1/%(tenant_id)s + V2: /v2/%(tenant_id)s + Public: + vip_param: Public + uri_suffixes: + '': /v1/%(tenant_id)s + V2: /v2/%(tenant_id)s + Admin: + vip_param: CinderApi + uri_suffixes: + '': /v1/%(tenant_id)s + V2: /v2/%(tenant_id)s + port: 8776 + +Glance: + Internal: + vip_param: GlanceApi + Public: + vip_param: Public + Admin: + vip_param: GlanceApi + port: 9292 + +GlanceRegistry: + Internal: + vip_param: GlanceRegistry + Public: + vip_param: Public + Admin: + vip_param: GlanceRegistry + port: 9191 + +Mysql: + '': + vip_param: Mysql + +Heat: + Internal: + vip_param: HeatApi + uri_suffixes: + '': /v1/%(tenant_id)s + Public: + vip_param: Public + uri_suffixes: + '': /v1/%(tenant_id)s + Admin: + vip_param: HeatApi + uri_suffixes: + '': /v1/%(tenant_id)s + port: 8004 + +Horizon: + Public: + vip_param: Public + uri_suffixes: + '': /dashboard + port: 80 + +Keystone: + Internal: + vip_param: KeystonePublicApi + uri_suffixes: + '': /v2.0 + EC2: /v2.0/ec2tokens + names: + EC2: KeystoneEC2 + Public: + vip_param: Public + uri_suffixes: + '': /v2.0 + Admin: + vip_param: KeystoneAdminApi + uri_suffixes: + '': /v2.0 + port: 35357 + port: 5000 + +# TODO(ayoung): V3 is a temporary fix. Endpoints should be versionless. 
+# Required for https://bugs.launchpad.net/puppet-nova/+bug/1542486 +KeystoneV3: + Internal: + vip_param: KeystonePublicApi + uri_suffixes: + '': /v3 + Public: + vip_param: Public + uri_suffixes: + '': /v3 + Admin: + vip_param: KeystoneAdminApi + uri_suffixes: + '': /v3 + port: 35357 + port: 5000 + +Neutron: + Internal: + vip_param: NeutronApi + Public: + vip_param: Public + Admin: + vip_param: NeutronApi + port: 9696 + +Nova: + Internal: + vip_param: NovaApi + uri_suffixes: + '': /v2/%(tenant_id)s + V3: /v3 + Public: + vip_param: Public + uri_suffixes: + '': /v2/%(tenant_id)s + V3: /v3 + Admin: + vip_param: NovaApi + uri_suffixes: + '': /v2/%(tenant_id)s + V3: /v3 + port: 8774 + +NovaEC2: + Internal: + vip_param: NovaApi + uri_suffixes: + '': /services/Cloud + Public: + vip_param: Public + uri_suffixes: + '': /services/Cloud + Admin: + vip_param: NovaApi + uri_suffixes: + '': /services/Admin + port: 8773 + +NovaVNCProxy: + Internal: + vip_param: NovaApi + Public: + vip_param: Public + Admin: + vip_param: NovaApi + port: 6080 + +Swift: + Internal: + vip_param: SwiftProxy + uri_suffixes: + '': /v1/AUTH_%(tenant_id)s + S3: + Public: + vip_param: Public + uri_suffixes: + '': /v1/AUTH_%(tenant_id)s + S3: + Admin: + vip_param: SwiftProxy + uri_suffixes: + '': + S3: + port: 8080 + +Sahara: + Internal: + vip_param: SaharaApi + uri_suffixes: + '': /v1.1/%(tenant_id)s + Public: + vip_param: SaharaApi + uri_suffixes: + '': /v1.1/%(tenant_id)s + Admin: + vip_param: SaharaApi + uri_suffixes: + '': /v1.1/%(tenant_id)s + port: 8386 diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml index f6063c0e..e9f97fa1 100644 --- a/network/endpoints/endpoint_map.yaml +++ b/network/endpoints/endpoint_map.yaml @@ -1,484 +1,2001 @@ -heat_template_version: 2015-04-30 - -description: > - A Map of OpenStack Endpoints +### DO NOT MODIFY THIS FILE +### This file is automatically generated from endpoint_data.yaml +### by the script build_endpoint_map.py +heat_template_version: '2015-04-30' +description: A map of OpenStack endpoints. 
parameters: - CeilometerApiVirtualIP: - type: string - default: '' - CinderApiVirtualIP: - type: string - default: '' - GlanceApiVirtualIP: - type: string - default: '' - GlanceRegistryVirtualIP: - type: string - default: '' - HeatApiVirtualIP: - type: string - default: '' - KeystoneAdminApiVirtualIP: - type: string - default: '' - KeystonePublicApiVirtualIP: - type: string - default: '' - MysqlVirtualIP: - type: string - default: '' - NeutronApiVirtualIP: - type: string - default: '' - NovaApiVirtualIP: - type: string - default: '' - PublicVirtualIP: - type: string - default: '' - SwiftProxyVirtualIP: - type: string - default: '' - SaharaApiVirtualIP: - type: string - default: '' + CeilometerApiVirtualIP: {type: string, default: ''} + CinderApiVirtualIP: {type: string, default: ''} + GlanceApiVirtualIP: {type: string, default: ''} + GlanceRegistryVirtualIP: {type: string, default: ''} + HeatApiVirtualIP: {type: string, default: ''} + KeystoneAdminApiVirtualIP: {type: string, default: ''} + KeystonePublicApiVirtualIP: {type: string, default: ''} + MysqlVirtualIP: {type: string, default: ''} + NeutronApiVirtualIP: {type: string, default: ''} + NovaApiVirtualIP: {type: string, default: ''} + PublicVirtualIP: {type: string, default: ''} + SaharaApiVirtualIP: {type: string, default: ''} + SwiftProxyVirtualIP: {type: string, default: ''} EndpointMap: type: json default: - CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'} - CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'} - CeilometerPublic: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'} - CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'} - CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'} - CinderPublic: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'} - GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'} - GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'} - GlancePublic: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'} - GlanceRegistryAdmin: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'} - GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'} - GlanceRegistryPublic: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'} - HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'} - HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'} - HeatPublic: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'} - HorizonPublic: {protocol: 'http', port: '80', host: 'IP_ADDRESS'} - KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'} - KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'} - KeystonePublic: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'} - NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'} - NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'} - NeutronPublic: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'} - NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'} - NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'} - NovaPublic: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'} - NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'} - NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'} - NovaEC2Public: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'} - SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} - SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} - SwiftPublic: {protocol: 'http', port: '8080', 
host: 'IP_ADDRESS'} - SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} - SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} - SaharaPublic: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} + CeilometerAdmin: {protocol: http, port: '8777', host: IP_ADDRESS} + CeilometerInternal: {protocol: http, port: '8777', host: IP_ADDRESS} + CeilometerPublic: {protocol: http, port: '8777', host: IP_ADDRESS} + CinderAdmin: {protocol: http, port: '8776', host: IP_ADDRESS} + CinderInternal: {protocol: http, port: '8776', host: IP_ADDRESS} + CinderPublic: {protocol: http, port: '8776', host: IP_ADDRESS} + GlanceAdmin: {protocol: http, port: '9292', host: IP_ADDRESS} + GlanceInternal: {protocol: http, port: '9292', host: IP_ADDRESS} + GlancePublic: {protocol: http, port: '9292', host: IP_ADDRESS} + GlanceRegistryAdmin: {protocol: http, port: '9191', host: IP_ADDRESS} + GlanceRegistryInternal: {protocol: http, port: '9191', host: IP_ADDRESS} + GlanceRegistryPublic: {protocol: http, port: '9191', host: IP_ADDRESS} + HeatAdmin: {protocol: http, port: '8004', host: IP_ADDRESS} + HeatInternal: {protocol: http, port: '8004', host: IP_ADDRESS} + HeatPublic: {protocol: http, port: '8004', host: IP_ADDRESS} + HorizonPublic: {protocol: http, port: '80', host: IP_ADDRESS} + KeystoneAdmin: {protocol: http, port: '35357', host: IP_ADDRESS} + KeystoneInternal: {protocol: http, port: '5000', host: IP_ADDRESS} + KeystonePublic: {protocol: http, port: '5000', host: IP_ADDRESS} + KeystoneV3Admin: {protocol: http, port: '35357', host: IP_ADDRESS} + KeystoneV3Internal: {protocol: http, port: '5000', host: IP_ADDRESS} + KeystoneV3Public: {protocol: http, port: '5000', host: IP_ADDRESS} + NeutronAdmin: {protocol: http, port: '9696', host: IP_ADDRESS} + NeutronInternal: {protocol: http, port: '9696', host: IP_ADDRESS} + NeutronPublic: {protocol: http, port: '9696', host: IP_ADDRESS} + NovaAdmin: {protocol: http, port: '8774', host: IP_ADDRESS} + NovaInternal: {protocol: http, port: '8774', host: IP_ADDRESS} + NovaPublic: {protocol: http, port: '8774', host: IP_ADDRESS} + NovaEC2Admin: {protocol: http, port: '8773', host: IP_ADDRESS} + NovaEC2Internal: {protocol: http, port: '8773', host: IP_ADDRESS} + NovaEC2Public: {protocol: http, port: '8773', host: IP_ADDRESS} + NovaVNCProxyAdmin: {protocol: http, port: '6080', host: IP_ADDRESS} + NovaVNCProxyInternal: {protocol: http, port: '6080', host: IP_ADDRESS} + NovaVNCProxyPublic: {protocol: http, port: '6080', host: IP_ADDRESS} + SaharaAdmin: {protocol: http, port: '8386', host: IP_ADDRESS} + SaharaInternal: {protocol: http, port: '8386', host: IP_ADDRESS} + SaharaPublic: {protocol: http, port: '8386', host: IP_ADDRESS} + SwiftAdmin: {protocol: http, port: '8080', host: IP_ADDRESS} + SwiftInternal: {protocol: http, port: '8080', host: IP_ADDRESS} + SwiftPublic: {protocol: http, port: '8080', host: IP_ADDRESS} description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - CloudName: - type: string - default: overcloud - description: The DNS name of this cloud. E.g. 
ci-overcloud.tripleo.org - -resources: - - CeilometerInternal: - type: OS::TripleO::Endpoint - properties: - EndpointName: CeilometerInternal - EndpointMap: { get_param: EndpointMap } - CloudName: {get_param: CloudName} - IP: {get_param: CeilometerApiVirtualIP} - CeilometerPublic: - type: OS::TripleO::Endpoint - properties: - EndpointName: CeilometerPublic - EndpointMap: { get_param: EndpointMap } - CloudName: {get_param: CloudName} - IP: {get_param: PublicVirtualIP} - CeilometerAdmin: - type: OS::TripleO::Endpoint - properties: - EndpointName: CeilometerAdmin - EndpointMap: { get_param: EndpointMap } - CloudName: {get_param: CloudName} - IP: {get_param: CeilometerApiVirtualIP} - - CinderInternal: - type: OS::TripleO::Endpoint - properties: - EndpointName: CinderInternal - EndpointMap: { get_param: EndpointMap } - CloudName: {get_param: CloudName} - IP: {get_param: CinderApiVirtualIP} - UriSuffix: '/v1/%(tenant_id)s' - CinderPublic: - type: OS::TripleO::Endpoint - properties: - EndpointName: CinderPublic - EndpointMap: { get_param: EndpointMap } - CloudName: {get_param: CloudName} - IP: {get_param: PublicVirtualIP} - UriSuffix: '/v1/%(tenant_id)s' - CinderAdmin: - type: OS::TripleO::Endpoint - properties: - EndpointName: CinderAdmin - EndpointMap: { get_param: EndpointMap } - CloudName: {get_param: CloudName} - IP: {get_param: CinderApiVirtualIP} - UriSuffix: '/v1/%(tenant_id)s' - - CinderV2Internal: - type: OS::TripleO::Endpoint - properties: - EndpointName: CinderInternal - EndpointMap: { get_param: EndpointMap } - CloudName: {get_param: CloudName} - IP: {get_param: CinderApiVirtualIP} - UriSuffix: '/v2/%(tenant_id)s' - CinderV2Public: - type: OS::TripleO::Endpoint - properties: - EndpointName: CinderPublic - EndpointMap: { get_param: EndpointMap } - CloudName: {get_param: CloudName} - IP: {get_param: PublicVirtualIP} - UriSuffix: '/v2/%(tenant_id)s' - CinderV2Admin: - type: OS::TripleO::Endpoint - properties: - EndpointName: CinderAdmin - EndpointMap: { get_param: EndpointMap } - CloudName: {get_param: CloudName} - IP: {get_param: CinderApiVirtualIP} - UriSuffix: '/v2/%(tenant_id)s' - - GlanceInternal: - type: OS::TripleO::Endpoint - properties: - EndpointName: GlanceInternal - EndpointMap: { get_param: EndpointMap } - CloudName: {get_param: CloudName} - IP: {get_param: GlanceApiVirtualIP} - GlancePublic: - type: OS::TripleO::Endpoint - properties: - EndpointName: GlancePublic - EndpointMap: { get_param: EndpointMap } - CloudName: {get_param: CloudName} - IP: {get_param: PublicVirtualIP} - GlanceAdmin: - type: OS::TripleO::Endpoint - properties: - EndpointName: GlanceAdmin - EndpointMap: { get_param: EndpointMap } - CloudName: {get_param: CloudName} - IP: {get_param: GlanceApiVirtualIP} - GlanceRegistryInternal: - type: OS::TripleO::Endpoint - properties: - EndpointName: GlanceInternal - EndpointMap: { get_param: EndpointMap } - IP: {get_param: GlanceRegistryVirtualIP} - GlanceRegistryPublic: - type: OS::TripleO::Endpoint - properties: - EndpointName: GlancePublic - EndpointMap: { get_param: EndpointMap } - IP: {get_param: PublicVirtualIP} - GlanceRegistryAdmin: - type: OS::TripleO::Endpoint - properties: - EndpointName: GlanceAdmin - EndpointMap: { get_param: EndpointMap } - IP: {get_param: GlanceRegistryVirtualIP} - - HeatInternal: - type: OS::TripleO::Endpoint - properties: - EndpointName: HeatInternal - EndpointMap: { get_param: EndpointMap } - IP: {get_param: HeatApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v1/%(tenant_id)s' - HeatPublic: - type: 
OS::TripleO::Endpoint - properties: - EndpointName: HeatPublic - EndpointMap: { get_param: EndpointMap } - IP: {get_param: PublicVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v1/%(tenant_id)s' - HeatAdmin: - type: OS::TripleO::Endpoint - properties: - EndpointName: HeatAdmin - EndpointMap: { get_param: EndpointMap } - IP: {get_param: HeatApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v1/%(tenant_id)s' - - HorizonPublic: - type: OS::TripleO::Endpoint - properties: - EndpointName: HeatPublic - EndpointMap: { get_param: EndpointMap } - IP: {get_param: PublicVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/dashboard' - - KeystoneInternal: - type: OS::TripleO::Endpoint - properties: - EndpointName: KeystoneInternal - EndpointMap: { get_param: EndpointMap } - IP: {get_param: KeystonePublicApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v2.0' - KeystonePublic: - type: OS::TripleO::Endpoint - properties: - EndpointName: KeystonePublic - EndpointMap: { get_param: EndpointMap } - IP: {get_param: PublicVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v2.0' - KeystoneAdmin: - type: OS::TripleO::Endpoint - properties: - EndpointName: KeystoneAdmin - EndpointMap: { get_param: EndpointMap } - IP: {get_param: KeystoneAdminApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v2.0' - KeystoneEC2: - type: OS::TripleO::Endpoint - properties: - EndpointName: KeystoneInternal - EndpointMap: { get_param: EndpointMap } - IP: {get_param: KeystonePublicApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v2.0/ec2tokens' - - NeutronInternal: - type: OS::TripleO::Endpoint - properties: - EndpointName: NeutronInternal - EndpointMap: { get_param: EndpointMap } - IP: {get_param: NeutronApiVirtualIP} - CloudName: {get_param: CloudName} - NeutronPublic: - type: OS::TripleO::Endpoint - properties: - EndpointName: NeutronPublic - EndpointMap: { get_param: EndpointMap } - IP: {get_param: PublicVirtualIP} - CloudName: {get_param: CloudName} - NeutronAdmin: - type: OS::TripleO::Endpoint - properties: - EndpointName: NeutronAdmin - EndpointMap: { get_param: EndpointMap } - IP: {get_param: NeutronApiVirtualIP} - CloudName: {get_param: CloudName} - - NovaInternal: - type: OS::TripleO::Endpoint - properties: - EndpointName: NovaInternal - EndpointMap: { get_param: EndpointMap } - IP: {get_param: NovaApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v2/%(tenant_id)s' - NovaPublic: - type: OS::TripleO::Endpoint - properties: - EndpointName: NovaPublic - EndpointMap: { get_param: EndpointMap } - IP: {get_param: PublicVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v2/%(tenant_id)s' - NovaAdmin: - type: OS::TripleO::Endpoint - properties: - EndpointName: NovaAdmin - EndpointMap: { get_param: EndpointMap } - IP: {get_param: NovaApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v2/%(tenant_id)s' - NovaV3Internal: - type: OS::TripleO::Endpoint - properties: - EndpointName: NovaInternal - EndpointMap: { get_param: EndpointMap } - IP: {get_param: NovaApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v3' - NovaV3Public: - type: OS::TripleO::Endpoint - properties: - EndpointName: NovaPublic - EndpointMap: { get_param: EndpointMap } - IP: {get_param: PublicVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v3' - NovaV3Admin: - type: OS::TripleO::Endpoint - properties: - EndpointName: NovaAdmin - EndpointMap: { get_param: EndpointMap } - IP: {get_param: NovaApiVirtualIP} - 
CloudName: {get_param: CloudName} - UriSuffix: '/v3' - - NovaEC2Internal: - type: OS::TripleO::Endpoint - properties: - EndpointName: NovaEC2Internal - EndpointMap: { get_param: EndpointMap } - IP: {get_param: NovaApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/services/Cloud' - NovaEC2Public: - type: OS::TripleO::Endpoint - properties: - EndpointName: NovaEC2Public - EndpointMap: { get_param: EndpointMap } - IP: {get_param: PublicVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/services/Cloud' - NovaEC2Admin: - type: OS::TripleO::Endpoint - properties: - EndpointName: NovaEC2Admin - EndpointMap: { get_param: EndpointMap } - IP: {get_param: NovaApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/services/Admin' - - SwiftInternal: - type: OS::TripleO::Endpoint - properties: - EndpointName: SwiftInternal - EndpointMap: { get_param: EndpointMap } - IP: {get_param: SwiftProxyVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v1/AUTH_%(tenant_id)s' - SwiftPublic: - type: OS::TripleO::Endpoint - properties: - EndpointName: SwiftPublic - EndpointMap: { get_param: EndpointMap } - IP: {get_param: PublicVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v1/AUTH_%(tenant_id)s' - SwiftAdmin: - type: OS::TripleO::Endpoint - properties: - EndpointName: SwiftAdmin - EndpointMap: { get_param: EndpointMap } - IP: {get_param: SwiftProxyVirtualIP} - CloudName: {get_param: CloudName} - # No Suffix for the Admin interface - SwiftS3Internal: - type: OS::TripleO::Endpoint - properties: - EndpointName: SwiftInternal - EndpointMap: { get_param: EndpointMap } - IP: {get_param: SwiftProxyVirtualIP} - CloudName: {get_param: CloudName} - SwiftS3Public: - type: OS::TripleO::Endpoint - properties: - EndpointName: SwiftPublic - EndpointMap: { get_param: EndpointMap } - IP: {get_param: PublicVirtualIP} - CloudName: {get_param: CloudName} - SwiftS3Admin: - type: OS::TripleO::Endpoint - properties: - EndpointName: SwiftAdmin - EndpointMap: { get_param: EndpointMap } - IP: {get_param: SwiftProxyVirtualIP} - CloudName: {get_param: CloudName} - - SaharaInternal: - type: OS::TripleO::Endpoint - properties: - EndpointName: SaharaInternal - EndpointMap: { get_param: EndpointMap } - IP: {get_param: SaharaApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v1.1/%(tenant_id)s' - SaharaPublic: - type: OS::TripleO::Endpoint - properties: - EndpointName: SaharaPublic - EndpointMap: { get_param: EndpointMap } - IP: {get_param: SaharaApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v1.1/%(tenant_id)s' - SaharaAdmin: - type: OS::TripleO::Endpoint - properties: - EndpointName: SaharaAdmin - EndpointMap: { get_param: EndpointMap } - IP: {get_param: SaharaApiVirtualIP} - CloudName: {get_param: CloudName} - UriSuffix: '/v1.1/%(tenant_id)s' - + via parameter_defaults in the resource registry. + CloudName: {type: string, default: overcloud, description: The DNS name + of this cloud. e.g. 
ci-overcloud.tripleo.org} outputs: endpoint_map: value: - CeilometerInternal: {get_attr: [ CeilometerInternal, endpoint] } - CeilometerPublic: {get_attr: [ CeilometerPublic, endpoint] } - CeilometerAdmin: {get_attr: [ CeilometerAdmin, endpoint] } - CinderInternal: {get_attr: [ CinderInternal, endpoint] } - CinderPublic: {get_attr: [ CinderPublic, endpoint] } - CinderAdmin: {get_attr: [ CinderAdmin, endpoint] } - CinderV2Internal: {get_attr: [ CinderV2Internal, endpoint] } - CinderV2Public: {get_attr: [ CinderV2Public, endpoint] } - CinderV2Admin: {get_attr: [ CinderV2Admin, endpoint] } - GlanceInternal: {get_attr: [ GlanceInternal, endpoint] } - GlancePublic: {get_attr: [ GlancePublic, endpoint] } - GlanceAdmin: {get_attr: [ GlanceAdmin, endpoint] } - GlanceRegistryInternal: {get_attr: [ GlanceRegistryInternal, endpoint] } - GlanceRegistryPublic: {get_attr: [ GlanceRegistryPublic, endpoint] } - GlanceRegistryAdmin: {get_attr: [ GlanceRegistryAdmin, endpoint] } - HeatInternal: {get_attr: [ HeatInternal, endpoint] } - HeatPublic: {get_attr: [ HeatPublic, endpoint] } - HeatAdmin: {get_attr: [ HeatAdmin, endpoint] } - HorizonPublic: {get_attr: [ HorizonPublic, endpoint] } - KeystoneInternal: {get_attr: [ KeystoneInternal, endpoint] } - KeystonePublic: {get_attr: [ KeystonePublic, endpoint] } - KeystoneAdmin: {get_attr: [ KeystoneAdmin, endpoint] } - KeystoneEC2: {get_attr: [ KeystoneEC2, endpoint] } - NeutronInternal: {get_attr: [ NeutronInternal, endpoint] } - NeutronPublic: {get_attr: [ NeutronPublic, endpoint] } - NeutronAdmin: {get_attr: [ NeutronAdmin, endpoint] } - NovaInternal: {get_attr: [ NovaInternal, endpoint] } - NovaPublic: {get_attr: [ NovaPublic, endpoint] } - NovaAdmin: {get_attr: [ NovaAdmin, endpoint] } - NovaV3Internal: {get_attr: [ NovaV3Internal, endpoint] } - NovaV3Public: {get_attr: [ NovaV3Public, endpoint] } - NovaV3Admin: {get_attr: [ NovaV3Admin, endpoint] } - NovaEC2Internal: {get_attr: [ NovaEC2Internal, endpoint] } - NovaEC2Public: {get_attr: [ NovaEC2Public, endpoint] } - NovaEC2Admin: {get_attr: [ NovaEC2Admin, endpoint] } - SwiftInternal: {get_attr: [ SwiftInternal, endpoint] } - SwiftPublic: {get_attr: [ SwiftPublic, endpoint] } - SwiftAdmin: {get_attr: [ SwiftAdmin, endpoint] } - SwiftS3Internal: {get_attr: [ SwiftS3Internal, endpoint] } - SwiftS3Public: {get_attr: [ SwiftS3Public, endpoint] } - SwiftS3Admin: {get_attr: [ SwiftS3Admin, endpoint] } - SaharaInternal: {get_attr: [ SaharaInternal, endpoint] } - SaharaPublic: {get_attr: [ SaharaPublic, endpoint] } - SaharaAdmin: {get_attr: [ SaharaAdmin, endpoint] }
\ No newline at end of file + CeilometerAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, CeilometerAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CeilometerApiVirtualIP} + port: + get_param: [EndpointMap, CeilometerAdmin, port] + protocol: + get_param: [EndpointMap, CeilometerAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, CeilometerAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CeilometerAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CeilometerApiVirtualIP} + - ':' + - get_param: [EndpointMap, CeilometerAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, CeilometerAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CeilometerAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CeilometerApiVirtualIP} + - ':' + - get_param: [EndpointMap, CeilometerAdmin, port] + CeilometerInternal: + host: + str_replace: + template: + get_param: [EndpointMap, CeilometerInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CeilometerApiVirtualIP} + port: + get_param: [EndpointMap, CeilometerInternal, port] + protocol: + get_param: [EndpointMap, CeilometerInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, CeilometerInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CeilometerInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CeilometerApiVirtualIP} + - ':' + - get_param: [EndpointMap, CeilometerInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, CeilometerInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CeilometerInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CeilometerApiVirtualIP} + - ':' + - get_param: [EndpointMap, CeilometerInternal, port] + CeilometerPublic: + host: + str_replace: + template: + get_param: [EndpointMap, CeilometerPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, CeilometerPublic, port] + protocol: + get_param: [EndpointMap, CeilometerPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, CeilometerPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CeilometerPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, CeilometerPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, CeilometerPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CeilometerPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, CeilometerPublic, port] + CinderAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, CinderAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CinderApiVirtualIP} + port: + get_param: [EndpointMap, CinderAdmin, port] + protocol: + get_param: [EndpointMap, CinderAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, CinderAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CinderAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: 
{get_param: CinderApiVirtualIP} + - ':' + - get_param: [EndpointMap, CinderAdmin, port] + - /v1/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, CinderAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CinderAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CinderApiVirtualIP} + - ':' + - get_param: [EndpointMap, CinderAdmin, port] + CinderInternal: + host: + str_replace: + template: + get_param: [EndpointMap, CinderInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CinderApiVirtualIP} + port: + get_param: [EndpointMap, CinderInternal, port] + protocol: + get_param: [EndpointMap, CinderInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, CinderInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CinderInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CinderApiVirtualIP} + - ':' + - get_param: [EndpointMap, CinderInternal, port] + - /v1/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, CinderInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CinderInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CinderApiVirtualIP} + - ':' + - get_param: [EndpointMap, CinderInternal, port] + CinderPublic: + host: + str_replace: + template: + get_param: [EndpointMap, CinderPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, CinderPublic, port] + protocol: + get_param: [EndpointMap, CinderPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, CinderPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CinderPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, CinderPublic, port] + - /v1/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, CinderPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CinderPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, CinderPublic, port] + CinderV2Admin: + host: + str_replace: + template: + get_param: [EndpointMap, CinderAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CinderApiVirtualIP} + port: + get_param: [EndpointMap, CinderAdmin, port] + protocol: + get_param: [EndpointMap, CinderAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, CinderAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CinderAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CinderApiVirtualIP} + - ':' + - get_param: [EndpointMap, CinderAdmin, port] + - /v2/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, CinderAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CinderAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CinderApiVirtualIP} + - ':' + - get_param: [EndpointMap, CinderAdmin, port] + CinderV2Internal: + host: + str_replace: + template: + get_param: [EndpointMap, CinderInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CinderApiVirtualIP} + port: + 
get_param: [EndpointMap, CinderInternal, port] + protocol: + get_param: [EndpointMap, CinderInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, CinderInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CinderInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CinderApiVirtualIP} + - ':' + - get_param: [EndpointMap, CinderInternal, port] + - /v2/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, CinderInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CinderInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: CinderApiVirtualIP} + - ':' + - get_param: [EndpointMap, CinderInternal, port] + CinderV2Public: + host: + str_replace: + template: + get_param: [EndpointMap, CinderPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, CinderPublic, port] + protocol: + get_param: [EndpointMap, CinderPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, CinderPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CinderPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, CinderPublic, port] + - /v2/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, CinderPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, CinderPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, CinderPublic, port] + GlanceAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, GlanceAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: GlanceApiVirtualIP} + port: + get_param: [EndpointMap, GlanceAdmin, port] + protocol: + get_param: [EndpointMap, GlanceAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, GlanceAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: GlanceApiVirtualIP} + - ':' + - get_param: [EndpointMap, GlanceAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, GlanceAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: GlanceApiVirtualIP} + - ':' + - get_param: [EndpointMap, GlanceAdmin, port] + GlanceInternal: + host: + str_replace: + template: + get_param: [EndpointMap, GlanceInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: GlanceApiVirtualIP} + port: + get_param: [EndpointMap, GlanceInternal, port] + protocol: + get_param: [EndpointMap, GlanceInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, GlanceInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: GlanceApiVirtualIP} + - ':' + - get_param: [EndpointMap, GlanceInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, GlanceInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + 
IP_ADDRESS: {get_param: GlanceApiVirtualIP} + - ':' + - get_param: [EndpointMap, GlanceInternal, port] + GlancePublic: + host: + str_replace: + template: + get_param: [EndpointMap, GlancePublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, GlancePublic, port] + protocol: + get_param: [EndpointMap, GlancePublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, GlancePublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlancePublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, GlancePublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, GlancePublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlancePublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, GlancePublic, port] + GlanceRegistryAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, GlanceRegistryAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: GlanceRegistryVirtualIP} + port: + get_param: [EndpointMap, GlanceRegistryAdmin, port] + protocol: + get_param: [EndpointMap, GlanceRegistryAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, GlanceRegistryAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceRegistryAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: GlanceRegistryVirtualIP} + - ':' + - get_param: [EndpointMap, GlanceRegistryAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, GlanceRegistryAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceRegistryAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: GlanceRegistryVirtualIP} + - ':' + - get_param: [EndpointMap, GlanceRegistryAdmin, port] + GlanceRegistryInternal: + host: + str_replace: + template: + get_param: [EndpointMap, GlanceRegistryInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: GlanceRegistryVirtualIP} + port: + get_param: [EndpointMap, GlanceRegistryInternal, port] + protocol: + get_param: [EndpointMap, GlanceRegistryInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, GlanceRegistryInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceRegistryInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: GlanceRegistryVirtualIP} + - ':' + - get_param: [EndpointMap, GlanceRegistryInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, GlanceRegistryInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceRegistryInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: GlanceRegistryVirtualIP} + - ':' + - get_param: [EndpointMap, GlanceRegistryInternal, port] + GlanceRegistryPublic: + host: + str_replace: + template: + get_param: [EndpointMap, GlanceRegistryPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, GlanceRegistryPublic, port] + protocol: + get_param: [EndpointMap, GlanceRegistryPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, GlanceRegistryPublic, 
protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceRegistryPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, GlanceRegistryPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, GlanceRegistryPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceRegistryPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, GlanceRegistryPublic, port] + HeatAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, HeatAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: HeatApiVirtualIP} + port: + get_param: [EndpointMap, HeatAdmin, port] + protocol: + get_param: [EndpointMap, HeatAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, HeatAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, HeatAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: HeatApiVirtualIP} + - ':' + - get_param: [EndpointMap, HeatAdmin, port] + - /v1/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, HeatAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, HeatAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: HeatApiVirtualIP} + - ':' + - get_param: [EndpointMap, HeatAdmin, port] + HeatInternal: + host: + str_replace: + template: + get_param: [EndpointMap, HeatInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: HeatApiVirtualIP} + port: + get_param: [EndpointMap, HeatInternal, port] + protocol: + get_param: [EndpointMap, HeatInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, HeatInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, HeatInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: HeatApiVirtualIP} + - ':' + - get_param: [EndpointMap, HeatInternal, port] + - /v1/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, HeatInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, HeatInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: HeatApiVirtualIP} + - ':' + - get_param: [EndpointMap, HeatInternal, port] + HeatPublic: + host: + str_replace: + template: + get_param: [EndpointMap, HeatPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, HeatPublic, port] + protocol: + get_param: [EndpointMap, HeatPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, HeatPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, HeatPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, HeatPublic, port] + - /v1/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, HeatPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, HeatPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, HeatPublic, port] + HorizonPublic: + host: + str_replace: + template: + get_param: [EndpointMap, HorizonPublic, host] + params: + 
CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, HorizonPublic, port] + protocol: + get_param: [EndpointMap, HorizonPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, HorizonPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, HorizonPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, HorizonPublic, port] + - /dashboard + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, HorizonPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, HorizonPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, HorizonPublic, port] + KeystoneAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, KeystoneAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystoneAdminApiVirtualIP} + port: + get_param: [EndpointMap, KeystoneAdmin, port] + protocol: + get_param: [EndpointMap, KeystoneAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, KeystoneAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystoneAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystoneAdminApiVirtualIP} + - ':' + - get_param: [EndpointMap, KeystoneAdmin, port] + - /v2.0 + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, KeystoneAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystoneAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystoneAdminApiVirtualIP} + - ':' + - get_param: [EndpointMap, KeystoneAdmin, port] + KeystoneEC2: + host: + str_replace: + template: + get_param: [EndpointMap, KeystoneInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP} + port: + get_param: [EndpointMap, KeystoneInternal, port] + protocol: + get_param: [EndpointMap, KeystoneInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, KeystoneInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystoneInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP} + - ':' + - get_param: [EndpointMap, KeystoneInternal, port] + - /v2.0/ec2tokens + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, KeystoneInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystoneInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP} + - ':' + - get_param: [EndpointMap, KeystoneInternal, port] + KeystoneInternal: + host: + str_replace: + template: + get_param: [EndpointMap, KeystoneInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP} + port: + get_param: [EndpointMap, KeystoneInternal, port] + protocol: + get_param: [EndpointMap, KeystoneInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, KeystoneInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystoneInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP} + - ':' + - get_param: [EndpointMap, KeystoneInternal, port] + - /v2.0 + 
uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, KeystoneInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystoneInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP} + - ':' + - get_param: [EndpointMap, KeystoneInternal, port] + KeystonePublic: + host: + str_replace: + template: + get_param: [EndpointMap, KeystonePublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, KeystonePublic, port] + protocol: + get_param: [EndpointMap, KeystonePublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, KeystonePublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystonePublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, KeystonePublic, port] + - /v2.0 + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, KeystonePublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystonePublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, KeystonePublic, port] + KeystoneV3Admin: + host: + str_replace: + template: + get_param: [EndpointMap, KeystoneV3Admin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystoneAdminApiVirtualIP} + port: + get_param: [EndpointMap, KeystoneV3Admin, port] + protocol: + get_param: [EndpointMap, KeystoneV3Admin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, KeystoneV3Admin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystoneV3Admin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystoneAdminApiVirtualIP} + - ':' + - get_param: [EndpointMap, KeystoneV3Admin, port] + - /v3 + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, KeystoneV3Admin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystoneV3Admin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystoneAdminApiVirtualIP} + - ':' + - get_param: [EndpointMap, KeystoneV3Admin, port] + KeystoneV3Internal: + host: + str_replace: + template: + get_param: [EndpointMap, KeystoneV3Internal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP} + port: + get_param: [EndpointMap, KeystoneV3Internal, port] + protocol: + get_param: [EndpointMap, KeystoneV3Internal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, KeystoneV3Internal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystoneV3Internal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP} + - ':' + - get_param: [EndpointMap, KeystoneV3Internal, port] + - /v3 + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, KeystoneV3Internal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystoneV3Internal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: KeystonePublicApiVirtualIP} + - ':' + - get_param: [EndpointMap, KeystoneV3Internal, port] + KeystoneV3Public: + host: + str_replace: + template: + get_param: [EndpointMap, KeystoneV3Public, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} 
+ port: + get_param: [EndpointMap, KeystoneV3Public, port] + protocol: + get_param: [EndpointMap, KeystoneV3Public, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, KeystoneV3Public, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystoneV3Public, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, KeystoneV3Public, port] + - /v3 + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, KeystoneV3Public, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, KeystoneV3Public, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, KeystoneV3Public, port] + NeutronAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, NeutronAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NeutronApiVirtualIP} + port: + get_param: [EndpointMap, NeutronAdmin, port] + protocol: + get_param: [EndpointMap, NeutronAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NeutronAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NeutronAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NeutronApiVirtualIP} + - ':' + - get_param: [EndpointMap, NeutronAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NeutronAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NeutronAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NeutronApiVirtualIP} + - ':' + - get_param: [EndpointMap, NeutronAdmin, port] + NeutronInternal: + host: + str_replace: + template: + get_param: [EndpointMap, NeutronInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NeutronApiVirtualIP} + port: + get_param: [EndpointMap, NeutronInternal, port] + protocol: + get_param: [EndpointMap, NeutronInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NeutronInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NeutronInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NeutronApiVirtualIP} + - ':' + - get_param: [EndpointMap, NeutronInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NeutronInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NeutronInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NeutronApiVirtualIP} + - ':' + - get_param: [EndpointMap, NeutronInternal, port] + NeutronPublic: + host: + str_replace: + template: + get_param: [EndpointMap, NeutronPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, NeutronPublic, port] + protocol: + get_param: [EndpointMap, NeutronPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NeutronPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NeutronPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, NeutronPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NeutronPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NeutronPublic, host] + params: + CLOUDNAME: 
{get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, NeutronPublic, port] + NovaAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, NovaAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + port: + get_param: [EndpointMap, NovaAdmin, port] + protocol: + get_param: [EndpointMap, NovaAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaAdmin, port] + - /v2/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaAdmin, port] + NovaInternal: + host: + str_replace: + template: + get_param: [EndpointMap, NovaInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + port: + get_param: [EndpointMap, NovaInternal, port] + protocol: + get_param: [EndpointMap, NovaInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaInternal, port] + - /v2/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaInternal, port] + NovaPublic: + host: + str_replace: + template: + get_param: [EndpointMap, NovaPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, NovaPublic, port] + protocol: + get_param: [EndpointMap, NovaPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, NovaPublic, port] + - /v2/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, NovaPublic, port] + NovaV3Admin: + host: + str_replace: + template: + get_param: [EndpointMap, NovaAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + port: + get_param: [EndpointMap, NovaAdmin, port] + protocol: + get_param: [EndpointMap, NovaAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, 
NovaAdmin, port] + - /v3 + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaAdmin, port] + NovaV3Internal: + host: + str_replace: + template: + get_param: [EndpointMap, NovaInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + port: + get_param: [EndpointMap, NovaInternal, port] + protocol: + get_param: [EndpointMap, NovaInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaInternal, port] + - /v3 + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaInternal, port] + NovaV3Public: + host: + str_replace: + template: + get_param: [EndpointMap, NovaPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, NovaPublic, port] + protocol: + get_param: [EndpointMap, NovaPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, NovaPublic, port] + - /v3 + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, NovaPublic, port] + NovaEC2Admin: + host: + str_replace: + template: + get_param: [EndpointMap, NovaEC2Admin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + port: + get_param: [EndpointMap, NovaEC2Admin, port] + protocol: + get_param: [EndpointMap, NovaEC2Admin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaEC2Admin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaEC2Admin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaEC2Admin, port] + - /services/Admin + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaEC2Admin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaEC2Admin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaEC2Admin, port] + NovaEC2Internal: + host: + str_replace: + template: + get_param: [EndpointMap, NovaEC2Internal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + port: + get_param: [EndpointMap, NovaEC2Internal, port] + protocol: + get_param: [EndpointMap, NovaEC2Internal, protocol] + uri: + list_join: + - '' + - - get_param: 
[EndpointMap, NovaEC2Internal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaEC2Internal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaEC2Internal, port] + - /services/Cloud + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaEC2Internal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaEC2Internal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaEC2Internal, port] + NovaEC2Public: + host: + str_replace: + template: + get_param: [EndpointMap, NovaEC2Public, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, NovaEC2Public, port] + protocol: + get_param: [EndpointMap, NovaEC2Public, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaEC2Public, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaEC2Public, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, NovaEC2Public, port] + - /services/Cloud + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaEC2Public, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaEC2Public, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, NovaEC2Public, port] + NovaVNCProxyAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, NovaVNCProxyAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + port: + get_param: [EndpointMap, NovaVNCProxyAdmin, port] + protocol: + get_param: [EndpointMap, NovaVNCProxyAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaVNCProxyAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaVNCProxyAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaVNCProxyAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaVNCProxyAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaVNCProxyAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaVNCProxyAdmin, port] + NovaVNCProxyInternal: + host: + str_replace: + template: + get_param: [EndpointMap, NovaVNCProxyInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + port: + get_param: [EndpointMap, NovaVNCProxyInternal, port] + protocol: + get_param: [EndpointMap, NovaVNCProxyInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaVNCProxyInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaVNCProxyInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - ':' + - get_param: [EndpointMap, NovaVNCProxyInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaVNCProxyInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaVNCProxyInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: NovaApiVirtualIP} + - 
':' + - get_param: [EndpointMap, NovaVNCProxyInternal, port] + NovaVNCProxyPublic: + host: + str_replace: + template: + get_param: [EndpointMap, NovaVNCProxyPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, NovaVNCProxyPublic, port] + protocol: + get_param: [EndpointMap, NovaVNCProxyPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaVNCProxyPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaVNCProxyPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, NovaVNCProxyPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaVNCProxyPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaVNCProxyPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, NovaVNCProxyPublic, port] + SaharaAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, SaharaAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SaharaApiVirtualIP} + port: + get_param: [EndpointMap, SaharaAdmin, port] + protocol: + get_param: [EndpointMap, SaharaAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, SaharaAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SaharaAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SaharaApiVirtualIP} + - ':' + - get_param: [EndpointMap, SaharaAdmin, port] + - /v1.1/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, SaharaAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SaharaAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SaharaApiVirtualIP} + - ':' + - get_param: [EndpointMap, SaharaAdmin, port] + SaharaInternal: + host: + str_replace: + template: + get_param: [EndpointMap, SaharaInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SaharaApiVirtualIP} + port: + get_param: [EndpointMap, SaharaInternal, port] + protocol: + get_param: [EndpointMap, SaharaInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, SaharaInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SaharaInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SaharaApiVirtualIP} + - ':' + - get_param: [EndpointMap, SaharaInternal, port] + - /v1.1/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, SaharaInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SaharaInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SaharaApiVirtualIP} + - ':' + - get_param: [EndpointMap, SaharaInternal, port] + SaharaPublic: + host: + str_replace: + template: + get_param: [EndpointMap, SaharaPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SaharaApiVirtualIP} + port: + get_param: [EndpointMap, SaharaPublic, port] + protocol: + get_param: [EndpointMap, SaharaPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, SaharaPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SaharaPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: 
{get_param: SaharaApiVirtualIP} + - ':' + - get_param: [EndpointMap, SaharaPublic, port] + - /v1.1/%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, SaharaPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SaharaPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SaharaApiVirtualIP} + - ':' + - get_param: [EndpointMap, SaharaPublic, port] + SwiftAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, SwiftAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SwiftProxyVirtualIP} + port: + get_param: [EndpointMap, SwiftAdmin, port] + protocol: + get_param: [EndpointMap, SwiftAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, SwiftAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SwiftAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SwiftProxyVirtualIP} + - ':' + - get_param: [EndpointMap, SwiftAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, SwiftAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SwiftAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SwiftProxyVirtualIP} + - ':' + - get_param: [EndpointMap, SwiftAdmin, port] + SwiftInternal: + host: + str_replace: + template: + get_param: [EndpointMap, SwiftInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SwiftProxyVirtualIP} + port: + get_param: [EndpointMap, SwiftInternal, port] + protocol: + get_param: [EndpointMap, SwiftInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, SwiftInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SwiftInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SwiftProxyVirtualIP} + - ':' + - get_param: [EndpointMap, SwiftInternal, port] + - /v1/AUTH_%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, SwiftInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SwiftInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SwiftProxyVirtualIP} + - ':' + - get_param: [EndpointMap, SwiftInternal, port] + SwiftPublic: + host: + str_replace: + template: + get_param: [EndpointMap, SwiftPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, SwiftPublic, port] + protocol: + get_param: [EndpointMap, SwiftPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, SwiftPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SwiftPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, SwiftPublic, port] + - /v1/AUTH_%(tenant_id)s + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, SwiftPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SwiftPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, SwiftPublic, port] + SwiftS3Admin: + host: + str_replace: + template: + get_param: [EndpointMap, SwiftAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SwiftProxyVirtualIP} + port: + get_param: [EndpointMap, SwiftAdmin, port] 
+ protocol: + get_param: [EndpointMap, SwiftAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, SwiftAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SwiftAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SwiftProxyVirtualIP} + - ':' + - get_param: [EndpointMap, SwiftAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, SwiftAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SwiftAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SwiftProxyVirtualIP} + - ':' + - get_param: [EndpointMap, SwiftAdmin, port] + SwiftS3Internal: + host: + str_replace: + template: + get_param: [EndpointMap, SwiftInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SwiftProxyVirtualIP} + port: + get_param: [EndpointMap, SwiftInternal, port] + protocol: + get_param: [EndpointMap, SwiftInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, SwiftInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SwiftInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SwiftProxyVirtualIP} + - ':' + - get_param: [EndpointMap, SwiftInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, SwiftInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SwiftInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: SwiftProxyVirtualIP} + - ':' + - get_param: [EndpointMap, SwiftInternal, port] + SwiftS3Public: + host: + str_replace: + template: + get_param: [EndpointMap, SwiftPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, SwiftPublic, port] + protocol: + get_param: [EndpointMap, SwiftPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, SwiftPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SwiftPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, SwiftPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, SwiftPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, SwiftPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, SwiftPublic, port] diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml index 888a3c89..54074d12 100644 --- a/overcloud-resource-registry-puppet.yaml +++ b/overcloud-resource-registry-puppet.yaml @@ -23,6 +23,7 @@ resource_registry: OS::TripleO::BootstrapNode::SoftwareConfig: puppet/bootstrap-config.yaml # Tasks (for internal TripleO usage) + OS::TripleO::Tasks::UpdateWorkflow: extraconfig/tasks/noop.yaml OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml OS::TripleO::Tasks::ControllerPrePuppet: extraconfig/tasks/noop.yaml OS::TripleO::Tasks::ControllerPostPuppet: extraconfig/tasks/noop.yaml @@ -116,7 +117,6 @@ resource_registry: OS::TripleO::BlockStorage::Ports::ManagementPort: network/ports/noop.yaml # Service Endpoint Mappings - OS::TripleO::Endpoint: network/endpoints/endpoint.yaml OS::TripleO::EndpointMap: network/endpoints/endpoint_map.yaml # validation resources @@ -124,3 +124,4 @@ resource_registry: 
parameter_defaults: EnablePackageInstall: false + SoftwareConfigTransport: POLL_TEMP_URL diff --git a/overcloud.yaml b/overcloud.yaml index d63549a9..0499fa67 100644 --- a/overcloud.yaml +++ b/overcloud.yaml @@ -88,6 +88,12 @@ parameters: default: 'REBUILD_PRESERVE_EPHEMERAL' description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. type: string + InternalApiVirtualFixedIPs: + default: [] + description: > + Control the IP allocation for the InternalApiVirtualInterface port. E.g. + [{'ip_address':'1.2.3.4'}] + type: json KeyName: default: default description: Name of an existing Nova key pair to enable SSH access to the instances @@ -168,6 +174,15 @@ parameters: description: Shared secret to prevent spoofing type: string hidden: true + NeutronTenantMtu: + description: > + The default MTU for tenant networks. For VXLAN/GRE tunneling, this should + be at least 50 bytes smaller than the MTU on the physical network. This + value will be used to set the MTU on the virtual Ethernet device. + This value will be used to construct the NeutronDnsmasqOptions, since that + will determine the MTU that is assigned to the VM host through DHCP. + default: "1400" + type: string NeutronTunnelTypes: default: 'vxlan' description: | @@ -177,13 +192,13 @@ parameters: description: | Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation - default: ["1:1000", ] + default: ["1:4094", ] type: comma_delimited_list NeutronVniRanges: description: | Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of VXLAN VNI IDs that are available for tenant network allocation - default: ["1:1000", ] + default: ["1:4094", ] type: comma_delimited_list NeutronCorePlugin: default: 'ml2' @@ -208,7 +223,7 @@ parameters: The mechanism drivers for the Neutron tenant network. type: comma_delimited_list NeutronPluginExtensions: - default: "qos" + default: "qos,port_security" description: | Comma-separated list of extensions enabled for the Neutron plugin. type: comma_delimited_list @@ -284,6 +299,18 @@ parameters: description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true + StorageVirtualFixedIPs: + default: [] + description: > + Control the IP allocation for the StorageVirtualInterface port. E.g. + [{'ip_address':'1.2.3.4'}] + type: json + StorageMgmtVirtualFixedIPs: + default: [] + description: > + Control the IP allocation for the StorageMgmtVirtualInterface port. E.g. + [{'ip_address':'1.2.3.4'}] + type: json TimeZone: default: 'UTC' description: The timezone to be set on nodes. type: string @@ -307,7 +334,7 @@ parameters: type: string hidden: true CinderLVMLoopDeviceSize: - default: 5000 + default: 10280 description: The size of the loopback file used by the cinder LVM driver. type: number CinderNfsMountOptions: @@ -498,8 +525,11 @@ parameters: type: number default: 4096 NeutronDnsmasqOptions: - default: 'dhcp-option-force=26,1400' - description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the tunnel overhead. + default: 'dhcp-option-force=26,%MTU%' + description: > + Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU + to be set to the value of NeutronTenantMtu, which should be set to account + for tunnel overhead.
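The new knobs in this hunk are all settable from a plain environment file. A minimal sketch follows; the addresses and MTU are invented for illustration and are not defaults shipped by these templates:

parameter_defaults:
  # Pin the virtual IPs instead of letting Neutron allocate them (example addresses).
  InternalApiVirtualFixedIPs: [{'ip_address': '172.16.2.5'}]
  StorageVirtualFixedIPs: [{'ip_address': '172.16.1.5'}]
  StorageMgmtVirtualFixedIPs: [{'ip_address': '172.16.3.5'}]
  # Jumbo-frame underlay: keep at least 50 bytes of VXLAN/GRE headroom, so a
  # 9000-byte physical MTU leaves 8950 for tenant networks. With the default
  # NeutronDnsmasqOptions above this renders to 'dhcp-option-force=26,8950'.
  NeutronTenantMtu: "8950"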
type: string NeutronPublicInterfaceDefaultRoute: default: '' @@ -545,8 +575,6 @@ parameters: description: How many replicas to use in the swift rings. SaharaPassword: description: The password for the sahara service account. - # TODO(egafford): Remove default on merge of https://review.openstack.org/#/c/221418/ (added to avoid circular dep) - default: unset type: string hidden: true @@ -859,7 +887,6 @@ resources: CinderEnableNfsBackend: {get_param: CinderEnableNfsBackend} CinderEnableIscsiBackend: {get_param: CinderEnableIscsiBackend} CinderEnableRbdBackend: {get_param: CinderEnableRbdBackend} - CloudName: {get_param: CloudName} CloudDomain: {get_param: CloudDomain} ControlVirtualInterface: {get_param: ControlVirtualInterface} ControllerExtraConfig: {get_param: controllerExtraConfig} @@ -901,6 +928,7 @@ resources: NeutronPublicInterfaceIP: {get_param: NeutronPublicInterfaceIP} NeutronFlatNetworks: {get_param: NeutronFlatNetworks} NeutronBridgeMappings: {get_param: NeutronBridgeMappings} + NeutronTenantMtu: {get_param: NeutronTenantMtu} NeutronExternalNetworkBridge: {get_param: NeutronExternalNetworkBridge} NeutronEnableIsolatedMetadata: {get_param: NeutronEnableIsolatedMetadata} NeutronEnableTunnelling: {get_param: NeutronEnableTunnelling} @@ -910,7 +938,11 @@ resources: NeutronPublicInterfaceDefaultRoute: {get_param: NeutronPublicInterfaceDefaultRoute} NeutronPublicInterfaceRawDevice: {get_param: NeutronPublicInterfaceRawDevice} NeutronPassword: {get_param: NeutronPassword} - NeutronDnsmasqOptions: {get_param: NeutronDnsmasqOptions} + NeutronDnsmasqOptions: + str_replace: + template: {get_param: NeutronDnsmasqOptions} + params: + '%MTU%': {get_param: NeutronTenantMtu} NeutronDVR: {get_param: NeutronDVR} NeutronMetadataProxySharedSecret: {get_param: NeutronMetadataProxySharedSecret} NeutronAgentMode: {get_param: NeutronAgentMode} @@ -999,6 +1031,7 @@ resources: KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} NeutronBridgeMappings: {get_param: NeutronBridgeMappings} + NeutronTenantMtu: {get_param: NeutronTenantMtu} NeutronEnableTunnelling: {get_param: NeutronEnableTunnelling} NeutronEnableL2Pop : {get_param: NeutronEnableL2Pop} NeutronFlatNetworks: {get_param: NeutronFlatNetworks} @@ -1249,6 +1282,7 @@ resources: properties: ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} PortName: internal_api_virtual_ip + FixedIPs: {get_param: InternalApiVirtualFixedIPs} StorageVirtualIP: depends_on: Networks @@ -1256,6 +1290,7 @@ resources: properties: ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} PortName: storage_virtual_ip + FixedIPs: {get_param: StorageVirtualFixedIPs} StorageMgmtVirtualIP: depends_on: Networks @@ -1263,6 +1298,7 @@ resources: properties: ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} PortName: storage_management_virtual_ip + FixedIPs: {get_param: StorageMgmtVirtualFixedIPs} VipMap: type: OS::TripleO::Network::Ports::NetVipMap @@ -1486,10 +1522,23 @@ resources: config: {get_resource: AllNodesValidationConfig} servers: {get_attr: [CephStorage, attributes, nova_server_resource]} + UpdateWorkflow: + type: OS::TripleO::Tasks::UpdateWorkflow + properties: + controller_servers: {get_attr: [Controller, attributes, nova_server_resource]} + compute_servers: {get_attr: [Compute, attributes, nova_server_resource]} + blockstorage_servers: 
{get_attr: [BlockStorage, attributes, nova_server_resource]} + objectstorage_servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]} + cephstorage_servers: {get_attr: [CephStorage, attributes, nova_server_resource]} + input_values: + deploy_identifier: {get_param: DeployIdentifier} + update_identifier: {get_param: UpdateIdentifier} + # Optional ExtraConfig for all nodes - all roles are passed in here, but # the nested template may configure each role differently (or not at all) AllNodesExtraConfig: type: OS::TripleO::AllNodesExtraConfig + depends_on: UpdateWorkflow properties: controller_servers: {get_attr: [Controller, attributes, nova_server_resource]} compute_servers: {get_attr: [Compute, attributes, nova_server_resource]} diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index 3dd3d5c9..e85975d4 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -69,8 +69,8 @@ resources: allNodesConfigImpl: type: OS::Heat::StructuredConfig properties: + group: os-apply-config config: - completion-signal: {get_input: deploy_signal_id} hosts: list_join: - "\n" @@ -227,6 +227,15 @@ resources: list_join: - "','" - {get_param: neutron_api_node_ips} + # TODO: pass a `midonet_api_node_ips` var + midonet_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: neutron_api_node_ips} keystone_public_api_node_ips: str_replace: template: "['SERVERS_LIST']" diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml index f9c53465..e90710c7 100644 --- a/puppet/ceph-storage-post.yaml +++ b/puppet/ceph-storage-post.yaml @@ -14,8 +14,19 @@ parameters: type: json description: Value which changes if the node configuration may need to be re-applied - resources: + + CephStorageArtifactsConfig: + type: deploy-artifacts.yaml + + CephStorageArtifactsDeploy: + type: OS::Heat::StructuredDeployments + properties: + servers: {get_param: servers} + config: {get_resource: CephStorageArtifactsConfig} + input_values: + update_identifier: {get_param: NodeConfigIdentifiers} + CephStoragePuppetConfig: type: OS::Heat::SoftwareConfig properties: @@ -29,6 +40,7 @@ resources: CephStorageDeployment_Step1: type: OS::Heat::StructuredDeployments + depends_on: CephStorageArtifactsDeploy properties: name: CephStorageDeployment_Step1 servers: {get_param: servers} diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml index e310e1f5..d737bcc5 100644 --- a/puppet/ceph-storage.yaml +++ b/puppet/ceph-storage.yaml @@ -280,11 +280,54 @@ outputs: hosts_entry: value: str_replace: - template: "IP HOST.DOMAIN HOST" + template: | + PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST + EXTERNALIP EXTERNALHOST + INTERNAL_APIIP INTERNAL_APIHOST + STORAGEIP STORAGEHOST + STORAGE_MGMTIP STORAGE_MGMTHOST + TENANTIP TENANTHOST + MANAGEMENTIP MANAGEMENTHOST params: - IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]} + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]} DOMAIN: {get_param: CloudDomain} - HOST: {get_attr: [CephStorage, name]} + PRIMARYHOST: {get_attr: [CephStorage, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: + list_join: + - '-' + - - {get_attr: [CephStorage, name]} + - external + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: + list_join: + - '-' + - - {get_attr: [CephStorage, name]} + - internalapi + STORAGEIP: {get_attr: 
[StoragePort, ip_address]} + STORAGEHOST: + list_join: + - '-' + - - {get_attr: [CephStorage, name]} + - storage + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: + list_join: + - '-' + - - {get_attr: [CephStorage, name]} + - storagemgmt + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: + list_join: + - '-' + - - {get_attr: [CephStorage, name]} + - tenant + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: + list_join: + - '-' + - - {get_attr: [CephStorage, name]} + - management nova_server_resource: description: Heat resource handle for the ceph storage server value: diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml index 9b7c752a..f470203f 100644 --- a/puppet/cinder-storage-post.yaml +++ b/puppet/cinder-storage-post.yaml @@ -14,8 +14,20 @@ parameters: resources: + VolumeArtifactsConfig: + type: deploy-artifacts.yaml + + VolumeArtifactsDeploy: + type: OS::Heat::StructuredDeployments + properties: + servers: {get_param: servers} + config: {get_resource: VolumeArtifactsConfig} + input_values: + update_identifier: {get_param: NodeConfigIdentifiers} + VolumePuppetConfig: type: OS::Heat::SoftwareConfig + depends_on: VolumeArtifactsDeploy properties: group: puppet options: diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml index f7e8f907..dedd5142 100644 --- a/puppet/cinder-storage.yaml +++ b/puppet/cinder-storage.yaml @@ -13,7 +13,7 @@ parameters: description: The iSCSI helper to use with cinder. type: string CinderLVMLoopDeviceSize: - default: 5000 + default: 10280 description: The size of the loopback file used by the cinder LVM driver. type: number CinderPassword: @@ -343,11 +343,54 @@ outputs: hosts_entry: value: str_replace: - template: "IP HOST.DOMAIN HOST" + template: | + PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST + EXTERNALIP EXTERNALHOST + INTERNAL_APIIP INTERNAL_APIHOST + STORAGEIP STORAGEHOST + STORAGE_MGMTIP STORAGE_MGMTHOST + TENANTIP TENANTHOST + MANAGEMENTIP MANAGEMENTHOST params: - IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]} + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]} DOMAIN: {get_param: CloudDomain} - HOST: {get_attr: [BlockStorage, name]} + PRIMARYHOST: {get_attr: [BlockStorage, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: + list_join: + - '-' + - - {get_attr: [BlockStorage, name]} + - external + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: + list_join: + - '-' + - - {get_attr: [BlockStorage, name]} + - internalapi + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: + list_join: + - '-' + - - {get_attr: [BlockStorage, name]} + - storage + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: + list_join: + - '-' + - - {get_attr: [BlockStorage, name]} + - storagemgmt + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: + list_join: + - '-' + - - {get_attr: [BlockStorage, name]} + - tenant + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: + list_join: + - '-' + - - {get_attr: [BlockStorage, name]} + - management nova_server_resource: description: Heat resource handle for the block storage server value: diff --git a/puppet/compute-post.yaml b/puppet/compute-post.yaml index 3861e50c..a122df0e 100644 --- a/puppet/compute-post.yaml +++ b/puppet/compute-post.yaml @@ -17,6 +17,17 @@ parameters: resources: + 
ComputeArtifactsConfig: + type: deploy-artifacts.yaml + + ComputeArtifactsDeploy: + type: OS::Heat::StructuredDeployments + properties: + servers: {get_param: servers} + config: {get_resource: ComputeArtifactsConfig} + input_values: + update_identifier: {get_param: NodeConfigIdentifiers} + ComputePuppetConfig: type: OS::Heat::SoftwareConfig properties: @@ -30,6 +41,7 @@ resources: ComputePuppetDeployment: type: OS::Heat::StructuredDeployments + depends_on: ComputeArtifactsDeploy properties: name: ComputePuppetDeployment servers: {get_param: servers} diff --git a/puppet/compute.yaml b/puppet/compute.yaml index 5b18dc8b..375d5032 100644 --- a/puppet/compute.yaml +++ b/puppet/compute.yaml @@ -118,6 +118,15 @@ parameters: default: nic1 description: A port to add to the NeutronPhysicalBridge. type: string + NeutronTenantMtu: + description: > + The default MTU for tenant networks. For VXLAN/GRE tunneling, this should + be at least 50 bytes smaller than the MTU on the physical network. This + value will be used to set the MTU on the virtual Ethernet device. + This number is related to the value of NeutronDnsmasqOptions, since that + will determine the MTU that is assigned to the VM host through DHCP. + default: 1400 + type: number NeutronTunnelTypes: type: comma_delimited_list description: | @@ -127,13 +136,13 @@ parameters: description: | Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation - default: ["1:1000", ] + default: ["1:4094", ] type: comma_delimited_list NeutronVniRanges: description: | Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of VXLAN VNI IDs that are available for tenant network allocation - default: ["1:1000", ] + default: ["1:4094", ] type: comma_delimited_list NeutronPublicInterfaceRawDevice: default: '' @@ -258,6 +267,10 @@ parameters: description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true + UpgradeLevelNovaCompute: + type: string + description: Nova Compute upgrade level + default: '' EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -434,6 +447,7 @@ resources: - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common + - neutron_bigswitch_data # Optionally provided by ComputeExtraConfigPre - cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre - nova_nuage_data # Optionally provided by ComputeExtraConfigPre - midonet_data # Optionally provided by AllNodesExtraConfig @@ -456,6 +470,7 @@ resources: nova::rabbit_password: {get_input: rabbit_password} nova::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} nova::rabbit_port: {get_input: rabbit_client_port} + nova::upgrade_level_compute: {get_input: upgrade_level_nova_compute} nova_compute_driver: {get_input: nova_compute_driver} nova::compute::libvirt::libvirt_virt_type: {get_input: nova_compute_libvirt_type} nova::compute::neutron::libvirt_vif_driver: {get_input: nova_compute_libvirt_vif_driver} @@ -464,7 +479,11 @@ resources: nova::compute::rbd::ephemeral_storage: {get_input: nova_enable_rbd_backend} rbd_persistent_storage: {get_input: cinder_enable_rbd_backend} nova_password: {get_input: nova_password} + nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address} + nova::vncproxy::common::vncproxy_protocol: {get_input: nova_vncproxy_protocol} + 
nova::vncproxy::common::vncproxy_host: {get_input: nova_vncproxy_host} + nova::vncproxy::common::vncproxy_port: {get_input: nova_vncproxy_port} nova::network::neutron::neutron_ovs_bridge: {get_input: nova_ovs_bridge} nova::network::neutron::security_group_api: {get_input: nova_security_group_api} ceilometer::debug: {get_input: debug} @@ -488,6 +507,7 @@ resources: neutron_host: {get_input: neutron_host} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} + neutron::network_device_mtu: {get_input: neutron_tenant_mtu} neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types} neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types} neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions} @@ -499,9 +519,9 @@ resources: neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} neutron_physical_bridge: {get_input: neutron_physical_bridge} neutron_public_interface: {get_input: neutron_public_interface} - nova::network::neutron::neutron_admin_password: {get_input: neutron_password} + nova::network::neutron::neutron_password: {get_input: neutron_password} nova::network::neutron::neutron_url: {get_input: neutron_internal_url} - nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} + nova::network::neutron::neutron_auth_url: {get_input: neutron_auth_url} neutron_router_distributed: {get_input: neutron_router_distributed} neutron_agent_mode: {get_input: neutron_agent_mode} neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} @@ -536,8 +556,12 @@ resources: nova_enable_rbd_backend: {get_param: NovaEnableRbdBackend} cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend} nova_vnc_proxyclient_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaVncProxyNetwork]}]} + nova_vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]} + nova_vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host]} + nova_vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]} nova_ovs_bridge: {get_param: NovaOVSBridge} nova_security_group_api: {get_param: NovaSecurityGroupAPI} + upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute} ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} ceilometer_password: {get_param: CeilometerPassword} ceilometer_compute_agent: {get_param: CeilometerComputeAgent} @@ -582,6 +606,7 @@ resources: template: MAPPINGS params: MAPPINGS: {get_param: NeutronBridgeMappings} + neutron_tenant_mtu: {get_param: NeutronTenantMtu} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} neutron_physical_bridge: {get_param: NeutronPhysicalBridge} @@ -613,7 +638,7 @@ resources: AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]} - neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri]} + neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]} keystone_vip: {get_param: KeystonePublicApiVirtualIP} admin_password: {get_param: AdminPassword} rabbit_username: {get_param: RabbitUserName} @@ -690,11 +715,54 @@ outputs: Server's IP address and hostname in the /etc/hosts format value: str_replace: - template: "IP HOST.DOMAIN HOST" + template: | + PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST + EXTERNALIP EXTERNALHOST + INTERNAL_APIIP 
INTERNAL_APIHOST + STORAGEIP STORAGEHOST + STORAGE_MGMTIP STORAGE_MGMTHOST + TENANTIP TENANTHOST + MANAGEMENTIP MANAGEMENTHOST params: - IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]} + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]} DOMAIN: {get_param: CloudDomain} - HOST: {get_attr: [NovaCompute, name]} + PRIMARYHOST: {get_attr: [NovaCompute, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: + list_join: + - '-' + - - {get_attr: [NovaCompute, name]} + - external + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: + list_join: + - '-' + - - {get_attr: [NovaCompute, name]} + - internalapi + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: + list_join: + - '-' + - - {get_attr: [NovaCompute, name]} + - storage + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: + list_join: + - '-' + - - {get_attr: [NovaCompute, name]} + - storagemgmt + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: + list_join: + - '-' + - - {get_attr: [NovaCompute, name]} + - tenant + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: + list_join: + - '-' + - - {get_attr: [NovaCompute, name]} + - management nova_server_resource: description: Heat resource handle for the Nova compute server value: diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml index d250dd70..713ad706 100644 --- a/puppet/controller-post.yaml +++ b/puppet/controller-post.yaml @@ -17,6 +17,15 @@ parameters: resources: + ControllerArtifactsConfig: + type: deploy-artifacts.yaml + + ControllerArtifactsDeploy: + type: OS::Heat::StructuredDeployments + properties: + servers: {get_param: servers} + config: {get_resource: ControllerArtifactsConfig} + ControllerPrePuppet: type: OS::TripleO::Tasks::ControllerPrePuppet properties: @@ -33,7 +42,7 @@ resources: # e.g all Deployment resources should have a *Deployment_StepN suffix ControllerLoadBalancerDeployment_Step1: type: OS::Heat::StructuredDeployments - depends_on: ControllerPrePuppet + depends_on: [ControllerPrePuppet, ControllerArtifactsDeploy] properties: name: ControllerLoadBalancerDeployment_Step1 servers: {get_param: servers} diff --git a/puppet/controller.yaml b/puppet/controller.yaml index bb0d5009..a28ae562 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -61,7 +61,7 @@ parameters: description: The iSCSI helper to use with cinder. type: string CinderLVMLoopDeviceSize: - default: 5000 + default: 10280 description: The size of the loopback file used by the cinder LVM driver. type: number CinderNfsMountOptions: @@ -89,10 +89,6 @@ parameters: default: 0 description: Number of workers for Cinder service. type: number - CloudName: - default: overcloud - description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org - type: string ControllerExtraConfig: default: {} description: | @@ -496,6 +492,15 @@ parameters: default: '' description: If set, the public interface is a vlan with this device as the raw device. type: string + NeutronTenantMtu: + description: > + The default MTU for tenant networks. For VXLAN/GRE tunneling, this should + be at least 50 bytes smaller than the MTU on the physical network. This + value will be used to set the MTU on the virtual Ethernet device. + This number is related to the value of NeutronDnsmasqOptions, since that + will determine the MTU that is assigned to the VM host through DHCP. 
+ default: 1400 + type: number NeutronTunnelTypes: default: 'vxlan' description: | @@ -505,16 +510,16 @@ parameters: description: | Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation - default: ["1:1000", ] + default: ["1:4094", ] type: comma_delimited_list NeutronVniRanges: description: | Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of VXLAN VNI IDs that are available for tenant network allocation - default: ["1:1000", ] + default: ["1:4094", ] type: comma_delimited_list NeutronPluginExtensions: - default: "qos" + default: "qos,port_security" description: | Comma-separated list of extensions enabled for the Neutron plugin. type: comma_delimited_list @@ -639,6 +644,10 @@ parameters: default: 'UTC' description: The timezone to be set on controller nodes. type: string + UpgradeLevelNovaCompute: + type: string + description: Nova Compute upgrade level + default: '' VirtualIP: # DEPRECATED: use per service settings instead type: string default: '' # Has to be here because of the ignored empty value bug @@ -1065,6 +1074,7 @@ resources: params: AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} neutron_password: {get_param: NeutronPassword} + neutron_tenant_mtu: {get_param: NeutronTenantMtu} neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions} neutron_dsn: list_join: @@ -1077,7 +1087,7 @@ resources: neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] } neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] } neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] } - neutron_admin_auth_url: { get_param: [ EndpointMap, KeystoneAdmin, uri_no_suffix ] } + neutron_auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] } nova_internal_url: { get_param: [ EndpointMap, NovaInternal, uri ] } ceilometer_backend: {get_param: CeilometerBackend} ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} @@ -1108,6 +1118,15 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/nova' + nova_api_dsn: + list_join: + - '' + - - 'mysql+pymysql://nova_api:' + - {get_param: NovaPassword} + - '@' + - {get_param: MysqlVirtualIP} + - '/nova_api' + upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute} instance_name_template: {get_param: InstanceNameTemplate} fencing_config: {get_param: FencingConfig} pcsd_password: {get_param: PcsdPassword} @@ -1306,6 +1325,9 @@ resources: glance_file_pcmk_fstype: {get_input: glance_file_pcmk_fstype} glance_file_pcmk_manage: {get_input: glance_file_pcmk_manage} glance_file_pcmk_options: {get_input: glance_file_pcmk_options} + glance::notify::rabbitmq::rabbit_userid: {get_input: rabbit_username} + glance::notify::rabbitmq::rabbit_password: {get_input: rabbit_password} + glance::notify::rabbitmq::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} # Heat heat_stack_domain_admin_password: {get_input: heat_stack_domain_admin_password} @@ -1339,8 +1361,10 @@ resources: keystone_ssl_certificate: {get_input: keystone_ssl_certificate} keystone_ssl_certificate_key: {get_input: keystone_ssl_certificate_key} keystone::database_connection: {get_input: keystone_dsn} - keystone::public_bind_host: {get_input: keystone_public_api_network} keystone::admin_bind_host: {get_input: keystone_admin_api_network} + keystone::public_bind_host: {get_input: keystone_public_api_network} + keystone::wsgi::apache::bind_host: {get_input: keystone_public_api_network} + keystone::wsgi::apache::admin_bind_host: {get_input: 
keystone_admin_api_network} keystone::debug: {get_input: debug} keystone::db::mysql::password: {get_input: admin_token} keystone::rabbit_userid: {get_input: rabbit_username} @@ -1358,7 +1382,7 @@ resources: keystone::admin_workers: {get_input: keystone_workers} keystone::public_workers: {get_input: keystone_workers} keystone_enable_db_purge: {get_input: keystone_enable_db_purge} - + keystone::public_endpoint: {get_input: keystone_public_url} # MongoDB mongodb::server::bind_ip: {get_input: mongo_db_network} mongodb::server::nojournal: {get_input: mongodb_no_journal} @@ -1386,6 +1410,7 @@ resources: neutron::server::database_connection: {get_input: neutron_dsn} neutron::server::api_workers: {get_input: neutron_workers} neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge} + neutron::network_device_mtu: {get_input: neutron_tenant_mtu} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata} @@ -1411,7 +1436,7 @@ resources: neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges} - neutron::agents::ml2::ovs:bridge_mappings: {get_input: neutron_bridge_mappings} + neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings} neutron_public_interface: {get_input: neutron_public_interface} neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device} neutron_public_interface_default_route: {get_input: neutron_public_interface_default_route} @@ -1431,7 +1456,7 @@ resources: neutron::keystone::auth::password: {get_input: neutron_password } neutron::keystone::auth::region: {get_input: keystone_region} neutron::server::notifications::nova_url: {get_input: nova_internal_url} - neutron::server::notifications::auth_url: {get_input: neutron_admin_auth_url} + neutron::server::notifications::auth_url: {get_input: neutron_auth_url} neutron::server::notifications::tenant_name: 'service' neutron::server::notifications::project_name: 'service' neutron::server::notifications::password: {get_input: nova_password} @@ -1461,6 +1486,7 @@ resources: nova::rabbit_password: {get_input: rabbit_password} nova::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} nova::rabbit_port: {get_input: rabbit_client_port} + nova::upgrade_level_compute: {get_input: upgrade_level_nova_compute} nova::debug: {get_input: debug} nova::api::auth_uri: {get_input: keystone_auth_uri} nova::api::identity_uri: {get_input: keystone_identity_uri} @@ -1470,15 +1496,18 @@ resources: nova::api::osapi_compute_workers: {get_input: nova_workers} nova::api::ec2_workers: {get_input: nova_workers} nova::api::metadata_workers: {get_input: nova_workers} + nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} nova::database_connection: {get_input: nova_dsn} + nova::api_database_connection: {get_input: nova_api_dsn} nova::glance_api_servers: {get_input: glance_api_servers} nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} nova::api::instance_name_template: {get_input: instance_name_template} - nova::network::neutron::neutron_admin_password: {get_input: neutron_password} + nova::network::neutron::neutron_password: {get_input: neutron_password} 
nova::network::neutron::neutron_url: {get_input: neutron_internal_url} - nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} + nova::network::neutron::neutron_auth_url: {get_input: neutron_auth_url} nova::vncproxy::host: {get_input: nova_api_network} nova::db::mysql::password: {get_input: nova_password} + nova::db::mysql_api::password: {get_input: nova_password} nova_enable_db_purge: {get_input: nova_enable_db_purge} # Horizon @@ -1603,12 +1632,54 @@ outputs: Server's IP address and hostname in the /etc/hosts format value: str_replace: - template: IP HOST.DOMAIN HOST CLOUDNAME + template: | + PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST + EXTERNALIP EXTERNALHOST + INTERNAL_APIIP INTERNAL_APIHOST + STORAGEIP STORAGEHOST + STORAGE_MGMTIP STORAGE_MGMTHOST + TENANTIP TENANTHOST + MANAGEMENTIP MANAGEMENTHOST params: - IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]} + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]} DOMAIN: {get_param: CloudDomain} - HOST: {get_attr: [Controller, name]} - CLOUDNAME: {get_param: CloudName} + PRIMARYHOST: {get_attr: [Controller, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: + list_join: + - '-' + - - {get_attr: [Controller, name]} + - external + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: + list_join: + - '-' + - - {get_attr: [Controller, name]} + - internalapi + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: + list_join: + - '-' + - - {get_attr: [Controller, name]} + - storage + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: + list_join: + - '-' + - - {get_attr: [Controller, name]} + - storagemgmt + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: + list_join: + - '-' + - - {get_attr: [Controller, name]} + - tenant + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: + list_join: + - '-' + - - {get_attr: [Controller, name]} + - management nova_server_resource: description: Heat resource handle for the Nova compute server value: diff --git a/puppet/deploy-artifacts.sh b/puppet/deploy-artifacts.sh new file mode 100644 index 00000000..22fde9a7 --- /dev/null +++ b/puppet/deploy-artifacts.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +TMP_DATA=$(mktemp -d) +function cleanup { + rm -Rf "$TMP_DATA" +} +trap cleanup EXIT + +if [ -n "$artifact_urls" ]; then + for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do + curl -o $TMP_DATA/file_data "$URL" + if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then + yum install -y $TMP_DATA/file_data + elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then + pushd / + tar xvzf $TMP_DATA/file_data + popd + else + echo "ERROR: Unsupported file format." + exit 1 + fi + rm $TMP_DATA/file_data + done +else + echo "No artifact_urls was set. Skipping..." +fi diff --git a/puppet/deploy-artifacts.yaml b/puppet/deploy-artifacts.yaml new file mode 100644 index 00000000..17f84163 --- /dev/null +++ b/puppet/deploy-artifacts.yaml @@ -0,0 +1,32 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to install deployment artifacts (tarballs and/or + distribution packages) via HTTP URLs. The contents of the URLs can + be tarballs or distribution packages (RPMs). If a tarball URL is supplied + it is extracted onto the target node during deployment. If a package is + deployed it is installed from the supplied URL. Note: you need the + heat-config-script element built into your images, due to the script group + below. +
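For illustration, the DeployArtifactURLs parameter defined just below is normally fed from an environment file; the URLs here are placeholders and assume an HTTP server reachable from the overcloud nodes:

parameter_defaults:
  # Any mix of RPMs and gzipped tarballs; deploy-artifacts.sh above dispatches on file type.
  DeployArtifactURLs:
    - "http://192.0.2.1/artifacts/puppet-tripleo-patched.rpm"
    - "http://192.0.2.1/artifacts/puppet-modules.tar.gz"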
+parameters: + DeployArtifactURLs: + default: [] + description: A list of HTTP URLs containing deployment artifacts. + Currently supports tarballs and RPM packages. + type: comma_delimited_list + +resources: + DeployArtifacts: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: artifact_urls + default: {list_join: [' ', {get_param: DeployArtifactURLs}]} + config: {get_file: ./deploy-artifacts.sh} + +outputs: + OS::stack_id: + description: The ID of the DeployArtifacts resource. + value: {get_resource: DeployArtifacts} diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml index 7cefc24b..ebd6c251 100644 --- a/puppet/extraconfig/ceph/ceph-external-config.yaml +++ b/puppet/extraconfig/ceph/ceph-external-config.yaml @@ -76,7 +76,7 @@ resources: cinder_rbd_pool_name: {get_param: CinderRbdPoolName} glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName} - glance::backend::rbd::rbd_store_pool: {get_param: CephClientUserName} + glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} nova::compute::rbd::rbd_keyring: list_join: - '.' diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml new file mode 100644 index 00000000..49c77190 --- /dev/null +++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml @@ -0,0 +1,45 @@ +heat_template_version: 2015-04-30 + +description: Configure hieradata for Big Switch agents on compute node + +parameters: + server: + description: ID of the compute node to apply this config to + type: string + NeutronBigswitchAgentEnabled: + description: The state of the neutron-bsn-agent service. + type: boolean + default: false + NeutronBigswitchLLDPEnabled: + description: The state of the neutron-bsn-lldp service.
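This compute-side template only takes effect when an environment maps it in; a hypothetical wiring sketch is shown below. The registry key is an assumption for illustration and may differ in a given deployment:

resource_registry:
  # Assumed hook point; not part of this change.
  OS::TripleO::ComputeExtraConfigPre: puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml

parameter_defaults:
  NeutronBigswitchAgentEnabled: true
  NeutronBigswitchLLDPEnabled: true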
+ type: boolean + default: true + + +resources: + NeutronBigswitchConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + neutron_bigswitch_data: + mapped_data: + neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent} + neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp} + + NeutronBigswitchDeployment: + type: OS::Heat::StructuredDeployment + properties: + name: NeutronBigswitchDeployment + config: {get_resource: NeutronBigswitchConfig} + server: {get_param: server} + input_values: + neutron_enable_bigswitch_agent: {get_param: NeutronBigswitchAgentEnabled} + neutron_enable_bigswitch_lldp: {get_param: NeutronBigswitchLLDPEnabled} + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [NeutronBigswitchDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml index 1e652960..467f57cc 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml @@ -44,7 +44,6 @@ resources: datafiles: neutron_bigswitch_data: mapped_data: - neutron_enable_bigswitch_ml2: true neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers} neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth} neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure} diff --git a/puppet/extraconfig/tls/ca-inject.yaml b/puppet/extraconfig/tls/ca-inject.yaml index 5a36e951..aab42849 100644 --- a/puppet/extraconfig/tls/ca-inject.yaml +++ b/puppet/extraconfig/tls/ca-inject.yaml @@ -45,7 +45,7 @@ resources: cat > ${cacert_path} << EOF ${cacert_content} EOF - chmod 0440 ${cacert_path} + chmod 0444 ${cacert_path} chown root:root ${cacert_path} ${update_anchor_command} md5sum ${cacert_path} > ${heat_outputs_path}.root_cert_md5sum diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index b4b51abf..03366c7e 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -8,12 +8,15 @@ ceilometer::agent::auth::auth_region: 'regionOne' ceilometer::agent::auth::auth_tenant_name: 'admin' nova::api::admin_tenant_name: 'service' -nova::network::neutron::neutron_admin_tenant_name: 'service' -nova::network::neutron::neutron_admin_username: 'neutron' +nova::network::neutron::neutron_project_name: 'service' +nova::network::neutron::neutron_username: 'neutron' nova::network::neutron::dhcp_domain: '' neutron::allow_overlapping_ips: true +kernel_modules: + nf_conntrack: {} + sysctl_settings: net.ipv4.tcp_keepalive_intvl: value: 1 @@ -21,6 +24,15 @@ sysctl_settings: value: 5 net.ipv4.tcp_keepalive_time: value: 5 + net.nf_conntrack_max: + value: 500000 + net.netfilter.nf_conntrack_max: + value: 500000 + # prevent neutron bridges from autoconfiguring ipv6 addresses + net.ipv6.conf.default.accept_ra: + value: 0 + net.ipv6.conf.default.autoconf: + value: 0 nova::rabbit_heartbeat_timeout_threshold: 60 neutron::rabbit_heartbeat_timeout_threshold: 60 diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index f8ef6408..e00fffaf 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -34,6 +34,8 @@ glance::api::keystone_tenant: 'service' 
glance::registry::keystone_tenant: 'service' neutron::server::auth_tenant: 'service' neutron::agents::metadata::auth_tenant: 'service' +neutron::agents::l3::router_delete_namespaces: True +neutron::agents::dhcp::dhcp_delete_namespaces: True cinder::api::keystone_tenant: 'service' swift::proxy::authtoken::admin_tenant_name: 'service' ceilometer::api::keystone_tenant: 'service' @@ -50,6 +52,8 @@ keystone::config::keystone_config: value: 'HTTP_X_FORWARDED_PROTO' ec2/driver: value: 'keystone.contrib.ec2.backends.sql.Ec2' +keystone::service_name: 'httpd' +keystone::wsgi::apache::ssl: false #swift swift::proxy::pipeline: @@ -83,9 +87,11 @@ neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf nova::notify_on_state_change: 'vm_and_task_state' nova::api::default_floating_pool: 'public' nova::api::osapi_v3: true +nova::api::sync_db_api: true nova::scheduler::filter::ram_allocation_ratio: '1.0' nova::cron::archive_deleted_rows::hour: '*/12' nova::cron::archive_deleted_rows::destination: '/dev/null' +nova::notification_driver: messaging # ceilometer ceilometer::agent::auth::auth_endpoint_type: 'internalURL' @@ -93,6 +99,7 @@ ceilometer::agent::auth::auth_endpoint_type: 'internalURL' # cinder cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler cinder::cron::db_purge::destination: '/dev/null' +cinder::host: hostgroup # heat heat::engine::configure_delegated_roles: false diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml index 89577505..61714691 100644 --- a/puppet/hieradata/database.yaml +++ b/puppet/hieradata/database.yaml @@ -6,6 +6,13 @@ nova::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" +nova::db::mysql_api::user: nova_api +nova::db::mysql_api::host: "%{hiera('mysql_virtual_ip')}" +nova::db::mysql_api::dbname: nova_api +nova::db::mysql_api::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + # Glance glance::db::mysql::user: glance glance::db::mysql::host: "%{hiera('mysql_virtual_ip')}" diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index 7444155c..0db5b45a 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -16,7 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall +create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) +Exec <| tag == 'kmod::load' |> -> Sysctl <| |> if count(hiera('ntp::servers')) > 0 { include ::ntp @@ -38,6 +40,7 @@ if str2bool(hiera('ceph_osd_selinux_permissive', true)) { } -> Class['ceph::profile::osd'] } +include ::ceph::conf include ::ceph::profile::client include ::ceph::profile::osd diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index dec6c6a4..7925f50a 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -16,7 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall +create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) +Exec <| tag == 'kmod::load' |> -> Sysctl <| |> if count(hiera('ntp::servers')) > 0 { include ::ntp @@ -37,6 +39,16 @@ exec { 'libvirt-default-net-destroy': before => Service['libvirt'], } +# When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique +exec { 'reset-iscsi-initiator-name': + command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > 
/etc/iscsi/initiatorname.iscsi', + onlyif => '/usr/bin/test ! -f /etc/iscsi/.initiator_reset', +}-> + +file { '/etc/iscsi/.initiator_reset': + ensure => present, +} + include ::nova include ::nova::config include ::nova::compute @@ -49,6 +61,7 @@ nova_config { $rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false) $rbd_persistent_storage = hiera('rbd_persistent_storage', false) if $rbd_ephemeral_storage or $rbd_persistent_storage { + include ::ceph::conf include ::ceph::profile::client $client_keys = hiera('ceph::profile::params::client_keys') @@ -78,6 +91,7 @@ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' } include ::nova::network::neutron include ::neutron +include ::neutron::config # If the value of core plugin is set to 'nuage', # include nuage agent, @@ -126,6 +140,10 @@ else { n1kv_version => hiera('n1kv_vem_version', undef), } } + + if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::agents::bigswitch + } } diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index 347ea7b1..c304e94e 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -20,7 +20,9 @@ $enable_load_balancer = hiera('enable_load_balancer', true) if hiera('step') >= 1 { + create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) + Exec <| tag == 'kmod::load' |> -> Sysctl <| |> $controller_node_ips = split(hiera('controller_node_ips'), ',') @@ -83,11 +85,15 @@ if hiera('step') >= 2 { $mysql_config_file = '/etc/my.cnf.d/server.cnf' } # TODO Galara + # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we + # set bind-address to a hostname instead of an ip address; to move Mysql + # from internal_api on another network we'll have to customize both + # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap class { '::mysql::server': config_file => $mysql_config_file, override_options => { 'mysqld' => { - 'bind-address' => hiera('mysql_bind_host'), + 'bind-address' => $::hostname, 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', }, @@ -100,6 +106,7 @@ if hiera('step') >= 2 { include ::keystone::db::mysql include ::glance::db::mysql include ::nova::db::mysql + include ::nova::db::mysql_api include ::neutron::db::mysql include ::cinder::db::mysql include ::heat::db::mysql @@ -137,6 +144,7 @@ if hiera('step') >= 2 { class { '::ceph::profile::params': mon_initial_members => downcase(hiera('ceph_mon_initial_members')), } + include ::ceph::conf include ::ceph::profile::mon } @@ -155,10 +163,12 @@ if hiera('step') >= 2 { } -> Class['ceph::profile::osd'] } + include ::ceph::conf include ::ceph::profile::osd } if str2bool(hiera('enable_external_ceph', false)) { + include ::ceph::conf include ::ceph::profile::client } @@ -170,6 +180,7 @@ if hiera('step') >= 3 { include ::keystone::config include ::keystone::roles::admin include ::keystone::endpoint + include ::keystone::wsgi::apache #TODO: need a cleanup-keystone-tokens.sh solution here @@ -211,13 +222,18 @@ if hiera('step') >= 3 { $http_store = ['glance.store.http.Store'] $glance_store = concat($http_store, $backend_store) - # TODO: notifications, scrubber, etc. 
+ # TODO: scrubber and other additional optional features include ::glance + include ::glance::config class { '::glance::api': known_stores => $glance_store, } include ::glance::registry include join(['::glance::backend::', $glance_backend]) + $rabbit_port = hiera('rabbitmq::port') + class { '::glance::notify::rabbitmq': + rabbit_hosts => suffix(hiera('rabbit_node_ips'), ":${rabbit_port}"), + } class { '::nova' : memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'), @@ -242,7 +258,8 @@ if hiera('step') >= 3 { if hiera('enable_zookeeper_on_controller') { class {'::tripleo::cluster::zookeeper': zookeeper_server_ips => $zookeeper_node_ips, - zookeeper_client_ip => $ipaddress, + # TODO: create a 'bind' hiera key for zookeeper + zookeeper_client_ip => hiera('neutron::bind_host'), zookeeper_hostnames => hiera('controller_node_names') } } @@ -251,7 +268,8 @@ if hiera('step') >= 3 { if hiera('enable_cassandra_on_controller') { class {'::tripleo::cluster::cassandra': cassandra_servers => $cassandra_node_ips, - cassandra_ip => $ipaddress + # TODO: create a 'bind' hiera key for cassandra + cassandra_ip => hiera('neutron::bind_host'), } } @@ -262,10 +280,11 @@ if hiera('step') >= 3 { class {'::tripleo::network::midonet::api': zookeeper_servers => $zookeeper_node_ips, - vip => $ipaddress, - keystone_ip => $ipaddress, + vip => hiera('tripleo::loadbalancer::public_virtual_ip'), + keystone_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), keystone_admin_token => hiera('keystone::admin_token'), - bind_address => $ipaddress, + # TODO: create a 'bind' hiera key for api + bind_address => hiera('neutron::bind_host'), admin_password => hiera('admin_password') } @@ -281,6 +300,7 @@ if hiera('step') >= 3 { include ::neutron } + include ::neutron::config include ::neutron::server include ::neutron::server::notifications @@ -309,7 +329,7 @@ if hiera('step') >= 3 { if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { class {'::neutron::plugins::midonet': - midonet_api_ip => $ipaddress, + midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), keystone_tenant => hiera('neutron::server::auth_tenant'), keystone_password => hiera('neutron::server::auth_password') } @@ -341,8 +361,9 @@ if hiera('step') >= 3 { include ::neutron::plugins::ml2::cisco::type_nexus_vxlan } - if hiera('neutron_enable_bigswitch_ml2', false) { + if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::bigswitch::restproxy + include ::neutron::agents::bigswitch } neutron_l3_agent_config { 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); @@ -359,10 +380,13 @@ if hiera('step') >= 3 { } include ::cinder + include ::cinder::config + include ::tripleo::ssl::cinder_config include ::cinder::api include ::cinder::glance include ::cinder::scheduler include ::cinder::volume + include ::cinder::ceilometer class { '::cinder::setup_test_volume': size => join([hiera('cinder_lvm_loop_device_size'), 'M']), } @@ -406,10 +430,6 @@ if hiera('step') >= 3 { if hiera('cinder_enable_eqlx_backend', false) { $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name') - cinder_config { - "${cinder_eqlx_backend}/host": value => 'hostgroup'; - } - cinder::backend::eqlx { $cinder_eqlx_backend : volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef), san_ip => hiera('cinder::backend::eqlx::san_ip', undef), @@ -417,7 +437,7 @@ if hiera('step') >= 3 { san_password => hiera('cinder::backend::eqlx::san_password', 
undef), san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef), eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef), - eqlx_pool => hiera('cinder::backend::eqlx::eqlx_lpool', undef), + eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef), eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef), eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef), eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_san_password', undef), @@ -427,10 +447,6 @@ if hiera('step') >= 3 { if hiera('cinder_enable_dellsc_backend', false) { $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name') - cinder_config { - "${cinder_dellsc_backend}/host": value => 'hostgroup'; - } - cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend : volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef), san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef), @@ -439,7 +455,7 @@ if hiera('step') >= 3 { dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef), iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef), iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef), - dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef), + dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef), dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef), dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef), } @@ -448,10 +464,6 @@ if hiera('step') >= 3 { if hiera('cinder_enable_netapp_backend', false) { $cinder_netapp_backend = hiera('cinder::backend::netapp::title') - cinder_config { - "${cinder_netapp_backend}/host": value => 'hostgroup'; - } - if hiera('cinder::backend::netapp::nfs_shares', undef) { $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',') } @@ -560,7 +572,10 @@ if hiera('step') >= 3 { Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } # Heat - include ::heat + class { '::heat' : + notification_driver => 'messaging', + } + include ::heat::config include ::heat::api include ::heat::api_cfn include ::heat::api_cloudwatch diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index c41ab2ce..c6c69b78 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -39,7 +39,9 @@ $non_pcmk_start = hiera('step') >= 4 if hiera('step') >= 1 { + create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) + Exec <| tag == 'kmod::load' |> -> Sysctl <| |> include ::timezone @@ -134,6 +136,11 @@ if hiera('step') >= 1 { $galera_nodes = downcase(hiera('galera_node_names', $::hostname)) $galera_nodes_count = count(split($galera_nodes, ',')) + # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we + # set bind-address to a hostname instead of an ip address; to move Mysql + # from internal_api on another network we'll have to customize both + # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap + $mysql_bind_host = hiera('mysql_bind_host') $mysqld_options = { 'mysqld' => { 'skip-name-resolve' => '1', @@ -143,7 +150,7 @@ if hiera('step') >= 1 { 
'innodb_locks_unsafe_for_binlog'=> '1', 'query_cache_size' => '0', 'query_cache_type' => '0', - 'bind-address' => hiera('mysql_bind_host'), + 'bind-address' => $::hostname, 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so', @@ -158,8 +165,8 @@ if hiera('step') >= 1 { 'wsrep_auto_increment_control' => '1', 'wsrep_drupal_282555_workaround'=> '0', 'wsrep_causal_reads' => '0', - 'wsrep_notify_cmd' => '', 'wsrep_sst_method' => 'rsync', + 'wsrep_provider_options' => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;", }, } @@ -349,6 +356,7 @@ if hiera('step') >= 2 { ocf_agent_name => 'heartbeat:rabbitmq-cluster', resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'', clone_params => 'ordered=true interleave=true', + meta_params => 'notify=true', require => Class['::rabbitmq'], } @@ -430,6 +438,9 @@ MYSQL_HOST=localhost\n", class { '::nova::db::mysql': require => Exec['galera-ready'], } + class { '::nova::db::mysql_api': + require => Exec['galera-ready'], + } class { '::neutron::db::mysql': require => Exec['galera-ready'], } @@ -461,6 +472,7 @@ MYSQL_HOST=localhost\n", class { '::ceph::profile::params': mon_initial_members => downcase(hiera('ceph_mon_initial_members')), } + include ::ceph::conf include ::ceph::profile::mon } @@ -479,10 +491,12 @@ MYSQL_HOST=localhost\n", } -> Class['ceph::profile::osd'] } + include ::ceph::conf include ::ceph::profile::osd } if str2bool(hiera('enable_external_ceph', false)) { + include ::ceph::conf include ::ceph::profile::client } @@ -551,6 +565,7 @@ if hiera('step') >= 3 { # TODO: notifications, scrubber, etc. include ::glance + include ::glance::config class { '::glance::api': known_stores => $glance_store, manage_service => false, @@ -562,6 +577,10 @@ if hiera('step') >= 3 { enabled => false, } include join(['::glance::backend::', $glance_backend]) + $rabbit_port = hiera('rabbitmq::port') + class { '::glance::notify::rabbitmq': + rabbit_hosts => suffix(hiera('rabbit_node_ips'), ":${rabbit_port}"), + } class { '::nova' : memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'), @@ -571,6 +590,7 @@ if hiera('step') >= 3 { class { '::nova::api' : sync_db => $sync_db, + sync_db_api => $sync_db, manage_service => false, enabled => false, } @@ -607,8 +627,9 @@ if hiera('step') >= 3 { if hiera('enable_zookeeper_on_controller') { class {'::tripleo::cluster::zookeeper': zookeeper_server_ips => $zookeeper_node_ips, - zookeeper_client_ip => $ipaddress, - zookeeper_hostnames => hiera('controller_node_names') + # TODO: create a 'bind' hiera key for zookeeper + zookeeper_client_ip => hiera('neutron::bind_host'), + zookeeper_hostnames => split(hiera('controller_node_names'), ',') } } @@ -616,7 +637,8 @@ if hiera('step') >= 3 { if hiera('enable_cassandra_on_controller') { class {'::tripleo::cluster::cassandra': cassandra_servers => $cassandra_node_ips, - cassandra_ip => $ipaddress + # TODO: create a 'bind' hiera key for cassandra + cassandra_ip => hiera('neutron::bind_host'), } } @@ -626,11 +648,12 @@ if hiera('step') >= 3 { } class {'::tripleo::network::midonet::api': - zookeeper_servers => hiera('neutron_api_node_ips'), - vip => $public_vip, - keystone_ip => $public_vip, + zookeeper_servers => $zookeeper_node_ips, + vip => hiera('tripleo::loadbalancer::public_virtual_ip'), + keystone_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), keystone_admin_token => hiera('keystone::admin_token'), - bind_address => $ipaddress, + # TODO: create a 'bind' hiera 
key for api + bind_address => hiera('neutron::bind_host'), admin_password => hiera('admin_password') } @@ -645,6 +668,7 @@ if hiera('step') >= 3 { include ::neutron } + include ::neutron::config class { '::neutron::server' : sync_db => $sync_db, manage_service => false, @@ -659,7 +683,7 @@ if hiera('step') >= 3 { } if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { class {'::neutron::plugins::midonet': - midonet_api_ip => $public_vip, + midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'), keystone_tenant => hiera('neutron::server::auth_tenant'), keystone_password => hiera('neutron::server::auth_password') } @@ -716,8 +740,9 @@ if hiera('step') >= 3 { } } - if hiera('neutron_enable_bigswitch_ml2', false) { + if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::bigswitch::restproxy + include ::neutron::agents::bigswitch } neutron_l3_agent_config { 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); @@ -725,8 +750,13 @@ if hiera('step') >= 3 { neutron_dhcp_agent_config { 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); } + neutron_config { + 'DEFAULT/notification_driver': value => 'messaging'; + } include ::cinder + include ::cinder::config + include ::tripleo::ssl::cinder_config class { '::cinder::api': sync_db => $sync_db, manage_service => false, @@ -741,6 +771,7 @@ if hiera('step') >= 3 { enabled => false, } include ::cinder::glance + include ::cinder::ceilometer class { '::cinder::setup_test_volume': size => join([hiera('cinder_lvm_loop_device_size'), 'M']), } @@ -784,10 +815,6 @@ if hiera('step') >= 3 { if hiera('cinder_enable_eqlx_backend', false) { $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name') - cinder_config { - "${cinder_eqlx_backend}/host": value => 'hostgroup'; - } - cinder::backend::eqlx { $cinder_eqlx_backend : volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef), san_ip => hiera('cinder::backend::eqlx::san_ip', undef), @@ -795,7 +822,7 @@ if hiera('step') >= 3 { san_password => hiera('cinder::backend::eqlx::san_password', undef), san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef), eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef), - eqlx_pool => hiera('cinder::backend::eqlx::eqlx_lpool', undef), + eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef), eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef), eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef), eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_san_password', undef), @@ -805,10 +832,6 @@ if hiera('step') >= 3 { if hiera('cinder_enable_dellsc_backend', false) { $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name') - cinder_config { - "${cinder_dellsc_backend}/host": value => 'hostgroup'; - } - cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend : volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef), san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef), @@ -817,7 +840,7 @@ if hiera('step') >= 3 { dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef), iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef), iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef), - dell_sc_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef), + dell_sc_api_port => 
hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef), dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef), dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef), } @@ -826,10 +849,6 @@ if hiera('step') >= 3 { if hiera('cinder_enable_netapp_backend', false) { $cinder_netapp_backend = hiera('cinder::backend::netapp::title') - cinder_config { - "${cinder_netapp_backend}/host": value => 'hostgroup'; - } - if hiera('cinder::backend::netapp::nfs_shares', undef) { $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',') } @@ -977,8 +996,10 @@ if hiera('step') >= 3 { Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } # Heat + include ::heat::config class { '::heat' : - sync_db => $sync_db, + sync_db => $sync_db, + notification_driver => 'messaging', } class { '::heat::api' : manage_service => false, @@ -1003,6 +1024,7 @@ if hiera('step') >= 3 { service_enable => false, # service_manage => false, # <-- not supported with horizon&apache mod_wsgi? } + include ::keystone::wsgi::apache include ::apache::mod::status if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { $_profile_support = 'cisco' @@ -1046,57 +1068,49 @@ if hiera('step') >= 4 { if $pacemaker_master { - # Keystone - pacemaker::resource::service { $::keystone::params::service_name : - clone_params => 'interleave=true', - verify_on_create => true, - require => [File['/etc/keystone/ssl/certs/ca.pem'], - File['/etc/keystone/ssl/private/signing_key.pem'], - File['/etc/keystone/ssl/certs/signing_cert.pem']], - } if $enable_load_balancer { pacemaker::constraint::base { 'haproxy-then-keystone-constraint': constraint_type => 'order', first_resource => 'haproxy-clone', - second_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::apache::params::service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } } pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint': constraint_type => 'order', first_resource => 'rabbitmq-clone', - second_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::apache::params::service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Ocf['rabbitmq'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } pacemaker::constraint::base { 'memcached-then-keystone-constraint': constraint_type => 'order', first_resource => 'memcached-clone', - second_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::apache::params::service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service['memcached'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } pacemaker::constraint::base { 'galera-then-keystone-constraint': constraint_type => 'order', first_resource => 'galera-master', - second_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::apache::params::service_name}-clone", first_action => 'promote', 
second_action => 'start', require => [Pacemaker::Resource::Ocf['galera'], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } # Cinder pacemaker::resource::service { $::cinder::params::api_service : clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + require => Pacemaker::Resource::Service[$::apache::params::service_name], } pacemaker::resource::service { $::cinder::params::scheduler_service : clone_params => 'interleave=true', @@ -1105,12 +1119,12 @@ if hiera('step') >= 4 { pacemaker::constraint::base { 'keystone-then-cinder-api-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", + first_resource => "${::apache::params::service_name}-clone", second_resource => "${::cinder::params::api_service}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service[$::cinder::params::api_service], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint': constraint_type => 'order', @@ -1148,25 +1162,25 @@ if hiera('step') >= 4 { # Sahara pacemaker::resource::service { $::sahara::params::api_service_name : clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + require => Pacemaker::Resource::Service[$::apache::params::service_name], } pacemaker::resource::service { $::sahara::params::engine_service_name : clone_params => 'interleave=true', } pacemaker::constraint::base { 'keystone-then-sahara-api-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", + first_resource => "${::apache::params::service_name}-clone", second_resource => "${::sahara::params::api_service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } # Glance pacemaker::resource::service { $::glance::params::registry_service_name : clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + require => Pacemaker::Resource::Service[$::apache::params::service_name], } pacemaker::resource::service { $::glance::params::api_service_name : clone_params => 'interleave=true', @@ -1174,12 +1188,12 @@ if hiera('step') >= 4 { pacemaker::constraint::base { 'keystone-then-glance-registry-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", + first_resource => "${::apache::params::service_name}-clone", second_resource => "${::glance::params::registry_service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint': constraint_type => 'order', @@ -1217,12 +1231,12 @@ if hiera('step') >= 4 { } -> pacemaker::resource::service { $::neutron::params::server_service: clone_params => 'interleave=true', - require => 
Pacemaker::Resource::Service[$::keystone::params::service_name] + require => Pacemaker::Resource::Service[$::apache::params::service_name] } } else { pacemaker::resource::service { $::neutron::params::server_service: clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name] + require => Pacemaker::Resource::Service[$::apache::params::service_name] } } if hiera('neutron::enable_l3_agent', true) { @@ -1294,28 +1308,16 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], } } - pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': - constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", - second_resource => "${::neutron::params::server_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::keystone::params::service_name], - Pacemaker::Resource::Service[$::neutron::params::server_service]], - } - if hiera('neutron::enable_ovs_agent',true) { - pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::server_service}-clone", - second_resource => "${::neutron::params::ovs_agent_service}-clone", + constraint_type => 'order', + first_resource => "${::apache::params::service_name}-clone", + second_resource => "${::neutron::params::server_service}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::server_service], - Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], + require => [Pacemaker::Resource::Service[$::apache::params::service_name], + Pacemaker::Resource::Service[$::neutron::params::server_service]], } - } - if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) { + if hiera('neutron::enable_ovs_agent',true) { pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': constraint_type => 'order', first_resource => "${::neutron::params::ovs_agent_service}-clone", @@ -1324,8 +1326,19 @@ if hiera('step') >= 4 { second_action => 'start', require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], - } + } + if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) { + pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::server_service}-clone", + second_resource => "${::neutron::params::ovs_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::server_service], + Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], + } + pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': source => "${::neutron::params::dhcp_agent_service}-clone", target => "${::neutron::params::ovs_agent_service}-clone", @@ -1411,34 +1424,29 @@ if hiera('step') >= 4 { # Nova pacemaker::resource::service { $::nova::params::api_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::conductor_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor 
start-delay=10s', } pacemaker::resource::service { $::nova::params::consoleauth_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + require => Pacemaker::Resource::Service[$::apache::params::service_name], } pacemaker::resource::service { $::nova::params::vncproxy_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::scheduler_service_name : clone_params => 'interleave=true', - op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s', } pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", + first_resource => "${::apache::params::service_name}-clone", second_resource => "${::nova::params::consoleauth_service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint': constraint_type => 'order', @@ -1510,14 +1518,14 @@ if hiera('step') >= 4 { /mysql/: { pacemaker::resource::service { $::ceilometer::params::agent_central_service_name : clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + require => Pacemaker::Resource::Service[$::apache::params::service_name], } } default: { pacemaker::resource::service { $::ceilometer::params::agent_central_service_name : clone_params => 'interleave=true', - require => [Pacemaker::Resource::Service[$::keystone::params::service_name], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], + require => [Pacemaker::Resource::Service[$::apache::params::service_name], + Pacemaker::Resource::Service[$::mongodb::params::service_name]], } } } @@ -1553,12 +1561,12 @@ if hiera('step') >= 4 { } pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", + first_resource => "${::apache::params::service_name}-clone", second_resource => "${::ceilometer::params::agent_central_service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint': constraint_type => 'order', @@ -1628,12 +1636,12 @@ if hiera('step') >= 4 { } pacemaker::constraint::base { 'keystone-then-heat-api-constraint': constraint_type => 'order', - first_resource => "${::keystone::params::service_name}-clone", + first_resource => "${::apache::params::service_name}-clone", second_resource => "${::heat::params::api_service_name}-clone", first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], - Pacemaker::Resource::Service[$::keystone::params::service_name]], + Pacemaker::Resource::Service[$::apache::params::service_name]], } 
pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint': constraint_type => 'order', @@ -1693,9 +1701,13 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]], } - # Horizon - pacemaker::resource::service { $::horizon::params::http_service: - clone_params => 'interleave=true', + # Horizon and Keystone + pacemaker::resource::service { $::apache::params::service_name: + clone_params => 'interleave=true', + verify_on_create => true, + require => [File['/etc/keystone/ssl/certs/ca.pem'], + File['/etc/keystone/ssl/private/signing_key.pem'], + File['/etc/keystone/ssl/certs/signing_cert.pem']], } #VSM @@ -1732,12 +1744,11 @@ if hiera('step') >= 5 { if $pacemaker_master { class {'::keystone::roles::admin' : - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + require => Pacemaker::Resource::Service[$::apache::params::service_name], } -> class {'::keystone::endpoint' : - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + require => Pacemaker::Resource::Service[$::apache::params::service_name], } - } } #END STEP 5 diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp index 63ac396e..1ac66904 100644 --- a/puppet/manifests/overcloud_object.pp +++ b/puppet/manifests/overcloud_object.pp @@ -16,7 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall +create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) +Exec <| tag == 'kmod::load' |> -> Sysctl <| |> if count(hiera('ntp::servers')) > 0 { include ::ntp diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp index 5a69725a..72cd36c3 100644 --- a/puppet/manifests/overcloud_volume.pp +++ b/puppet/manifests/overcloud_volume.pp @@ -16,7 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall +create_resources(kmod::load, hiera('kernel_modules'), {}) create_resources(sysctl::value, hiera('sysctl_settings'), {}) +Exec <| tag == 'kmod::load' |> -> Sysctl <| |> if count(hiera('ntp::servers')) > 0 { include ::ntp diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml index a55b3959..eb06b241 100644 --- a/puppet/swift-storage-post.yaml +++ b/puppet/swift-storage-post.yaml @@ -12,9 +12,19 @@ parameters: type: json description: Value which changes if the node configuration may need to be re-applied - resources: + StorageArtifactsConfig: + type: deploy-artifacts.yaml + + StorageArtifactsDeploy: + type: OS::Heat::StructuredDeployments + properties: + servers: {get_param: servers} + config: {get_resource: StorageArtifactsConfig} + input_values: + update_identifier: {get_param: NodeConfigIdentifiers} + StoragePuppetConfig: type: OS::Heat::SoftwareConfig properties: @@ -28,6 +38,7 @@ resources: StorageDeployment_Step1: type: OS::Heat::StructuredDeployments + depends_on: StorageArtifactsDeploy properties: name: StorageDeployment_Step1 servers: {get_param: servers} diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml index 142e47cc..d36a9c17 100644 --- a/puppet/swift-storage.yaml +++ b/puppet/swift-storage.yaml @@ -302,11 +302,54 @@ outputs: hosts_entry: value: str_replace: - template: "IP HOST.DOMAIN HOST" + template: | + PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST + EXTERNALIP EXTERNALHOST + INTERNAL_APIIP INTERNAL_APIHOST + STORAGEIP STORAGEHOST + STORAGE_MGMTIP STORAGE_MGMTHOST + TENANTIP TENANTHOST + MANAGEMENTIP MANAGEMENTHOST params: - IP: {get_attr: [NetIpMap, 
net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]} + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]} DOMAIN: {get_param: CloudDomain} - HOST: {get_attr: [SwiftStorage, name]} + PRIMARYHOST: {get_attr: [SwiftStorage, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: + list_join: + - '-' + - - {get_attr: [SwiftStorage, name]} + - external + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: + list_join: + - '-' + - - {get_attr: [SwiftStorage, name]} + - internalapi + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: + list_join: + - '-' + - - {get_attr: [SwiftStorage, name]} + - storage + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: + list_join: + - '-' + - - {get_attr: [SwiftStorage, name]} + - storagemgmt + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: + list_join: + - '-' + - - {get_attr: [SwiftStorage, name]} + - tenant + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: + list_join: + - '-' + - - {get_attr: [SwiftStorage, name]} + - management nova_server_resource: description: Heat resource handle for the swift storage server value: diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml index c49a1047..5e2f698f 100644 --- a/puppet/vip-config.yaml +++ b/puppet/vip-config.yaml @@ -16,6 +16,8 @@ resources: keystone_admin_api_vip: {get_input: keystone_admin_api_vip} keystone_public_api_vip: {get_input: keystone_public_api_vip} neutron_api_vip: {get_input: neutron_api_vip} + # TODO: pass a `midonet_api_vip` var + midonet_api_vip: {get_input: neutron_api_vip} cinder_api_vip: {get_input: cinder_api_vip} glance_api_vip: {get_input: glance_api_vip} glance_registry_vip: {get_input: glance_registry_vip} diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py index cb5669a7..fe690d8c 100755 --- a/tools/yaml-validate.py +++ b/tools/yaml-validate.py @@ -16,11 +16,13 @@ import sys import traceback import yaml -base_path = sys.argv[1] -exit_val = 0 -failed_files = [] + +def exit_usage(): + print('Usage %s <yaml file or directory>' % sys.argv[0]) + sys.exit(1) def validate(filename): + print('Validating %s' % filename) try: yaml.load(open(filename).read()) except Exception: @@ -28,14 +30,31 @@ def validate(filename): return 1 return 0 -for subdir, dirs, files in os.walk(base_path): - for f in files: - if f.endswith('.yaml'): - file_path = os.path.join(subdir, f) - failed = validate(file_path) - if failed: - failed_files.append(file_path) - exit_val |= failed +if len(sys.argv) < 2: + exit_usage() + +path_args = sys.argv[1:] +exit_val = 0 +failed_files = [] + +for base_path in path_args: + if os.path.isdir(base_path): + for subdir, dirs, files in os.walk(base_path): + for f in files: + if f.endswith('.yaml'): + file_path = os.path.join(subdir, f) + failed = validate(file_path) + if failed: + failed_files.append(file_path) + exit_val |= failed + elif os.path.isfile(base_path) and base_path.endswith('.yaml'): + failed = validate(base_path) + if failed: + failed_files.append(base_path) + exit_val |= failed + else: + print('Unexpected argument %s' % base_path) + exit_usage() if failed_files: print('Validation failed on:') @@ -10,5 +10,5 @@ deps = -r{toxinidir}/requirements.txt [testenv:venv] commands = {posargs} -[testenv:linters] +[testenv:pep8] commands = python ./tools/yaml-validate.py . 
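With the change above, tools/yaml-validate.py accepts any mix of YAML files and directories rather than a single root path, and the tox pep8 environment runs it over the whole tree. A minimal sketch of driving the updated validator the same way from Python (the paths listed are placeholders for illustration, not arguments the patch prescribes):

# Hypothetical driver for the updated validator: directory arguments are
# walked for *.yaml files, single .yaml arguments are checked directly, and
# the exit status is non-zero if any file fails to parse.
import subprocess
import sys

paths = ['environments', 'puppet/vip-config.yaml']  # placeholder arguments
status = subprocess.call([sys.executable, 'tools/yaml-validate.py'] + paths)
print('validation %s' % ('failed' if status else 'passed'))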
diff --git a/validation-scripts/all-nodes.sh b/validation-scripts/all-nodes.sh
index 8057f201..ae1fddf3 100644
--- a/validation-scripts/all-nodes.sh
+++ b/validation-scripts/all-nodes.sh
@@ -5,23 +5,25 @@
 # attempt a ping test the remote network IP.
 function ping_controller_ips() {
   local REMOTE_IPS=$1
-
   for REMOTE_IP in $(echo $REMOTE_IPS | sed -e "s| |\n|g" | sort -u); do
-
-    for LOCAL_NETWORK in $(ip r | grep -v default | cut -d " " -f 1); do
-      local LOCAL_CIDR=$(echo $LOCAL_NETWORK | cut -d "/" -f 2)
-      local LOCAL_NETMASK=$(ipcalc -m $LOCAL_NETWORK | grep NETMASK | cut -d "=" -f 2)
-      local REMOTE_NETWORK=$(ipcalc -np $REMOTE_IP $LOCAL_NETMASK | grep NETWORK | cut -d "=" -f 2)
-
-      if [ $REMOTE_NETWORK/$LOCAL_CIDR == $LOCAL_NETWORK ]; then
-        echo -n "Trying to ping $REMOTE_IP for local network $LOCAL_NETWORK..."
-        if ! ping -W 300 -c 1 $REMOTE_IP &> /dev/null; then
-          echo "FAILURE"
-          echo "$REMOTE_IP is not pingable. Local Network: $LOCAL_NETWORK" >&2
-          exit 1
-        fi
-        echo "SUCCESS"
-      fi
+    if [[ $REMOTE_IP =~ ":" ]]; then
+      networks=$(ip -6 r | grep -v default | cut -d " " -f 1 | grep -v "unreachable")
+      ping=ping6
+    else
+      networks=$(ip r | grep -v default | cut -d " " -f 1)
+      ping=ping
+    fi
+    for LOCAL_NETWORK in $networks; do
+      in_network=$(python -c "import ipaddr; net=ipaddr.IPNetwork('$LOCAL_NETWORK'); addr=ipaddr.IPAddress('$REMOTE_IP'); print(addr in net)")
+      if [[ $in_network == "True" ]]; then
+        echo -n "Trying to ping $REMOTE_IP for local network $LOCAL_NETWORK..."
+        if ! $ping -W 300 -c 1 $REMOTE_IP &> /dev/null; then
+          echo "FAILURE"
+          echo "$REMOTE_IP is not pingable. Local Network: $LOCAL_NETWORK" >&2
+          exit 1
+        fi
+        echo "SUCCESS"
+      fi
     done
   done
 }
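The reworked ping_controller_ips() above no longer derives network membership with ipcalc; for each locally routed network it asks Python whether the remote address falls inside it, which behaves the same for IPv4 and IPv6. A standalone sketch of that membership test, assuming the same python-ipaddr module the patched script shells out to is installed (sample addresses are illustrative only; a Python 3 environment would use the equivalent ipaddress module):

# Membership check equivalent to the inline `python -c` call in all-nodes.sh.
import ipaddr

def in_network(remote_ip, local_network):
    # True when remote_ip is contained in local_network (IPv4 or IPv6).
    return ipaddr.IPAddress(remote_ip) in ipaddr.IPNetwork(local_network)

print(in_network('192.0.2.10', '192.0.2.0/24'))     # True
print(in_network('2001:db8::10', '2001:db8::/64'))  # True
print(in_network('198.51.100.5', '192.0.2.0/24'))   # False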