87 files changed, 2926 insertions(+), 470 deletions(-)
diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh index 963c7eee..bb458a68 100644 --- a/docker/firstboot/start_docker_agents.sh +++ b/docker/firstboot/start_docker_agents.sh @@ -73,3 +73,22 @@ chmod 0640 /etc/systemd/system/heat-docker-agents.service # Disable NetworkManager and let the ifup/down scripts work properly. /usr/bin/systemctl disable NetworkManager /usr/bin/systemctl stop NetworkManager + +# Atomic's root partition & logical volume defaults to 3G. In order to launch +# larger VMs, we need to enlarge the root logical volume and scale down the +# docker_pool logical volume. We are allocating 80% of the disk space for +# vm data and the remaining 20% for docker images. +ATOMIC_ROOT='/dev/mapper/atomicos-root' +ROOT_DEVICE=`pvs -o vg_name,pv_name --no-headings | grep atomicos | awk '{ print $2}'` + +growpart $( echo "${ROOT_DEVICE}" | sed -r 's/([^0-9]*)([0-9]+)/\1 \2/' ) +pvresize "${ROOT_DEVICE}" +lvresize -l +80%FREE "${ATOMIC_ROOT}" +xfs_growfs "${ATOMIC_ROOT}" + +cat <<EOF > /etc/sysconfig/docker-storage-setup +GROWPART=true +AUTO_EXTEND_POOL=yes +POOL_AUTOEXTEND_PERCENT=30 +POOL_AUTOEXTEND_THRESHOLD=70 +EOF diff --git a/environments/enable-tls.yaml b/environments/enable-tls.yaml index b895f86a..d6328c06 100644 --- a/environments/enable-tls.yaml +++ b/environments/enable-tls.yaml @@ -5,9 +5,6 @@ parameter_defaults: SSLKey: | The contents of the private key go here EndpointMap: - AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'} - AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'} - AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'} CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'} CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'} CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'} @@ -27,6 +24,9 @@ parameter_defaults: KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'} KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'} KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'} + KeystoneV3Admin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'} + KeystoneV3Internal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'} + KeystoneV3Public: {protocol: 'https', port: '13000', host: 'CLOUDNAME'} NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'} NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'} NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'} @@ -39,6 +39,9 @@ parameter_defaults: NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'} NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'} NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'} + SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} + SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} + SaharaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'} SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'} diff --git a/environments/external-loadbalancer-vip-v6.yaml b/environments/external-loadbalancer-vip-v6.yaml new file mode 100644 index 00000000..5a2ef505 --- /dev/null +++ b/environments/external-loadbalancer-vip-v6.yaml @@ -0,0 +1,38 @@ +resource_registry: + OS::TripleO::Network::Ports::NetVipMap: 
../network/ports/net_vip_map_external_v6.yaml + OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/noop.yaml + OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/noop.yaml + OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/noop.yaml + OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/noop.yaml + OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/from_service_v6.yaml + OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool_v6.yaml + OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool_v6.yaml + OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool_v6.yaml + OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool_v6.yaml + # OVS doesn't support IPv6 endpoints for tunneling yet, so this remains IPv4 for now. + OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml + +parameter_defaults: + # When using an external loadbalancer set the following in parameter_defaults + # to control your VIPs (currently one per network) + # NOTE: we will eventually move to one VIP per service + # + ControlPlaneIP: 192.0.2.251 + ExternalNetworkVip: 2001:db8:fd00:1000:0000:0000:0000:0005 + InternalApiNetworkVip: fd00:fd00:fd00:2000:0000:0000:0000:0005 + StorageNetworkVip: fd00:fd00:fd00:3000:0000:0000:0000:0005 + StorageMgmtNetworkVip: fd00:fd00:fd00:4000:0000:0000:0000:0005 + ServiceVips: + redis: fd00:fd00:fd00:2000:0000:0000:0000:0006 + ControllerIPs: + external: + - 2001:db8:fd00:1000:0000:0000:0000:0007 + internal_api: + - fd00:fd00:fd00:2000:0000:0000:0000:0007 + storage: + - fd00:fd00:fd00:3000:0000:0000:0000:0007 + storage_mgmt: + - fd00:fd00:fd00:4000:0000:0000:0000:0007 + tenant: + - 172.16.0.253 + EnableLoadBalancer: false diff --git a/environments/ips-from-pool-all.yaml b/environments/ips-from-pool-all.yaml new file mode 100644 index 00000000..f660d501 --- /dev/null +++ b/environments/ips-from-pool-all.yaml @@ -0,0 +1,75 @@ +# Environment file demonstrating how to pre-assign IPs to all node types +resource_registry: + OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml + OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml + OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml + OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml + OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml + + OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml + OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml + OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage_from_pool.yaml + OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml + OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml + + OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml + OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml + OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml + OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml + OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml + + OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml + 
OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml + OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml + OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml + OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml + + OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml + OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml + OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml + OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml + OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml + +parameter_defaults: + ControllerIPs: + # Each controller will get an IP from the lists below, first controller, first IP + external: + - 10.0.0.251 + internal_api: + - 172.16.2.251 + storage: + - 172.16.1.251 + storage_mgmt: + - 172.16.3.251 + tenant: + - 172.16.0.251 + NovaComputeIPs: + # Each compute will get an IP from the lists below, first compute, first IP + internal_api: + - 172.16.2.252 + storage: + - 172.16.1.252 + tenant: + - 172.16.0.252 + CephStorageIPs: + # Each ceph node will get an IP from the lists below, first node, first IP + storage: + - 172.16.1.253 + storage_mgmt: + - 172.16.3.253 + SwiftStorageIPs: + # Each swift node will get an IP from the lists below, first node, first IP + internal_api: + - 172.16.2.254 + storage: + - 172.16.1.254 + storage_mgmt: + - 172.16.3.254 + BlockStorageIPs: + # Each cinder node will get an IP from the lists below, first node, first IP + internal_api: + - 172.16.2.250 + storage: + - 172.16.1.250 + storage_mgmt: + - 172.16.3.250 diff --git a/environments/major-upgrade-pacemaker-init.yaml b/environments/major-upgrade-pacemaker-init.yaml new file mode 100644 index 00000000..d98a9cdd --- /dev/null +++ b/environments/major-upgrade-pacemaker-init.yaml @@ -0,0 +1,11 @@ +parameter_defaults: + UpgradeLevelNovaCompute: liberty + +resource_registry: + OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker_init.yaml + OS::TripleO::Tasks::PackageUpdate: ../extraconfig/tasks/yum_update_noop.yaml + OS::TripleO::ControllerPostDeployment: OS::Heat::None + OS::TripleO::ComputePostDeployment: OS::Heat::None + OS::TripleO::ObjectStoragePostDeployment: OS::Heat::None + OS::TripleO::BlockStoragePostDeployment: OS::Heat::None + OS::TripleO::CephStoragePostDeployment: OS::Heat::None diff --git a/environments/net-bond-with-vlans-v6.yaml b/environments/net-bond-with-vlans-v6.yaml new file mode 100644 index 00000000..73dda3d9 --- /dev/null +++ b/environments/net-bond-with-vlans-v6.yaml @@ -0,0 +1,20 @@ +# This template configures each role to use a pair of bonded nics (nic2 and +# nic3) and configures an IP address on each relevant isolated network +# for each role, with IPv6 on the External network. This template assumes +# use of network-isolation-v6.yaml. +# +# FIXME: if/when we add functionality to heatclient to include heat +# environment files we should think about using it here to automatically +# include network-isolation-v6.yaml. 
+resource_registry: + OS::TripleO::BlockStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/cinder-storage.yaml + OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/bond-with-vlans/compute.yaml + OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller-v6.yaml + OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml + OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml + +parameter_defaults: + # This sets 'external_network_bridge' in l3_agent.ini to an empty string + # so that external networks act like provider bridge networks (they + # will plug into br-int instead of br-ex) + NeutronExternalNetworkBridge: "''" diff --git a/environments/net-bond-with-vlans.yaml b/environments/net-bond-with-vlans.yaml index 9600fc7e..de8f8f74 100644 --- a/environments/net-bond-with-vlans.yaml +++ b/environments/net-bond-with-vlans.yaml @@ -12,10 +12,6 @@ resource_registry: OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml -# We use parameter_defaults instead of parameters here because Tuskar munges -# the names of top level and role level parameters with the role name and a -# version. Using parameter_defaults makes it such that if the parameter name is -# not defined in the template, we don't get an error. parameter_defaults: # This sets 'external_network_bridge' in l3_agent.ini to an empty string # so that external networks act like provider bridge networks (they diff --git a/environments/net-multiple-nics-v6.yaml b/environments/net-multiple-nics-v6.yaml new file mode 100644 index 00000000..a2bb0bba --- /dev/null +++ b/environments/net-multiple-nics-v6.yaml @@ -0,0 +1,13 @@ +# This template configures each role to use a separate NIC for +# each isolated network with IPv6 on the External network. +# This template assumes use of network-isolation.yaml. +# +# FIXME: if/when we add functionality to heatclient to include heat +# environment files we should think about using it here to automatically +# include network-isolation-v6.yaml. +resource_registry: + OS::TripleO::BlockStorage::Net::SoftwareConfig: ../network/config/multiple-nics/cinder-storage.yaml + OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/multiple-nics/compute.yaml + OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/multiple-nics/controller-v6.yaml + OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/multiple-nics/swift-storage.yaml + OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/multiple-nics/ceph-storage.yaml diff --git a/environments/net-single-nic-linux-bridge-with-vlans.yaml b/environments/net-single-nic-linux-bridge-with-vlans.yaml index d5f2ed62..fd80bb9b 100644 --- a/environments/net-single-nic-linux-bridge-with-vlans.yaml +++ b/environments/net-single-nic-linux-bridge-with-vlans.yaml @@ -12,10 +12,6 @@ resource_registry: OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml -# We use parameter_defaults instead of parameters here because Tuskar munges -# the names of top level and role level parameters with the role name and a -# version. 
Using parameter_defaults makes it such that if the parameter name is -# not defined in the template, we don't get an error. parameter_defaults: # This sets 'external_network_bridge' in l3_agent.ini to an empty string # so that external networks act like provider bridge networks (they diff --git a/environments/net-single-nic-with-vlans-v6.yaml b/environments/net-single-nic-with-vlans-v6.yaml new file mode 100644 index 00000000..8210bad3 --- /dev/null +++ b/environments/net-single-nic-with-vlans-v6.yaml @@ -0,0 +1,19 @@ +# This template configures each role to use Vlans on a single nic for +# each isolated network with IPv6 on the External network. +# This template assumes use of network-isolation.yaml. +# +# FIXME: if/when we add functionality to heatclient to include heat +# environment files we should think about using it here to automatically +# include network-isolation-v6.yaml. +resource_registry: + OS::TripleO::BlockStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/cinder-storage.yaml + OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/single-nic-vlans/compute.yaml + OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller-v6.yaml + OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml + OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml + +parameter_defaults: + # This sets 'external_network_bridge' in l3_agent.ini to an empty string + # so that external networks act like provider bridge networks (they + # will plug into br-int instead of br-ex) + NeutronExternalNetworkBridge: "''" diff --git a/environments/net-single-nic-with-vlans.yaml b/environments/net-single-nic-with-vlans.yaml index bdfeadd3..a61bc6e1 100644 --- a/environments/net-single-nic-with-vlans.yaml +++ b/environments/net-single-nic-with-vlans.yaml @@ -12,10 +12,6 @@ resource_registry: OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml -# We use parameter_defaults instead of parameters here because Tuskar munges -# the names of top level and role level parameters with the role name and a -# version. Using parameter_defaults makes it such that if the parameter name is -# not defined in the template, we don't get an error. parameter_defaults: # This sets 'external_network_bridge' in l3_agent.ini to an empty string # so that external networks act like provider bridge networks (they diff --git a/environments/network-isolation-v6.yaml b/environments/network-isolation-v6.yaml new file mode 100644 index 00000000..599a08b1 --- /dev/null +++ b/environments/network-isolation-v6.yaml @@ -0,0 +1,55 @@ +# Enable the creation of IPv6 Neutron networks for isolated Overcloud +# traffic and configure each role to assign ports (related +# to that role) on these networks. 
+resource_registry: + OS::TripleO::Network::External: ../network/external_v6.yaml + OS::TripleO::Network::InternalApi: ../network/internal_api_v6.yaml + OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt_v6.yaml + OS::TripleO::Network::Storage: ../network/storage_v6.yaml + # IPv4 until OVS and Neutron support IPv6 tunnel endpoints + OS::TripleO::Network::Tenant: ../network/tenant.yaml + + # Port assignments for the VIPs + OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external_v6.yaml + OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api_v6.yaml + OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage_v6.yaml + OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt_v6.yaml + OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip_v6.yaml + + # Port assignments for the controller role + OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_v6.yaml + OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_v6.yaml + OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_v6.yaml + OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml + OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml + + # Port assignments for the compute role + OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api_v6.yaml + OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage_v6.yaml + OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml + + # Port assignments for the ceph storage role + OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage_v6.yaml + OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml + + # Port assignments for the swift storage role + OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api_v6.yaml + OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage_v6.yaml + OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml + + # Port assignments for the block storage role + OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api_v6.yaml + OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage_v6.yaml + OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml + +parameter_defaults: + # Enable IPv6 for Ceph. + CephIPv6: True + # Enable IPv6 for Corosync. This is required when Corosync is using an IPv6 IP in the cluster. + CorosyncIPv6: True + # Enable IPv6 for MongoDB. This is required when MongoDB is using an IPv6 IP. + MongoDbIPv6: True + # Enable various IPv6 features in Nova. + NovaIPv6: True + # Enable IPv6 environment for RabbitMQ. + RabbitIPv6: true diff --git a/extraconfig/all_nodes/swap-partition.yaml b/extraconfig/all_nodes/swap-partition.yaml new file mode 100644 index 00000000..89a2adb0 --- /dev/null +++ b/extraconfig/all_nodes/swap-partition.yaml @@ -0,0 +1,90 @@ +heat_template_version: 2014-10-16 + +description: > + Extra config to add swap space to nodes. + +# Parameters passed from the parent template - note if you maintain +# out-of-tree templates they may require additional parameters if the +# in-tree templates add a new role. 
+parameters: + controller_servers: + type: json + compute_servers: + type: json + blockstorage_servers: + type: json + objectstorage_servers: + type: json + cephstorage_servers: + type: json + swap_partition_label: + type: string + description: Swap partition label + default: 'swap1' + + +resources: + + SwapConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: | + #!/bin/bash + set -eux + swap_partition=$(realpath /dev/disk/by-label/$swap_partition_label) + swapon $swap_partition + echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab + inputs: + - name: swap_partition_label + description: Swap partition label + default: 'swap1' + + ControllerSwapDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + config: {get_resource: SwapConfig} + servers: {get_param: controller_servers} + input_values: + swap_partition_label: {get_param: swap_partition_label} + actions: ["CREATE"] + + ComputeSwapDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + config: {get_resource: SwapConfig} + servers: {get_param: compute_servers} + input_values: + swap_partition_label: {get_param: swap_partition_label} + actions: ["CREATE"] + + BlockStorageSwapDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + config: {get_resource: SwapConfig} + servers: {get_param: blockstorage_servers} + input_values: + swap_partition_label: {get_param: swap_partition_label} + actions: ["CREATE"] + + ObjectStorageSwapDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + config: {get_resource: SwapConfig} + servers: {get_param: objectstorage_servers} + input_values: + swap_partition_label: {get_param: swap_partition_label} + actions: ["CREATE"] + + CephStorageSwapDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + config: {get_resource: SwapConfig} + servers: {get_param: cephstorage_servers} + input_values: + swap_partition_label: {get_param: swap_partition_label} + actions: ["CREATE"] + +outputs: + config_identifier: + value: none diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh new file mode 100644 index 00000000..07666245 --- /dev/null +++ b/extraconfig/tasks/major_upgrade_block_storage.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# +# This runs an upgrade of Cinder Block Storage nodes. +# +set -eu + +yum -y install python-zaqarclient # needed for os-collect-config +yum -y -q update diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh new file mode 100644 index 00000000..de42b16d --- /dev/null +++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# +# This delivers the ceph-storage upgrade script to be invoked as part of the tripleo +# major upgrade workflow. 
+# +set -eu + +UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh + +cat > $UPGRADE_SCRIPT << ENDOFCAT +### DO NOT MODIFY THIS FILE +### This file is automatically delivered to the ceph-storage nodes as part of the +### tripleo upgrades workflow + + +function systemctl_ceph { + action=\$1 + systemctl \$action ceph +} + +# "so that mirrors aren't rebalanced as if the OSD died" - gfidente +ceph osd set noout + +systemctl_ceph stop +yum -y install python-zaqarclient # needed for os-collect-config +yum -y update +systemctl_ceph start + +ceph osd unset noout + +ENDOFCAT + +# ensure the permissions are OK +chmod 0755 $UPGRADE_SCRIPT + diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh new file mode 100644 index 00000000..931f4f42 --- /dev/null +++ b/extraconfig/tasks/major_upgrade_object_storage.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# +# This delivers the swift-storage upgrade script to be invoked as part of the tripleo +# major upgrade workflow. +# +set -eu + +UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh + +cat > $UPGRADE_SCRIPT << ENDOFCAT +### DO NOT MODIFY THIS FILE +### This file is automatically delivered to the swift-storage nodes as part of the +### tripleo upgrades workflow + + +function systemctl_swift { + action=\$1 + for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \ + openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \ + openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object; do + systemctl \$action \$S + done +} + + +systemctl_swift stop + +yum -y install python-zaqarclient # needed for os-collect-config +yum -y update + +systemctl_swift start + + + +ENDOFCAT + +# ensure the permissions are OK +chmod 0755 $UPGRADE_SCRIPT + diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml index 5a11bae9..4af3186c 100644 --- a/extraconfig/tasks/major_upgrade_pacemaker.yaml +++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml @@ -40,6 +40,7 @@ resources: params: UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} - get_file: pacemaker_common_functions.sh + - get_file: major_upgrade_pacemaker_migrations.sh - get_file: major_upgrade_controller_pacemaker_1.sh ControllerPacemakerUpgradeDeployment_Step1: @@ -49,46 +50,36 @@ resources: config: {get_resource: ControllerPacemakerUpgradeConfig_Step1} input_values: {get_param: input_values} - ControllerPacemakerUpgradeConfig_Step2: + BlockStorageUpgradeConfig: type: OS::Heat::SoftwareConfig + depends_on: ControllerPacemakerUpgradeDeployment_Step1 properties: group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_controller_pacemaker_2.sh + config: {get_file: major_upgrade_block_storage.sh} - ControllerPacemakerUpgradeDeployment_Step2: + BlockStorageUpgradeDeployment: type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step1 properties: - servers: {get_param: controller_servers} - config: {get_resource: ControllerPacemakerUpgradeConfig_Step2} + servers: {get_param: blockstorage_servers} + config: {get_resource: BlockStorageUpgradeConfig} input_values: {get_param: input_values} - ComputeDeliverUpgradeConfig_Step3: + ControllerPacemakerUpgradeConfig_Step2: type: OS::Heat::SoftwareConfig properties: group: script 
config: list_join: - '' - - - str_replace: - template: | - #!/bin/bash - upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' - params: - UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_compute.sh + - - get_file: pacemaker_common_functions.sh + - get_file: major_upgrade_pacemaker_migrations.sh + - get_file: major_upgrade_controller_pacemaker_2.sh - ComputeDeliverUpgradeConfigDeployment_Step3: + ControllerPacemakerUpgradeDeployment_Step2: type: OS::Heat::SoftwareDeploymentGroup - depends_on: ControllerPacemakerUpgradeDeployment_Step2 + depends_on: BlockStorageUpgradeDeployment properties: - servers: {get_param: compute_servers} - config: {get_resource: ComputeDeliverUpgradeConfig_Step3} + servers: {get_param: controller_servers} + config: {get_resource: ControllerPacemakerUpgradeConfig_Step2} input_values: {get_param: input_values} - diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.yaml new file mode 100644 index 00000000..f662bc3d --- /dev/null +++ b/extraconfig/tasks/major_upgrade_pacemaker_init.yaml @@ -0,0 +1,128 @@ +heat_template_version: 2014-10-16 +description: 'Upgrade for Pacemaker deployments' + +parameters: + + controller_servers: + type: json + compute_servers: + type: json + blockstorage_servers: + type: json + objectstorage_servers: + type: json + cephstorage_servers: + type: json + input_values: + type: json + description: input values for the software deployments + + UpgradeInitCommand: + type: string + description: | + Command or script snippet to run on all overcloud nodes to + initialize the upgrade process. E.g. a repository switch. + default: '' + UpgradeLevelNovaCompute: + type: string + description: Nova Compute upgrade level + default: '' + +resources: + + UpgradeInitConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - "#!/bin/bash\n\n" + - get_param: UpgradeInitCommand + + UpgradeInitControllerDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: controller_servers} + config: {get_resource: UpgradeInitConfig} + input_values: {get_param: input_values} + + UpgradeInitComputeDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: compute_servers} + config: {get_resource: UpgradeInitConfig} + input_values: {get_param: input_values} + + UpgradeInitBlockStorageDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: blockstorage_servers} + config: {get_resource: UpgradeInitConfig} + input_values: {get_param: input_values} + + UpgradeInitObjectStorageDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: objectstorage_servers} + config: {get_resource: UpgradeInitConfig} + input_values: {get_param: input_values} + + UpgradeInitCephStorageDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: cephstorage_servers} + config: {get_resource: UpgradeInitConfig} + input_values: {get_param: input_values} + + # TODO(jistr): for Mitaka->Newton upgrades and further we can use + # map_merge with input_values instead of feeding params into scripts + # via str_replace on bash snippets + + ComputeDeliverUpgradeScriptConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - str_replace: + template: | + #!/bin/bash + 
upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' + params: + UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} + - get_file: major_upgrade_compute.sh + + ComputeDeliverUpgradeScriptDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: compute_servers} + config: {get_resource: ComputeDeliverUpgradeScriptConfig} + input_values: {get_param: input_values} + + ObjectStorageDeliverUpgradeScriptConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: major_upgrade_object_storage.sh} + + ObjectStorageDeliverUpgradeScriptDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: objectstorage_servers} + config: {get_resource: ObjectStorageDeliverUpgradeScriptConfig} + input_values: {get_param: input_values} + + CephStorageDeliverUpgradeScriptConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: major_upgrade_ceph_storage.sh} + + CephStorageDeliverUpgradeScriptDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: cephstorage_servers} + config: {get_resource: CephStorageDeliverUpgradeScriptConfig} + input_values: {get_param: input_values} diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh new file mode 100644 index 00000000..7fd26945 --- /dev/null +++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Special pieces of upgrade migration logic go into this +# file. E.g. Pacemaker cluster transitions for existing deployments, +# matching changes to overcloud_controller_pacemaker.pp (Puppet +# handles deployment, this file handles migrations). +# +# This file shouldn't execute any action on its own, all logic should +# be wrapped into bash functions. Upgrade scripts will source this +# file and call the functions defined in this file where appropriate. +# +# The migration functions should be idempotent. If the migration has +# been already applied, it should be possible to call the function +# again without damaging the deployment or failing the upgrade. 
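
The idempotency contract spelled out in major_upgrade_pacemaker_migrations.sh above (functions only, no standalone actions, safe to call repeatedly) can be illustrated with a short sketch. This is not part of the commit: the clone resource "example-service-clone" and its ordering constraint are hypothetical, and only the check-before-change guard is borrowed from this changeset (it mirrors the pcs guards already used in extraconfig/tasks/yum_update.sh).

#!/bin/bash
# Illustrative sketch only -- not delivered by this changeset.
# A migration function written to the contract in
# major_upgrade_pacemaker_migrations.sh: sourcing the file runs nothing,
# and the function inspects cluster state before changing it, so an
# upgrade script can call it more than once without harm.
# "example-service-clone" is a hypothetical Pacemaker clone resource.
function ensure_example_order_constraint {
    # Only add the ordering constraint if it is not already present.
    if ! pcs constraint order show | grep -q "promote redis-master then start example-service-clone"; then
        pcs constraint order promote redis-master then start example-service-clone require-all=false
    fi
}
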
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh index ee3216e4..0808763e 100755 --- a/extraconfig/tasks/pacemaker_common_functions.sh +++ b/extraconfig/tasks/pacemaker_common_functions.sh @@ -39,10 +39,23 @@ function echo_error { } function systemctl_swift { + services=( openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \ + openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \ + openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy ) action=$1 - for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \ - openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \ - openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy; do - systemctl $action $S + case $action in + stop) + services=$(systemctl | grep swift | grep running | awk '{print $1}') + ;; + start) + enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml 'enable_swift_storage') + if [[ $enable_swift_storage != "true" ]]; then + services=( openstack-swift-proxy ) + fi + ;; + *) services=() ;; # for safetly, should never happen + esac + for S in ${services[@]}; do + systemctl $action $S done } diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh index 9fc17f63..59e4be45 100755 --- a/extraconfig/tasks/yum_update.sh +++ b/extraconfig/tasks/yum_update.sh @@ -53,14 +53,12 @@ neutron-l3-agent neutron-metadata-agent neutron-openvswitch-agent neutron-server +openstack-ceilometer-alarm-evaluator +openstack-ceilometer-alarm-notifier openstack-ceilometer-api openstack-ceilometer-central openstack-ceilometer-collector openstack-ceilometer-notification -openstack-aodh-api -openstack-aodh-evaluator -openstack-aodh-notifier -openstack-aodh-listener openstack-cinder-api openstack-cinder-scheduler openstack-cinder-volume @@ -109,9 +107,6 @@ openstack-nova-scheduler" pcs -f $pacemaker_dumpfile constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false fi - if ! pcs constraint order show | grep "promote redis-master then start openstack-aodh-evaluator-clone"; then - pcs -f $pacemaker_dumpfile constraint order promote redis-master then start openstack-aodh-evaluator-clone require-all=false - fi # ensure neutron constraints https://review.openstack.org/#/c/229466 # remove ovs-cleanup after server and add openvswitch-agent instead if pcs constraint order show | grep "start neutron-server-clone then start neutron-ovs-cleanup-clone"; then diff --git a/network/config/bond-with-vlans/controller-v6.yaml b/network/config/bond-with-vlans/controller-v6.yaml new file mode 100644 index 00000000..7869ebfc --- /dev/null +++ b/network/config/bond-with-vlans/controller-v6.yaml @@ -0,0 +1,180 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config with 2 bonded nics on a bridge + with VLANs attached for the controller role with IPv6 on the External + network. The IPv6 default route is on the External network, and the + IPv4 default route is on the Control Plane. 
+ +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string + BondInterfaceOvsOptions: + default: 'bond_mode=active-backup' + description: The ovs_options string for the bond interface. Set things like + lacp=active and/or bond_mode=balance-slb using this option. + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number + ExternalInterfaceDefaultRoute: + default: '10.0.0.1' + description: default route for the external network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: interface + name: nic1 + use_dhcp: false + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + - + type: ovs_bridge + name: {get_input: bridge_name} + dns_servers: {get_param: DnsServers} + members: + - + type: ovs_bond + name: bond1 + ovs_options: {get_param: BondInterfaceOvsOptions} + members: + - + type: interface + name: nic2 + primary: true + - + type: interface + name: nic3 + - + type: vlan + device: bond1 + vlan_id: {get_param: ExternalNetworkVlanID} + addresses: + - + ip_netmask: {get_param: ExternalIpSubnet} + routes: + - + default: true + next_hop: {get_param: ExternalInterfaceDefaultRoute} + - + type: vlan + device: bond1 + vlan_id: {get_param: InternalApiNetworkVlanID} + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + - + type: vlan + device: bond1 + vlan_id: {get_param: StorageNetworkVlanID} + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: vlan + device: bond1 + vlan_id: {get_param: StorageMgmtNetworkVlanID} + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + - + type: vlan + device: bond1 + vlan_id: {get_param: TenantNetworkVlanID} + addresses: + - + ip_netmask: {get_param: TenantIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: vlan + # device: bond1 + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/multiple-nics/controller-v6.yaml b/network/config/multiple-nics/controller-v6.yaml new file mode 100644 index 00000000..b69879fb --- /dev/null +++ b/network/config/multiple-nics/controller-v6.yaml @@ -0,0 +1,174 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure multiple interfaces + for the controller role with IPv6 on the External network. The IPv6 + default route is on the External network, and the IPv4 default route + is on the Control Plane. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. 
+ type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number + ExternalInterfaceDefaultRoute: + default: '10.0.0.1' + description: default route for the external network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + - + type: interface + name: nic2 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: interface + name: nic3 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + - + type: interface + name: nic4 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + - + # Create a bridge which can also be used for VLAN-mode bridge mapping + type: ovs_bridge + name: br-tenant + use_dhcp: false + addresses: + - + ip_netmask: {get_param: TenantIpSubnet} + members: + - + type: interface + name: nic5 + use_dhcp: false + # force the MAC address of the bridge to this interface + primary: true + - + type: ovs_bridge + name: {get_input: bridge_name} + dns_servers: {get_param: DnsServers} + use_dhcp: false + addresses: + - + ip_netmask: {get_param: ExternalIpSubnet} + routes: + - + default: true + next_hop: {get_param: ExternalInterfaceDefaultRoute} + members: + - + type: interface + name: nic6 + # force the MAC address of the bridge to this interface + primary: true + # Uncomment when including environments/network-management.yaml + #- + # type: interface + # name: nic7 + # use_dhcp: false + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/single-nic-vlans/controller-v6.yaml b/network/config/single-nic-vlans/controller-v6.yaml new file mode 100644 index 00000000..472e539d --- /dev/null +++ b/network/config/single-nic-vlans/controller-v6.yaml @@ -0,0 +1,158 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure VLANs for the + controller role with IPv6 on the External network. 
The IPv6 default + route is on the External network, and the IPv4 default route is on + the Control Plane. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number + ExternalInterfaceDefaultRoute: + default: '10.0.0.1' + description: default route for the external network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: ovs_bridge + name: {get_input: bridge_name} + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + members: + - + type: interface + name: nic1 + # force the MAC address of the bridge to this interface + primary: true + - + type: vlan + vlan_id: {get_param: ExternalNetworkVlanID} + addresses: + - + ip_netmask: {get_param: ExternalIpSubnet} + routes: + - + default: true + next_hop: {get_param: ExternalInterfaceDefaultRoute} + - + type: vlan + vlan_id: {get_param: InternalApiNetworkVlanID} + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageNetworkVlanID} + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageMgmtNetworkVlanID} + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + - + type: vlan + vlan_id: {get_param: TenantNetworkVlanID} + addresses: + - + ip_netmask: {get_param: TenantIpSubnet} + #- # Uncomment when including environments/network-management.yaml + # type: vlan + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/single-nic-vlans/controller.yaml b/network/config/single-nic-vlans/controller.yaml index 3b22b36b..a5a0745d 100644 --- a/network/config/single-nic-vlans/controller.yaml +++ b/network/config/single-nic-vlans/controller.yaml @@ -111,7 +111,7 @@ resources: ip_netmask: {get_param: ExternalIpSubnet} routes: - - ip_netmask: 0.0.0.0/0 + default: true next_hop: {get_param: ExternalInterfaceDefaultRoute} - type: vlan diff --git a/network/endpoints/build_endpoint_map.py b/network/endpoints/build_endpoint_map.py index 056d6889..d8cdee3d 100755 --- a/network/endpoints/build_endpoint_map.py +++ b/network/endpoints/build_endpoint_map.py @@ -173,7 +173,10 @@ def template_endpoint_items(config): def generate_endpoint_map_template(config): return collections.OrderedDict([ ('heat_template_version', '2015-04-30'), - ('description', 'A map of OpenStack endpoints.'), + ('description', 'A map of OpenStack endpoints. Since the endpoints ' + 'are URLs, we need to have brackets around IPv6 IP addresses. The ' + 'inputs to these parameters come from net_ip_uri_map, which will ' + 'include these brackets in IPv6 addresses.'), ('parameters', template_parameters(config)), ('outputs', { 'endpoint_map': { diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml index 84fea0ef..7a0bbf4a 100644 --- a/network/endpoints/endpoint_data.yaml +++ b/network/endpoints/endpoint_data.yaml @@ -1,15 +1,6 @@ # Data in this file is used to generate the endpoint_map.yaml template. # Run the script build_endpoint_map.py to regenerate the file. 
-Aodh: - Internal: - vip_param: AodhApi - Public: - vip_param: Public - Admin: - vip_param: AodhApi - port: 8042 - Ceilometer: Internal: vip_param: CeilometerApi diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml index bb0413a5..a31094a1 100644 --- a/network/endpoints/endpoint_map.yaml +++ b/network/endpoints/endpoint_map.yaml @@ -3,9 +3,11 @@ ### by the script build_endpoint_map.py heat_template_version: '2015-04-30' -description: A map of OpenStack endpoints. +description: A map of OpenStack endpoints. Since the endpoints are URLs, + we need to have brackets around IPv6 IP addresses. The inputs to these + parameters come from net_ip_uri_map, which will include these brackets + in IPv6 addresses. parameters: - AodhApiVirtualIP: {type: string, default: ''} CeilometerApiVirtualIP: {type: string, default: ''} CinderApiVirtualIP: {type: string, default: ''} GlanceApiVirtualIP: {type: string, default: ''} @@ -22,9 +24,6 @@ parameters: EndpointMap: type: json default: - AodhAdmin: {protocol: http, port: '8042', host: IP_ADDRESS} - AodhInternal: {protocol: http, port: '8042', host: IP_ADDRESS} - AodhPublic: {protocol: http, port: '8042', host: IP_ADDRESS} CeilometerAdmin: {protocol: http, port: '8777', host: IP_ADDRESS} CeilometerInternal: {protocol: http, port: '8777', host: IP_ADDRESS} CeilometerPublic: {protocol: http, port: '8777', host: IP_ADDRESS} @@ -72,120 +71,6 @@ parameters: outputs: endpoint_map: value: - AodhAdmin: - host: - str_replace: - template: - get_param: [EndpointMap, AodhAdmin, host] - params: - CLOUDNAME: {get_param: CloudName} - IP_ADDRESS: {get_param: AodhApiVirtualIP} - port: - get_param: [EndpointMap, AodhAdmin, port] - protocol: - get_param: [EndpointMap, AodhAdmin, protocol] - uri: - list_join: - - '' - - - get_param: [EndpointMap, AodhAdmin, protocol] - - :// - - str_replace: - template: - get_param: [EndpointMap, AodhAdmin, host] - params: - CLOUDNAME: {get_param: CloudName} - IP_ADDRESS: {get_param: AodhApiVirtualIP} - - ':' - - get_param: [EndpointMap, AodhAdmin, port] - uri_no_suffix: - list_join: - - '' - - - get_param: [EndpointMap, AodhAdmin, protocol] - - :// - - str_replace: - template: - get_param: [EndpointMap, AodhAdmin, host] - params: - CLOUDNAME: {get_param: CloudName} - IP_ADDRESS: {get_param: AodhApiVirtualIP} - - ':' - - get_param: [EndpointMap, AodhAdmin, port] - AodhInternal: - host: - str_replace: - template: - get_param: [EndpointMap, AodhInternal, host] - params: - CLOUDNAME: {get_param: CloudName} - IP_ADDRESS: {get_param: AodhApiVirtualIP} - port: - get_param: [EndpointMap, AodhInternal, port] - protocol: - get_param: [EndpointMap, AodhInternal, protocol] - uri: - list_join: - - '' - - - get_param: [EndpointMap, AodhInternal, protocol] - - :// - - str_replace: - template: - get_param: [EndpointMap, AodhInternal, host] - params: - CLOUDNAME: {get_param: CloudName} - IP_ADDRESS: {get_param: AodhApiVirtualIP} - - ':' - - get_param: [EndpointMap, AodhInternal, port] - uri_no_suffix: - list_join: - - '' - - - get_param: [EndpointMap, AodhInternal, protocol] - - :// - - str_replace: - template: - get_param: [EndpointMap, AodhInternal, host] - params: - CLOUDNAME: {get_param: CloudName} - IP_ADDRESS: {get_param: AodhApiVirtualIP} - - ':' - - get_param: [EndpointMap, AodhInternal, port] - AodhPublic: - host: - str_replace: - template: - get_param: [EndpointMap, AodhPublic, host] - params: - CLOUDNAME: {get_param: CloudName} - IP_ADDRESS: {get_param: PublicVirtualIP} - port: - get_param: [EndpointMap, AodhPublic, 
port] - protocol: - get_param: [EndpointMap, AodhPublic, protocol] - uri: - list_join: - - '' - - - get_param: [EndpointMap, AodhPublic, protocol] - - :// - - str_replace: - template: - get_param: [EndpointMap, AodhPublic, host] - params: - CLOUDNAME: {get_param: CloudName} - IP_ADDRESS: {get_param: PublicVirtualIP} - - ':' - - get_param: [EndpointMap, AodhPublic, port] - uri_no_suffix: - list_join: - - '' - - - get_param: [EndpointMap, AodhPublic, protocol] - - :// - - str_replace: - template: - get_param: [EndpointMap, AodhPublic, host] - params: - CLOUDNAME: {get_param: CloudName} - IP_ADDRESS: {get_param: PublicVirtualIP} - - ':' - - get_param: [EndpointMap, AodhPublic, port] CeilometerAdmin: host: str_replace: diff --git a/network/external_v6.yaml b/network/external_v6.yaml new file mode 100644 index 00000000..3e120f24 --- /dev/null +++ b/network/external_v6.yaml @@ -0,0 +1,69 @@ +heat_template_version: 2015-04-30 + +description: > + External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc. + +parameters: + # the defaults here work for static IP assignment (IPAM) only + ExternalNetCidr: + # OpenStack uses the EUI-64 address format, which requires a /64 prefix + default: '2001:db8:fd00:1000::/64' + description: Cidr for the external network. + type: string + ExternalNetValueSpecs: + default: {'provider:physical_network': 'external', 'provider:network_type': 'flat'} + description: Value specs for the external network. + type: json + ExternalNetAdminStateUp: + default: false + description: This admin state of of the network. + type: boolean + ExternalNetShared: + default: false + description: Whether this network is shared across all tenants. + type: boolean + ExternalNetName: + default: external + description: The name of the external network. + type: string + ExternalSubnetName: + default: external_subnet + description: The name of the external subnet in Neutron. + type: string + ExternalAllocationPools: + default: [{'start': '2001:db8:fd00:1000::10', 'end': '2001:db8:fd00:1000:ffff:ffff:ffff:fffe'}] + description: Ip allocation pool range for the external network. + type: json + IPv6AddressMode: + default: dhcpv6-stateful + description: Neutron subnet IPv6 address mode + type: string + IPv6RAMode: + default: dhcpv6-stateful + description: Neutron subnet IPv6 router advertisement mode + type: string + +resources: + ExternalNetwork: + type: OS::Neutron::Net + properties: + admin_state_up: {get_param: ExternalNetAdminStateUp} + name: {get_param: ExternalNetName} + shared: {get_param: ExternalNetShared} + value_specs: {get_param: ExternalNetValueSpecs} + + ExternalSubnet: + type: OS::Neutron::Subnet + properties: + ip_version: 6 + ipv6_address_mode: {get_param: IPv6AddressMode} + ipv6_ra_mode: {get_param: IPv6RAMode} + cidr: {get_param: ExternalNetCidr} + name: {get_param: ExternalSubnetName} + network: {get_resource: ExternalNetwork} + allocation_pools: {get_param: ExternalAllocationPools} + +outputs: + OS::stack_id: + description: Neutron external network + value: {get_resource: ExternalNetwork} diff --git a/network/internal_api_v6.yaml b/network/internal_api_v6.yaml new file mode 100644 index 00000000..68c14fbe --- /dev/null +++ b/network/internal_api_v6.yaml @@ -0,0 +1,69 @@ +heat_template_version: 2015-04-30 + +description: > + Internal API network. Used for most APIs, Database, RPC. 
+ +parameters: + # the defaults here work for static IP assignment (IPAM) only + InternalApiNetCidr: + # OpenStack uses the EUI-64 address format, which requires a /64 prefix + default: 'fd00:fd00:fd00:2000::/64' + description: Cidr for the internal API network. + type: string + InternalApiNetValueSpecs: + default: {'provider:physical_network': 'internal_api', 'provider:network_type': 'flat'} + description: Value specs for the internal API network. + type: json + InternalApiNetAdminStateUp: + default: false + description: This admin state of of the network. + type: boolean + InternalApiNetShared: + default: false + description: Whether this network is shared across all tenants. + type: boolean + InternalApiNetName: + default: internal_api + description: The name of the internal API network. + type: string + InternalApiSubnetName: + default: internal_api_subnet + description: The name of the internal API subnet in Neutron. + type: string + InternalApiAllocationPools: + default: [{'start': 'fd00:fd00:fd00:2000::10', 'end': 'fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe'}] + description: Ip allocation pool range for the internal API network. + type: json + IPv6AddressMode: + default: dhcpv6-stateful + description: Neutron subnet IPv6 address mode + type: string + IPv6RAMode: + default: dhcpv6-stateful + description: Neutron subnet IPv6 router advertisement mode + type: string + +resources: + InternalApiNetwork: + type: OS::Neutron::Net + properties: + admin_state_up: {get_param: InternalApiNetAdminStateUp} + name: {get_param: InternalApiNetName} + shared: {get_param: InternalApiNetShared} + value_specs: {get_param: InternalApiNetValueSpecs} + + InternalApiSubnet: + type: OS::Neutron::Subnet + properties: + ip_version: 6 + ipv6_address_mode: {get_param: IPv6AddressMode} + ipv6_ra_mode: {get_param: IPv6RAMode} + cidr: {get_param: InternalApiNetCidr} + name: {get_param: InternalApiSubnetName} + network: {get_resource: InternalApiNetwork} + allocation_pools: {get_param: InternalApiAllocationPools} + +outputs: + OS::stack_id: + description: Neutron internal network + value: {get_resource: InternalApiNetwork} diff --git a/network/ports/ctlplane_vip.yaml b/network/ports/ctlplane_vip.yaml index 1d8b2916..5ac7d344 100644 --- a/network/ports/ctlplane_vip.yaml +++ b/network/ports/ctlplane_vip.yaml @@ -45,6 +45,9 @@ outputs: ip_address: description: Virtual IP network IP value: {get_attr: [VipPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: Virtual IP network IP (for compatibility with vip_v6.yaml) + value: {get_attr: [VipPort, fixed_ips, 0, ip_address]} ip_subnet: description: IP/Subnet CIDR for the ctlplane network. 
value: diff --git a/network/ports/external.yaml b/network/ports/external.yaml index df12cc80..c4f815fb 100644 --- a/network/ports/external.yaml +++ b/network/ports/external.yaml @@ -48,6 +48,9 @@ outputs: ip_address: description: external network IP value: {get_attr: [ExternalPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: external network IP (for compatibility with external_v6.yaml) + value: {get_attr: [ExternalPort, fixed_ips, 0, ip_address]} ip_subnet: description: IP/Subnet CIDR for the external network IP value: diff --git a/network/ports/external_from_pool.yaml b/network/ports/external_from_pool.yaml index 5e61683a..98f2aa35 100644 --- a/network/ports/external_from_pool.yaml +++ b/network/ports/external_from_pool.yaml @@ -33,6 +33,9 @@ outputs: ip_address: description: external network IP value: {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]} + ip_address_uri: + description: external network IP (for compatibility with IPv6) + value: {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]} ip_subnet: description: IP/Subnet CIDR for the external network IP value: diff --git a/network/ports/external_from_pool_v6.yaml b/network/ports/external_from_pool_v6.yaml new file mode 100644 index 00000000..bf0c036d --- /dev/null +++ b/network/ports/external_from_pool_v6.yaml @@ -0,0 +1,54 @@ +heat_template_version: 2015-04-30 + +description: > + Returns an IP from a network mapped list of IPs. This version is for IPv6 + addresses. The ip_address_uri output will have brackets for use in URLs. + +parameters: + ExternalNetName: + description: Name of the external network + default: external + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + IPPool: + default: {} + description: A network mapped list of IPs + type: json + NodeIndex: + default: 0 + description: Index of the IP to get from Pool + type: number + ExternalNetCidr: + default: '2001:db8:fd00:1000::/64' + description: Cidr for the external network. + type: string + +outputs: + ip_address: + description: external network IP + value: {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]} + ip_address_uri: + description: external network IP (for compatibility with IPv6) + value: + list_join: + - '' + - - '[' + - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]} + - ']' + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the external network IP + value: + list_join: + - '' + - - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]} + - '/' + - {get_param: [ExternalNetCidr, -2]} + - {get_param: [ExternalNetCidr, -1]} diff --git a/network/ports/external_v6.yaml b/network/ports/external_v6.yaml new file mode 100644 index 00000000..522caaa0 --- /dev/null +++ b/network/ports/external_v6.yaml @@ -0,0 +1,68 @@ +heat_template_version: 2015-04-30 + +description: > + Creates a port on the external network. The IP address will be chosen + automatically if FixedIPs is empty. 
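# The v6 port templates add an ip_address_uri output that wraps the address in square
# brackets for direct use in URLs, while ip_address stays bare for configuration that
# expects a plain address. As an illustration, a port on the external network that
# received the first address of the default allocation pool on the default /64 subnet
# would yield:
#   ip_address:     2001:db8:fd00:1000::10
#   ip_address_uri: [2001:db8:fd00:1000::10]
#   ip_subnet:      2001:db8:fd00:1000::10/64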
+ +parameters: + ExternalNetName: + description: Name of the external neutron network + default: external + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + ControlPlaneNetwork: # Here for compatibility with ctlplane_vip.yaml + description: The name of the undercloud Neutron control plane + default: ctlplane + type: string + FixedIPs: + description: > + Control the IP allocation for the VIP port. E.g. + [{'ip_address':'1.2.3.4'}] + default: [] + type: json + IPPool: # Here for compatibility with from_pool.yaml + default: {} + type: json + NodeIndex: # Here for compatibility with from_pool.yaml + default: 0 + type: number + +resources: + + ExternalPort: + type: OS::Neutron::Port + properties: + network: {get_param: ExternalNetName} + name: {get_param: PortName} + fixed_ips: {get_param: FixedIPs} + replacement_policy: AUTO + +outputs: + ip_address: + description: external network IP + value: {get_attr: [ExternalPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: external network IP with brackets suitable for a URL + value: + list_join: + - '' + - - '[' + - {get_attr: [ExternalPort, fixed_ips, 0, ip_address]} + - ']' + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the external network IP + value: + list_join: + - '' + - - {get_attr: [ExternalPort, fixed_ips, 0, ip_address]} + - '/' + - {get_attr: [ExternalPort, subnets, 0, cidr, -2]} + - {get_attr: [ExternalPort, subnets, 0, cidr, -1]} diff --git a/network/ports/from_service.yaml b/network/ports/from_service.yaml index 6b669f41..359d77a7 100644 --- a/network/ports/from_service.yaml +++ b/network/ports/from_service.yaml @@ -32,3 +32,6 @@ outputs: ip_address: description: network IP value: {get_param: [ServiceVips, {get_param: ServiceName}]} + ip_address_uri: + description: network IP (for compatibility with IPv6) + value: {get_param: [ServiceVips, {get_param: ServiceName}]} diff --git a/network/ports/from_service_v6.yaml b/network/ports/from_service_v6.yaml new file mode 100644 index 00000000..2dd0a0ee --- /dev/null +++ b/network/ports/from_service_v6.yaml @@ -0,0 +1,42 @@ +heat_template_version: 2015-04-30 + +description: > + Returns an IP from a service mapped list of IPv6 IPs + +parameters: + ServiceName: + description: Name of the service to lookup + default: '' + type: string + NetworkName: # Here for compatability with ctlplane_vip.yaml + description: Name of the network where the VIP will be created + default: ctlplane + type: string + PortName: # Here for compatability with ctlplane_vip.yaml + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with ctlplane_vip.yaml + description: IP address on the control plane + default: '' + type: string + ControlPlaneNetwork: # Here for compatability with ctlplane_vip.yaml + description: The name of the undercloud Neutron control plane + default: ctlplane + type: string + ServiceVips: + default: {} + type: json + +outputs: + ip_address: + description: network IP + value: {get_param: [ServiceVips, {get_param: ServiceName}]} + ip_address_uri: + description: network IP (with brackets for use in URLs) + value: + list_join: + - '' + - - '[' + - {get_param: [ServiceVips, {get_param: ServiceName}]} + - ']' diff --git a/network/ports/internal_api.yaml b/network/ports/internal_api.yaml index 
4039f9d7..1d521a8d 100644 --- a/network/ports/internal_api.yaml +++ b/network/ports/internal_api.yaml @@ -43,6 +43,10 @@ outputs: ip_address: description: internal API network IP value: {get_attr: [InternalApiPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: | + internal API network IP (for compatibility with internal_api_v6.yaml) + value: {get_attr: [InternalApiPort, fixed_ips, 0, ip_address]} ip_subnet: description: IP/Subnet CIDR for the internal API network IP value: diff --git a/network/ports/internal_api_from_pool.yaml b/network/ports/internal_api_from_pool.yaml index 18ccd2b2..c7b04847 100644 --- a/network/ports/internal_api_from_pool.yaml +++ b/network/ports/internal_api_from_pool.yaml @@ -33,6 +33,9 @@ outputs: ip_address: description: internal API network IP value: {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]} + ip_address_uri: + description: internal API network IP (for compatibility with internal_api_v6.yaml) + value: {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]} ip_subnet: description: IP/Subnet CIDR for the internal API network IP value: diff --git a/network/ports/internal_api_from_pool_v6.yaml b/network/ports/internal_api_from_pool_v6.yaml new file mode 100644 index 00000000..34c17ab2 --- /dev/null +++ b/network/ports/internal_api_from_pool_v6.yaml @@ -0,0 +1,54 @@ +heat_template_version: 2015-04-30 + +description: > + Returns an IP from a network mapped list of IPs. This version is for IPv6 + addresses. The ip_address_uri output will have brackets for use in URLs. + +parameters: + InternalApiNetName: + description: Name of the internal API network + default: internal_api + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + IPPool: + default: {} + description: A network mapped list of IPs + type: json + NodeIndex: + default: 0 + description: Index of the IP to get from Pool + type: number + InternalApiNetCidr: + default: 'fd00:fd00:fd00:2000::/64' + description: Cidr for the internal API network. + type: string + +outputs: + ip_address: + description: internal API network IP + value: {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]} + ip_address_uri: + description: internal API network IP (for compatibility with internal_api_v6.yaml) + value: + list_join: + - '' + - - '[' + - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]} + - ']' + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the internal API network IP + value: + list_join: + - '' + - - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]} + - '/' + - {get_param: [InternalApiNetCidr, -2]} + - {get_param: [InternalApiNetCidr, -1]} diff --git a/network/ports/internal_api_v6.yaml b/network/ports/internal_api_v6.yaml new file mode 100644 index 00000000..279e6bd0 --- /dev/null +++ b/network/ports/internal_api_v6.yaml @@ -0,0 +1,63 @@ +heat_template_version: 2015-04-30 + +description: > + Creates a port on the internal_api network. 
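# The IPPool and NodeIndex parameters below are accepted but unused; they exist so this
# template stays interchangeable with internal_api_from_pool_v6.yaml when both are bound
# to the same port resource type. A hypothetical resource_registry entry showing the
# swap (the CephStorage port type is taken from the role wiring later in this change;
# the relative path is illustrative):
#
#   resource_registry:
#     OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool_v6.yaml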
+ +parameters: + InternalApiNetName: + description: Name of the internal API neutron network + default: internal_api + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + FixedIPs: + description: > + Control the IP allocation for the VIP port. E.g. + [{'ip_address':'1.2.3.4'}] + default: [] + type: json + IPPool: # Here for compatibility with from_pool.yaml + default: {} + type: json + NodeIndex: # Here for compatibility with from_pool.yaml + default: 0 + type: number + +resources: + + InternalApiPort: + type: OS::Neutron::Port + properties: + network: {get_param: InternalApiNetName} + name: {get_param: PortName} + fixed_ips: {get_param: FixedIPs} + replacement_policy: AUTO + +outputs: + ip_address: + description: internal API network IP + value: {get_attr: [InternalApiPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: internal api network IP with brackets suitable for a URL + value: + list_join: + - '' + - - '[' + - {get_attr: [InternalApiPort, fixed_ips, 0, ip_address]} + - ']' + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the internal API network IP + value: + list_join: + - '' + - - {get_attr: [InternalApiPort, fixed_ips, 0, ip_address]} + - '/' + - {get_attr: [InternalApiPort, subnets, 0, cidr, -2]} + - {get_attr: [InternalApiPort, subnets, 0, cidr, -1]} diff --git a/network/ports/management.yaml b/network/ports/management.yaml index 8ad2eb47..967b66e1 100644 --- a/network/ports/management.yaml +++ b/network/ports/management.yaml @@ -36,6 +36,9 @@ outputs: ip_address: description: management network IP value: {get_attr: [ManagementPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: management network IP (for compatibility with management_v6.yaml) + value: {get_attr: [ManagementPort, fixed_ips, 0, ip_address]} ip_subnet: description: IP/Subnet CIDR for the management network IP value: diff --git a/network/ports/management_from_pool.yaml b/network/ports/management_from_pool.yaml index bbd62f5b..fc87e39a 100644 --- a/network/ports/management_from_pool.yaml +++ b/network/ports/management_from_pool.yaml @@ -33,6 +33,9 @@ outputs: ip_address: description: management network IP value: {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]} + ip_address_uri: + description: management network IP (for compatibility with management_v6.yaml) + value: {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]} ip_subnet: description: IP/Subnet CIDR for the management network IP value: diff --git a/network/ports/management_v6.yaml b/network/ports/management_v6.yaml new file mode 100644 index 00000000..a94ebc7b --- /dev/null +++ b/network/ports/management_v6.yaml @@ -0,0 +1,54 @@ +heat_template_version: 2015-10-15 + +description: > + Creates a port on the management network. The IP address will be chosen + automatically if FixedIPs is empty. 
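# Unlike the other v6 port templates, this one derives the prefix length for ip_subnet
# with str_split on the subnet CIDR rather than indexing its last two characters, so it
# does not depend on a two-digit prefix; str_split requires the 2015-10-15 template
# version declared above. Roughly, with an example management CIDR:
#   str_split: ['/', 'fd00:fd00:fd00:6000::/64', 1]   evaluates to   '64'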
+ +parameters: + ManagementNetName: + description: Name of the management neutron network + default: management + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatibility with noop.yaml + description: IP address on the control plane + type: string + IPPool: # Here for compatibility with from_pool.yaml + default: {} + type: json + NodeIndex: # Here for compatibility with from_pool.yaml + default: 0 + type: number + +resources: + + ManagementPort: + type: OS::Neutron::Port + properties: + network: {get_param: ManagementNetName} + name: {get_param: PortName} + replacement_policy: AUTO + +outputs: + ip_address: + description: management network IP + value: {get_attr: [ManagementPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: management network IP with brackets suitable for a URL + value: + list_join: + - '' + - - '[' + - {get_attr: [ManagementPort, fixed_ips, 0, ip_address]} + - ']' + ip_subnet: + description: IP/Subnet CIDR for the management network IP + value: + list_join: + - '' + - - {get_attr: [ManagementPort, fixed_ips, 0, ip_address]} + - '/' + - {str_split: ['/', {get_attr: [ManagementPort, subnets, 0, cidr]}, 1]} diff --git a/network/ports/net_ip_map.yaml b/network/ports/net_ip_map.yaml index c6386025..6bb4557b 100644 --- a/network/ports/net_ip_map.yaml +++ b/network/ports/net_ip_map.yaml @@ -7,21 +7,45 @@ parameters: ExternalIp: default: '' type: string + ExternalIpUri: + default: '' + type: string + description: IP address with brackets in case of IPv6 InternalApiIp: default: '' type: string + InternalApiIpUri: + default: '' + type: string + description: IP address with brackets in case of IPv6 StorageIp: default: '' type: string + StorageIpUri: + default: '' + type: string + description: IP address with brackets in case of IPv6 StorageMgmtIp: default: '' type: string + StorageMgmtIpUri: + default: '' + type: string + description: IP address with brackets in case of IPv6 TenantIp: default: '' type: string + TenantIpUri: + default: '' + type: string + description: IP address with brackets in case of IPv6 ManagementIp: default: '' type: string + ManagementIpUri: + default: '' + type: string + description: IP address with brackets in case of IPv6 outputs: net_ip_map: @@ -36,3 +60,15 @@ outputs: storage_mgmt: {get_param: StorageMgmtIp} tenant: {get_param: TenantIp} management: {get_param: ManagementIp} + net_ip_uri_map: + description: > + A Hash containing a mapping of netowrk names to assigned IPs for a + specific machine with brackets around IPv6 addresses for use in URLs. 
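# net_ip_map keeps carrying bare addresses for configuration files, while net_ip_uri_map
# carries the same addresses with brackets already applied where needed, so endpoint
# URLs can be built by plain concatenation of protocol, host and port. Illustrative
# result for an IPv6 internal API VIP combined with the Ceilometer port from the
# EndpointMap defaults:
#   http://[fd00:fd00:fd00:2000::10]:8777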
+ value: + ctlplane: {get_param: ControlPlaneIp} + external: {get_param: ExternalIpUri} + internal_api: {get_param: InternalApiIpUri} + storage: {get_param: StorageIpUri} + storage_mgmt: {get_param: StorageMgmtIpUri} + tenant: {get_param: TenantIpUri} + management: {get_param: ManagementIpUri} diff --git a/network/ports/net_vip_map_external.yaml b/network/ports/net_vip_map_external.yaml index 23e1f992..a40a0bfc 100644 --- a/network/ports/net_vip_map_external.yaml +++ b/network/ports/net_vip_map_external.yaml @@ -24,18 +24,33 @@ parameters: ExternalIp: default: '' type: string + ExternalIpUri: + default: '' + type: string InternalApiIp: default: '' type: string + InternalApiIpUri: + default: '' + type: string StorageIp: default: '' type: string + StorageIpUri: + default: '' + type: string StorageMgmtIp: default: '' type: string + StorageMgmtIpUri: + default: '' + type: string TenantIp: default: '' type: string + TenantIpUri: + default: '' + type: string outputs: net_ip_map: @@ -48,3 +63,13 @@ outputs: internal_api: {get_param: InternalApiNetworkVip} storage: {get_param: StorageNetworkVip} storage_mgmt: {get_param: StorageMgmtNetworkVip} + net_ip_uri_map: + description: > + A Hash containing a mapping of netowrk names to assigned IPs for a + specific machine with brackets around IPv6 addresses for use in URLs. + value: + ctlplane: {get_param: ControlPlaneIP} + external: {get_param: ExternalNetworkVip} + internal_api: {get_param: InternalApiNetworkVip} + storage: {get_param: StorageNetworkVip} + storage_mgmt: {get_param: StorageMgmtNetworkVip} diff --git a/network/ports/net_vip_map_external_v6.yaml b/network/ports/net_vip_map_external_v6.yaml new file mode 100644 index 00000000..f6d67fe8 --- /dev/null +++ b/network/ports/net_vip_map_external_v6.yaml @@ -0,0 +1,95 @@ +heat_template_version: 2015-04-30 + +parameters: + # Set these via parameter defaults to configure external VIPs + ControlPlaneIP: + default: '' + type: string + ExternalNetworkVip: + default: '' + type: string + InternalApiNetworkVip: + default: '' + type: string + StorageNetworkVip: + default: '' + type: string + StorageMgmtNetworkVip: + default: '' + type: string + # The following are unused in this template + ControlPlaneIp: + default: '' + type: string + ExternalIp: + default: '' + type: string + ExternalIpUri: + default: '' + type: string + InternalApiIp: + default: '' + type: string + InternalApiIpUri: + default: '' + type: string + StorageIp: + default: '' + type: string + StorageIpUri: + default: '' + type: string + StorageMgmtIp: + default: '' + type: string + StorageMgmtIpUri: + default: '' + type: string + TenantIp: + default: '' + type: string + TenantIpUri: + default: '' + type: string + +outputs: + net_ip_map: + description: > + A Hash containing a mapping of network names to assigned IPs + for a specific machine. + value: + ctlplane: {get_param: ControlPlaneIP} + external: {get_param: ExternalNetworkVip} + internal_api: {get_param: InternalApiNetworkVip} + storage: {get_param: StorageNetworkVip} + storage_mgmt: {get_param: StorageMgmtNetworkVip} + net_ip_uri_map: + description: > + A Hash containing a mapping of netowrk names to assigned IPs for a + specific machine with brackets around IPv6 addresses for use in URLs. 
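# This variant is for deployments where the VIPs are supplied as parameters (typically
# an externally managed load balancer) and may be IPv6: with no Neutron port to derive a
# bracketed form from, the net_ip_uri_map output below wraps each supplied VIP in
# brackets itself.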
+ value: + ctlplane: {get_param: ControlPlaneIP} + external: + list_join: + - '' + - - '[' + - {get_param: ExternalNetworkVip} + - ']' + internal_api: + list_join: + - '' + - - '[' + - {get_param: InternalApiNetworkVip} + - ']' + storage: + list_join: + - '' + - - '[' + - {get_param: StorageNetworkVip} + - ']' + storage_mgmt: + list_join: + - '' + - - '[' + - {get_param: StorageMgmtNetworkVip} + - ']' diff --git a/network/ports/noop.yaml b/network/ports/noop.yaml index ac946cd9..96c461e0 100644 --- a/network/ports/noop.yaml +++ b/network/ports/noop.yaml @@ -44,6 +44,9 @@ outputs: ip_address: description: pass thru network IP value: {get_param: ControlPlaneIP} + ip_address_uri: + description: pass thru network IP (for compatibility with vip_v6.yaml) + value: {get_param: ControlPlaneIP} ip_subnet: description: IP/Subnet CIDR for the pass thru network IP value: diff --git a/network/ports/storage.yaml b/network/ports/storage.yaml index 579554fb..1ed5cca1 100644 --- a/network/ports/storage.yaml +++ b/network/ports/storage.yaml @@ -43,6 +43,9 @@ outputs: ip_address: description: storage network IP value: {get_attr: [StoragePort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: storage network IP (for compatibility with storage_v6.yaml) + value: {get_attr: [StoragePort, fixed_ips, 0, ip_address]} ip_subnet: description: IP/Subnet CIDR for the storage network IP value: diff --git a/network/ports/storage_from_pool.yaml b/network/ports/storage_from_pool.yaml index 13c16c15..dfc9e752 100644 --- a/network/ports/storage_from_pool.yaml +++ b/network/ports/storage_from_pool.yaml @@ -33,6 +33,9 @@ outputs: ip_address: description: storage network IP value: {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]} + ip_address_uri: + description: storage network IP (for compatibility with storage_v6.yaml) + value: {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]} ip_subnet: description: IP/Subnet CIDR for the storage network IP value: diff --git a/network/ports/storage_from_pool_v6.yaml b/network/ports/storage_from_pool_v6.yaml new file mode 100644 index 00000000..966d96ae --- /dev/null +++ b/network/ports/storage_from_pool_v6.yaml @@ -0,0 +1,54 @@ +heat_template_version: 2015-04-30 + +description: > + Returns an IP from a network mapped list of IPs. This version is for IPv6 + addresses. The ip_address_uri output will have brackets for use in URLs. + +parameters: + StorageNetName: + description: Name of the storage network + default: storage + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + IPPool: + default: {} + description: A network mapped list of IPs + type: json + NodeIndex: + default: 0 + description: Index of the IP to get from Pool + type: number + StorageNetCidr: + default: 'fd00:fd00:fd00:3000::/64' + description: Cidr for the storage network. + type: string + +outputs: + ip_address: + description: storage network IP + value: {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]} + ip_address_uri: + description: storage network IP (for compatibility with storage_v6.yaml) + value: + list_join: + - '' + - - '[' + - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]} + - ']' + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) 
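# The FIXME above refers to how the prefix length is recovered: indexing the CIDR string
# at -2 and -1 takes its last two characters, which is only correct for two-digit prefix
# lengths. With this template's default it behaves as intended:
#   'fd00:fd00:fd00:3000::/64'  ->  '6' + '4'  ->  '64'
# but a /8 or /128 prefix would be reassembled incorrectly.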
+ description: IP/Subnet CIDR for the storage network IP + value: + list_join: + - '' + - - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]} + - '/' + - {get_param: [StorageNetCidr, -2]} + - {get_param: [StorageNetCidr, -1]} diff --git a/network/ports/storage_mgmt.yaml b/network/ports/storage_mgmt.yaml index f97e337b..548d226a 100644 --- a/network/ports/storage_mgmt.yaml +++ b/network/ports/storage_mgmt.yaml @@ -43,6 +43,10 @@ outputs: ip_address: description: storage_mgmt network IP value: {get_attr: [StorageMgmtPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: | + storage_mgmt network IP (for compatibility with storage_mgmt_v6.yaml) + value: {get_attr: [StorageMgmtPort, fixed_ips, 0, ip_address]} ip_subnet: description: IP/Subnet CIDR for the storage_mgmt network IP value: diff --git a/network/ports/storage_mgmt_from_pool.yaml b/network/ports/storage_mgmt_from_pool.yaml index 7c033ebd..9c757a6e 100644 --- a/network/ports/storage_mgmt_from_pool.yaml +++ b/network/ports/storage_mgmt_from_pool.yaml @@ -33,6 +33,9 @@ outputs: ip_address: description: storage MGMT network IP value: {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]} + ip_address_uri: + description: storage MGMT network IP (for compatibility with storage_mgmt_v6.yaml) + value: {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]} ip_subnet: description: IP/Subnet CIDR for the storage MGMT network IP value: diff --git a/network/ports/storage_mgmt_from_pool_v6.yaml b/network/ports/storage_mgmt_from_pool_v6.yaml new file mode 100644 index 00000000..890da75c --- /dev/null +++ b/network/ports/storage_mgmt_from_pool_v6.yaml @@ -0,0 +1,54 @@ +heat_template_version: 2015-04-30 + +description: > + Returns an IP from a network mapped list of IPs This version is for IPv6 + addresses. The ip_address_uri output will have brackets for use in URLs. + +parameters: + StorageMgmtNetName: + description: Name of the storage MGMT network + default: storage_mgmt + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + IPPool: + default: {} + description: A network mapped list of IPs + type: json + NodeIndex: + default: 0 + description: Index of the IP to get from Pool + type: number + StorageMgmtNetCidr: + default: 'fd00:fd00:fd00:4000::/64' + description: Cidr for the storage MGMT network. + type: string + +outputs: + ip_address: + description: storage MGMT network IP + value: {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]} + ip_address_uri: + description: storage MGMT network IP (for compatibility with storage_mgmt_v6.yaml) + value: + list_join: + - '' + - - '[' + - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]} + - ']' + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) 
+ description: IP/Subnet CIDR for the storage MGMT network IP + value: + list_join: + - '' + - - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]} + - '/' + - {get_param: [StorageMgmtNetCidr, -2]} + - {get_param: [StorageMgmtNetCidr, -1]} diff --git a/network/ports/storage_mgmt_v6.yaml b/network/ports/storage_mgmt_v6.yaml new file mode 100644 index 00000000..61956be2 --- /dev/null +++ b/network/ports/storage_mgmt_v6.yaml @@ -0,0 +1,63 @@ +heat_template_version: 2015-04-30 + +description: > + Creates a port on the storage_mgmt API network. + +parameters: + StorageMgmtNetName: + description: Name of the storage_mgmt API neutron network + default: storage_mgmt + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + FixedIPs: + description: > + Control the IP allocation for the VIP port. E.g. + [{'ip_address':'1.2.3.4'}] + default: [] + type: json + IPPool: # Here for compatibility with from_pool.yaml + default: {} + type: json + NodeIndex: # Here for compatibility with from_pool.yaml + default: 0 + type: number + +resources: + + StorageMgmtPort: + type: OS::Neutron::Port + properties: + network: {get_param: StorageMgmtNetName} + name: {get_param: PortName} + fixed_ips: {get_param: FixedIPs} + replacement_policy: AUTO + +outputs: + ip_address: + description: storage_mgmt network IP + value: {get_attr: [StorageMgmtPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: storage_mgmt network IP with brackets suitable for a URI + value: + list_join: + - '' + - - '[' + - {get_attr: [StorageMgmtPort, fixed_ips, 0, ip_address]} + - ']' + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the storage_mgmt network IP + value: + list_join: + - '' + - - {get_attr: [StorageMgmtPort, fixed_ips, 0, ip_address]} + - '/' + - {get_attr: [StorageMgmtPort, subnets, 0, cidr, -2]} + - {get_attr: [StorageMgmtPort, subnets, 0, cidr, -1]} diff --git a/network/ports/storage_v6.yaml b/network/ports/storage_v6.yaml new file mode 100644 index 00000000..13b62276 --- /dev/null +++ b/network/ports/storage_v6.yaml @@ -0,0 +1,63 @@ +heat_template_version: 2015-04-30 + +description: > + Creates a port on the storage network. + +parameters: + StorageNetName: + description: Name of the storage neutron network + default: storage + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + FixedIPs: + description: > + Control the IP allocation for the VIP port. E.g. 
+ [{'ip_address':'1.2.3.4'}] + default: [] + type: json + IPPool: # Here for compatibility with from_pool.yaml + default: {} + type: json + NodeIndex: # Here for compatibility with from_pool.yaml + default: 0 + type: number + +resources: + + StoragePort: + type: OS::Neutron::Port + properties: + network: {get_param: StorageNetName} + name: {get_param: PortName} + fixed_ips: {get_param: FixedIPs} + replacement_policy: AUTO + +outputs: + ip_address: + description: storage network IP + value: {get_attr: [StoragePort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: storage network IP with brackets suitable for a URL + value: + list_join: + - '' + - - '[' + - {get_attr: [StoragePort, fixed_ips, 0, ip_address]} + - ']' + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the storage network IP + value: + list_join: + - '' + - - {get_attr: [StoragePort, fixed_ips, 0, ip_address]} + - '/' + - {get_attr: [StoragePort, subnets, 0, cidr, -2]} + - {get_attr: [StoragePort, subnets, 0, cidr, -1]} diff --git a/network/ports/tenant.yaml b/network/ports/tenant.yaml index cc0771ea..d8f78c49 100644 --- a/network/ports/tenant.yaml +++ b/network/ports/tenant.yaml @@ -43,6 +43,9 @@ outputs: ip_address: description: tenant network IP value: {get_attr: [TenantPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: tenant network IP (for compatibility with tenant_v6.yaml) + value: {get_attr: [TenantPort, fixed_ips, 0, ip_address]} ip_subnet: description: IP/Subnet CIDR for the tenant network IP value: diff --git a/network/ports/tenant_from_pool.yaml b/network/ports/tenant_from_pool.yaml index 17e4ac8a..d5f3156e 100644 --- a/network/ports/tenant_from_pool.yaml +++ b/network/ports/tenant_from_pool.yaml @@ -33,6 +33,9 @@ outputs: ip_address: description: tenant network IP value: {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]} + ip_address_uri: + description: tenant network IP (for compatibility with tenant_v6.yaml) + value: {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]} ip_subnet: description: IP/Subnet CIDR for the tenant network IP value: diff --git a/network/ports/tenant_from_pool_v6.yaml b/network/ports/tenant_from_pool_v6.yaml new file mode 100644 index 00000000..b2bcd426 --- /dev/null +++ b/network/ports/tenant_from_pool_v6.yaml @@ -0,0 +1,53 @@ +heat_template_version: 2015-04-30 + +description: > + Returns an IP from a network mapped list of IPs + +parameters: + TenantNetName: + description: Name of the tenant network + default: tenant + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + IPPool: + default: {} + description: A network mapped list of IPs + type: json + NodeIndex: + default: 0 + description: Index of the IP to get from Pool + type: number + TenantNetCidr: + default: 'fd00:fd00:fd00:5000::/64' + description: Cidr for the tenant network. + type: string + +outputs: + ip_address: + description: tenant network IP + value: {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]} + ip_address_uri: + description: tenant network IP (for compatibility with tenant_v6.yaml) + value: + list_join: + - '' + - - '[' + - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]} + - ']' + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) 
+ description: IP/Subnet CIDR for the tenant network IP + value: + list_join: + - '' + - - {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]} + - '/' + - {get_param: [TenantNetCidr, -2]} + - {get_param: [TenantNetCidr, -1]} diff --git a/network/ports/tenant_v6.yaml b/network/ports/tenant_v6.yaml new file mode 100644 index 00000000..6ca37549 --- /dev/null +++ b/network/ports/tenant_v6.yaml @@ -0,0 +1,63 @@ +heat_template_version: 2015-04-30 + +description: > + Creates a port on the tenant network. + +parameters: + TenantNetName: + description: Name of the tenant neutron network + default: tenant + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + FixedIPs: + description: > + Control the IP allocation for the VIP port. E.g. + [{'ip_address':'1.2.3.4'}] + default: [] + type: json + IPPool: # Here for compatibility with from_pool.yaml + default: {} + type: json + NodeIndex: # Here for compatibility with from_pool.yaml + default: 0 + type: number + +resources: + + TenantPort: + type: OS::Neutron::Port + properties: + network: {get_param: TenantNetName} + name: {get_param: PortName} + fixed_ips: {get_param: FixedIPs} + replacement_policy: AUTO + +outputs: + ip_address: + description: tenant network IP + value: {get_attr: [TenantPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: tenant network IP with brackets suitable for a URL + value: + list_join: + - '' + - - '[' + - {get_attr: [TenantPort, fixed_ips, 0, ip_address]} + - ']' + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the tenant network IP + value: + list_join: + - '' + - - {get_attr: [TenantPort, fixed_ips, 0, ip_address]} + - '/' + - {get_attr: [TenantPort, subnets, 0, cidr, -2]} + - {get_attr: [TenantPort, subnets, 0, cidr, -1]} diff --git a/network/ports/vip.yaml b/network/ports/vip.yaml index fb251994..38322907 100644 --- a/network/ports/vip.yaml +++ b/network/ports/vip.yaml @@ -45,6 +45,9 @@ outputs: ip_address: description: Virtual IP network IP value: {get_attr: [VipPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: Virtual IP network IP (for compatibility with vip.yaml) + value: {get_attr: [VipPort, fixed_ips, 0, ip_address]} ip_subnet: description: IP/Subnet CIDR for the network associated with this IP value: diff --git a/network/ports/vip_v6.yaml b/network/ports/vip_v6.yaml new file mode 100644 index 00000000..de927094 --- /dev/null +++ b/network/ports/vip_v6.yaml @@ -0,0 +1,65 @@ +heat_template_version: 2015-04-30 + +description: > + Creates a port for a VIP on the isolated network NetworkName. + The IP address will be chosen automatically if FixedIPs is empty. + +parameters: + ServiceName: # Here for compatibility with from_service.yaml + description: Name of the service to lookup + default: '' + type: string + NetworkName: + description: Name of the network where the VIP will be created + default: internal_api + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + ControlPlaneNetwork: + description: The name of the undercloud Neutron control plane + default: ctlplane + type: string + FixedIPs: + description: > + Control the IP allocation for the VIP port. E.g. 
+ [{'ip_address':'1.2.3.4'}] + default: [] + type: json + +resources: + VipPort: + type: OS::Neutron::Port + properties: + network: {get_param: NetworkName} + name: {get_param: PortName} + fixed_ips: {get_param: FixedIPs} + replacement_policy: AUTO + +outputs: + ip_address: + description: Virtual IP network IP + value: {get_attr: [VipPort, fixed_ips, 0, ip_address]} + ip_address_uri: + description: Virtual IP with brackets suitable for a URL + value: + list_join: + - '' + - - '[' + - {get_attr: [VipPort, fixed_ips, 0, ip_address]} + - ']' + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the network associated with this IP + value: + list_join: + - '' + - - {get_attr: [VipPort, fixed_ips, 0, ip_address]} + - '/' + - {get_attr: [VipPort, subnets, 0, cidr, -2]} + - {get_attr: [VipPort, subnets, 0, cidr, -1]} diff --git a/network/storage_mgmt_v6.yaml b/network/storage_mgmt_v6.yaml new file mode 100644 index 00000000..f05644ef --- /dev/null +++ b/network/storage_mgmt_v6.yaml @@ -0,0 +1,69 @@ +heat_template_version: 2015-04-30 + +description: > + Storage management network. Storage replication, etc. + +parameters: + # the defaults here work for static IP assignment (IPAM) only + StorageMgmtNetCidr: + # OpenStack uses the EUI-64 address format, which requires a /64 prefix + default: 'fd00:fd00:fd00:4000::/64' + description: Cidr for the storage management network. + type: string + StorageMgmtNetValueSpecs: + default: {'provider:physical_network': 'storage_mgmt', 'provider:network_type': 'flat'} + description: Value specs for the storage_mgmt network. + type: json + StorageMgmtNetAdminStateUp: + default: false + description: This admin state of of the network. + type: boolean + StorageMgmtNetShared: + default: false + description: Whether this network is shared across all tenants. + type: boolean + StorageMgmtNetName: + default: storage_mgmt + description: The name of the Storage management network. + type: string + StorageMgmtSubnetName: + default: storage_mgmt_subnet + description: The name of the Storage management subnet in Neutron. + type: string + StorageMgmtAllocationPools: + default: [{'start': 'fd00:fd00:fd00:4000::10', 'end': 'fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe'}] + description: Ip allocation pool range for the storage mgmt network. 
+ type: json + IPv6AddressMode: + default: dhcpv6-stateful + description: Neutron subnet IPv6 address mode + type: string + IPv6RAMode: + default: dhcpv6-stateful + description: Neutron subnet IPv6 router advertisement mode + type: string + +resources: + StorageMgmtNetwork: + type: OS::Neutron::Net + properties: + admin_state_up: {get_param: StorageMgmtNetAdminStateUp} + name: {get_param: StorageMgmtNetName} + shared: {get_param: StorageMgmtNetShared} + value_specs: {get_param: StorageMgmtNetValueSpecs} + + StorageMgmtSubnet: + type: OS::Neutron::Subnet + properties: + ip_version: 6 + ipv6_address_mode: {get_param: IPv6AddressMode} + ipv6_ra_mode: {get_param: IPv6RAMode} + cidr: {get_param: StorageMgmtNetCidr} + name: {get_param: StorageMgmtSubnetName} + network: {get_resource: StorageMgmtNetwork} + allocation_pools: {get_param: StorageMgmtAllocationPools} + +outputs: + OS::stack_id: + description: Neutron storage management network + value: {get_resource: StorageMgmtNetwork} diff --git a/network/storage_v6.yaml b/network/storage_v6.yaml new file mode 100644 index 00000000..36a6fae8 --- /dev/null +++ b/network/storage_v6.yaml @@ -0,0 +1,69 @@ +heat_template_version: 2015-04-30 + +description: > + Storage network. + +parameters: + # the defaults here work for static IP assignment (IPAM) only + StorageNetCidr: + # OpenStack uses the EUI-64 address format, which requires a /64 prefix + default: 'fd00:fd00:fd00:3000::/64' + description: Cidr for the storage network. + type: string + StorageNetValueSpecs: + default: {'provider:physical_network': 'storage', 'provider:network_type': 'flat'} + description: Value specs for the storage network. + type: json + StorageNetAdminStateUp: + default: false + description: This admin state of of the network. + type: boolean + StorageNetShared: + default: false + description: Whether this network is shared across all tenants. + type: boolean + StorageNetName: + default: storage + description: The name of the storage network. + type: string + StorageSubnetName: + default: storage_subnet + description: The name of the storage subnet in Neutron. + type: string + StorageAllocationPools: + default: [{'start': 'fd00:fd00:fd00:3000::10', 'end': 'fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe'}] + description: Ip allocation pool range for the storage network. + type: json + IPv6AddressMode: + default: dhcpv6-stateful + description: Neutron subnet IPv6 address mode + type: string + IPv6RAMode: + default: dhcpv6-stateful + description: Neutron subnet IPv6 router advertisement mode + type: string + +resources: + StorageNetwork: + type: OS::Neutron::Net + properties: + admin_state_up: {get_param: StorageNetAdminStateUp} + name: {get_param: StorageNetName} + shared: {get_param: StorageNetShared} + value_specs: {get_param: StorageNetValueSpecs} + + StorageSubnet: + type: OS::Neutron::Subnet + properties: + ip_version: 6 + ipv6_address_mode: {get_param: IPv6AddressMode} + ipv6_ra_mode: {get_param: IPv6RAMode} + cidr: {get_param: StorageNetCidr} + name: {get_param: StorageSubnetName} + network: {get_resource: StorageNetwork} + allocation_pools: {get_param: StorageAllocationPools} + +outputs: + OS::stack_id: + description: Neutron storage network + value: {get_resource: StorageNetwork} diff --git a/network/tenant_v6.yaml b/network/tenant_v6.yaml new file mode 100644 index 00000000..b653eaf7 --- /dev/null +++ b/network/tenant_v6.yaml @@ -0,0 +1,69 @@ +heat_template_version: 2015-04-30 + +description: > + Tenant IPv6 network. 
+ +parameters: + # the defaults here work for static IP assignment (IPAM) only + TenantNetCidr: + # OpenStack uses the EUI-64 address format, which requires a /64 prefix + default: 'fd00:fd00:fd00:5000::/64' + description: Cidr for the tenant network. + type: string + TenantNetValueSpecs: + default: {'provider:physical_network': 'tenant', 'provider:network_type': 'flat'} + description: Value specs for the tenant network. + type: json + TenantNetAdminStateUp: + default: false + description: This admin state of of the network. + type: boolean + TenantNetShared: + default: false + description: Whether this network is shared across all tenants. + type: boolean + TenantNetName: + default: tenant + description: The name of the tenant network. + type: string + TenantSubnetName: + default: tenant_subnet + description: The name of the tenant subnet in Neutron. + type: string + TenantAllocationPools: + default: [{'start': 'fd00:fd00:fd00:5000::10', 'end': 'fd00:fd00:fd00:5000:ffff:ffff:ffff:fffe'}] + description: Ip allocation pool range for the tenant network. + type: json + IPv6AddressMode: + default: dhcpv6-stateful + description: Neutron subnet IPv6 address mode + type: string + IPv6RAMode: + default: dhcpv6-stateful + description: Neutron subnet IPv6 router advertisement mode + type: string + +resources: + TenantNetwork: + type: OS::Neutron::Net + properties: + admin_state_up: {get_param: TenantNetAdminStateUp} + name: {get_param: TenantNetName} + shared: {get_param: TenantNetShared} + value_specs: {get_param: TenantNetValueSpecs} + + TenantSubnet: + type: OS::Neutron::Subnet + properties: + ip_version: 6 + ipv6_address_mode: {get_param: IPv6AddressMode} + ipv6_ra_mode: {get_param: IPv6RAMode} + cidr: {get_param: TenantNetCidr} + name: {get_param: TenantSubnetName} + network: {get_resource: TenantNetwork} + allocation_pools: {get_param: TenantAllocationPools} + +outputs: + OS::stack_id: + description: Neutron tenant network + value: {get_resource: TenantNetwork} diff --git a/overcloud.yaml b/overcloud.yaml index d6048de7..cceb2018 100644 --- a/overcloud.yaml +++ b/overcloud.yaml @@ -16,11 +16,6 @@ parameters: description: The password for the keystone admin account, used for monitoring, querying neutron etc. type: string hidden: true - AodhPassword: - default: unset - description: The password for the aodh services. - type: string - hidden: true CeilometerBackend: default: 'mongodb' description: The ceilometer backend type. @@ -77,6 +72,10 @@ parameters: default: [] description: Should be used for arbitrary ips. type: json + CorosyncIPv6: + default: false + description: Enable IPv6 in Corosync + type: boolean Debug: default: '' description: Set to True to enable debugging on all services. @@ -249,6 +248,10 @@ parameters: type: number default: 1 description: The number of neutron dhcp agents to schedule per network + NovaIPv6: + default: false + description: Enable IPv6 features in Nova + type: boolean NovaPassword: description: The password for the nova service account, used by nova-api. 
type: string @@ -261,6 +264,10 @@ parameters: default: false description: Should MongoDb journaling be disabled type: boolean + MongoDbIPv6: + default: false + description: Enable IPv6 if MongoDB VIP is IPv6 + type: boolean PublicVirtualFixedIPs: default: [] description: > @@ -296,6 +303,10 @@ parameters: default: 16384 description: Configures RabbitMQ FD limit type: string + RabbitIPv6: + default: false + description: Enable IPv6 in RabbitMQ + type: boolean SnmpdReadonlyUserName: default: ro_snmp_user description: The user name for SNMPd with readonly rights running on all Overcloud nodes @@ -359,7 +370,7 @@ parameters: type: string hidden: true CinderISCSIHelper: - default: tgtadm + default: lioadm description: The iSCSI helper to use with cinder. type: string ControllerCount: @@ -658,7 +669,6 @@ parameters: default: NeutronTenantNetwork: tenant CeilometerApiNetwork: internal_api - AodhApiNetwork: internal_api MongoDbNetwork: internal_api CinderApiNetwork: internal_api CinderIscsiNetwork: storage @@ -857,20 +867,19 @@ resources: type: OS::TripleO::EndpointMap properties: CloudName: {get_param: CloudName} - CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} - AodhApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]} - CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]} - GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} - GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]} - HeatApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} - KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} - KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} - MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} - NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} - NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} - SaharaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]} - SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} - PublicVirtualIP: {get_attr: [VipMap, net_ip_map, external]} + CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} + CinderApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]} + GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} + GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]} + HeatApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} + KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} + KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} + MysqlVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} + NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, 
NeutronApiNetwork]}]} + NovaApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} + SaharaApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]} + SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} + PublicVirtualIP: {get_attr: [VipMap, net_ip_uri_map, external]} Controller: type: OS::Heat::ResourceGroup @@ -883,7 +892,6 @@ resources: properties: AdminPassword: {get_param: AdminPassword} AdminToken: {get_param: AdminToken} - AodhPassword: {get_param: AodhPassword} CeilometerBackend: {get_param: CeilometerBackend} CeilometerMeteringSecret: {get_param: CeilometerMeteringSecret} CeilometerPassword: {get_param: CeilometerPassword} @@ -898,6 +906,7 @@ resources: CloudDomain: {get_param: CloudDomain} ControlVirtualInterface: {get_param: ControlVirtualInterface} ControllerExtraConfig: {get_param: controllerExtraConfig} + CorosyncIPv6: {get_param: CorosyncIPv6} Debug: {get_param: Debug} EnableFencing: {get_param: EnableFencing} ManageFirewall: {get_param: ManageFirewall} @@ -965,9 +974,11 @@ resources: NeutronDhcpAgentsPerNetwork: {get_param: NeutronDhcpAgentsPerNetwork} NeutronNetworkType: {get_param: NeutronNetworkType} NeutronTunnelTypes: {get_param: NeutronTunnelTypes} + NovaIPv6: {get_param: NovaIPv6} NovaPassword: {get_param: NovaPassword} NtpServer: {get_param: NtpServer} MongoDbNoJournal: {get_param: MongoDbNoJournal} + MongoDbIPv6: {get_param: MongoDbIPv6} PcsdPassword: {get_resource: PcsdPassword} PublicVirtualInterface: {get_param: PublicVirtualInterface} RabbitPassword: {get_param: RabbitPassword} @@ -976,10 +987,12 @@ resources: RabbitClientUseSSL: {get_param: RabbitClientUseSSL} RabbitClientPort: {get_param: RabbitClientPort} RabbitFDLimit: {get_param: RabbitFDLimit} + RabbitIPv6: {get_param: RabbitIPv6} SaharaPassword: {get_param: SaharaPassword} SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName} SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword} RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]} + RedisVirtualIPUri: {get_attr: [RedisVirtualIP, ip_address_uri]} SwiftHashSuffix: {get_param: SwiftHashSuffix} SwiftMountCheck: {get_param: SwiftMountCheck} SwiftMinPartHours: {get_param: SwiftMinPartHours} @@ -992,14 +1005,15 @@ resources: ServiceNetMap: {get_param: ServiceNetMap} EndpointMap: {get_attr: [EndpointMap, endpoint_map]} CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} - AodhApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]} CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]} HeatApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} + HeatApiVirtualIPUri: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]} NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} + MysqlVirtualIPUri: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, 
MysqlNetwork]}]} KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} @@ -1069,6 +1083,7 @@ resources: NovaComputeLibvirtType: {get_param: NovaComputeLibvirtType} NovaComputeLibvirtVifDriver: {get_param: NovaComputeLibvirtVifDriver} NovaEnableRbdBackend: {get_param: NovaEnableRbdBackend} + NovaIPv6: {get_param: NovaIPv6} NovaPublicIP: {get_attr: [VipMap, net_ip_map, external]} NovaPassword: {get_param: NovaPassword} NovaOVSBridge: {get_param: NovaOVSBridge} @@ -1093,6 +1108,7 @@ resources: CloudDomain: {get_param: CloudDomain} ServerMetadata: {get_param: ServerMetadata} SchedulerHints: {get_param: NovaComputeSchedulerHints} + NodeIndex: '%index%' BlockStorage: type: OS::Heat::ResourceGroup @@ -1128,12 +1144,13 @@ resources: '%stackname%': {get_param: 'OS::stack_name'} ServiceNetMap: {get_param: ServiceNetMap} EndpointMap: {get_attr: [EndpointMap, endpoint_map]} - MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} + MysqlVirtualIPUri: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} ExtraConfig: {get_param: ExtraConfig} BlockStorageExtraConfig: {get_param: BlockStorageExtraConfig} CloudDomain: {get_param: CloudDomain} ServerMetadata: {get_param: ServerMetadata} SchedulerHints: {get_param: BlockStorageSchedulerHints} + NodeIndex: '%index%' ObjectStorage: type: OS::Heat::ResourceGroup @@ -1166,6 +1183,7 @@ resources: CloudDomain: {get_param: CloudDomain} ServerMetadata: {get_param: ServerMetadata} SchedulerHints: {get_param: ObjectStorageSchedulerHints} + NodeIndex: '%index%' CephStorage: type: OS::Heat::ResourceGroup @@ -1193,6 +1211,7 @@ resources: CloudDomain: {get_param: CloudDomain} ServerMetadata: {get_param: ServerMetadata} SchedulerHints: {get_param: CephStorageSchedulerHints} + NodeIndex: '%index%' ControllerIpListMap: type: OS::TripleO::Network::Ports::NetIpListMap @@ -1224,7 +1243,6 @@ resources: heat_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} swift_proxy_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} ceilometer_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} - aodh_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]} nova_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} nova_metadata_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]} glance_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} @@ -1315,9 +1333,13 @@ resources: properties: ControlPlaneIp: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} ExternalIp: {get_attr: [PublicVirtualIP, ip_address]} + ExternalIpUri: {get_attr: [PublicVirtualIP, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiVirtualIP, ip_address]} + InternalApiIpUri: {get_attr: [InternalApiVirtualIP, ip_address_uri]} StorageIp: {get_attr: [StorageVirtualIP, ip_address]} + StorageIpUri: {get_attr: [StorageVirtualIP, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtVirtualIP, ip_address]} + 
StorageMgmtIpUri: {get_attr: [StorageMgmtVirtualIP, ip_address_uri]} # No tenant or management VIP required VipConfig: @@ -1341,7 +1363,6 @@ resources: nova_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} nova_metadata_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]} ceilometer_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} - aodh_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]} heat_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} horizon_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]} redis_vip: {get_attr: [RedisVirtualIP, ip_address]} @@ -1549,7 +1570,13 @@ resources: # the nested template may configure each role differently (or not at all) AllNodesExtraConfig: type: OS::TripleO::AllNodesExtraConfig - depends_on: UpdateWorkflow + depends_on: + - UpdateWorkflow + - ComputeAllNodesValidationDeployment + - BlockStorageAllNodesValidationDeployment + - ObjectStorageAllNodesValidationDeployment + - CephStorageAllNodesValidationDeployment + - ControllerAllNodesValidationDeployment properties: controller_servers: {get_attr: [Controller, attributes, nova_server_resource]} compute_servers: {get_attr: [Compute, attributes, nova_server_resource]} @@ -1618,9 +1645,6 @@ outputs: PublicVip: description: Controller VIP for public API endpoints value: {get_attr: [VipMap, net_ip_map, external]} - AodhInternalVip: - description: VIP for Aodh API internal endpoint - value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]} CeilometerInternalVip: description: VIP for Ceilometer API internal endpoint value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index f7633a86..2f2a1e9d 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -34,8 +34,6 @@ parameters: type: comma_delimited_list ceilometer_api_node_ips: type: comma_delimited_list - aodh_api_node_ips: - type: comma_delimited_list nova_api_node_ips: type: comma_delimited_list nova_metadata_node_ips: @@ -141,6 +139,14 @@ resources: list_join: - "','" - {get_param: memcache_node_ips} + memcache_node_ips_v6: + str_replace: + template: "['inet6:[SERVERS_LIST]']" + params: + SERVERS_LIST: + list_join: + - "]','inet6:[" + - {get_param: memcache_node_ips} mysql_node_ips: str_replace: template: "['SERVERS_LIST']" @@ -181,14 +187,6 @@ resources: list_join: - "','" - {get_param: ceilometer_api_node_ips} - aodh_api_node_ips: - str_replace: - template: "['SERVERS_LIST']" - params: - SERVERS_LIST: - list_join: - - "','" - - {get_param: aodh_api_node_ips} nova_api_node_ips: str_replace: template: "['SERVERS_LIST']" @@ -274,8 +272,8 @@ resources: # NOTE(gfidente): interpolation with %{} in the # hieradata file can't be used as it returns string ceilometer::rabbit_hosts: *rabbit_nodes_array - aodh::rabbit_hosts: *rabbit_nodes_array cinder::rabbit_hosts: *rabbit_nodes_array + glance::notify::rabbitmq::rabbit_hosts: *rabbit_nodes_array heat::rabbit_hosts: *rabbit_nodes_array neutron::rabbit_hosts: *rabbit_nodes_array nova::rabbit_hosts: *rabbit_nodes_array diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml index 96198c3f..dc2f98ed 100644 --- a/puppet/ceph-cluster-config.yaml +++ b/puppet/ceph-cluster-config.yaml @@ -39,6 +39,9 @@ parameters: 
CephClientUserName: default: openstack type: string + CephIPv6: + default: False + type: boolean resources: CephClusterConfigImpl: @@ -50,15 +53,25 @@ resources: datafiles: ceph_cluster: mapped_data: + ceph_ipv6: {get_param: CephIPv6} ceph_storage_count: {get_param: ceph_storage_count} ceph_mon_initial_members: list_join: - ',' - {get_param: ceph_mon_names} - ceph::profile::params::mon_host: + ceph_mon_host: list_join: - ',' - {get_param: ceph_mon_ips} + ceph_mon_host_v6: + str_replace: + template: "'[IPS_LIST]'" + params: + IPS_LIST: + list_join: + - '],[' + - {get_param: ceph_mon_ips} + ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6} ceph::profile::params::fsid: {get_param: ceph_fsid} ceph::profile::params::mon_key: {get_param: ceph_mon_key} # We should use a separated key for the non-admin clients diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml index d38f3f22..d2988926 100644 --- a/puppet/ceph-storage.yaml +++ b/puppet/ceph-storage.yaml @@ -62,6 +62,9 @@ parameters: description: | Role specific additional hiera configuration to inject into the cluster. type: json + CephStorageIPs: + default: {} + type: json NetworkDeploymentActions: type: comma_delimited_list description: > @@ -90,6 +93,9 @@ parameters: type: json description: Optional scheduler hints to pass to nova default: {} + NodeIndex: + type: number + default: 0 resources: CephStorage: @@ -135,31 +141,43 @@ resources: type: OS::TripleO::CephStorage::Ports::ExternalPort properties: ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + IPPool: {get_param: CephStorageIPs} + NodeIndex: {get_param: NodeIndex} InternalApiPort: type: OS::TripleO::CephStorage::Ports::InternalApiPort properties: ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + IPPool: {get_param: CephStorageIPs} + NodeIndex: {get_param: NodeIndex} StoragePort: type: OS::TripleO::CephStorage::Ports::StoragePort properties: ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + IPPool: {get_param: CephStorageIPs} + NodeIndex: {get_param: NodeIndex} StorageMgmtPort: type: OS::TripleO::CephStorage::Ports::StorageMgmtPort properties: ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + IPPool: {get_param: CephStorageIPs} + NodeIndex: {get_param: NodeIndex} TenantPort: type: OS::TripleO::CephStorage::Ports::TenantPort properties: ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + IPPool: {get_param: CephStorageIPs} + NodeIndex: {get_param: NodeIndex} ManagementPort: type: OS::TripleO::CephStorage::Ports::ManagementPort properties: ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + IPPool: {get_param: CephStorageIPs} + NodeIndex: {get_param: NodeIndex} NetworkConfig: type: OS::TripleO::CephStorage::Net::SoftwareConfig @@ -177,11 +195,17 @@ resources: properties: ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} + ManagementIpUri: 
{get_attr: [ManagementPort, ip_address_uri]} NetIpSubnetMap: type: OS::TripleO::Network::Ports::NetIpSubnetMap diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml index 440c2fd2..888f3cf8 100644 --- a/puppet/cinder-storage.yaml +++ b/puppet/cinder-storage.yaml @@ -9,7 +9,7 @@ parameters: description: Whether to enable or not the Iscsi backend for Cinder type: boolean CinderISCSIHelper: - default: tgtadm + default: lioadm description: The iSCSI helper to use with cinder. type: string CinderLVMLoopDeviceSize: @@ -38,6 +38,9 @@ parameters: description: | Role specific additional hiera configuration to inject into the cluster. type: json + BlockStorageIPs: + default: {} + type: json Flavor: description: Flavor for block storage nodes to request when deploying. type: string @@ -110,7 +113,7 @@ parameters: GlanceApiVirtualIP: type: string default: '' - MysqlVirtualIP: + MysqlVirtualIPUri: type: string default: '' NetworkDeploymentActions: @@ -141,6 +144,9 @@ parameters: type: json description: Optional scheduler hints to pass to nova default: {} + NodeIndex: + type: number + default: 0 resources: @@ -187,31 +193,43 @@ resources: type: OS::TripleO::BlockStorage::Ports::ExternalPort properties: ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + IPPool: {get_param: BlockStorageIPs} + NodeIndex: {get_param: NodeIndex} InternalApiPort: type: OS::TripleO::BlockStorage::Ports::InternalApiPort properties: ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + IPPool: {get_param: BlockStorageIPs} + NodeIndex: {get_param: NodeIndex} StoragePort: type: OS::TripleO::BlockStorage::Ports::StoragePort properties: ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + IPPool: {get_param: BlockStorageIPs} + NodeIndex: {get_param: NodeIndex} StorageMgmtPort: type: OS::TripleO::BlockStorage::Ports::StorageMgmtPort properties: ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + IPPool: {get_param: BlockStorageIPs} + NodeIndex: {get_param: NodeIndex} TenantPort: type: OS::TripleO::BlockStorage::Ports::TenantPort properties: ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + IPPool: {get_param: BlockStorageIPs} + NodeIndex: {get_param: NodeIndex} ManagementPort: type: OS::TripleO::BlockStorage::Ports::ManagementPort properties: ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + IPPool: {get_param: BlockStorageIPs} + NodeIndex: {get_param: NodeIndex} NetworkConfig: type: OS::TripleO::BlockStorage::Net::SoftwareConfig @@ -229,11 +247,17 @@ resources: properties: ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} + ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -252,7 +276,7 @@ resources: config: {get_resource: BlockStorageConfig} input_values: debug: {get_param: Debug} - cinder_dsn: {list_join: ['', 
['mysql+pymysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIP} , '/cinder']]} + cinder_dsn: {list_join: ['', ['mysql+pymysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIPUri} , '/cinder']]} snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} cinder_lvm_loop_device_size: diff --git a/puppet/compute.yaml b/puppet/compute.yaml index deaf7984..ee5bced6 100644 --- a/puppet/compute.yaml +++ b/puppet/compute.yaml @@ -195,6 +195,9 @@ parameters: default: 'dvr_snat' description: Agent mode for the neutron-l3-agent on the controller hosts type: string + NodeIndex: + type: number + default: 0 NovaApiHost: type: string default: '' # Has to be here because of the ignored empty value bug @@ -207,6 +210,9 @@ parameters: NovaCompute specific configuration to inject into the cluster. Same structure as ExtraConfig. type: json + NovaComputeIPs: + default: {} + type: json NovaComputeLibvirtType: type: string default: kvm @@ -218,6 +224,10 @@ parameters: default: false description: Whether to enable or not the Rbd backend for Nova type: boolean + NovaIPv6: + default: false + description: Enable IPv6 features in Nova + type: boolean NovaPassword: description: The password for the nova service account, used by nova-api. type: string @@ -378,42 +388,60 @@ resources: type: OS::TripleO::Compute::Ports::ExternalPort properties: ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + IPPool: {get_param: NovaComputeIPs} + NodeIndex: {get_param: NodeIndex} InternalApiPort: type: OS::TripleO::Compute::Ports::InternalApiPort properties: ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + IPPool: {get_param: NovaComputeIPs} + NodeIndex: {get_param: NodeIndex} StoragePort: type: OS::TripleO::Compute::Ports::StoragePort properties: ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + IPPool: {get_param: NovaComputeIPs} + NodeIndex: {get_param: NodeIndex} StorageMgmtPort: type: OS::TripleO::Compute::Ports::StorageMgmtPort properties: ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + IPPool: {get_param: NovaComputeIPs} + NodeIndex: {get_param: NodeIndex} TenantPort: type: OS::TripleO::Compute::Ports::TenantPort properties: ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + IPPool: {get_param: NovaComputeIPs} + NodeIndex: {get_param: NodeIndex} ManagementPort: type: OS::TripleO::Compute::Ports::ManagementPort properties: ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + IPPool: {get_param: NovaComputeIPs} + NodeIndex: {get_param: NodeIndex} NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} + ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetworkConfig: type: OS::TripleO::Compute::Net::SoftwareConfig @@ 
-472,6 +500,7 @@ resources: raw_data: {get_file: hieradata/compute.yaml} mapped_data: cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend} + nova::use_ipv6: {get_input: nova_ipv6} nova::debug: {get_input: debug} nova::rabbit_userid: {get_input: rabbit_username} nova::rabbit_password: {get_input: rabbit_password} @@ -561,10 +590,18 @@ resources: nova_api_host: {get_param: NovaApiHost} nova_password: {get_param: NovaPassword} nova_enable_rbd_backend: {get_param: NovaEnableRbdBackend} + nova_ipv6: {get_param: NovaIPv6} cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend} nova_vnc_proxyclient_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaVncProxyNetwork]}]} nova_vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]} - nova_vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host]} + # Remove brackets that may come if the IP address is IPv6. + # For DNS names and IPv4, this will just get the NovaVNCProxyPublic value + nova_vncproxy_host: + str_replace: + template: {get_param: [EndpointMap, NovaVNCProxyPublic, host]} + params: + '[': '' + ']': '' nova_vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]} nova_ovs_bridge: {get_param: NovaOVSBridge} nova_security_group_api: {get_param: NovaSecurityGroupAPI} diff --git a/puppet/controller.yaml b/puppet/controller.yaml index 688e4178..186dce64 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -17,14 +17,6 @@ parameters: description: The keystone auth secret and db password. type: string hidden: true - AodhApiVirtualIP: - type: string - default: '' - AodhPassword: - default: unset - description: The password for the aodh services. - type: string - hidden: true CeilometerApiVirtualIP: type: string default: '' @@ -40,6 +32,10 @@ parameters: description: The password for the ceilometer service and db account. type: string hidden: true + CeilometerStoreEvents: + default: false + description: Whether to store events in ceilometer. + type: boolean CinderApiVirtualIP: type: string default: '' @@ -65,7 +61,7 @@ parameters: description: Whether to enable or not the Rbd backend for Cinder type: boolean CinderISCSIHelper: - default: tgtadm + default: lioadm description: The iSCSI helper to use with cinder. type: string CinderLVMLoopDeviceSize: @@ -115,6 +111,10 @@ parameters: default: 'br-ex' description: Interface where virtual ip will be assigned. type: string + CorosyncIPv6: + default: false + description: Enable IPv6 in Corosync + type: boolean Debug: default: '' description: Set to True to enable debugging on all services. @@ -553,6 +553,10 @@ parameters: description: | Whether to create cron job for purging soft deleted rows in Nova database. type: boolean + NovaIPv6: + default: false + description: Enable IPv6 features in Nova + type: boolean NovaPassword: description: The password for the nova service and db account, used by nova-api. 
type: string @@ -565,6 +569,10 @@ parameters: default: false description: Should MongoDb journaling be disabled type: boolean + MongoDbIPv6: + default: false + description: Enable IPv6 if Mongo DB VIP is IPv6 + type: boolean NtpServer: default: '' description: Comma-separated list of ntp servers @@ -609,9 +617,17 @@ parameters: default: 16384 description: Configures RabbitMQ FD limit type: string + RabbitIPv6: + default: false + description: Enable IPv6 in RabbitMQ + type: boolean RedisVirtualIP: type: string default: '' # Has to be here because of the ignored empty value bug + RedisVirtualIPUri: + type: string + default: '' # Has to be here because of the ignored empty value bug + description: An IP address which is wrapped in brackets in case of IPv6 SnmpdReadonlyUserName: default: ro_snmp_user description: The user name for SNMPd with readonly rights running on all Overcloud nodes @@ -667,6 +683,9 @@ parameters: HeatApiVirtualIP: type: string default: '' + HeatApiVirtualIPUri: + type: string + default: '' GlanceApiVirtualIP: type: string default: '' @@ -676,6 +695,9 @@ parameters: MysqlVirtualIP: type: string default: '' + MysqlVirtualIPUri: + type: string + default: '' KeystoneAdminApiVirtualIP: type: string default: '' @@ -832,11 +854,17 @@ resources: properties: ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} + ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} + InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageIpUri: {get_attr: [StoragePort, ip_address_uri]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]} TenantIp: {get_attr: [TenantPort, ip_address]} + TenantIpUri: {get_attr: [TenantPort, ip_address_uri]} ManagementIp: {get_attr: [ManagementPort, ip_address]} + ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]} NetIpSubnetMap: type: OS::TripleO::Network::Ports::NetIpSubnetMap @@ -912,19 +940,19 @@ resources: list_join: - '' - - 'http://' - - {get_param: HeatApiVirtualIP} + - {get_param: HeatApiVirtualIPUri} - ':8003' heat.metadata_server_url: list_join: - '' - - 'http://' - - {get_param: HeatApiVirtualIP} + - {get_param: HeatApiVirtualIPUri} - ':8000' heat.waitcondition_server_url: list_join: - '' - - 'http://' - - {get_param: HeatApiVirtualIP} + - {get_param: HeatApiVirtualIPUri} - ':8000/v1/waitcondition' heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey} heat_enable_db_purge: {get_param: HeatEnableDBPurge} @@ -955,7 +983,7 @@ resources: - - 'mysql+pymysql://cinder:' - {get_param: CinderPassword} - '@' - - {get_param: MysqlVirtualIP} + - {get_param: MysqlVirtualIPUri} - '/cinder' glance_port: {get_param: [EndpointMap, GlanceInternal, port]} glance_password: {get_param: GlancePassword} @@ -972,7 +1000,7 @@ resources: - - 'mysql+pymysql://glance:' - {get_param: GlancePassword} - '@' - - {get_param: MysqlVirtualIP} + - {get_param: MysqlVirtualIPUri} - '/glance' heat_password: {get_param: HeatPassword} heat_stack_domain_admin_password: {get_param: HeatStackDomainAdminPassword} @@ -982,7 +1010,7 @@ resources: - - 'mysql+pymysql://heat:' - {get_param: HeatPassword} - '@' - - {get_param: MysqlVirtualIP} + - {get_param: MysqlVirtualIPUri} - '/heat' keystone_ca_certificate: {get_param: KeystoneCACertificate} keystone_signing_key: {get_param: KeystoneSigningKey} @@ -998,7 +1026,7 @@ resources: - 
- 'mysql+pymysql://keystone:' - {get_param: AdminToken} - '@' - - {get_param: MysqlVirtualIP} + - {get_param: MysqlVirtualIPUri} - '/keystone' keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } @@ -1105,7 +1133,7 @@ resources: - - 'mysql+pymysql://neutron:' - {get_param: NeutronPassword} - '@' - - {get_param: MysqlVirtualIP} + - {get_param: MysqlVirtualIPUri} - '/ovs_neutron?charset=utf8' neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] } neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] } @@ -1115,12 +1143,12 @@ resources: ceilometer_backend: {get_param: CeilometerBackend} ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} ceilometer_password: {get_param: CeilometerPassword} - aodh_password: {get_param: AodhPassword} + ceilometer_store_events: {get_param: CeilometerStoreEvents} ceilometer_coordination_url: list_join: - '' - - 'redis://' - - {get_param: RedisVirtualIP} + - {get_param: RedisVirtualIPUri} - ':6379' ceilometer_dsn: list_join: @@ -1128,11 +1156,13 @@ resources: - - 'mysql+pymysql://ceilometer:' - {get_param: CeilometerPassword} - '@' - - {get_param: MysqlVirtualIP} + - {get_param: MysqlVirtualIPUri} - '/ceilometer' snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} nova_enable_db_purge: {get_param: NovaEnableDBPurge} + nova_ipv6: {get_param: NovaIPv6} + corosync_ipv6: {get_param: CorosyncIPv6} nova_password: {get_param: NovaPassword} nova_dsn: list_join: @@ -1140,7 +1170,7 @@ resources: - - 'mysql+pymysql://nova:' - {get_param: NovaPassword} - '@' - - {get_param: MysqlVirtualIP} + - {get_param: MysqlVirtualIPUri} - '/nova' nova_api_dsn: list_join: @@ -1148,7 +1178,7 @@ resources: - - 'mysql+pymysql://nova_api:' - {get_param: NovaPassword} - '@' - - {get_param: MysqlVirtualIP} + - {get_param: MysqlVirtualIPUri} - '/nova_api' upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute} instance_name_template: {get_param: InstanceNameTemplate} @@ -1159,7 +1189,9 @@ resources: rabbit_cookie: {get_param: RabbitCookie} rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} rabbit_client_port: {get_param: RabbitClientPort} + rabbit_ipv6: {get_param: RabbitIPv6} mongodb_no_journal: {get_param: MongoDbNoJournal} + mongodb_ipv6: {get_param: MongoDbIPv6} # We need to force this into quotes or hiera will return integer causing # the puppet module validation regexp to fail. 
# Remove when: https://github.com/puppetlabs/puppetlabs-rabbitmq/pull/401 @@ -1187,7 +1219,7 @@ resources: - - 'mysql://sahara:' - {get_param: SaharaPassword} - '@' - - {get_param: MysqlVirtualIP} + - {get_param: MysqlVirtualIPUri} - '/sahara' swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} @@ -1205,7 +1237,6 @@ resources: neutron_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]} ceilometer_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} - aodh_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]} nova_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} nova_metadata_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]} horizon_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]} @@ -1278,6 +1309,7 @@ resources: enable_fencing: {get_input: enable_fencing} enable_load_balancer: {get_input: enable_load_balancer} hacluster_pwd: {get_input: pcsd_password} + corosync_ipv6: {get_input: corosync_ipv6} tripleo::fencing::config: {get_input: fencing_config} # Swift @@ -1412,6 +1444,7 @@ resources: # MongoDB mongodb::server::bind_ip: {get_input: mongo_db_network} mongodb::server::nojournal: {get_input: mongodb_no_journal} + mongodb::server::ipv6: {get_input: mongodb_ipv6} # MySQL admin_password: {get_input: admin_password} enable_galera: {get_input: enable_galera} @@ -1503,28 +1536,11 @@ resources: ceilometer::agent::auth::auth_password: {get_input: ceilometer_password} ceilometer::agent::auth::auth_url: {get_input: keystone_auth_uri} ceilometer::agent::central::coordination_url: {get_input: ceilometer_coordination_url} + ceilometer::agent::notification::store_events: {get_input: ceilometer_store_events} ceilometer::db::mysql::password: {get_input: ceilometer_password} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} - # Aodh - aodh::rabbit_userid: {get_input: rabbit_username} - aodh::rabbit_password: {get_input: rabbit_password} - aodh::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - aodh::rabbit_port: {get_input: rabbit_client_port} - aodh::debug: {get_input: debug} - aodh::wsgi::apache::ssl: false - aodh::wsgi::apache::bind_host: {get_input: aodh_api_network} - aodh::api::service_name: 'httpd' - aodh::api::host: {get_input: aodh_api_network} - aodh::api::keystone_password: {get_input: aodh_password} - aodh::api::keystone_auth_uri: {get_input: keystone_auth_uri} - aodh::api::keystone_identity_uri: {get_input: keystone_identity_uri} - aodh::auth::auth_password: {get_input: aodh_password} - aodh::db::mysql::password: {get_input: aodh_password} - # for a migration path from ceilometer-alarm to aodh, we use the same database & coordination - aodh::evaluator::coordination_url: {get_input: ceilometer_coordination_url} - # Nova nova::rabbit_userid: {get_input: rabbit_username} nova::rabbit_password: {get_input: rabbit_password} @@ -1532,6 +1548,7 @@ resources: nova::rabbit_port: {get_input: rabbit_client_port} nova::upgrade_level_compute: {get_input: upgrade_level_nova_compute} nova::debug: 
{get_input: debug} + nova::use_ipv6: {get_input: nova_ipv6} nova::api::auth_uri: {get_input: keystone_auth_uri} nova::api::identity_uri: {get_input: keystone_identity_uri} nova::api::api_bind_address: {get_input: nova_api_network} @@ -1591,6 +1608,7 @@ resources: rabbitmq::file_limit: {get_input: rabbit_fd_limit} rabbitmq::default_user: {get_input: rabbit_username} rabbitmq::default_pass: {get_input: rabbit_password} + rabbit_ipv6: {get_input: rabbit_ipv6} # Redis redis::bind: {get_input: redis_network} redis_vip: {get_input: redis_vip} @@ -1734,14 +1752,14 @@ outputs: str_replace: template: 'r1z1-IP:%PORT%/d1' params: - IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} + IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} swift_proxy_memcache: description: Swift proxy-memcache value value: str_replace: template: "IP:11211" params: - IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]} + IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]} config_identifier: description: identifier which changes if the controller configuration may need re-applying value: diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml index ebd6c251..312d49a0 100644 --- a/puppet/extraconfig/ceph/ceph-external-config.yaml +++ b/puppet/extraconfig/ceph/ceph-external-config.yaml @@ -41,6 +41,9 @@ parameters: CephClientUserName: default: openstack type: string + CephIPv6: + default: False + type: boolean resources: CephClusterConfigImpl: @@ -54,7 +57,9 @@ resources: mapped_data: ceph_storage_count: {get_param: ceph_storage_count} enable_external_ceph: true - ceph::profile::params::mon_host: {get_param: ceph_external_mon_ips} + ceph_ipv6: {get_param: CephIPv6} + ceph_mon_host: {get_param: ceph_external_mon_ips} + ceph_mon_host_v6: {get_param: ceph_external_mon_ips} ceph::profile::params::fsid: {get_param: ceph_fsid} ceph::profile::params::client_keys: str_replace: @@ -72,6 +77,7 @@ resources: NOVA_POOL: {get_param: NovaRbdPoolName} CINDER_POOL: {get_param: CinderRbdPoolName} GLANCE_POOL: {get_param: GlanceRbdPoolName} + ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6} nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName} cinder_rbd_pool_name: {get_param: CinderRbdPoolName} glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml index 905f196d..9b6981bb 100644 --- a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml +++ b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml @@ -1,4 +1,4 @@ -heat_template_version: 2015-11-12 +heat_template_version: 2015-10-15 description: Configure hieradata for Cinder Dell Storage Center configuration diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml index c73608f1..36db334e 100644 --- a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml +++ b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml @@ -1,4 +1,4 @@ -heat_template_version: 2015-11-06 +heat_template_version: 2015-10-15 description: Configure hieradata for Cinder Eqlx configuration diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index 262c7cca..98cec364 100644 --- a/puppet/hieradata/common.yaml +++ 
b/puppet/hieradata/common.yaml @@ -5,9 +5,6 @@ ssh::server::storeconfigs_enabled: false ceilometer::agent::auth::auth_region: 'regionOne' ceilometer::agent::auth::auth_tenant_name: 'service' -aodh::auth::auth_region: 'regionOne' -aodh::auth::auth_tenant_name: 'service' - nova::api::admin_tenant_name: 'service' nova::network::neutron::neutron_project_name: 'service' nova::network::neutron::neutron_username: 'neutron' @@ -34,6 +31,8 @@ sysctl_settings: value: 0 net.ipv6.conf.default.autoconf: value: 0 + net.core.netdev_max_backlog: + value: 10000 nova::rabbit_heartbeat_timeout_threshold: 60 neutron::rabbit_heartbeat_timeout_threshold: 60 diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml index 572eef9f..865210c9 100644 --- a/puppet/hieradata/compute.yaml +++ b/puppet/hieradata/compute.yaml @@ -7,7 +7,6 @@ nova::compute::instance_usage_audit: true nova::compute::instance_usage_audit_period: 'hour' nova::compute::vnc_enabled: true -nova::compute::libvirt::vncserver_listen: '0.0.0.0' nova::compute::libvirt::migration_support: true nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}" diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index f52f1d0c..e80bee07 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -31,7 +31,6 @@ redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh' # service tenant glance::api::keystone_tenant: 'service' -aodh::api::keystone_tenant: 'service' glance::registry::keystone_tenant: 'service' neutron::server::auth_tenant: 'service' neutron::agents::metadata::auth_tenant: 'service' @@ -101,6 +100,7 @@ ceilometer::agent::auth::auth_endpoint_type: 'internalURL' cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler cinder::cron::db_purge::destination: '/dev/null' cinder::host: hostgroup +cinder_user_enabled_backends: [] # heat heat::engine::configure_delegated_roles: false @@ -143,7 +143,6 @@ tripleo::loadbalancer::redis: true tripleo::loadbalancer::sahara: true tripleo::loadbalancer::swift_proxy_server: true tripleo::loadbalancer::ceilometer: true -tripleo::loadbalancer::aodh: true tripleo::loadbalancer::heat_api: true tripleo::loadbalancer::heat_cloudwatch: true tripleo::loadbalancer::heat_cfn: true @@ -256,7 +255,3 @@ tripleo::firewall::firewall_rules: '127 snmp': port: 161 proto: 'udp' - '128 aodh': - port: - - 8042 - - 13042 diff --git a/puppet/hieradata/volume.yaml b/puppet/hieradata/volume.yaml index f4cd78a9..8640c0a7 100644 --- a/puppet/hieradata/volume.yaml +++ b/puppet/hieradata/volume.yaml @@ -9,4 +9,6 @@ cinder::config::cinder_config: DEFAULT/swift_catalog_info: value: 'object-store:swift:internalURL' +cinder_user_enabled_backends: [] + volume_classes: []
\ No newline at end of file diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index 0db5b45a..fd7faff1 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -40,6 +40,14 @@ if str2bool(hiera('ceph_osd_selinux_permissive', true)) { } -> Class['ceph::profile::osd'] } +if str2bool(hiera('ceph_ipv6', false)) { + $mon_host = hiera('ceph_mon_host_v6') +} else { + $mon_host = hiera('ceph_mon_host') +} +class { '::ceph::profile::params': + mon_host => $mon_host, +} include ::ceph::conf include ::ceph::profile::client include ::ceph::profile::osd diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index 7925f50a..7c8cda71 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -56,11 +56,20 @@ include ::nova::compute nova_config { 'DEFAULT/my_ip': value => $ipaddress; 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver'; + 'DEFAULT/host': value => $fqdn; } $rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false) $rbd_persistent_storage = hiera('rbd_persistent_storage', false) if $rbd_ephemeral_storage or $rbd_persistent_storage { + if str2bool(hiera('ceph_ipv6', false)) { + $mon_host = hiera('ceph_mon_host_v6') + } else { + $mon_host = hiera('ceph_mon_host') + } + class { '::ceph::profile::params': + mon_host => $mon_host, + } include ::ceph::conf include ::ceph::profile::client @@ -82,7 +91,14 @@ if hiera('cinder_enable_nfs_backend', false) { package {'nfs-utils': } -> Service['nova-compute'] } -include ::nova::compute::libvirt +if str2bool(hiera('nova::use_ipv6', false)) { + $vncserver_listen = '::0' +} else { + $vncserver_listen = '0.0.0.0' +} +class { '::nova::compute::libvirt' : + vncserver_listen => $vncserver_listen, +} if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { file {'/etc/libvirt/qemu.conf': ensure => present, @@ -146,6 +162,9 @@ else { } } +neutron_config { + 'DEFAULT/host': value => $fqdn; +} include ::ceilometer include ::ceilometer::config @@ -159,7 +178,7 @@ snmp::snmpv3_user { $snmpd_user: } class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], + snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } hiera_include('compute_classes') diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index 10a64e45..4a03fefa 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -48,14 +48,24 @@ if hiera('step') >= 2 { include ::mongodb::globals include ::mongodb::server - $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') + # NOTE(gfidente): We need to pass the list of IPv6 addresses *with* port and + # without the brackets as 'members' argument for the 'mongodb_replset' + # resource. 
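The NOTE(gfidente) comment above is implemented by the lines that follow in this hunk: when mongodb::server::ipv6 is set, the controller manifest builds two parallel lists of member strings, a bracketed one for URL-style connection strings and an unbracketed one for the mongodb_replset 'members' argument. A minimal illustration of the two shapes under the IPv6 branch (the addresses below are assumptions for the sketch, not values from this change):

$mongo_node_ips = ['fd00:fd00:fd00:2000::10', 'fd00:fd00:fd00:2000::11']

# Bracketed form, suitable for composing the ceilometer connection string:
#   [fd00:fd00:fd00:2000::10]:27017,[fd00:fd00:fd00:2000::11]:27017
$mongo_node_ips_with_port = suffix(prefix($mongo_node_ips, '['), ']:27017')

# Unbracketed form, which mongodb_replset expects for 'members':
#   fd00:fd00:fd00:2000::10:27017,fd00:fd00:fd00:2000::11:27017
$mongo_node_ips_with_port_nobr = suffix($mongo_node_ips, ':27017')

notice(join($mongo_node_ips_with_port, ','))
notice(join($mongo_node_ips_with_port_nobr, ','))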
+ if str2bool(hiera('mongodb::server::ipv6', false)) { + $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[') + $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017') + $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') + } else { + $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') + $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') + } $mongo_node_string = join($mongo_node_ips_with_port, ',') $mongodb_replset = hiera('mongodb::server::replset') $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" if downcase(hiera('bootstrap_nodeid')) == $::hostname { mongodb_replset { $mongodb_replset : - members => $mongo_node_ips_with_port, + members => $mongo_node_ips_with_port_nobr, } } } @@ -113,18 +123,27 @@ if hiera('step') >= 2 { include ::sahara::db::mysql if downcase(hiera('ceilometer_backend')) == 'mysql' { include ::ceilometer::db::mysql - include ::aodh::db::mysql } $rabbit_nodes = hiera('rabbit_node_ips') if count($rabbit_nodes) > 1 { + + $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false)) + if $rabbit_ipv6 { + $rabbit_env = merge(hiera('rabbitmq_environment'), { + 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"' + }) + } else { + $rabbit_env = hiera('rabbitmq_environment') + } + class { '::rabbitmq': config_cluster => true, cluster_nodes => $rabbit_nodes, tcp_keepalive => false, config_kernel_variables => hiera('rabbitmq_kernel_variables'), config_variables => hiera('rabbitmq_config_variables'), - environment_variables => hiera('rabbitmq_environment'), + environment_variables => $rabbit_env, } rabbitmq_policy { 'ha-all@/': pattern => '^(?!amq\.).*', @@ -142,8 +161,15 @@ if hiera('step') >= 2 { $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) if $enable_ceph { + $mon_initial_members = downcase(hiera('ceph_mon_initial_members')) + if str2bool(hiera('ceph_ipv6', false)) { + $mon_host = hiera('ceph_mon_host_v6') + } else { + $mon_host = hiera('ceph_mon_host') + } class { '::ceph::profile::params': - mon_initial_members => downcase(hiera('ceph_mon_initial_members')), + mon_initial_members => $mon_initial_members, + mon_host => $mon_host, } include ::ceph::conf include ::ceph::profile::mon @@ -169,6 +195,14 @@ if hiera('step') >= 2 { } if str2bool(hiera('enable_external_ceph', false)) { + if str2bool(hiera('ceph_ipv6', false)) { + $mon_host = hiera('ceph_mon_host_v6') + } else { + $mon_host = hiera('ceph_mon_host') + } + class { '::ceph::profile::params': + mon_host => $mon_host, + } include ::ceph::conf include ::ceph::profile::client } @@ -230,11 +264,8 @@ if hiera('step') >= 3 { known_stores => $glance_store, } include ::glance::registry + include ::glance::notify::rabbitmq include join(['::glance::backend::', $glance_backend]) - $rabbit_port = hiera('rabbitmq::port') - class { '::glance::notify::rabbitmq': - rabbit_hosts => suffix(hiera('rabbit_node_ips'), ":${rabbit_port}"), - } class { '::nova' : memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'), @@ -513,7 +544,7 @@ if hiera('step') >= 3 { $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend]) class { '::cinder::backends' : - enabled_backends => $cinder_enabled_backends, + enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')), } # swift proxy 
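The last hunk above switches ::cinder::backends to take the union of the template-managed backend list and the new cinder_user_enabled_backends hiera key, which the controller and volume hieradata in this change seed with an empty list so the lookup is always defined. A minimal sketch of the resulting behaviour, assuming a deployer override named 'user_custom_backend' (an illustrative name, not part of the change):

# Sketch only; applying it for real requires the cinder and stdlib modules.
# With no hiera override the union is just the template-managed list; with
#   cinder_user_enabled_backends: ['user_custom_backend']
# it becomes ['tripleo_iscsi', 'user_custom_backend'], duplicates removed.
$cinder_enabled_backends      = ['tripleo_iscsi']   # stand-in for the computed backend list
$cinder_user_enabled_backends = hiera('cinder_user_enabled_backends', [])

class { '::cinder::backends':
  enabled_backends => union($cinder_enabled_backends, $cinder_user_enabled_backends),
}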
@@ -572,21 +603,6 @@ if hiera('step') >= 3 { Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } - # Aodh - class { '::aodh' : - database_connection => $ceilometer_database_connection, - } - include ::aodh::db::sync - # To manage the upgrade: - Exec['ceilometer-dbsync'] -> Exec['aodh-db-sync'] - include ::aodh::auth - include ::aodh::api - include ::aodh::wsgi::apache - include ::aodh::evaluator - include ::aodh::notifier - include ::aodh::listener - include ::aodh::client - # Heat class { '::heat' : notification_driver => 'messaging', @@ -622,7 +638,7 @@ if hiera('step') >= 3 { } class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], + snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } hiera_include('controller_classes') diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index fb36893d..7637029c 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -62,6 +62,12 @@ if hiera('step') >= 1 { } $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G')) + $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false)) + if $corosync_ipv6 { + $cluster_setup_extras = { '--ipv6' => '' } + } else { + $cluster_setup_extras = {} + } user { 'hacluster': ensure => present, } -> @@ -69,8 +75,9 @@ if hiera('step') >= 1 { hacluster_pwd => hiera('hacluster_pwd'), } -> class { '::pacemaker::corosync': - cluster_members => $pacemaker_cluster_members, - setup_cluster => $pacemaker_master, + cluster_members => $pacemaker_cluster_members, + setup_cluster => $pacemaker_master, + cluster_setup_extras => $cluster_setup_extras, } class { '::pacemaker::stonith': disable => !$enable_fencing, @@ -93,12 +100,21 @@ if hiera('step') >= 1 { # avoid races where non-master nodes attempt to start without # config (eg. 
binding on 0.0.0.0) # The module ignores erlang_cookie if cluster_config is false + $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false)) + if $rabbit_ipv6 { + $rabbit_env = merge(hiera('rabbitmq_environment'), { + 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"' + }) + } else { + $rabbit_env = hiera('rabbitmq_environment') + } + class { '::rabbitmq': service_manage => false, tcp_keepalive => false, config_kernel_variables => hiera('rabbitmq_kernel_variables'), config_variables => hiera('rabbitmq_config_variables'), - environment_variables => hiera('rabbitmq_environment'), + environment_variables => $rabbit_env, } -> file { '/var/lib/rabbitmq/.erlang.cookie': ensure => file, @@ -185,8 +201,19 @@ if hiera('step') >= 1 { if hiera('step') >= 2 { # NOTE(gfidente): the following vars are needed on all nodes so they - # need to stay out of pacemaker_master conditional - $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') + # need to stay out of pacemaker_master conditional. + # The addresses mangling will hopefully go away when we'll be able to + # configure the connection string via hostnames, until then, we need to pass + # the list of IPv6 addresses *with* port and without the brackets as 'members' + # argument for the 'mongodb_replset' resource. + if str2bool(hiera('mongodb::server::ipv6', false)) { + $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[') + $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017') + $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') + } else { + $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') + $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') + } $mongodb_replset = hiera('mongodb::server::replset') if $pacemaker_master { @@ -195,6 +222,11 @@ if hiera('step') >= 2 { include ::pacemaker::resource_defaults + # Create an openstack-core dummy resource. See RHBZ 1290121 + pacemaker::resource::ocf { 'openstack-core': + ocf_agent_name => 'heartbeat:Dummy', + clone_params => true, + } # FIXME: we should not have to access tripleo::loadbalancer class # parameters here to configure pacemaker VIPs. 
The configuration # of pacemaker VIPs could move into puppet-tripleo or we should @@ -204,8 +236,14 @@ if hiera('step') >= 2 { } $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip') + if is_ipv6_address($control_vip) { + $control_vip_netmask = '64' + } else { + $control_vip_netmask = '32' + } pacemaker::resource::ip { 'control_vip': - ip_address => $control_vip, + ip_address => $control_vip, + cidr_netmask => $control_vip_netmask, } pacemaker::constraint::base { 'control_vip-then-haproxy': constraint_type => 'order', @@ -226,9 +264,15 @@ if hiera('step') >= 2 { } $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip') + if is_ipv6_address($public_vip) { + $public_vip_netmask = '64' + } else { + $public_vip_netmask = '32' + } if $public_vip and $public_vip != $control_vip { pacemaker::resource::ip { 'public_vip': - ip_address => $public_vip, + ip_address => $public_vip, + cidr_netmask => $public_vip_netmask, } pacemaker::constraint::base { 'public_vip-then-haproxy': constraint_type => 'order', @@ -250,9 +294,15 @@ if hiera('step') >= 2 { } $redis_vip = hiera('redis_vip') + if is_ipv6_address($redis_vip) { + $redis_vip_netmask = '64' + } else { + $redis_vip_netmask = '32' + } if $redis_vip and $redis_vip != $control_vip { pacemaker::resource::ip { 'redis_vip': - ip_address => $redis_vip, + ip_address => $redis_vip, + cidr_netmask => $redis_vip_netmask, } pacemaker::constraint::base { 'redis_vip-then-haproxy': constraint_type => 'order', @@ -274,9 +324,15 @@ if hiera('step') >= 2 { } $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip') + if is_ipv6_address($internal_api_vip) { + $internal_api_vip_netmask = '64' + } else { + $internal_api_vip_netmask = '32' + } if $internal_api_vip and $internal_api_vip != $control_vip { pacemaker::resource::ip { 'internal_api_vip': - ip_address => $internal_api_vip, + ip_address => $internal_api_vip, + cidr_netmask => $internal_api_vip_netmask, } pacemaker::constraint::base { 'internal_api_vip-then-haproxy': constraint_type => 'order', @@ -298,9 +354,15 @@ if hiera('step') >= 2 { } $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip') + if is_ipv6_address($storage_vip) { + $storage_vip_netmask = '64' + } else { + $storage_vip_netmask = '32' + } if $storage_vip and $storage_vip != $control_vip { pacemaker::resource::ip { 'storage_vip': - ip_address => $storage_vip, + ip_address => $storage_vip, + cidr_netmask => $storage_vip_netmask, } pacemaker::constraint::base { 'storage_vip-then-haproxy': constraint_type => 'order', @@ -322,9 +384,15 @@ if hiera('step') >= 2 { } $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip') + if is_ipv6_address($storage_mgmt_vip) { + $storage_mgmt_vip_netmask = '64' + } else { + $storage_mgmt_vip_netmask = '32' + } if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip { pacemaker::resource::ip { 'storage_mgmt_vip': - ip_address => $storage_mgmt_vip, + ip_address => $storage_mgmt_vip, + cidr_netmask => $storage_mgmt_vip_netmask, } pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy': constraint_type => 'order', @@ -374,7 +442,7 @@ if hiera('step') >= 2 { before => Mongodb_replset[$mongodb_replset], } mongodb_replset { $mongodb_replset : - members => $mongo_node_ips_with_port, + members => $mongo_node_ips_with_port_nobr, } } @@ -469,8 +537,15 @@ MYSQL_HOST=localhost\n", $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) if $enable_ceph { + $mon_initial_members = 
downcase(hiera('ceph_mon_initial_members')) + if str2bool(hiera('ceph_ipv6', false)) { + $mon_host = hiera('ceph_mon_host_v6') + } else { + $mon_host = hiera('ceph_mon_host') + } class { '::ceph::profile::params': - mon_initial_members => downcase(hiera('ceph_mon_initial_members')), + mon_initial_members => $mon_initial_members, + mon_host => $mon_host, } include ::ceph::conf include ::ceph::profile::mon @@ -496,6 +571,14 @@ MYSQL_HOST=localhost\n", } if str2bool(hiera('enable_external_ceph', false)) { + if str2bool(hiera('ceph_ipv6', false)) { + $mon_host = hiera('ceph_mon_host_v6') + } else { + $mon_host = hiera('ceph_mon_host') + } + class { '::ceph::profile::params': + mon_host => $mon_host, + } include ::ceph::conf include ::ceph::profile::client } @@ -577,14 +660,18 @@ if hiera('step') >= 3 { manage_service => false, enabled => false, } + include ::glance::notify::rabbitmq include join(['::glance::backend::', $glance_backend]) - $rabbit_port = hiera('rabbitmq::port') - class { '::glance::notify::rabbitmq': - rabbit_hosts => suffix(hiera('rabbit_node_ips'), ":${rabbit_port}"), + + $nova_ipv6 = hiera('nova::use_ipv6', false) + if $nova_ipv6 { + $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211') + } else { + $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211') } class { '::nova' : - memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'), + memcached_servers => $memcached_servers } include ::nova::config @@ -898,7 +985,7 @@ if hiera('step') >= 3 { $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend]) class { '::cinder::backends' : - enabled_backends => $cinder_enabled_backends, + enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')), } class { '::sahara': @@ -1038,32 +1125,6 @@ if hiera('step') >= 3 { neutron_options => $neutron_options, } - # Aodh - class { '::aodh' : - database_connection => $ceilometer_database_connection, - } - include ::aodh::config - include ::aodh::auth - include ::aodh::client - include ::aodh::wsgi::apache - class { '::aodh::api': - manage_service => false, - enabled => false, - service_name => 'httpd', - } - class { '::aodh::evaluator': - manage_service => false, - enabled => false, - } - class { '::aodh::notifier': - manage_service => false, - enabled => false, - } - class { '::aodh::listener': - manage_service => false, - enabled => false, - } - $snmpd_user = hiera('snmpd_readonly_user_name') snmp::snmpv3_user { $snmpd_user: authtype => 'MD5', @@ -1071,7 +1132,7 @@ if hiera('step') >= 3 { } class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], + snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } hiera_include('controller_classes') @@ -1103,45 +1164,55 @@ if hiera('step') >= 4 { pacemaker::constraint::base { 'haproxy-then-keystone-constraint': constraint_type => 
'order', first_resource => 'haproxy-clone', - second_resource => "${::apache::params::service_name}-clone", + second_resource => 'openstack-core-clone', first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service['haproxy'], - Pacemaker::Resource::Service[$::apache::params::service_name]], + Pacemaker::Resource::Ocf['openstack-core']], } } + + pacemaker::constraint::base { 'openstack-core-then-httpd-constraint': + constraint_type => 'order', + first_resource => 'openstack-core-clone', + second_resource => "${::apache::params::service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::apache::params::service_name], + Pacemaker::Resource::Ocf['openstack-core']], + } pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint': constraint_type => 'order', first_resource => 'rabbitmq-clone', - second_resource => "${::apache::params::service_name}-clone", + second_resource => 'openstack-core-clone', first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Ocf['rabbitmq'], - Pacemaker::Resource::Service[$::apache::params::service_name]], + Pacemaker::Resource::Ocf['openstack-core']], } - pacemaker::constraint::base { 'memcached-then-keystone-constraint': + pacemaker::constraint::base { 'memcached-then-openstack-core-constraint': constraint_type => 'order', first_resource => 'memcached-clone', - second_resource => "${::apache::params::service_name}-clone", + second_resource => 'openstack-core-clone', first_action => 'start', second_action => 'start', require => [Pacemaker::Resource::Service['memcached'], - Pacemaker::Resource::Service[$::apache::params::service_name]], + Pacemaker::Resource::Ocf['openstack-core']], } - pacemaker::constraint::base { 'galera-then-keystone-constraint': + pacemaker::constraint::base { 'galera-then-openstack-core-constraint': constraint_type => 'order', first_resource => 'galera-master', - second_resource => "${::apache::params::service_name}-clone", + second_resource => 'openstack-core-clone', first_action => 'promote', second_action => 'start', require => [Pacemaker::Resource::Ocf['galera'], - Pacemaker::Resource::Service[$::apache::params::service_name]], + Pacemaker::Resource::Ocf['openstack-core']], } # Cinder pacemaker::resource::service { $::cinder::params::api_service : clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::apache::params::service_name], + require => Pacemaker::Resource::Ocf['openstack-core'], } pacemaker::resource::service { $::cinder::params::scheduler_service : clone_params => 'interleave=true', @@ -1150,12 +1221,12 @@ if hiera('step') >= 4 { pacemaker::constraint::base { 'keystone-then-cinder-api-constraint': constraint_type => 'order', - first_resource => "${::apache::params::service_name}-clone", + first_resource => 'openstack-core-clone', second_resource => "${::cinder::params::api_service}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::cinder::params::api_service], - Pacemaker::Resource::Service[$::apache::params::service_name]], + require => [Pacemaker::Resource::Ocf['openstack-core'], + Pacemaker::Resource::Service[$::cinder::params::api_service]], } pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint': constraint_type => 'order', @@ -1193,25 +1264,25 @@ if hiera('step') >= 4 { # Sahara pacemaker::resource::service { $::sahara::params::api_service_name : clone_params => 'interleave=true', - require => 
@@ -1193,25 +1264,25 @@ if hiera('step') >= 4 {
  # Sahara
  pacemaker::resource::service { $::sahara::params::api_service_name :
    clone_params => 'interleave=true',
-   require      => Pacemaker::Resource::Service[$::apache::params::service_name],
+   require      => Pacemaker::Resource::Ocf['openstack-core'],
  }
  pacemaker::resource::service { $::sahara::params::engine_service_name :
    clone_params => 'interleave=true',
  }
  pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
    constraint_type => 'order',
-   first_resource  => "${::apache::params::service_name}-clone",
+   first_resource  => 'openstack-core-clone',
    second_resource => "${::sahara::params::api_service_name}-clone",
    first_action    => 'start',
    second_action   => 'start',
    require         => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
-                       Pacemaker::Resource::Service[$::apache::params::service_name]],
+                       Pacemaker::Resource::Ocf['openstack-core']],
  }
  # Glance
  pacemaker::resource::service { $::glance::params::registry_service_name :
    clone_params => 'interleave=true',
-   require      => Pacemaker::Resource::Service[$::apache::params::service_name],
+   require      => Pacemaker::Resource::Ocf['openstack-core'],
  }
  pacemaker::resource::service { $::glance::params::api_service_name :
    clone_params => 'interleave=true',
@@ -1219,12 +1290,12 @@ if hiera('step') >= 4 {
  pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
    constraint_type => 'order',
-   first_resource  => "${::apache::params::service_name}-clone",
+   first_resource  => 'openstack-core-clone',
    second_resource => "${::glance::params::registry_service_name}-clone",
    first_action    => 'start',
    second_action   => 'start',
    require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
-                       Pacemaker::Resource::Service[$::apache::params::service_name]],
+                       Pacemaker::Resource::Ocf['openstack-core']],
  }
  pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
    constraint_type => 'order',
@@ -1262,12 +1333,12 @@ if hiera('step') >= 4 {
    } ->
    pacemaker::resource::service { $::neutron::params::server_service:
      clone_params => 'interleave=true',
-     require      => Pacemaker::Resource::Service[$::apache::params::service_name]
+     require      => Pacemaker::Resource::Ocf['openstack-core']
    }
  } else {
    pacemaker::resource::service { $::neutron::params::server_service:
      clone_params => 'interleave=true',
-     require      => Pacemaker::Resource::Service[$::apache::params::service_name]
+     require      => Pacemaker::Resource::Ocf['openstack-core']
    }
  }
  if hiera('neutron::enable_l3_agent', true) {
@@ -1340,14 +1411,14 @@ if hiera('step') >= 4 {
    }
  }
  pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
-   constraint_type => 'order',
-   first_resource  => "${::apache::params::service_name}-clone",
-   second_resource => "${::neutron::params::server_service}-clone",
-   first_action    => 'start',
-   second_action   => 'start',
-   require         => [Pacemaker::Resource::Service[$::apache::params::service_name],
-                       Pacemaker::Resource::Service[$::neutron::params::server_service]],
- }
+   constraint_type => 'order',
+   first_resource  => 'openstack-core-clone',
+   second_resource => "${::neutron::params::server_service}-clone",
+   first_action    => 'start',
+   second_action   => 'start',
+   require         => [Pacemaker::Resource::Ocf['openstack-core'],
+                       Pacemaker::Resource::Service[$::neutron::params::server_service]],
+ }
  if hiera('neutron::enable_ovs_agent',true) {
    pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
      constraint_type => 'order',
@@ -1461,7 +1532,7 @@ if hiera('step') >= 4 {
  }
  pacemaker::resource::service { $::nova::params::consoleauth_service_name :
    clone_params => 'interleave=true',
-   require      => Pacemaker::Resource::Service[$::apache::params::service_name],
+   require      => Pacemaker::Resource::Ocf['openstack-core'],
  }
  pacemaker::resource::service { $::nova::params::vncproxy_service_name :
    clone_params => 'interleave=true',
@@ -1472,12 +1543,12 @@ if hiera('step') >= 4 {
  pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
    constraint_type => 'order',
-   first_resource  => "${::apache::params::service_name}-clone",
+   first_resource  => 'openstack-core-clone',
    second_resource => "${::nova::params::consoleauth_service_name}-clone",
    first_action    => 'start',
    second_action   => 'start',
    require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
-                       Pacemaker::Resource::Service[$::apache::params::service_name]],
+                       Pacemaker::Resource::Ocf['openstack-core']],
  }
  pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
    constraint_type => 'order',
@@ -1544,19 +1615,19 @@ if hiera('step') >= 4 {
                        Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
  }
- # Ceilometer and Aodh
+ # Ceilometer
  case downcase(hiera('ceilometer_backend')) {
    /mysql/: {
-     pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
+     pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
        clone_params => 'interleave=true',
-       require      => Pacemaker::Resource::Service[$::apache::params::service_name],
+       require      => Pacemaker::Resource::Ocf['openstack-core'],
      }
    }
    default: {
-     pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
+     pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
        clone_params => 'interleave=true',
-       require      => [Pacemaker::Resource::Service[$::apache::params::service_name],
-                        Pacemaker::Resource::Service[$::mongodb::params::service_name]],
+       require      => [Pacemaker::Resource::Ocf['openstack-core'],
+                        Pacemaker::Resource::Service[$::mongodb::params::service_name]],
      }
    }
  }
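Which branch of the ceilometer_backend case above applies is resolved by hiera at deploy time; with any non-mysql backend the central agent also waits on mongod. A quick way to see the value a controller resolved (a sketch; the hiera config path is the usual TripleO default and may differ):

    hiera -c /etc/puppet/hiera.yaml ceilometer_backend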
@@ -1577,10 +1648,8 @@ if hiera('step') >= 4 {
  # Fedora doesn't know `require-all` parameter for constraints yet
  if $::operatingsystem == 'Fedora' {
    $redis_ceilometer_constraint_params = undef
-   $redis_aodh_constraint_params = undef
  } else {
    $redis_ceilometer_constraint_params = 'require-all=false'
-   $redis_aodh_constraint_params = 'require-all=false'
  }
  pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
    constraint_type => 'order',
@@ -1592,24 +1661,14 @@ if hiera('step') >= 4 {
    require         => [Pacemaker::Resource::Ocf['redis'],
                        Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
  }
- pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
-   constraint_type   => 'order',
-   first_resource    => 'redis-master',
-   second_resource   => "${::aodh::params::evaluator_service_name}-clone",
-   first_action      => 'promote',
-   second_action     => 'start',
-   constraint_params => $redis_aodh_constraint_params,
-   require           => [Pacemaker::Resource::Ocf['redis'],
-                         Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
- }
  pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
    constraint_type => 'order',
-   first_resource  => "${::apache::params::service_name}-clone",
+   first_resource  => 'openstack-core-clone',
    second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
    first_action    => 'start',
    second_action   => 'start',
    require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
-                       Pacemaker::Resource::Service[$::apache::params::service_name]],
+                       Pacemaker::Resource::Ocf['openstack-core']],
  }
  pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
    constraint_type => 'order',
@@ -1652,60 +1711,6 @@ if hiera('step') >= 4 {
    require         => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                        Pacemaker::Resource::Ocf['delay']],
  }
- # Aodh
- pacemaker::resource::service { $::aodh::params::api_service_name :
-   clone_params => 'interleave=true',
- }
- pacemaker::resource::service { $::aodh::params::evaluator_service_name :
-   clone_params => 'interleave=true',
- }
- pacemaker::resource::service { $::aodh::params::notifier_service_name :
-   clone_params => 'interleave=true',
- }
- pacemaker::resource::service { $::aodh::params::listener_service_name :
-   clone_params => 'interleave=true',
- }
- pacemaker::constraint::base { 'keystone-then-aodh-api-constraint':
-   constraint_type => 'order',
-   first_resource  => "${::apache::params::service_name}-clone",
-   second_resource => "${::aodh::params::api_service_name}-clone",
-   first_action    => 'start',
-   second_action   => 'start',
-   require         => [Pacemaker::Resource::Service[$::aodh::params::api_service_name],
-                       Pacemaker::Resource::Service[$::apache::params::service_name]],
- }
- pacemaker::constraint::base { 'aodh-delay-then-aodh-evaluator-constraint':
-   constraint_type => 'order',
-   first_resource  => 'delay-clone',
-   second_resource => "${::aodh::params::evaluator_service_name}-clone",
-   first_action    => 'start',
-   second_action   => 'start',
-   require         => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
-                       Pacemaker::Resource::Ocf['delay']],
- }
- pacemaker::constraint::colocation { 'aodh-evaluator-with-aodh-delay-colocation':
-   source  => "${::aodh::params::evaluator_service_name}-clone",
-   target  => 'delay-clone',
-   score   => 'INFINITY',
-   require => [Pacemaker::Resource::Service[$::horizon::params::http_service],
-               Pacemaker::Resource::Ocf['delay']],
- }
- pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
-   constraint_type => 'order',
-   first_resource  => "${::aodh::params::evaluator_service_name}-clone",
-   second_resource => "${::aodh::params::notifier_service_name}-clone",
-   first_action    => 'start',
-   second_action   => 'start',
-   require         => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
-                       Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
- }
- pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation':
-   source  => "${::aodh::params::notifier_service_name}-clone",
-   target  => "${::aodh::params::evaluator_service_name}-clone",
-   score   => 'INFINITY',
-   require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
-               Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
- }
  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
    pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
      constraint_type => 'order',
@@ -1733,12 +1738,12 @@ if hiera('step') >= 4 {
  }
  pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
    constraint_type => 'order',
-   first_resource  => "${::apache::params::service_name}-clone",
+   first_resource  => 'openstack-core-clone',
    second_resource => "${::heat::params::api_service_name}-clone",
    first_action    => 'start',
    second_action   => 'start',
    require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
-                       Pacemaker::Resource::Service[$::apache::params::service_name]],
+                       Pacemaker::Resource::Ocf['openstack-core']],
  }
  pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
    constraint_type => 'order',
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
index 1ac66904..ae074589 100644
--- a/puppet/manifests/overcloud_object.pp
+++ b/puppet/manifests/overcloud_object.pp
@@ -50,7 +50,7 @@ snmp::snmpv3_user { $snmpd_user:
 }
 class { '::snmp':
   agentaddress => ['udp:161','udp6:[::1]:161'],
-  snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
+  snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
 }
 hiera_include('object_classes')
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index 72cd36c3..134dc43b 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -44,7 +44,7 @@ if $cinder_enable_iscsi {
   $cinder_enabled_backends = any2array($cinder_iscsi_backend)
   class { '::cinder::backends' :
-    enabled_backends => $cinder_enabled_backends,
+    enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
   }
 $snmpd_user = hiera('snmpd_readonly_user_name')
@@ -54,7 +54,7 @@ snmp::snmpv3_user { $snmpd_user:
 }
 class { '::snmp':
   agentaddress => ['udp:161','udp6:[::1]:161'],
-  snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
+  snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
 }
 hiera_include('volume_classes')
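Both manifests above now prepend an explicit createUser directive, built from snmpd_readonly_user_name and snmpd_readonly_user_password, to the snmpd configuration. A hedged way to exercise that credential from the node itself (the user name, password and OID below are placeholders, not values from this change):

    # SNMPv3 walk, authentication-only (MD5), against the local agent
    snmpwalk -v3 -l authNoPriv -a MD5 -u ro_snmp_user -A 'SNMP_PASSWORD' localhost system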
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index e77a25f8..c26aca77 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -83,6 +83,9 @@ parameters:
     description: |
       Role specific additional hiera configuration to inject into the cluster.
     type: json
+  SwiftStorageIPs:
+    default: {}
+    type: json
   NetworkDeploymentActions:
     type: comma_delimited_list
     description: >
@@ -111,6 +114,9 @@ parameters:
     type: json
     description: Optional scheduler hints to pass to nova
     default: {}
+  NodeIndex:
+    type: number
+    default: 0

 resources:

@@ -156,31 +162,43 @@ resources:
     type: OS::TripleO::SwiftStorage::Ports::ExternalPort
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: SwiftStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
   InternalApiPort:
     type: OS::TripleO::SwiftStorage::Ports::InternalApiPort
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: SwiftStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
   StoragePort:
     type: OS::TripleO::SwiftStorage::Ports::StoragePort
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: SwiftStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
   StorageMgmtPort:
     type: OS::TripleO::SwiftStorage::Ports::StorageMgmtPort
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: SwiftStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
   TenantPort:
     type: OS::TripleO::SwiftStorage::Ports::TenantPort
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: SwiftStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
   ManagementPort:
     type: OS::TripleO::SwiftStorage::Ports::ManagementPort
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: SwiftStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
   NetworkConfig:
     type: OS::TripleO::ObjectStorage::Net::SoftwareConfig
@@ -198,11 +216,17 @@ resources:
     properties:
       ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
       ExternalIp: {get_attr: [ExternalPort, ip_address]}
+      ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
       InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+      InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
       StorageIp: {get_attr: [StoragePort, ip_address]}
+      StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
       StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+      StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
       TenantIp: {get_attr: [TenantPort, ip_address]}
+      TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
       ManagementIp: {get_attr: [ManagementPort, ip_address]}
+      ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -367,7 +391,7 @@ outputs:
       str_replace:
         template: 'r1z1-IP:%PORT%/d1'
         params:
-          IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
+          IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
   external_ip_address:
     description: IP address of the server in the external network
     value: {get_attr: [ExternalPort, ip_address]}
diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml
index ebecd0cb..5e2f698f 100644
--- a/puppet/vip-config.yaml
+++ b/puppet/vip-config.yaml
@@ -26,7 +26,6 @@ resources:
         nova_api_vip: {get_input: nova_api_vip}
         nova_metadata_vip: {get_input: nova_metadata_vip}
         ceilometer_api_vip: {get_input: ceilometer_api_vip}
-        aodh_api_vip: {get_input: aodh_api_vip}
         heat_api_vip: {get_input: heat_api_vip}
         horizon_vip: {get_input: horizon_vip}
         redis_vip: {get_input: redis_vip}
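The last swift-storage.yaml hunk switches the ring device string from net_ip_map to net_ip_uri_map, so an IPv6 address is rendered in bracketed URI form and the r1z1-IP:%PORT%/d1 template stays parseable. Purely as an illustration (the address, port and device name are made up), the resulting entry is the form swift-ring-builder expects when a device is added:

    swift-ring-builder /etc/swift/object.builder add r1z1-[fd00:fd00:fd00:3000::10]:6000/d1 100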