80 files changed, 2253 insertions, 429 deletions
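For context (this note is not part of the commit): the environment files added below are layered onto the base resource registry (overcloud-resource-registry-puppet.yaml) at deploy time, with later files overriding earlier ones. A minimal usage sketch, assuming the standard python-tripleoclient workflow and that the command is run from the templates directory:

    # illustration only; exact file set depends on the chosen topics in capabilities_map.yaml
    openstack overcloud deploy --templates \
      -e environments/network-isolation.yaml \
      -e environments/net-bond-with-vlans.yaml \
      -e environments/network-management.yaml

Ordering follows the `requires` entries in capabilities_map.yaml: network-isolation.yaml must be listed before the NIC-configuration environment that assumes it.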
diff --git a/capabilities_map.yaml b/capabilities_map.yaml new file mode 100644 index 00000000..30ee211e --- /dev/null +++ b/capabilities_map.yaml @@ -0,0 +1,226 @@ +# This file holds metadata about the capabilities of the tripleo-heat-templates +# repository for deployment using puppet. It groups configuration by topic, +# describes possible combinations of environments and resource capabilities. + +# root_template: identifies repository's root template +# root_environment: identifies root_environment, this one is special in terms of +# order in which the environments are merged before deploying. This one serves as +# a base and it's parameters/resource_registry gets overriden by other environments +# if used. + +# topics: +# High Level grouping by purpose of environments +# Attributes: +# title: (required) +# description: (optional) +# environment_groups: (required) + +# environment_groups: +# Identifies an environment choice. If group includes multiple environments it +# indicates that environments in group are mutually exclusive. +# Attributes: +# title: (optional) +# description: (optional) +# tags: a list of tags to provide aditional information for e.g. filtering (optional) +# environments: (required) + +# environments: +# List of environments in environment group +# Attributes: +# file: a file name including path within repository (required) +# title: (required) +# description: (optional) +# requires: an array of environments which are required by this environment (optional) +# resource_registry: [tbd] (optional) + +# resource_registry: +# [tbd] Each environment can provide options on resource_registry level applicable +# only when that given environment is used. (resource_type of that environment can +# be implemented using multiple templates). + +root_template: overcloud.yaml +root_environment: overcloud-resource-registry-puppet.yaml +topics: + - title: Basic Configuration + description: + environment_groups: + - title: + description: Enable basic configuration required for OpenStack Deployment + environments: + - file: overcloud-resource-registry-puppet.yaml + title: Default Configuration + description: + + - title: Deployment options + description: + environment_groups: + - title: High Availability + description: Enables configuration of an Overcloud controller with Pacemaker + environments: + - file: environments/puppet-pacemaker.yaml + title: Pacemaker + description: Enable configuration of an Overcloud controller with Pacemaker + requires: + - overcloud-resource-registry-puppet.yaml + - title: Docker RDO + description: > + Docker container with heat agents for containerized compute node + environments: + - file: environments/docker-rdo.yaml + title: Docker RDO + description: + requires: + - overcloud-resource-registry-puppet.yaml + + # - title: Network Interface Configuration + # description: + # environment_groups: + + - title: Overlay network Configuration + description: + environment_groups: + - title: Network Isolation + description: > + Enable the creation of Neutron networks for + isolated Overcloud traffic and configure each role to assign ports + (related to that role) on these networks. + environments: + - file: environments/network-isolation.yaml + title: Network Isolation + description: Enable Network Isolation + requires: + - overcloud-resource-registry-puppet.yaml + - title: Single nic or Bonding + description: > + Configure roles to use pair of bonded nics or to use Vlans on a + single nic. This option assumes use of Network Isolation. 
+ environments: + - file: environments/net-bond-with-vlans.yaml + title: Bond with Vlans + description: > + Configure each role to use a pair of bonded nics (nic2 and + nic3) and configures an IP address on each relevant isolated network + for each role. This option assumes use of Network Isolation. + requires: + - environments/network-isolation.yaml + - overcloud-resource-registry-puppet.yaml + - file: environments/net-single-nic-with-vlans.yaml + title: Single nic with Vlans + description: > + Configure each role to use Vlans on a single nic for + each isolated network. This option assumes use of Network Isolation. + requires: + - environments/network-isolation.yaml + - overcloud-resource-registry-puppet.yaml + + - title: Neutron Plugin Configuration + description: + environment_groups: + - title: BigSwitch extensions or Cisco N1KV backend + description: + environments: + - file: environments/neutron-ml2-bigswitch.yaml + title: BigSwitch extensions + description: > + Enable Big Switch extensions, configured via puppet + requires: + - overcloud-resource-registry-puppet.yaml + - file: environments/neutron-ml2-cisco-n1kv.yaml + title: Cisco N1KV backend + description: > + Enable a Cisco N1KV backend, configured via puppet + requires: + - overcloud-resource-registry-puppet.yaml + - title: Cisco Neutron plugin + description: > + Enable a Cisco Neutron plugin + environments: + - file: environments/neutron-ml2-cisco-nexus-ucsm.yaml + title: Cisco Neutron plugin + description: + requires: + - overcloud-resource-registry-puppet.yaml + + - title: Storage + description: + environment_groups: + - title: Cinder NetApp backend + description: > + Enable a Cinder NetApp backend, configured via puppet + environments: + - file: environments/cinder-netapp-config.yaml + title: Cinder NetApp backend + description: + requires: + - overcloud-resource-registry-puppet.yaml + - title: Externally managed Ceph + description: > + Enable the use of an externally managed Ceph cluster + environments: + - file: environments/puppet-ceph-external.yaml + title: Externally managed Ceph + description: + requires: + - overcloud-resource-registry-puppet.yaml + - title: Ceph Devel + description: > + Enable a Ceph storage cluster using the controller and 2 ceph nodes. + Rbd backends are enabled for Cinder, Glance, and Nova. + environments: + - file: environments/puppet-ceph-devel.yaml + title: Ceph Devel + description: + requires: + - overcloud-resource-registry-puppet.yaml + - title: Storage Environment + description: > + Can be used to set up storage backends. Defaults to Ceph used as a + backend for Cinder, Glance and Nova ephemeral storage. It configures + for example which services will use Ceph, or if any of the services + will use NFS. And more. Usually requires to be edited by user first. + tags: + - no-gui + environments: + - file: environments/storage-environment.yaml + title: Storage Environment + description: + requires: + - overcloud-resource-registry-puppet.yaml + + - title: Utilities + description: + environment_groups: + - title: Config Debug + description: Enable config management (e.g. Puppet) debugging + environments: + - file: environments/config-debug.yaml + title: Config Debug + description: + requires: + - overcloud-resource-registry-puppet.yaml + - title: Disable journal in MongoDb + description: > + Since, when journaling is enabled, MongoDb will create big journal + file it can take time. In a CI environment for example journaling is + not necessary. 
+ environments: + - file: environments/mongodb-nojournal.yaml + title: Disable journal in MongoDb + description: + requires: + - overcloud-resource-registry-puppet.yaml + - title: Overcloud Steps + description: > + Specifies hooks/breakpoints where overcloud deployment should stop + Allows operator validation between steps, and/or more granular control. + Note: the wildcards relate to naming convention for some resource suffixes, + e.g see puppet/*-post.yaml, enabling this will mean we wait for + a user signal on every *Deployment_StepN resource defined in those files. + tags: + - no-gui + environments: + - file: environments/overcloud-steps.yaml + title: Overcloud Steps + description: + requires: + - overcloud-resource-registry-puppet.yaml diff --git a/environments/external-loadbalancer-vip.yaml b/environments/external-loadbalancer-vip.yaml index 47d5bd9b..1cf59825 100644 --- a/environments/external-loadbalancer-vip.yaml +++ b/environments/external-loadbalancer-vip.yaml @@ -1,14 +1,37 @@ resource_registry: OS::TripleO::Network::Ports::NetVipMap: ../network/ports/net_vip_map_external.yaml + OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/noop.yaml + OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/noop.yaml + OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/noop.yaml + OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/noop.yaml + OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/from_service.yaml + OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml + OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml + OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml + OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml + OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml parameter_defaults: # When using an external loadbalancer set the following in parameter_defaults # to control your VIPs (currently one per network) # NOTE: we will eventually move to one VIP per service # - # ControlNetworkVip: - # ExternalNetworkVip: - # InternalApiNetworkVip: - # StorageNetworkVip: - # StorageMgmtNetworkVip: - EnableLoadBalancer: false
\ No newline at end of file + ControlPlaneIP: 192.0.2.251 + ExternalNetworkVip: 10.0.0.251 + InternalApiNetworkVip: 172.16.2.251 + StorageNetworkVip: 172.16.1.251 + StorageMgmtNetworkVip: 172.16.3.251 + ServiceVips: + redis: 172.16.2.252 + ControllerIPs: + external: + - 10.0.0.253 + internal_api: + - 172.16.2.253 + storage: + - 172.16.1.253 + storage_mgmt: + - 172.16.3.253 + tenant: + - 172.16.0.253 + EnableLoadBalancer: false diff --git a/environments/ips-from-pool.yaml b/environments/ips-from-pool.yaml new file mode 100644 index 00000000..8c27fe4e --- /dev/null +++ b/environments/ips-from-pool.yaml @@ -0,0 +1,20 @@ +resource_registry: + OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml + OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml + OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml + OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml + OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml + +parameter_defaults: + ControllerIPs: + # Each controller will get an IP from the lists below, first controller, first IP + external: + - 10.0.0.251 + internal_api: + - 172.16.2.251 + storage: + - 172.16.1.251 + storage_mgmt: + - 172.16.3.251 + tenant: + - 172.16.0.251 diff --git a/environments/network-isolation.yaml b/environments/network-isolation.yaml index 937931d1..87fc22f5 100644 --- a/environments/network-isolation.yaml +++ b/environments/network-isolation.yaml @@ -1,12 +1,23 @@ # Enable the creation of Neutron networks for isolated Overcloud # traffic and configure each role to assign ports (related # to that role) on these networks. +# Many networks are disabled by default because they are not used +# in a typical configuration. Override via parameter_defaults. 
resource_registry: OS::TripleO::Network::External: ../network/external.yaml OS::TripleO::Network::InternalApi: ../network/internal_api.yaml OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml OS::TripleO::Network::Storage: ../network/storage.yaml OS::TripleO::Network::Tenant: ../network/tenant.yaml + # Management network is optional and disabled by default + OS::TripleO::Network::Management: ../network/noop.yaml + + # Port assignments for the VIPs + OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml + OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml + OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml + OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml + OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml # Port assignments for the controller role OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml @@ -14,25 +25,39 @@ resource_registry: OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml + OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/noop.yaml # Port assignments for the compute role + OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml + OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml + OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/noop.yaml # Port assignments for the ceph storage role + OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml + OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml + OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml + OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/noop.yaml # Port assignments for the swift storage role + OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml + OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml + OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/noop.yaml # Port assignments for the block storage role + OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml + OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml + OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/noop.yaml # Port assignments for service virtual IPs for the controller role OS::TripleO::Controller::Ports::RedisVipPort: ../network/ports/vip.yaml diff --git 
a/environments/network-management.yaml b/environments/network-management.yaml new file mode 100644 index 00000000..2f0cff8b --- /dev/null +++ b/environments/network-management.yaml @@ -0,0 +1,24 @@ +# Enable the creation of a system management network. This +# creates a Neutron network for isolated Overcloud +# system management traffic and configures each role to +# assign a port (related to that role) on that network. +# Note that the basic sample NIC configuration templates +# do not include the management network, see the +# single-nic-vlans-mgmt templates for an example. +resource_registry: + OS::TripleO::Network::Management: ../network/management.yaml + + # Port assignments for the controller role + OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml + + # Port assignments for the compute role + OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml + + # Port assignments for the ceph storage role + OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml + + # Port assignments for the swift storage role + OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml + + # Port assignments for the block storage role + OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml diff --git a/environments/neutron-midonet.yaml b/environments/neutron-midonet.yaml new file mode 100644 index 00000000..726852a0 --- /dev/null +++ b/environments/neutron-midonet.yaml @@ -0,0 +1,20 @@ +# A Heat environment that can be used to deploy MidoNet Services +resource_registry: + OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml + OS::TripleO::Controller::Net::SoftwareConfig: ../net-config-linux-bridge.yaml # We have to avoid any ovs bridge. MidoNet is incompatible with its datapath + +parameter_defaults: + EnableZookeeperOnController: true + EnableCassandraOnController: true + NeutronCorePlugin: 'midonet.neutron.plugin_v1.MidonetPluginV2' # Overriding default core_plugin in Neutron. Don't touch it + NeutronEnableIsolatedMetadata: true # MidoNet 1.9 needs this one to work. Don't change it + NeutronEnableL3Agent: false + NeutronEnableOVSAgent: false + + # Other available options for MidoNet Services + # TunnelZoneName: 'tunnelname' + # TunnelZoneType: 'gre' + # CassandraStoragePort: 7000 + # CassandraSslStoragePort: 7009 + # CassandraClientPort: 9042 + # CassandraClientPortThrift: 9160 diff --git a/environments/neutron-nuage-config.yaml b/environments/neutron-nuage-config.yaml index 4ba8d9cb..50ba8f53 100644 --- a/environments/neutron-nuage-config.yaml +++ b/environments/neutron-nuage-config.yaml @@ -12,3 +12,4 @@ parameter_defaults: NeutronNuageVSDOrganization: 'organization' NeutronNuageBaseURIVersion: 'default_uri_version' NeutronNuageCMSId: '' + UseForwardedFor: true diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml index f235cf8f..8986e35f 100644 --- a/environments/puppet-pacemaker.yaml +++ b/environments/puppet-pacemaker.yaml @@ -2,3 +2,5 @@ # Overcloud controller with Pacemaker. 
resource_registry: OS::TripleO::ControllerConfig: ../puppet/controller-config-pacemaker.yaml + OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml diff --git a/extraconfig/tasks/noop.yaml b/extraconfig/tasks/noop.yaml new file mode 100644 index 00000000..0cff7469 --- /dev/null +++ b/extraconfig/tasks/noop.yaml @@ -0,0 +1,10 @@ +heat_template_version: 2014-10-16 +description: 'No-op task' + +parameters: + servers: + type: json + input_values: + type: json + default: {} + description: input values for the software deployments diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh new file mode 100755 index 00000000..4e8b20fd --- /dev/null +++ b/extraconfig/tasks/pacemaker_resource_restart.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +set -eux + +pacemaker_status=$(systemctl is-active pacemaker) +check_interval=3 + +function check_resource { + + service=$1 + state=$2 + timeout=$3 + tstart=$(date +%s) + tend=$(( $tstart + $timeout )) + + if [ "$state" = "stopped" ]; then + match_for_incomplete='Started' + else # started + match_for_incomplete='Stopped' + fi + + while (( $(date +%s) < $tend )); do + node_states=$(pcs status --full | grep "$service" | grep -v Clone) + if echo "$node_states" | grep -q "$match_for_incomplete"; then + echo "$service not yet $state, sleeping $check_interval seconds." + sleep $check_interval + else + echo "$service has $state" + return + fi + done + + echo "$service never $state after $timeout seconds" | tee /dev/fd/2 + exit 1 + +} + +# Run if pacemaker is running, we're the bootstrap node, +# and we're updating the deployment (not creating). +if [ "$pacemaker_status" = "active" -a \ + "$(hiera bootstrap_nodeid)" = "$(facter hostname)" -a \ + "$(hiera update_identifier)" != "nil" ]; then + + #ensure neutron constraints like + #https://review.openstack.org/#/c/245093/ + if pcs constraint order show | grep "start neutron-server-clone then start neutron-ovs-cleanup-clone"; then + pcs constraint remove order-neutron-server-clone-neutron-ovs-cleanup-clone-mandatory + fi + + pcs resource disable httpd + check_resource httpd stopped 300 + pcs resource disable openstack-keystone + check_resource openstack-keystone stopped 1200 + + if pcs status | grep haproxy-clone; then + pcs resource restart haproxy-clone + fi + pcs resource restart redis-master + pcs resource restart mongod-clone + pcs resource restart rabbitmq-clone + pcs resource restart memcached-clone + pcs resource restart galera-master + + pcs resource enable openstack-keystone + check_resource openstack-keystone started 300 + pcs resource enable httpd + check_resource httpd started 800 + +fi diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.yaml new file mode 100644 index 00000000..7de41d94 --- /dev/null +++ b/extraconfig/tasks/post_puppet_pacemaker.yaml @@ -0,0 +1,44 @@ +heat_template_version: 2014-10-16 +description: 'Post-Puppet Config for Pacemaker deployments' + +parameters: + servers: + type: json + input_values: + type: json + description: input values for the software deployments + +resources: + + ControllerPostPuppetMaintenanceModeConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: | + #!/bin/bash + pacemaker_status=$(systemctl is-active pacemaker) + + if [ "$pacemaker_status" = "active" ]; then + pcs property set maintenance-mode=false + fi + + 
ControllerPostPuppetMaintenanceModeDeployment: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: servers} + config: {get_resource: ControllerPostPuppetMaintenanceModeConfig} + input_values: {get_param: input_values} + + ControllerPostPuppetRestartConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: pacemaker_resource_restart.sh} + + ControllerPostPuppetRestartDeployment: + type: OS::Heat::SoftwareDeployments + depends_on: ControllerPostPuppetMaintenanceModeDeployment + properties: + servers: {get_param: servers} + config: {get_resource: ControllerPostPuppetRestartConfig} + input_values: {get_param: input_values} diff --git a/extraconfig/tasks/pre_puppet_pacemaker.yaml b/extraconfig/tasks/pre_puppet_pacemaker.yaml new file mode 100644 index 00000000..2cfe92a7 --- /dev/null +++ b/extraconfig/tasks/pre_puppet_pacemaker.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2014-10-16 +description: 'Pre-Puppet Config for Pacemaker deployments' + +parameters: + servers: + type: json + input_values: + type: json + description: input values for the software deployments + +resources: + + ControllerPrePuppetMaintenanceModeConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: | + #!/bin/bash + pacemaker_status=$(systemctl is-active pacemaker) + + if [ "$pacemaker_status" = "active" ]; then + pcs property set maintenance-mode=true + fi + + ControllerPrePuppetMaintenanceModeDeployment: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: servers} + config: {get_resource: ControllerPrePuppetMaintenanceModeConfig} + input_values: {get_param: input_values} diff --git a/firstboot/userdata_heat_admin.yaml b/firstboot/userdata_heat_admin.yaml index 73481c63..f8891b29 100644 --- a/firstboot/userdata_heat_admin.yaml +++ b/firstboot/userdata_heat_admin.yaml @@ -1,7 +1,7 @@ heat_template_version: 2014-10-16 parameters: - # Can be overriden via parameter_defaults in the environment + # Can be overridden via parameter_defaults in the environment node_admin_username: type: string default: heat-admin diff --git a/net-config-bond.yaml b/net-config-bond.yaml index 797df4bf..b624563f 100644 --- a/net-config-bond.yaml +++ b/net-config-bond.yaml @@ -28,6 +28,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: + default: '' + description: IP address/subnet on the management network + type: string resources: OsNetConfigImpl: diff --git a/net-config-bridge.yaml b/net-config-bridge.yaml index ad16ef0b..4f7a19dc 100644 --- a/net-config-bridge.yaml +++ b/net-config-bridge.yaml @@ -28,6 +28,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: + default: '' + description: IP address/subnet on the management network + type: string resources: OsNetConfigImpl: diff --git a/net-config-noop.yaml b/net-config-noop.yaml index 30de5846..94c492c6 100644 --- a/net-config-noop.yaml +++ b/net-config-noop.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: + default: '' + description: IP address/subnet on the management network + type: string resources: OsNetConfigImpl: diff --git a/network/config/bond-with-vlans/README.md b/network/config/bond-with-vlans/README.md index 98879b4f..afe71776 100644 --- a/network/config/bond-with-vlans/README.md +++ b/network/config/bond-with-vlans/README.md @@ -3,10 +3,9 @@ Vlans on a bonded pair 
of NICs for each Overcloud role. There are two versions of the controller role template, one with an external network interface, and another without. If the -external network interface is not configured the ctlplane address +external network interface is not configured, the ctlplane address ranges will be used for external (public) network traffic. - Configuration ------------- @@ -20,8 +19,31 @@ something like this: OS::TripleO::ObjectStorage::Net::SoftwareConfig: network/config/bond-with-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: network/config/bond-with-vlans/ceph-storage.yaml +Or use this Heat environment file: + + environments/net-bond-with-vlans.yaml + Configuration with no External Network -------------------------------------- + Same as above except set the following value for the controller role: OS::TripleO::Controller::Net::SoftwareConfig: network/config/bond-with-vlans/controller-no-external.yaml + +Configuration with System Management Network +-------------------------------------------- + +To enable the optional System Management network, create a Heat environment +that looks something like this: + + resource\_registry: + OS::TripleO::Network::Management: ../network/management.yaml + OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml + OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml + OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml + OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml + OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml + +Or use this Heat environment file: + + environments/network-management.yaml diff --git a/network/config/bond-with-vlans/ceph-storage.yaml b/network/config/bond-with-vlans/ceph-storage.yaml index 620d1f7a..93db8666 100644 --- a/network/config/bond-with-vlans/ceph-storage.yaml +++ b/network/config/bond-with-vlans/ceph-storage.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string BondInterfaceOvsOptions: default: '' description: The ovs_options string for the bond interface. Set things like @@ -42,6 +46,10 @@ parameters: default: 40 description: Vlan ID for the storage mgmt network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ControlPlaneSubnetCidr: # Override this via parameter_defaults default: '24' description: The subnet CIDR of the control plane network. 
@@ -114,6 +122,14 @@ resources: addresses: - ip_netmask: {get_param: StorageMgmtIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: vlan + # device: bond1 + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/bond-with-vlans/cinder-storage.yaml b/network/config/bond-with-vlans/cinder-storage.yaml index f4c6de8f..bea98c19 100644 --- a/network/config/bond-with-vlans/cinder-storage.yaml +++ b/network/config/bond-with-vlans/cinder-storage.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string BondInterfaceOvsOptions: default: '' description: The ovs_options string for the bond interface. Set things like @@ -46,6 +50,10 @@ parameters: default: 40 description: Vlan ID for the storage mgmt network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ControlPlaneSubnetCidr: # Override this via parameter_defaults default: '24' description: The subnet CIDR of the control plane network. @@ -125,6 +133,14 @@ resources: addresses: - ip_netmask: {get_param: StorageMgmtIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: vlan + # device: bond1 + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/bond-with-vlans/compute.yaml b/network/config/bond-with-vlans/compute.yaml index 8cb3705b..774bf02d 100644 --- a/network/config/bond-with-vlans/compute.yaml +++ b/network/config/bond-with-vlans/compute.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string BondInterfaceOvsOptions: default: '' description: The ovs_options string for the bond interface. Set things like @@ -46,6 +50,10 @@ parameters: default: 50 description: Vlan ID for the tenant network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ControlPlaneSubnetCidr: # Override this via parameter_defaults default: '24' description: The subnet CIDR of the control plane network. 
@@ -125,6 +133,14 @@ resources: addresses: - ip_netmask: {get_param: TenantIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: vlan + # device: bond1 + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/bond-with-vlans/controller-no-external.yaml b/network/config/bond-with-vlans/controller-no-external.yaml index 22579e8f..375d40be 100644 --- a/network/config/bond-with-vlans/controller-no-external.yaml +++ b/network/config/bond-with-vlans/controller-no-external.yaml @@ -25,6 +25,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string BondInterfaceOvsOptions: default: '' description: The ovs_options string for the bond interface. Set things like @@ -50,6 +54,10 @@ parameters: default: 50 description: Vlan ID for the tenant network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ExternalInterfaceDefaultRoute: default: '10.0.0.1' description: default route for the external network @@ -66,6 +74,7 @@ resources: - type: ovs_bridge name: {get_input: bridge_name} + use_dhcp: true members: - type: ovs_bond @@ -107,6 +116,14 @@ resources: addresses: - ip_netmask: {get_param: TenantIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: vlan + # device: bond1 + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/bond-with-vlans/controller.yaml b/network/config/bond-with-vlans/controller.yaml index eb4399ea..d3627ead 100644 --- a/network/config/bond-with-vlans/controller.yaml +++ b/network/config/bond-with-vlans/controller.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string BondInterfaceOvsOptions: default: 'bond_mode=active-backup' description: The ovs_options string for the bond interface. Set things like @@ -54,6 +58,10 @@ parameters: default: 50 description: Vlan ID for the tenant network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. 
+ type: number ExternalInterfaceDefaultRoute: default: '10.0.0.1' description: default route for the external network @@ -119,7 +127,7 @@ resources: ip_netmask: {get_param: ExternalIpSubnet} routes: - - ip_netmask: 0.0.0.0/0 + default: true next_hop: {get_param: ExternalInterfaceDefaultRoute} - type: vlan @@ -149,6 +157,14 @@ resources: addresses: - ip_netmask: {get_param: TenantIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: vlan + # device: bond1 + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/bond-with-vlans/swift-storage.yaml b/network/config/bond-with-vlans/swift-storage.yaml index f6b2a699..de9121e5 100644 --- a/network/config/bond-with-vlans/swift-storage.yaml +++ b/network/config/bond-with-vlans/swift-storage.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string BondInterfaceOvsOptions: default: '' description: The ovs_options string for the bond interface. Set things like @@ -46,6 +50,10 @@ parameters: default: 40 description: Vlan ID for the storage mgmt network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ControlPlaneSubnetCidr: # Override this via parameter_defaults default: '24' description: The subnet CIDR of the control plane network. @@ -125,6 +133,14 @@ resources: addresses: - ip_netmask: {get_param: StorageMgmtIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: vlan + # device: bond1 + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/multiple-nics/README.md b/network/config/multiple-nics/README.md index 3d81f0be..0d8a0f03 100644 --- a/network/config/multiple-nics/README.md +++ b/network/config/multiple-nics/README.md @@ -19,3 +19,21 @@ something like this: Or use this Heat environment file: environments/net-multiple-nics.yaml + +Configuration with System Management Network +-------------------------------------------- + +To enable the optional System Management network, create a Heat environment +that looks something like this: + + resource\_registry: + OS::TripleO::Network::Management: ../network/management.yaml + OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml + OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml + OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml + OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml + OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml + +Or use this Heat environment file: + + environments/network-management.yaml diff --git a/network/config/multiple-nics/ceph-storage.yaml b/network/config/multiple-nics/ceph-storage.yaml index 7d650f4b..a2a6b40d 100644 --- a/network/config/multiple-nics/ceph-storage.yaml +++ b/network/config/multiple-nics/ceph-storage.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including 
environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string ExternalNetworkVlanID: default: 10 description: Vlan ID for the external network traffic. @@ -49,6 +53,10 @@ parameters: default: 50 description: Vlan ID for the tenant network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ExternalInterfaceDefaultRoute: default: '10.0.0.1' description: default route for the external network @@ -58,12 +66,12 @@ parameters: description: The subnet CIDR of the control plane network. type: string ControlPlaneDefaultRoute: # Override this via parameter_defaults - description: The subnet CIDR of the control plane network. + description: The default route of the control plane network. type: string DnsServers: # Override this via parameter_defaults default: [] description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. - type: json + type: comma_delimited_list EC2MetadataIp: # Override this via parameter_defaults description: The IP address of the EC2 metadata server. type: string @@ -109,6 +117,14 @@ resources: addresses: - ip_netmask: {get_param: StorageMgmtIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: interface + # name: nic7 + # use_dhcp: false + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/multiple-nics/cinder-storage.yaml b/network/config/multiple-nics/cinder-storage.yaml index fdb6c9d8..06b4b83f 100644 --- a/network/config/multiple-nics/cinder-storage.yaml +++ b/network/config/multiple-nics/cinder-storage.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string ExternalNetworkVlanID: default: 10 description: Vlan ID for the external network traffic. @@ -49,6 +53,10 @@ parameters: default: 50 description: Vlan ID for the tenant network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ExternalInterfaceDefaultRoute: default: '10.0.0.1' description: default route for the external network @@ -58,12 +66,12 @@ parameters: description: The subnet CIDR of the control plane network. type: string ControlPlaneDefaultRoute: # Override this via parameter_defaults - description: The subnet CIDR of the control plane network. + description: The default route of the control plane network. type: string DnsServers: # Override this via parameter_defaults default: [] description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. - type: json + type: comma_delimited_list EC2MetadataIp: # Override this via parameter_defaults description: The IP address of the EC2 metadata server. 
type: string @@ -116,6 +124,14 @@ resources: addresses: - ip_netmask: {get_param: InternalApiIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: interface + # name: nic7 + # use_dhcp: false + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/multiple-nics/compute.yaml b/network/config/multiple-nics/compute.yaml index 0032a287..97eef52b 100644 --- a/network/config/multiple-nics/compute.yaml +++ b/network/config/multiple-nics/compute.yaml @@ -29,6 +29,14 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number InternalApiNetworkVlanID: default: 20 description: Vlan ID for the internal_api network traffic. @@ -37,21 +45,33 @@ parameters: default: 30 description: Vlan ID for the storage network traffic. type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number TenantNetworkVlanID: default: 50 description: Vlan ID for the tenant network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number + ExternalInterfaceDefaultRoute: + default: '10.0.0.1' + description: default route for the external network + type: string ControlPlaneSubnetCidr: # Override this via parameter_defaults default: '24' description: The subnet CIDR of the control plane network. type: string ControlPlaneDefaultRoute: # Override this via parameter_defaults - description: The subnet CIDR of the control plane network. + description: The default route of the control plane network. type: string DnsServers: # Override this via parameter_defaults default: [] description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. - type: json + type: comma_delimited_list EC2MetadataIp: # Override this via parameter_defaults description: The IP address of the EC2 metadata server. type: string @@ -112,6 +132,14 @@ resources: use_dhcp: false # force the MAC address of the bridge to this interface primary: true + # Uncomment when including environments/network-management.yaml + #- + # type: interface + # name: nic7 + # use_dhcp: false + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/multiple-nics/controller.yaml b/network/config/multiple-nics/controller.yaml index 63f53a1f..32851cfb 100644 --- a/network/config/multiple-nics/controller.yaml +++ b/network/config/multiple-nics/controller.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string ExternalNetworkVlanID: default: 10 description: Vlan ID for the external network traffic. @@ -49,6 +53,10 @@ parameters: default: 50 description: Vlan ID for the tenant network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. 
+ type: number ExternalInterfaceDefaultRoute: default: '10.0.0.1' description: default route for the external network @@ -58,12 +66,12 @@ parameters: description: The subnet CIDR of the control plane network. type: string ControlPlaneDefaultRoute: # Override this via parameter_defaults - description: The subnet CIDR of the control plane network. + description: The default route of the control plane network. type: string DnsServers: # Override this via parameter_defaults default: [] description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. - type: json + type: comma_delimited_list EC2MetadataIp: # Override this via parameter_defaults description: The IP address of the EC2 metadata server. type: string @@ -131,13 +139,14 @@ resources: - type: ovs_bridge name: {get_input: bridge_name} + dns_servers: {get_param: DnsServers} use_dhcp: false addresses: - ip_netmask: {get_param: ExternalIpSubnet} routes: - - ip_netmask: 0.0.0.0/0 + default: true next_hop: {get_param: ExternalInterfaceDefaultRoute} members: - @@ -145,6 +154,14 @@ resources: name: nic6 # force the MAC address of the bridge to this interface primary: true + # Uncomment when including environments/network-management.yaml + #- + # type: interface + # name: nic7 + # use_dhcp: false + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/multiple-nics/swift-storage.yaml b/network/config/multiple-nics/swift-storage.yaml index 00e4f353..4d5a7b99 100644 --- a/network/config/multiple-nics/swift-storage.yaml +++ b/network/config/multiple-nics/swift-storage.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string ExternalNetworkVlanID: default: 10 description: Vlan ID for the external network traffic. @@ -49,6 +53,10 @@ parameters: default: 50 description: Vlan ID for the tenant network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ExternalInterfaceDefaultRoute: default: '10.0.0.1' description: default route for the external network @@ -58,12 +66,12 @@ parameters: description: The subnet CIDR of the control plane network. type: string ControlPlaneDefaultRoute: # Override this via parameter_defaults - description: The subnet CIDR of the control plane network. + description: The default route of the control plane network. type: string DnsServers: # Override this via parameter_defaults default: [] description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. - type: json + type: comma_delimited_list EC2MetadataIp: # Override this via parameter_defaults description: The IP address of the EC2 metadata server. 
type: string @@ -116,6 +124,14 @@ resources: addresses: - ip_netmask: {get_param: InternalApiIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: interface + # name: nic7 + # use_dhcp: false + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/single-nic-vlans/README.md b/network/config/single-nic-vlans/README.md index 6f128650..f9c2e512 100644 --- a/network/config/single-nic-vlans/README.md +++ b/network/config/single-nic-vlans/README.md @@ -1,9 +1,9 @@ This directory contains Heat templates to help configure -Vlans on a single NICs for each Overcloud role. +Vlans on a single NIC for each Overcloud role. There are two versions of the controller role template, one with an external network interface, and another without. If the -external network interface is not configured the ctlplane address +external network interface is not configured, the ctlplane address ranges will be used for external (public) network traffic. Configuration @@ -23,9 +23,27 @@ Or use this Heat environment file: environments/net-single-nic-with-vlans.yaml - Configuration with no External Network -------------------------------------- + Same as above except set the following value for the controller role: OS::TripleO::Controller::Net::SoftwareConfig: network/config/single-nic-vlans/controller-no-external.yaml + +Configuration with System Management Network +-------------------------------------------- + +To enable the optional System Management network, create a Heat environment +that looks something like this: + + resource\_registry: + OS::TripleO::Network::Management: ../network/management.yaml + OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml + OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml + OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml + OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml + OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml + +Or use this Heat environment file: + + environments/network-management.yaml diff --git a/network/config/single-nic-vlans/ceph-storage.yaml b/network/config/single-nic-vlans/ceph-storage.yaml index 5148c520..80bc32d3 100644 --- a/network/config/single-nic-vlans/ceph-storage.yaml +++ b/network/config/single-nic-vlans/ceph-storage.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string StorageNetworkVlanID: default: 30 description: Vlan ID for the storage network traffic. @@ -37,6 +41,10 @@ parameters: default: 40 description: Vlan ID for the storage mgmt network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ControlPlaneSubnetCidr: # Override this via parameter_defaults default: '24' description: The subnet CIDR of the control plane network. 
@@ -97,6 +105,13 @@ resources: addresses: - ip_netmask: {get_param: StorageMgmtIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: vlan + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/single-nic-vlans/cinder-storage.yaml b/network/config/single-nic-vlans/cinder-storage.yaml index e79a9f4b..e509443a 100644 --- a/network/config/single-nic-vlans/cinder-storage.yaml +++ b/network/config/single-nic-vlans/cinder-storage.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string InternalApiNetworkVlanID: default: 20 description: Vlan ID for the internal_api network traffic. @@ -41,6 +45,10 @@ parameters: default: 40 description: Vlan ID for the storage mgmt network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ControlPlaneSubnetCidr: # Override this via parameter_defaults default: '24' description: The subnet CIDR of the control plane network. @@ -107,6 +115,13 @@ resources: addresses: - ip_netmask: {get_param: StorageMgmtIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: vlan + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/single-nic-vlans/compute.yaml b/network/config/single-nic-vlans/compute.yaml index 4e93b31c..8cf6825d 100644 --- a/network/config/single-nic-vlans/compute.yaml +++ b/network/config/single-nic-vlans/compute.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string InternalApiNetworkVlanID: default: 20 description: Vlan ID for the internal_api network traffic. @@ -41,6 +45,10 @@ parameters: default: 50 description: Vlan ID for the tenant network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ControlPlaneSubnetCidr: # Override this via parameter_defaults default: '24' description: The subnet CIDR of the control plane network. 
@@ -107,6 +115,13 @@ resources: addresses: - ip_netmask: {get_param: TenantIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: vlan + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/single-nic-vlans/controller-no-external.yaml b/network/config/single-nic-vlans/controller-no-external.yaml index faf9e9c2..eb5e1e5a 100644 --- a/network/config/single-nic-vlans/controller-no-external.yaml +++ b/network/config/single-nic-vlans/controller-no-external.yaml @@ -25,6 +25,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string ExternalNetworkVlanID: default: 10 description: Vlan ID for the external network traffic. @@ -45,6 +49,10 @@ parameters: default: 50 description: Vlan ID for the tenant network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ExternalInterfaceDefaultRoute: default: '10.0.0.1' description: default route for the external network @@ -92,6 +100,13 @@ resources: addresses: - ip_netmask: {get_param: TenantIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: vlan + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/single-nic-vlans/controller.yaml b/network/config/single-nic-vlans/controller.yaml index 3c536d67..3b22b36b 100644 --- a/network/config/single-nic-vlans/controller.yaml +++ b/network/config/single-nic-vlans/controller.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string ExternalNetworkVlanID: default: 10 description: Vlan ID for the external network traffic. @@ -49,6 +53,10 @@ parameters: default: 50 description: Vlan ID for the tenant network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ExternalInterfaceDefaultRoute: default: '10.0.0.1' description: default route for the external network @@ -129,6 +137,12 @@ resources: addresses: - ip_netmask: {get_param: TenantIpSubnet} + #- # Uncomment when including environments/network-management.yaml + # type: vlan + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/config/single-nic-vlans/swift-storage.yaml b/network/config/single-nic-vlans/swift-storage.yaml index 83b3304f..efc03393 100644 --- a/network/config/single-nic-vlans/swift-storage.yaml +++ b/network/config/single-nic-vlans/swift-storage.yaml @@ -29,6 +29,10 @@ parameters: default: '' description: IP address/subnet on the tenant network type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string InternalApiNetworkVlanID: default: 20 description: Vlan ID for the internal_api network traffic. 
@@ -41,6 +45,10 @@ parameters: default: 40 description: Vlan ID for the storage mgmt network traffic. type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number ControlPlaneSubnetCidr: # Override this via parameter_defaults default: '24' description: The subnet CIDR of the control plane network. @@ -107,6 +115,13 @@ resources: addresses: - ip_netmask: {get_param: StorageMgmtIpSubnet} + # Uncomment when including environments/network-management.yaml + #- + # type: vlan + # vlan_id: {get_param: ManagementNetworkVlanID} + # addresses: + # - + # ip_netmask: {get_param: ManagementIpSubnet} outputs: OS::stack_id: diff --git a/network/external.yaml b/network/external.yaml index e8f92a5e..3b24da7e 100644 --- a/network/external.yaml +++ b/network/external.yaml @@ -15,7 +15,7 @@ parameters: type: json ExternalNetAdminStateUp: default: false - description: This admin state of of the network. + description: This admin state of the network. type: boolean ExternalNetEnableDHCP: default: false diff --git a/network/internal_api.yaml b/network/internal_api.yaml index 69154bef..6f8aa3a8 100644 --- a/network/internal_api.yaml +++ b/network/internal_api.yaml @@ -15,7 +15,7 @@ parameters: type: json InternalApiNetAdminStateUp: default: false - description: This admin state of of the network. + description: This admin state of the network. type: boolean InternalApiNetEnableDHCP: default: false diff --git a/network/management.yaml b/network/management.yaml new file mode 100644 index 00000000..9bfaafa2 --- /dev/null +++ b/network/management.yaml @@ -0,0 +1,64 @@ +heat_template_version: 2015-04-30 + +description: > + Management network. System administration, SSH, DNS, NTP, etc. This network + would usually be the default gateway for the non-controller nodes. + +parameters: + # the defaults here work for static IP assignment (IPAM) only + ManagementNetCidr: + default: '10.0.1.0/24' + description: Cidr for the management network. + type: string + ManagementNetValueSpecs: + default: {'provider:physical_network': 'management', 'provider:network_type': 'flat'} + description: Value specs for the management network. + type: string + ManagementNetAdminStateUp: + default: false + description: This admin state of of the network. + type: boolean + ManagementNetEnableDHCP: + default: false + description: Whether to enable DHCP on the associated subnet. + type: boolean + ManagementNetShared: + default: false + description: Whether this network is shared across all tenants. + type: boolean + ManagementNetName: + default: management + description: The name of the management network. + type: string + ManagementSubnetName: + default: management_subnet + description: The name of the management subnet in Neutron. + type: string + ManagementAllocationPools: + default: [{'start': '10.0.1.4', 'end': '10.0.1.250'}] + description: Ip allocation pool range for the management network. 
+ type: json + +resources: + ManagementNetwork: + type: OS::Neutron::Net + properties: + admin_state_up: {get_param: ManagementNetAdminStateUp} + name: {get_param: ManagementNetName} + shared: {get_param: ManagementNetShared} + value_specs: {get_param: ManagementNetValueSpecs} + + ManagementSubnet: + type: OS::Neutron::Subnet + properties: + cidr: {get_param: ManagementNetCidr} + enable_dhcp: {get_param: ManagementNetEnableDHCP} + name: {get_param: ManagementSubnetName} + network: {get_resource: ManagementNetwork} + allocation_pools: {get_param: ManagementAllocationPools} + +outputs: + OS::stack_id: + description: Neutron management network + value: {get_resource: ManagementNetwork} + diff --git a/network/networks.yaml b/network/networks.yaml index 6618af38..ab50ae11 100644 --- a/network/networks.yaml +++ b/network/networks.yaml @@ -18,3 +18,6 @@ resources: TenantNetwork: type: OS::TripleO::Network::Tenant + + ManagementNetwork: + type: OS::TripleO::Network::Management diff --git a/network/ports/ctlplane_vip.yaml b/network/ports/ctlplane_vip.yaml index 3e949f41..7a7043bd 100644 --- a/network/ports/ctlplane_vip.yaml +++ b/network/ports/ctlplane_vip.yaml @@ -5,6 +5,10 @@ description: > The IP address will be chosen automatically if FixedIPs is empty. parameters: + ServiceName: # Here for compatibility with from_service.yaml + description: Name of the service to lookup + default: '' + type: string NetworkName: description: # Here for compatibility with isolated networks default: ctlplane @@ -13,7 +17,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string diff --git a/network/ports/external.yaml b/network/ports/external.yaml index 1e2fff68..7624eb9f 100644 --- a/network/ports/external.yaml +++ b/network/ports/external.yaml @@ -13,7 +13,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string @@ -27,6 +27,12 @@ parameters: [{'ip_address':'1.2.3.4'}] default: [] type: json + IPPool: # Here for compatibility with from_pool.yaml + default: {} + type: json + NodeIndex: # Here for compatibility with from_pool.yaml + default: 0 + type: number resources: diff --git a/network/ports/external_from_pool.yaml b/network/ports/external_from_pool.yaml new file mode 100644 index 00000000..8e9dc7c2 --- /dev/null +++ b/network/ports/external_from_pool.yaml @@ -0,0 +1,45 @@ +heat_template_version: 2015-04-30 + +description: > + Returns an IP from a network mapped list of IPs + +parameters: + ExternalNetName: + description: Name of the external network + default: external + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + IPPool: + default: {} + description: A network mapped list of IPs + type: json + NodeIndex: + default: 0 + description: Index of the IP to get from Pool + type: number + ExternalNetCidr: + default: '10.0.0.0/24' + description: Cidr for the external network. 
+ type: string + +outputs: + ip_address: + description: external network IP + value: {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]} + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the external network IP + value: + list_join: + - '' + - - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]} + - '/' + - {get_param: [ExternalNetCidr, -2]} + - {get_param: [ExternalNetCidr, -1]} diff --git a/network/ports/from_service.yaml b/network/ports/from_service.yaml new file mode 100644 index 00000000..6b669f41 --- /dev/null +++ b/network/ports/from_service.yaml @@ -0,0 +1,34 @@ +heat_template_version: 2015-04-30 + +description: > + Returns an IP from a service mapped list of IPs + +parameters: + ServiceName: + description: Name of the service to lookup + default: '' + type: string + NetworkName: # Here for compatability with ctlplane_vip.yaml + description: Name of the network where the VIP will be created + default: ctlplane + type: string + PortName: # Here for compatability with ctlplane_vip.yaml + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with ctlplane_vip.yaml + description: IP address on the control plane + default: '' + type: string + ControlPlaneNetwork: # Here for compatability with ctlplane_vip.yaml + description: The name of the undercloud Neutron control plane + default: ctlplane + type: string + ServiceVips: + default: {} + type: json + +outputs: + ip_address: + description: network IP + value: {get_param: [ServiceVips, {get_param: ServiceName}]} diff --git a/network/ports/internal_api.yaml b/network/ports/internal_api.yaml index d528b327..f84e8f71 100644 --- a/network/ports/internal_api.yaml +++ b/network/ports/internal_api.yaml @@ -12,7 +12,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string @@ -22,6 +22,12 @@ parameters: [{'ip_address':'1.2.3.4'}] default: [] type: json + IPPool: # Here for compatibility with from_pool.yaml + default: {} + type: json + NodeIndex: # Here for compatibility with from_pool.yaml + default: 0 + type: number resources: diff --git a/network/ports/internal_api_from_pool.yaml b/network/ports/internal_api_from_pool.yaml new file mode 100644 index 00000000..b98e1fb1 --- /dev/null +++ b/network/ports/internal_api_from_pool.yaml @@ -0,0 +1,45 @@ +heat_template_version: 2015-04-30 + +description: > + Returns an IP from a network mapped list of IPs + +parameters: + InternalApiNetName: + description: Name of the internal API network + default: internal_api + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + IPPool: + default: {} + description: A network mapped list of IPs + type: json + NodeIndex: + default: 0 + description: Index of the IP to get from Pool + type: number + InternalApiNetCidr: + default: '172.16.2.0/24' + description: Cidr for the internal API network. 
+ type: string + +outputs: + ip_address: + description: internal API network IP + value: {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]} + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the internal API network IP + value: + list_join: + - '' + - - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]} + - '/' + - {get_param: [InternalApiNetCidr, -2]} + - {get_param: [InternalApiNetCidr, -1]} diff --git a/network/ports/management.yaml b/network/ports/management.yaml new file mode 100644 index 00000000..1d15ca60 --- /dev/null +++ b/network/ports/management.yaml @@ -0,0 +1,42 @@ +heat_template_version: 2015-04-30 + +description: > + Creates a port on the management network. The IP address will be chosen + automatically if FixedIPs is empty. + +parameters: + ManagementNetName: + description: Name of the management neutron network + default: management + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatibility with noop.yaml + description: IP address on the control plane + type: string + +resources: + + ManagementPort: + type: OS::Neutron::Port + properties: + network: {get_param: ManagementNetName} + name: {get_param: PortName} + replacement_policy: AUTO + +outputs: + ip_address: + description: management network IP + value: {get_attr: [ManagementPort, fixed_ips, 0, ip_address]} + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the management network IP + value: + list_join: + - '' + - - {get_attr: [ManagementPort, fixed_ips, 0, ip_address]} + - '/' + - {get_attr: [ManagementPort, subnets, 0, cidr, -2]} + - {get_attr: [ManagementPort, subnets, 0, cidr, -1]} diff --git a/network/ports/net_ip_list_map.yaml b/network/ports/net_ip_list_map.yaml index 257d3f9b..32272bd6 100644 --- a/network/ports/net_ip_list_map.yaml +++ b/network/ports/net_ip_list_map.yaml @@ -19,6 +19,9 @@ parameters: TenantIpList: default: [] type: comma_delimited_list + ManagementIpList: + default: [] + type: comma_delimited_list outputs: net_ip_map: @@ -32,3 +35,4 @@ outputs: storage: {get_param: StorageIpList} storage_mgmt: {get_param: StorageMgmtIpList} tenant: {get_param: TenantIpList} + management: {get_param: ManagementIpList} diff --git a/network/ports/net_ip_map.yaml b/network/ports/net_ip_map.yaml index 7aaed160..c6386025 100644 --- a/network/ports/net_ip_map.yaml +++ b/network/ports/net_ip_map.yaml @@ -19,6 +19,9 @@ parameters: TenantIp: default: '' type: string + ManagementIp: + default: '' + type: string outputs: net_ip_map: @@ -32,3 +35,4 @@ outputs: storage: {get_param: StorageIp} storage_mgmt: {get_param: StorageMgmtIp} tenant: {get_param: TenantIp} + management: {get_param: ManagementIp} diff --git a/network/ports/net_ip_subnet_map.yaml b/network/ports/net_ip_subnet_map.yaml index cf59adb3..2f933eaa 100644 --- a/network/ports/net_ip_subnet_map.yaml +++ b/network/ports/net_ip_subnet_map.yaml @@ -19,6 +19,9 @@ parameters: TenantIpSubnet: default: '' type: string + ManagementIpSubnet: + default: '' + type: string ControlPlaneSubnetCidr: # Override this via parameter_defaults default: '24' description: The subnet CIDR of the control plane network. 
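The new *_from_pool.yaml port templates perform no allocation at all: they index a network-keyed map of pre-assigned addresses (IPPool) by network name and NodeIndex, then rebuild the /prefix suffix from the matching *NetCidr parameter. A minimal sketch of the data they expect, where the parameter_defaults wiring and the ControllerIPs name are illustrative assumptions rather than anything defined in this patch:

  parameter_defaults:
    ControllerIPs:              # fed into the templates' IPPool parameter (assumed wiring)
      external:
        - 10.0.0.251            # NodeIndex 0 -> ip_address 10.0.0.251, ip_subnet 10.0.0.251/24
        - 10.0.0.252            # NodeIndex 1
      internal_api:
        - 172.16.2.251
        - 172.16.2.252

The ip_subnet output simply joins the pooled address with the last two characters of the CIDR string (for '10.0.0.0/24' that is '2' and '4'), which is exactly the two-digit-prefix limitation the FIXME comments in these templates call out.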
@@ -41,3 +44,4 @@ outputs: storage: {get_param: StorageIpSubnet} storage_mgmt: {get_param: StorageMgmtIpSubnet} tenant: {get_param: TenantIpSubnet} + management: {get_param: ManagementIpSubnet} diff --git a/network/ports/net_vip_map_external.yaml b/network/ports/net_vip_map_external.yaml index 36426b32..23e1f992 100644 --- a/network/ports/net_vip_map_external.yaml +++ b/network/ports/net_vip_map_external.yaml @@ -2,7 +2,7 @@ heat_template_version: 2015-04-30 parameters: # Set these via parameter defaults to configure external VIPs - ControlNetworkVip: + ControlPlaneIP: default: '' type: string ExternalNetworkVip: @@ -43,7 +43,7 @@ outputs: A Hash containing a mapping of network names to assigned IPs for a specific machine. value: - ctlplane: {get_param: ControlNetworkVip} + ctlplane: {get_param: ControlPlaneIP} external: {get_param: ExternalNetworkVip} internal_api: {get_param: InternalApiNetworkVip} storage: {get_param: StorageNetworkVip} diff --git a/network/ports/noop.yaml b/network/ports/noop.yaml index 31ee6f3c..ac946cd9 100644 --- a/network/ports/noop.yaml +++ b/network/ports/noop.yaml @@ -4,6 +4,10 @@ description: > Returns the control plane port (provisioning network) as the ip_address. parameters: + ServiceName: # Here for compatibility with from_service.yaml + description: Name of the service to lookup + default: '' + type: string ControlPlaneIP: description: IP address on the control plane type: string @@ -16,7 +20,7 @@ parameters: default: '' type: string NetworkName: - description: # Here for compatability with vip.yaml + description: # Here for compatibility with vip.yaml default: '' type: string FixedIPs: @@ -27,6 +31,14 @@ parameters: default: '24' description: The subnet CIDR of the control plane network. type: string + IPPool: # Here for compatibility with from_pool.yaml + default: {} + description: A network mapped list of IPs + type: json + NodeIndex: # Here for compatibility with from_pool.yaml + default: 0 + description: Index of the IP to get from Pool + type: number outputs: ip_address: diff --git a/network/ports/storage.yaml b/network/ports/storage.yaml index 88fb537c..a07e5a4f 100644 --- a/network/ports/storage.yaml +++ b/network/ports/storage.yaml @@ -12,7 +12,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string @@ -22,6 +22,12 @@ parameters: [{'ip_address':'1.2.3.4'}] default: [] type: json + IPPool: # Here for compatibility with from_pool.yaml + default: {} + type: json + NodeIndex: # Here for compatibility with from_pool.yaml + default: 0 + type: number resources: diff --git a/network/ports/storage_from_pool.yaml b/network/ports/storage_from_pool.yaml new file mode 100644 index 00000000..668bc6f6 --- /dev/null +++ b/network/ports/storage_from_pool.yaml @@ -0,0 +1,45 @@ +heat_template_version: 2015-04-30 + +description: > + Returns an IP from a network mapped list of IPs + +parameters: + StorageNetName: + description: Name of the storage network + default: storage + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + IPPool: + default: {} + description: A network mapped list of IPs + type: json + NodeIndex: + default: 0 + description: Index of the IP to get from Pool + type: number + 
StorageNetCidr: + default: '172.16.1.0/24' + description: Cidr for the storage network. + type: string + +outputs: + ip_address: + description: storage network IP + value: {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]} + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the storage network IP + value: + list_join: + - '' + - - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]} + - '/' + - {get_param: [StorageNetCidr, -2]} + - {get_param: [StorageNetCidr, -1]} diff --git a/network/ports/storage_mgmt.yaml b/network/ports/storage_mgmt.yaml index c98a21ef..4890bf5a 100644 --- a/network/ports/storage_mgmt.yaml +++ b/network/ports/storage_mgmt.yaml @@ -12,7 +12,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string @@ -22,6 +22,12 @@ parameters: [{'ip_address':'1.2.3.4'}] default: [] type: json + IPPool: # Here for compatibility with from_pool.yaml + default: {} + type: json + NodeIndex: # Here for compatibility with from_pool.yaml + default: 0 + type: number resources: diff --git a/network/ports/storage_mgmt_from_pool.yaml b/network/ports/storage_mgmt_from_pool.yaml new file mode 100644 index 00000000..bea87105 --- /dev/null +++ b/network/ports/storage_mgmt_from_pool.yaml @@ -0,0 +1,45 @@ +heat_template_version: 2015-04-30 + +description: > + Returns an IP from a network mapped list of IPs + +parameters: + StorageMgmtNetName: + description: Name of the storage MGMT network + default: storage_mgmt + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + IPPool: + default: {} + description: A network mapped list of IPs + type: json + NodeIndex: + default: 0 + description: Index of the IP to get from Pool + type: number + StorageMgmtNetCidr: + default: '172.16.3.0/24' + description: Cidr for the storage MGMT network. + type: string + +outputs: + ip_address: + description: storage MGMT network IP + value: {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]} + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) 
+ description: IP/Subnet CIDR for the storage MGMT network IP + value: + list_join: + - '' + - - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]} + - '/' + - {get_param: [StorageMgmtNetCidr, -2]} + - {get_param: [StorageMgmtNetCidr, -1]} diff --git a/network/ports/tenant.yaml b/network/ports/tenant.yaml index 94408ca2..86c58f2f 100644 --- a/network/ports/tenant.yaml +++ b/network/ports/tenant.yaml @@ -12,7 +12,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string @@ -22,6 +22,12 @@ parameters: [{'ip_address':'1.2.3.4'}] default: [] type: json + IPPool: # Here for compatibility with from_pool.yaml + default: {} + type: json + NodeIndex: # Here for compatibility with from_pool.yaml + default: 0 + type: number resources: diff --git a/network/ports/tenant_from_pool.yaml b/network/ports/tenant_from_pool.yaml new file mode 100644 index 00000000..29303bb6 --- /dev/null +++ b/network/ports/tenant_from_pool.yaml @@ -0,0 +1,45 @@ +heat_template_version: 2015-04-30 + +description: > + Returns an IP from a network mapped list of IPs + +parameters: + TenantNetName: + description: Name of the tenant network + default: tenant + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + IPPool: + default: {} + description: A network mapped list of IPs + type: json + NodeIndex: + default: 0 + description: Index of the IP to get from Pool + type: number + TenantNetCidr: + default: '172.16.0.0/24' + description: Cidr for the tenant network. + type: string + +outputs: + ip_address: + description: tenant network IP + value: {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]} + ip_subnet: + # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?) + description: IP/Subnet CIDR for the tenant network IP + value: + list_join: + - '' + - - {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]} + - '/' + - {get_param: [TenantNetCidr, -2]} + - {get_param: [TenantNetCidr, -1]} diff --git a/network/ports/vip.yaml b/network/ports/vip.yaml index 56efc178..9bb6cde2 100644 --- a/network/ports/vip.yaml +++ b/network/ports/vip.yaml @@ -5,6 +5,10 @@ description: > The IP address will be chosen automatically if FixedIPs is empty. parameters: + ServiceName: # Here for compatibility with from_service.yaml + description: Name of the service to lookup + default: '' + type: string NetworkName: description: Name of the network where the VIP will be created default: internal_api @@ -13,7 +17,7 @@ parameters: description: Name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with noop.yaml + ControlPlaneIP: # Here for compatibility with noop.yaml description: IP address on the control plane default: '' type: string diff --git a/network/storage.yaml b/network/storage.yaml index 60b779e0..dc9f35ea 100644 --- a/network/storage.yaml +++ b/network/storage.yaml @@ -15,7 +15,7 @@ parameters: type: json StorageNetAdminStateUp: default: false - description: This admin state of of the network. + description: This admin state of the network. 
type: boolean StorageNetEnableDHCP: default: false diff --git a/network/storage_mgmt.yaml b/network/storage_mgmt.yaml index 043bc87b..59933c8c 100644 --- a/network/storage_mgmt.yaml +++ b/network/storage_mgmt.yaml @@ -15,7 +15,7 @@ parameters: type: json StorageMgmtNetAdminStateUp: default: false - description: This admin state of of the network. + description: This admin state of the network. type: boolean StorageMgmtNetEnableDHCP: default: false diff --git a/network/tenant.yaml b/network/tenant.yaml index daf5cb75..6fe96121 100644 --- a/network/tenant.yaml +++ b/network/tenant.yaml @@ -15,7 +15,7 @@ parameters: type: json TenantNetAdminStateUp: default: false - description: This admin state of of the network. + description: This admin state of the network. type: boolean TenantNetEnableDHCP: default: false diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml index c072c292..888a3c89 100644 --- a/overcloud-resource-registry-puppet.yaml +++ b/overcloud-resource-registry-puppet.yaml @@ -21,7 +21,11 @@ resource_registry: OS::TripleO::CephClusterConfig::SoftwareConfig: puppet/ceph-cluster-config.yaml OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml OS::TripleO::BootstrapNode::SoftwareConfig: puppet/bootstrap-config.yaml + + # Tasks (for internal TripleO usage) OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml + OS::TripleO::Tasks::ControllerPrePuppet: extraconfig/tasks/noop.yaml + OS::TripleO::Tasks::ControllerPostPuppet: extraconfig/tasks/noop.yaml # This creates the "heat-admin" user for all OS images by default # To disable, replace with firstboot/userdata_default.yaml @@ -57,40 +61,59 @@ resource_registry: OS::TripleO::Network::StorageMgmt: network/noop.yaml OS::TripleO::Network::Storage: network/noop.yaml OS::TripleO::Network::Tenant: network/noop.yaml + OS::TripleO::Network::Management: network/noop.yaml OS::TripleO::Network::Ports::NetVipMap: network/ports/net_ip_map.yaml OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml OS::TripleO::Network::Ports::NetIpSubnetMap: network/ports/net_ip_subnet_map.yaml OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml + # Port assignments for the VIPs + OS::TripleO::Network::Ports::ExternalVipPort: network/ports/noop.yaml + OS::TripleO::Network::Ports::InternalApiVipPort: network/ports/noop.yaml + OS::TripleO::Network::Ports::StorageVipPort: network/ports/noop.yaml + OS::TripleO::Network::Ports::StorageMgmtVipPort: network/ports/noop.yaml + OS::TripleO::Network::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml + # Port assignments for the controller role OS::TripleO::Controller::Ports::ExternalPort: network/ports/noop.yaml OS::TripleO::Controller::Ports::InternalApiPort: network/ports/noop.yaml OS::TripleO::Controller::Ports::StoragePort: network/ports/noop.yaml OS::TripleO::Controller::Ports::StorageMgmtPort: network/ports/noop.yaml OS::TripleO::Controller::Ports::TenantPort: network/ports/noop.yaml + OS::TripleO::Controller::Ports::ManagementPort: network/ports/noop.yaml # Port assignments for the compute role + OS::TripleO::Compute::Ports::ExternalPort: network/ports/noop.yaml OS::TripleO::Compute::Ports::InternalApiPort: network/ports/noop.yaml OS::TripleO::Compute::Ports::StoragePort: network/ports/noop.yaml + OS::TripleO::Compute::Ports::StorageMgmtPort: network/ports/noop.yaml OS::TripleO::Compute::Ports::TenantPort: network/ports/noop.yaml + OS::TripleO::Compute::Ports::ManagementPort: network/ports/noop.yaml # Port 
assignments for the ceph storage role + OS::TripleO::CephStorage::Ports::ExternalPort: network/ports/noop.yaml + OS::TripleO::CephStorage::Ports::InternalApiPort: network/ports/noop.yaml OS::TripleO::CephStorage::Ports::StoragePort: network/ports/noop.yaml OS::TripleO::CephStorage::Ports::StorageMgmtPort: network/ports/noop.yaml + OS::TripleO::CephStorage::Ports::TenantPort: network/ports/noop.yaml + OS::TripleO::CephStorage::Ports::ManagementPort: network/ports/noop.yaml # Port assignments for the swift storage role + OS::TripleO::SwiftStorage::Ports::ExternalPort: network/ports/noop.yaml OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml + OS::TripleO::SwiftStorage::Ports::TenantPort: network/ports/noop.yaml + OS::TripleO::SwiftStorage::Ports::ManagementPort: network/ports/noop.yaml # Port assignments for the block storage role + OS::TripleO::BlockStorage::Ports::ExternalPort: network/ports/noop.yaml OS::TripleO::BlockStorage::Ports::InternalApiPort: network/ports/noop.yaml OS::TripleO::BlockStorage::Ports::StoragePort: network/ports/noop.yaml OS::TripleO::BlockStorage::Ports::StorageMgmtPort: network/ports/noop.yaml - - # Port assignments for service virtual IPs for the controller role - OS::TripleO::Controller::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml + OS::TripleO::BlockStorage::Ports::TenantPort: network/ports/noop.yaml + OS::TripleO::BlockStorage::Ports::ManagementPort: network/ports/noop.yaml # Service Endpoint Mappings OS::TripleO::Endpoint: network/endpoints/endpoint.yaml diff --git a/overcloud-resource-registry.yaml b/overcloud-resource-registry.yaml deleted file mode 100644 index 11a33599..00000000 --- a/overcloud-resource-registry.yaml +++ /dev/null @@ -1,81 +0,0 @@ -resource_registry: - OS::TripleO::BlockStorage: os-apply-config/cinder-storage.yaml - OS::TripleO::BlockStorage::Net::SoftwareConfig: net-config-noop.yaml - OS::TripleO::Compute: os-apply-config/compute.yaml - OS::TripleO::Compute::Net::SoftwareConfig: net-config-noop.yaml - OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment - OS::TripleO::Controller: os-apply-config/controller.yaml - OS::TripleO::Controller::Net::SoftwareConfig: net-config-noop.yaml - OS::TripleO::ObjectStorage: os-apply-config/swift-storage.yaml - OS::TripleO::ObjectStorage::Net::SoftwareConfig: net-config-noop.yaml - OS::TripleO::CephStorage: os-apply-config/ceph-storage.yaml - OS::TripleO::CephStorage::Net::SoftwareConfig: net-config-noop.yaml - OS::TripleO::ControllerPostDeployment: os-apply-config/controller-post.yaml - OS::TripleO::ComputePostDeployment: os-apply-config/compute-post.yaml - OS::TripleO::ObjectStoragePostDeployment: os-apply-config/swift-storage-post.yaml - OS::TripleO::BlockStoragePostDeployment: os-apply-config/cinder-storage-post.yaml - OS::TripleO::CephStoragePostDeployment: os-apply-config/ceph-storage-post.yaml - OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig: os-apply-config/swift-devices-and-proxy-config.yaml - OS::TripleO::CephClusterConfig::SoftwareConfig: os-apply-config/ceph-cluster-config.yaml - OS::TripleO::AllNodes::SoftwareConfig: os-apply-config/all-nodes-config.yaml - OS::TripleO::BootstrapNode::SoftwareConfig: bootstrap-config.yaml - OS::TripleO::NodeUserData: firstboot/userdata_default.yaml - OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml - OS::TripleO::Tasks::PackageUpdate: 
extraconfig/tasks/yum_update.yaml - - # "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy - # phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when - # configuration with knowledge of all nodes in the cluster is required vs single - # node configuration in the pre_deploy step. - OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/default.yaml - - # TripleO overcloud networks - OS::TripleO::Network: network/networks.yaml - OS::TripleO::VipConfig: os-apply-config/vip-config.yaml - - OS::TripleO::Network::External: network/noop.yaml - OS::TripleO::Network::InternalApi: network/noop.yaml - OS::TripleO::Network::StorageMgmt: network/noop.yaml - OS::TripleO::Network::Storage: network/noop.yaml - OS::TripleO::Network::Tenant: network/noop.yaml - - OS::TripleO::Network::Ports::NetVipMap: network/ports/net_ip_map.yaml - OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml - OS::TripleO::Network::Ports::NetIpSubnetMap: network/ports/net_ip_subnet_map.yaml - OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml - - # Port assignments for the controller role - OS::TripleO::Controller::Ports::ExternalPort: network/ports/noop.yaml - OS::TripleO::Controller::Ports::InternalApiPort: network/ports/noop.yaml - OS::TripleO::Controller::Ports::StoragePort: network/ports/noop.yaml - OS::TripleO::Controller::Ports::StorageMgmtPort: network/ports/noop.yaml - OS::TripleO::Controller::Ports::TenantPort: network/ports/noop.yaml - - # Port assignments for the compute role - OS::TripleO::Compute::Ports::InternalApiPort: network/ports/noop.yaml - OS::TripleO::Compute::Ports::StoragePort: network/ports/noop.yaml - OS::TripleO::Compute::Ports::TenantPort: network/ports/noop.yaml - - # Port assignments for the ceph storage role - OS::TripleO::CephStorage::Ports::StoragePort: network/ports/noop.yaml - OS::TripleO::CephStorage::Ports::StorageMgmtPort: network/ports/noop.yaml - - # Port assignments for the swift storage role - OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml - OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml - OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml - - # Port assignments for the block storage role - OS::TripleO::BlockStorage::Ports::InternalApiPort: network/ports/noop.yaml - OS::TripleO::BlockStorage::Ports::StoragePort: network/ports/noop.yaml - OS::TripleO::BlockStorage::Ports::StorageMgmtPort: network/ports/noop.yaml - - # Port assignments for service virtual IPs for the controller role - OS::TripleO::Controller::Ports::RedisVipPort: network/ports/noop.yaml - - # Service Endpoint Mappings - OS::TripleO::Endpoint: network/endpoints/endpoint.yaml - OS::TripleO::EndpointMap: network/endpoints/endpoint_map.yaml - - # validation resources - OS::TripleO::AllNodes::Validation: os-apply-config/all-nodes-validation.yaml diff --git a/overcloud.yaml b/overcloud.yaml index 67636182..fe82bfd5 100644 --- a/overcloud.yaml +++ b/overcloud.yaml @@ -13,7 +13,6 @@ parameters: # Common parameters (not specific to a role) AdminPassword: - default: unset description: The password for the keystone admin account, used for monitoring, querying neutron etc. type: string hidden: true @@ -22,12 +21,10 @@ parameters: description: The ceilometer backend type. type: string CeilometerMeteringSecret: - default: unset description: Secret shared by the ceilometer services. 
type: string hidden: true CeilometerPassword: - default: unset description: The password for the ceilometer service account. type: string hidden: true @@ -93,7 +90,7 @@ parameters: type: string KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string constraints: - custom_constraint: nova.keypair @@ -109,7 +106,7 @@ parameters: to create provider networks (and we use this for the default floating network) - if changing this either use different post-install network scripts or be sure to keep 'datacentre' as a mapping network name. - type: string + type: comma_delimited_list default: "datacentre:br-ex" NeutronControlPlaneID: default: 'ctlplane' @@ -128,17 +125,16 @@ parameters: Enable/disable the L2 population feature in the Neutron agents. default: "False" NeutronFlatNetworks: - type: string + type: comma_delimited_list default: 'datacentre' description: > If set, flat networks to configure in neutron plugins. Defaults to 'datacentre' to permit external network creation. NeutronNetworkType: default: 'vxlan' - description: The tenant network type for Neutron, either gre or vxlan. - type: string + description: The tenant network type for Neutron. + type: comma_delimited_list NeutronPassword: - default: unset description: The password for the neutron service account, used by neutron agents. type: string hidden: true @@ -169,16 +165,14 @@ parameters: description: Whether to configure Neutron Distributed Virtual Routers type: string NeutronMetadataProxySharedSecret: - default: 'unset' description: Shared secret to prevent spoofing type: string hidden: true NeutronTunnelTypes: default: 'vxlan' description: | - The tunnel types for the Neutron tenant network. To specify multiple - values, use a comma separated string, like so: 'gre,vxlan' - type: string + The tunnel types for the Neutron tenant network. + type: comma_delimited_list NeutronTunnelIdRanges: description: | Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges @@ -211,9 +205,8 @@ parameters: NeutronMechanismDrivers: default: 'openvswitch' description: | - The mechanism drivers for the Neutron tenant network. To specify multiple - values, use a comma separated string, like so: 'openvswitch,l2_population' - type: string + The mechanism drivers for the Neutron tenant network. + type: comma_delimited_list NeutronAllowL3AgentFailover: default: 'False' description: Allow automatic l3-agent failover @@ -227,7 +220,6 @@ parameters: default: 1 description: The number of neutron dhcp agents to schedule per network NovaPassword: - default: unset description: The password for the nova service account, used by nova-api. type: string hidden: true @@ -279,7 +271,6 @@ parameters: description: The user name for SNMPd with readonly rights running on all Overcloud nodes type: string SnmpdReadonlyUserPassword: - default: unset description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true @@ -289,10 +280,15 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. 
+ type: json # Controller-specific params AdminToken: - default: unset description: The keystone auth secret. type: string hidden: true @@ -313,7 +309,6 @@ parameters: CinderEnableNfsBackend is true. type: comma_delimited_list CinderPassword: - default: unset description: The password for the cinder service account, used by cinder-api. type: string hidden: true @@ -362,6 +357,10 @@ parameters: default: true description: Whether to enable Swift Storage on the Controller type: boolean + ControllerSchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} ExtraConfig: default: {} description: | @@ -410,7 +409,6 @@ parameters: type: string default: noop GlancePassword: - default: unset description: The password for the glance service account, used by the glance services. type: string hidden: true @@ -422,15 +420,17 @@ parameters: constraints: - allowed_values: ['swift', 'file', 'rbd'] HeatPassword: - default: unset description: The password for the Heat service account, used by the Heat services. type: string hidden: true HeatStackDomainAdminPassword: description: Password for heat_domain_admin user. type: string - default: '' hidden: true + InstanceNameTemplate: + default: 'instance-%08x' + description: Template string to be used to generate instance names + type: string KeystoneCACertificate: default: '' description: Keystone self-signed certificate authority certificate. @@ -505,12 +505,10 @@ parameters: This should be int_public when a VLAN is being used. type: string SwiftHashSuffix: - default: unset description: A random string to be used as a salt when hashing to determine mappings in the ring. type: string hidden: true SwiftPassword: - default: unset description: The password for the swift service account, used by the swift proxy services. type: string hidden: true @@ -575,6 +573,10 @@ parameters: default: '' description: Libvirt VIF driver configuration for the network type: string + NovaComputeSchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} NovaEnableRbdBackend: default: false description: Whether to enable or not the Rbd backend for Nova @@ -649,6 +651,11 @@ parameters: BlockStorage specific configuration to inject into the cluster. Same structure as ExtraConfig. type: json + BlockStorageSchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} + # Object storage specific parameters ObjectStorageCount: @@ -668,7 +675,10 @@ parameters: ObjectStorage specific configuration to inject into the cluster. Same structure as ExtraConfig. type: json - + ObjectStorageSchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} # Ceph storage specific parameters CephStorageCount: @@ -689,6 +699,11 @@ parameters: CephStorage specific configuration to inject into the cluster. Same structure as ExtraConfig. 
type: json + CephStorageSchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} + # Hostname format for each role # Note %index% is translated into the index of the node, e.g 0/1/2 etc @@ -844,6 +859,7 @@ resources: HorizonSecret: {get_resource: HorizonSecret} Image: {get_param: controllerImage} ImageUpdatePolicy: {get_param: ImageUpdatePolicy} + InstanceNameTemplate: {get_param: InstanceNameTemplate} KeyName: {get_param: KeyName} KeystoneCACertificate: {get_param: KeystoneCACertificate} KeystoneSigningCertificate: {get_param: KeystoneSigningCertificate} @@ -924,6 +940,8 @@ resources: params: '%stackname%': {get_param: 'OS::stack_name'} NodeIndex: '%index%' + ServerMetadata: {get_param: ServerMetadata} + SchedulerHints: {get_param: ControllerSchedulerHints} Compute: type: OS::Heat::ResourceGroup @@ -998,6 +1016,8 @@ resources: params: '%stackname%': {get_param: 'OS::stack_name'} CloudDomain: {get_param: CloudDomain} + ServerMetadata: {get_param: ServerMetadata} + SchedulerHints: {get_param: NovaComputeSchedulerHints} BlockStorage: type: OS::Heat::ResourceGroup @@ -1036,6 +1056,8 @@ resources: ExtraConfig: {get_param: ExtraConfig} BlockStorageExtraConfig: {get_param: BlockStorageExtraConfig} CloudDomain: {get_param: CloudDomain} + ServerMetadata: {get_param: ServerMetadata} + SchedulerHints: {get_param: BlockStorageSchedulerHints} ObjectStorage: type: OS::Heat::ResourceGroup @@ -1065,6 +1087,8 @@ resources: ExtraConfig: {get_param: ExtraConfig} ObjectStorageExtraConfig: {get_param: ObjectStorageExtraConfig} CloudDomain: {get_param: CloudDomain} + ServerMetadata: {get_param: ServerMetadata} + SchedulerHints: {get_param: ObjectStorageSchedulerHints} CephStorage: type: OS::Heat::ResourceGroup @@ -1089,6 +1113,8 @@ resources: ExtraConfig: {get_param: ExtraConfig} CephStorageExtraConfig: {get_param: CephStorageExtraConfig} CloudDomain: {get_param: CloudDomain} + ServerMetadata: {get_param: ServerMetadata} + SchedulerHints: {get_param: CephStorageSchedulerHints} ControllerIpListMap: type: OS::TripleO::Network::Ports::NetIpListMap @@ -1099,6 +1125,7 @@ resources: StorageIpList: {get_attr: [Controller, storage_ip_address]} StorageMgmtIpList: {get_attr: [Controller, storage_mgmt_ip_address]} TenantIpList: {get_attr: [Controller, tenant_ip_address]} + ManagementIpList: {get_attr: [Controller, management_ip_address]} allNodesConfig: type: OS::TripleO::AllNodes::SoftwareConfig @@ -1127,6 +1154,8 @@ resources: neutron_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} keystone_public_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} keystone_admin_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} + DeployIdentifier: {get_param: DeployIdentifier} + UpdateIdentifier: {get_param: UpdateIdentifier} MysqlRootPassword: type: OS::Heat::RandomString @@ -1159,17 +1188,18 @@ resources: RedisVirtualIP: depends_on: Networks - type: OS::TripleO::Controller::Ports::RedisVipPort + type: OS::TripleO::Network::Ports::RedisVipPort properties: ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} ControlPlaneNetwork: {get_param: NeutronControlPlaneID} PortName: redis_virtual_ip NetworkName: {get_param: [ServiceNetMap, RedisNetwork]} + ServiceName: redis # The public VIP is on the External net, falls back to ctlplane PublicVirtualIP: depends_on: Networks - type: 
OS::TripleO::Controller::Ports::ExternalPort + type: OS::TripleO::Network::Ports::ExternalVipPort properties: ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} ControlPlaneNetwork: {get_param: NeutronControlPlaneID} @@ -1178,21 +1208,21 @@ resources: InternalApiVirtualIP: depends_on: Networks - type: OS::TripleO::Controller::Ports::InternalApiPort + type: OS::TripleO::Network::Ports::InternalApiVipPort properties: ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} PortName: internal_api_virtual_ip StorageVirtualIP: depends_on: Networks - type: OS::TripleO::Controller::Ports::StoragePort + type: OS::TripleO::Network::Ports::StorageVipPort properties: ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} PortName: storage_virtual_ip StorageMgmtVirtualIP: depends_on: Networks - type: OS::TripleO::Controller::Ports::StorageMgmtPort + type: OS::TripleO::Network::Ports::StorageMgmtVipPort properties: ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} PortName: storage_management_virtual_ip @@ -1205,7 +1235,7 @@ resources: InternalApiIp: {get_attr: [InternalApiVirtualIP, ip_address]} StorageIp: {get_attr: [StorageVirtualIP, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtVirtualIP, ip_address]} - # No tenant VIP required + # No tenant or management VIP required VipConfig: type: OS::TripleO::VipConfig @@ -1496,3 +1526,9 @@ outputs: SwiftInternalVip: description: VIP for Swift Proxy internal endpoint value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} + HostsEntry: + description: | + The content that should be appended to your /etc/hosts if you want to get + hostname-based access to the deployed nodes (useful for testing without + setting up a DNS). + value: {get_attr: [allNodesConfig, hosts_entries]} diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index 2bc519bb..9dd43680 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -51,6 +51,17 @@ parameters: keystone_admin_api_node_ips: type: comma_delimited_list + DeployIdentifier: + type: string + description: > + Setting this to a unique value will re-run any deployment tasks which + perform configuration on a Heat stack-update. + UpdateIdentifier: + type: string + description: > + Setting to a previously unused value during stack-update will trigger + package update on all nodes + resources: allNodesConfigImpl: @@ -240,8 +251,17 @@ resources: nova::rabbit_hosts: *rabbit_nodes_array keystone::rabbit_hosts: *rabbit_nodes_array + deploy_identifier: {get_param: DeployIdentifier} + update_identifier: {get_param: UpdateIdentifier} + outputs: config_id: description: The ID of the allNodesConfigImpl resource. value: {get_resource: allNodesConfigImpl} + hosts_entries: + description: | + The content that should be appended to your /etc/hosts if you want to get + hostname-based access to the deployed nodes (useful for testing without + setting up a DNS). + value: {get_attr: [allNodesConfigImpl, config, hosts]} diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml index 1dc20a50..ede1263b 100644 --- a/puppet/ceph-storage.yaml +++ b/puppet/ceph-storage.yaml @@ -16,7 +16,7 @@ parameters: description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. 
type: string KeyName: - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string default: default constraints: @@ -59,13 +59,29 @@ parameters: description: > Heat action when to apply network configuration changes default: ['CREATE'] + SoftwareConfigTransport: + default: POLL_SERVER_CFN + description: | + How the server should receive the metadata required for software configuration. + type: string + constraints: + - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE] CloudDomain: default: '' type: string description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. - + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. + type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: CephStorage: @@ -80,6 +96,9 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + software_config_transport: {get_param: SoftwareConfigTransport} + metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -101,6 +120,16 @@ resources: NodeUserData: type: OS::TripleO::NodeUserData + ExternalPort: + type: OS::TripleO::CephStorage::Ports::ExternalPort + properties: + ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + + InternalApiPort: + type: OS::TripleO::CephStorage::Ports::InternalApiPort + properties: + ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + StoragePort: type: OS::TripleO::CephStorage::Ports::StoragePort properties: @@ -111,26 +140,48 @@ resources: properties: ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + TenantPort: + type: OS::TripleO::CephStorage::Ports::TenantPort + properties: + ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + + ManagementPort: + type: OS::TripleO::CephStorage::Ports::ManagementPort + properties: + ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + NetworkConfig: type: OS::TripleO::CephStorage::Net::SoftwareConfig properties: ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} + ExternalIp: {get_attr: [ExternalPort, ip_address]} + InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + TenantIp: {get_attr: [TenantPort, ip_address]} + ManagementIp: {get_attr: [ManagementPort, ip_address]} NetIpSubnetMap: type: OS::TripleO::Network::Ports::NetIpSubnetMap properties: ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} + InternalApiIpSubnet: 
{get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -230,12 +281,24 @@ outputs: description: Heat resource handle for the ceph storage server value: {get_resource: CephStorage} + external_ip_address: + description: IP address of the server in the external network + value: {get_attr: [ExternalPort, ip_address]} + internal_api_ip_address: + description: IP address of the server in the internal_api network + value: {get_attr: [InternalApiPort, ip_address]} storage_ip_address: description: IP address of the server in the storage network value: {get_attr: [StoragePort, ip_address]} storage_mgmt_ip_address: description: IP address of the server in the storage_mgmt network value: {get_attr: [StorageMgmtPort, ip_address]} + tenant_ip_address: + description: IP address of the server in the tenant network + value: {get_attr: [TenantPort, ip_address]} + management_ip_address: + description: IP address of the server in the management network + value: {get_attr: [ManagementPort, ip_address]} config_identifier: description: identifier which changes if the node configuration may need re-applying value: diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml index f1d25e78..9fdd0123 100644 --- a/puppet/cinder-storage.yaml +++ b/puppet/cinder-storage.yaml @@ -17,7 +17,6 @@ parameters: description: The size of the loopback file used by the cinder LVM driver. type: number CinderPassword: - default: unset description: The password for the cinder service and db account, used by cinder-api. type: string hidden: true @@ -46,7 +45,7 @@ parameters: - custom_constraint: nova.flavor KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string RabbitPassword: default: 'guest' @@ -70,7 +69,6 @@ parameters: description: The user name for SNMPd with readonly rights running on all Overcloud nodes type: string SnmpdReadonlyUserPassword: - default: unset description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true @@ -112,12 +110,29 @@ parameters: description: > Heat action when to apply network configuration changes default: ['CREATE'] + SoftwareConfigTransport: + default: POLL_SERVER_CFN + description: | + How the server should receive the metadata required for software configuration. + type: string + constraints: + - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE] CloudDomain: default: '' type: string description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. 
+ type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -133,6 +148,9 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + software_config_transport: {get_param: SoftwareConfigTransport} + metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -154,6 +172,11 @@ resources: NodeUserData: type: OS::TripleO::NodeUserData + ExternalPort: + type: OS::TripleO::BlockStorage::Ports::ExternalPort + properties: + ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + InternalApiPort: type: OS::TripleO::BlockStorage::Ports::InternalApiPort properties: @@ -169,21 +192,37 @@ resources: properties: ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + TenantPort: + type: OS::TripleO::BlockStorage::Ports::TenantPort + properties: + ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + + ManagementPort: + type: OS::TripleO::BlockStorage::Ports::ManagementPort + properties: + ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + NetworkConfig: type: OS::TripleO::BlockStorage::Net::SoftwareConfig properties: ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]} + ExternalIp: {get_attr: [ExternalPort, ip_address]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + TenantIp: {get_attr: [TenantPort, ip_address]} + ManagementIp: {get_attr: [ManagementPort, ip_address]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -304,6 +343,9 @@ outputs: description: Heat resource handle for the block storage server value: {get_resource: BlockStorage} + external_ip_address: + description: IP address of the server in the external network + value: {get_attr: [ExternalPort, ip_address]} internal_api_ip_address: description: IP address of the server in the internal_api network value: {get_attr: [InternalApiPort, ip_address]} @@ -313,6 +355,12 @@ outputs: storage_mgmt_ip_address: description: IP address of the server in the storage_mgmt network value: {get_attr: [StorageMgmtPort, ip_address]} + tenant_ip_address: + description: IP address of the server in the tenant network + value: {get_attr: [TenantPort, ip_address]} + management_ip_address: + description: IP address of the server in the management network + value: {get_attr: [ManagementPort, ip_address]} config_identifier: description: identifier which changes if the node configuration may need re-applying value: diff --git a/puppet/compute.yaml b/puppet/compute.yaml index c33373d1..0d3e0754 100644 --- a/puppet/compute.yaml +++ b/puppet/compute.yaml @@ -1,11 +1,10 @@ -heat_template_version: 2015-04-30 +heat_template_version: 2015-10-15 description: > OpenStack hypervisor node configured via Puppet. 
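Like the other roles, the compute template now passes ServerMetadata and SchedulerHints straight through to the Nova server's metadata and scheduler_hints properties. A minimal sketch of how a deployer might set them (the values below are illustrative assumptions; and, assuming the ResourceGroup's %index% substitution applies to these properties the same way it does for the hostname format parameters, each group member would receive its own index):

  parameter_defaults:
    ServerMetadata:
      owner: ci
      purpose: overcloud
    NovaComputeSchedulerHints:
      'capabilities:node': 'compute-%index%'
    ControllerSchedulerHints:
      'capabilities:node': 'controller-%index%'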
parameters: AdminPassword: - default: unset description: The password for the keystone admin account, used for monitoring, querying neutron etc. type: string hidden: true @@ -16,12 +15,10 @@ parameters: constraints: - allowed_values: ['', Present] CeilometerMeteringSecret: - default: unset description: Secret shared by the ceilometer services. type: string hidden: true CeilometerPassword: - default: unset description: The password for the ceilometer service account. type: string hidden: true @@ -61,7 +58,7 @@ parameters: description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. type: string KeyName: - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string default: default constraints: @@ -80,7 +77,7 @@ parameters: to create provider networks (and we use this for the default floating network) - if changing this either use different post-install network scripts or be sure to keep 'datacentre' as a mapping network name. - type: string + type: comma_delimited_list default: "datacentre:br-ex" NeutronEnableTunnelling: type: string @@ -91,7 +88,7 @@ parameters: Enable/disable the L2 population feature in the Neutron agents. default: "False" NeutronFlatNetworks: - type: string + type: comma_delimited_list default: 'datacentre' description: > If set, flat networks to configure in neutron plugins. @@ -99,8 +96,8 @@ parameters: type: string default: '' # Has to be here because of the ignored empty value bug NeutronNetworkType: - type: string - description: The tenant network type for Neutron, either gre or vxlan. + type: comma_delimited_list + description: The tenant network type for Neutron. default: 'vxlan' NeutronNetworkVLANRanges: default: 'datacentre' @@ -110,7 +107,6 @@ parameters: VLAN on the 'datacentre' physical network (See NeutronBridgeMappings). type: comma_delimited_list NeutronPassword: - default: unset description: The password for the neutron service account, used by neutron agents. type: string hidden: true @@ -123,10 +119,9 @@ parameters: description: A port to add to the NeutronPhysicalBridge. type: string NeutronTunnelTypes: - type: string + type: comma_delimited_list description: | - The tunnel types for the Neutron tenant network. To specify multiple - values, use a comma separated string, like so: 'gre,vxlan' + The tunnel types for the Neutron tenant network. default: 'vxlan' NeutronTunnelIdRanges: description: | @@ -147,7 +142,6 @@ parameters: default: 'False' type: string NeutronMetadataProxySharedSecret: - default: 'unset' description: Shared secret to prevent spoofing type: string hidden: true @@ -171,9 +165,8 @@ parameters: NeutronMechanismDrivers: default: 'openvswitch' description: | - The mechanism drivers for the Neutron tenant network. To specify multiple - values, use a comma separated string, like so: 'openvswitch,l2_population' - type: string + The mechanism drivers for the Neutron tenant network. + type: comma_delimited_list # Not relevant for Computes, should be removed NeutronAllowL3AgentFailover: default: 'True' @@ -212,7 +205,6 @@ parameters: description: Whether to enable or not the Rbd backend for Nova type: boolean NovaPassword: - default: unset description: The password for the nova service account, used by nova-api. 
type: string hidden: true @@ -258,7 +250,6 @@ parameters: description: The user name for SNMPd with readonly rights running on all Overcloud nodes type: string SnmpdReadonlyUserPassword: - default: unset description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true @@ -290,13 +281,29 @@ parameters: description: > Heat action when to apply network configuration changes default: ['CREATE'] + SoftwareConfigTransport: + default: POLL_SERVER_CFN + description: | + How the server should receive the metadata required for software configuration. + type: string + constraints: + - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE] CloudDomain: default: '' type: string description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. - + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. + type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -314,6 +321,9 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + software_config_transport: {get_param: SoftwareConfigTransport} + metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -335,6 +345,11 @@ resources: NodeUserData: type: OS::TripleO::NodeUserData + ExternalPort: + type: OS::TripleO::Compute::Ports::ExternalPort + properties: + ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + InternalApiPort: type: OS::TripleO::Compute::Ports::InternalApiPort properties: @@ -345,26 +360,42 @@ resources: properties: ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + StorageMgmtPort: + type: OS::TripleO::Compute::Ports::StorageMgmtPort + properties: + ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + TenantPort: type: OS::TripleO::Compute::Ports::TenantPort properties: ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + ManagementPort: + type: OS::TripleO::Compute::Ports::ManagementPort + properties: + ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]} + ExternalIp: {get_attr: [ExternalPort, ip_address]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} + StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} TenantIp: {get_attr: [TenantPort, ip_address]} + ManagementIp: {get_attr: [ManagementPort, ip_address]} NetworkConfig: type: OS::TripleO::Compute::Net::SoftwareConfig properties: ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -395,6 +426,7 @@ resources: - common - cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre - nova_nuage_data # 
Optionally provided by ComputeExtraConfigPre + - midonet_data # Optionally provided by AllNodesExtraConfig datafiles: compute_extraconfig: mapped_data: {get_param: NovaComputeExtraConfig} @@ -441,16 +473,16 @@ resources: neutron::rabbit_user: {get_input: rabbit_username} neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} neutron::rabbit_port: {get_input: rabbit_client_port} - neutron_flat_networks: {get_input: neutron_flat_networks} + neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks} neutron_host: {get_input: neutron_host} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} - neutron_tenant_network_type: {get_input: neutron_tenant_network_type} - neutron_tunnel_types: {get_input: neutron_tunnel_types} + neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types} + neutron::agents::ml2::ovs:tunnel_types: {get_input: neutron_tunnel_types} neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges} - neutron_bridge_mappings: {get_input: neutron_bridge_mappings} + neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} neutron_physical_bridge: {get_input: neutron_physical_bridge} @@ -464,8 +496,9 @@ resources: neutron::core_plugin: {get_input: neutron_core_plugin} neutron::service_plugins: {get_input: neutron_service_plugins} neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers} - neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers} + neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers} neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device} + keystone_public_api_virtual_ip: {get_input: keystone_vip} admin_password: {get_input: admin_password} ntp::servers: {get_input: ntp_servers} tripleo::packages::enable_install: {get_input: enable_package_install} @@ -498,36 +531,43 @@ resources: snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} - neutron_flat_networks: {get_param: NeutronFlatNetworks} + neutron_flat_networks: + str_replace: + template: NETWORKS + params: + NETWORKS: {get_param: NeutronFlatNetworks} neutron_host: {get_param: NeutronHost} neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]} - neutron_tenant_network_type: {get_param: NeutronNetworkType} - neutron_tunnel_types: {get_param: NeutronTunnelTypes} neutron_tunnel_id_ranges: str_replace: - template: "['RANGES']" + template: RANGES params: - RANGES: - list_join: - - "','" - - {get_param: NeutronTunnelIdRanges} + RANGES: {get_param: NeutronTunnelIdRanges} neutron_vni_ranges: str_replace: - template: "['RANGES']" + template: RANGES + params: + RANGES: {get_param: NeutronVniRanges} + neutron_tenant_network_types: + str_replace: + template: TYPES + params: + TYPES: {get_param: NeutronNetworkType} + neutron_tunnel_types: + str_replace: + template: TYPES params: - RANGES: - list_join: - - "','" - - {get_param: NeutronVniRanges} + TYPES: {get_param: NeutronTunnelTypes} neutron_network_vlan_ranges: str_replace: - template: "['RANGES']" + 
template: RANGES params: - RANGES: - list_join: - - "','" - - {get_param: NeutronNetworkVLANRanges} - neutron_bridge_mappings: {get_param: NeutronBridgeMappings} + RANGES: {get_param: NeutronNetworkVLANRanges} + neutron_bridge_mappings: + str_replace: + template: MAPPINGS + params: + MAPPINGS: {get_param: NeutronBridgeMappings} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} neutron_physical_bridge: {get_param: NeutronPhysicalBridge} @@ -539,24 +579,23 @@ resources: neutron_core_plugin: {get_param: NeutronCorePlugin} neutron_service_plugins: str_replace: - template: "['PLUGINS']" + template: PLUGINS params: - PLUGINS: - list_join: - - "','" - - {get_param: NeutronServicePlugins} + PLUGINS: {get_param: NeutronServicePlugins} neutron_type_drivers: str_replace: - template: "['DRIVERS']" + template: DRIVERS + params: + DRIVERS: {get_param: NeutronTypeDrivers} + neutron_mechanism_drivers: + str_replace: + template: MECHANISMS params: - DRIVERS: - list_join: - - "','" - - {get_param: NeutronTypeDrivers} - neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers} + MECHANISMS: {get_param: NeutronMechanismDrivers} neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]} neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri]} + keystone_vip: {get_param: KeystonePublicApiVirtualIP} admin_password: {get_param: AdminPassword} rabbit_username: {get_param: RabbitUserName} rabbit_password: {get_param: RabbitPassword} @@ -604,15 +643,24 @@ outputs: ip_address: description: IP address of the server in the ctlplane network value: {get_attr: [NovaCompute, networks, ctlplane, 0]} + external_ip_address: + description: IP address of the server in the external network + value: {get_attr: [ExternalPort, ip_address]} internal_api_ip_address: description: IP address of the server in the internal_api network value: {get_attr: [InternalApiPort, ip_address]} storage_ip_address: description: IP address of the server in the storage network value: {get_attr: [StoragePort, ip_address]} + storage_mgmt_ip_address: + description: IP address of the server in the storage_mgmt network + value: {get_attr: [StorageMgmtPort, ip_address]} tenant_ip_address: description: IP address of the server in the tenant network value: {get_attr: [TenantPort, ip_address]} + management_ip_address: + description: IP address of the server in the management network + value: {get_attr: [ManagementPort, ip_address]} hostname: description: Hostname of the server value: {get_attr: [NovaCompute, name]} diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml index 941e1ac5..ed8129e7 100644 --- a/puppet/controller-post.yaml +++ b/puppet/controller-post.yaml @@ -17,6 +17,13 @@ parameters: resources: + ControllerPrePuppet: + type: OS::TripleO::Tasks::ControllerPrePuppet + properties: + servers: {get_param: servers} + input_values: + update_identifier: {get_param: NodeConfigIdentifiers} + ControllerPuppetConfig: type: OS::TripleO::ControllerConfig @@ -26,6 +33,7 @@ resources: # e.g all Deployment resources should have a *Deployment_StepN suffix ControllerLoadBalancerDeployment_Step1: type: OS::Heat::StructuredDeployments + depends_on: ControllerPrePuppet properties: servers: {get_param: servers} config: {get_resource: ControllerPuppetConfig} @@ -98,10 +106,18 @@ resources: step: 5 update_identifier: {get_param: NodeConfigIdentifiers} + ControllerPostPuppet: + 
type: OS::TripleO::Tasks::ControllerPostPuppet + depends_on: ControllerOvercloudServicesDeployment_Step6 + properties: + servers: {get_param: servers} + input_values: + update_identifier: {get_param: NodeConfigIdentifiers} + # Note, this should come last, so use depends_on to ensure # this is created after any other resources. ExtraConfig: - depends_on: ControllerOvercloudServicesDeployment_Step5 + depends_on: ControllerPostPuppet type: OS::TripleO::NodeExtraConfigPost properties: servers: {get_param: servers} diff --git a/puppet/controller.yaml b/puppet/controller.yaml index be472c7c..4ec62762 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -1,4 +1,4 @@ -heat_template_version: 2015-04-30 +heat_template_version: 2015-10-15 description: > OpenStack controller node configured by Puppet. @@ -10,12 +10,10 @@ parameters: type: string hidden: true AdminPassword: - default: unset description: The password for the keystone admin account, used for monitoring, querying neutron etc. type: string hidden: true AdminToken: - default: unset description: The keystone auth secret and db password. type: string hidden: true @@ -27,18 +25,20 @@ parameters: description: The ceilometer backend type. type: string CeilometerMeteringSecret: - default: unset description: Secret shared by the ceilometer services. type: string hidden: true CeilometerPassword: - default: unset description: The password for the ceilometer service and db account. type: string hidden: true CinderApiVirtualIP: type: string default: '' + CeilometerWorkers: + default: 0 + description: Number of workers for Ceilometer service. + type: number CinderEnableNfsBackend: default: false description: Whether to enable or not the NFS backend for Cinder @@ -72,7 +72,6 @@ parameters: CinderEnableNfsBackend is true. type: comma_delimited_list CinderPassword: - default: unset description: The password for the cinder service and db account, used by cinder-api. type: string hidden: true @@ -81,6 +80,10 @@ parameters: description: Contains parameters to configure Cinder backends. Typically set via parameter_defaults in the resource registry. type: json + CinderWorkers: + default: 0 + description: Number of workers for Cinder service. + type: number CloudName: default: '' description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org @@ -90,6 +93,15 @@ parameters: description: | Controller specific hiera configuration data to inject into the cluster. type: json + ControllerIPs: + default: {} + description: > + A network mapped list of IPs to assign to Controllers in the following form: + { + "internal_api": ["a.b.c.d", "e.f.g.h"], + ... + } + type: json ControlVirtualInterface: default: 'br-ex' description: Interface where virtual ip will be assigned. @@ -170,7 +182,6 @@ parameters: type: string default: '' GlancePassword: - default: unset description: The password for the glance service and db account, used by the glance services. type: string hidden: true @@ -209,15 +220,17 @@ parameters: default: /dev/log description: Syslog address where HAproxy will send its log type: string + GlanceWorkers: + default: 0 + description: Number of workers for Glance service. + type: number HeatPassword: - default: unset description: The password for the Heat service and db account, used by the Heat services. type: string hidden: true HeatStackDomainAdminPassword: description: Password for heat_domain_admin user. 
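
The new ControllerIPs parameter above, together with the IPPool/NodeIndex properties now passed to each controller port resource further down, allows pinning predictable per-network addresses to each controller by index. Following the form given in the parameter description (the addresses are only examples, and the port templates that actually consume IPPool/NodeIndex are outside these hunks):

parameter_defaults:
  ControllerIPs:
    internal_api:
      - 172.16.2.10     # controller-0 (NodeIndex 0)
      - 172.16.2.11     # controller-1 (NodeIndex 1)
    storage:
      - 172.18.0.10
      - 172.18.0.11
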
type: string - default: '' hidden: true HeatAuthEncryptionKey: description: Auth encryption key for heat-engine @@ -227,6 +240,10 @@ parameters: default: '*' description: A list of IP/Hostname allowed to connect to horizon type: comma_delimited_list + HeatWorkers: + default: 0 + description: Number of workers for Heat service. + type: number HorizonSecret: description: Secret key for Django type: string @@ -240,9 +257,13 @@ parameters: default: 'REBUILD_PRESERVE_EPHEMERAL' description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. type: string + InstanceNameTemplate: + default: 'instance-%08x' + description: Template string to be used to generate instance names + type: string KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string constraints: - custom_constraint: nova.keypair @@ -290,6 +311,10 @@ parameters: default: false description: Whether IPtables rules should be purged before setting up the new ones. type: boolean + KeystoneWorkers: + default: 0 + description: Number of workers for Keystone service. + type: number MysqlClusterUniquePart: description: A unique identifier of the MySQL cluster the controller is in. type: string @@ -324,7 +349,7 @@ parameters: to create provider networks (and we use this for the default floating network) - if changing this either use different post-install network scripts or be sure to keep 'datacentre' as a mapping network name. - type: string + type: comma_delimited_list default: "datacentre:br-ex" NeutronDnsmasqOptions: default: 'dhcp-option-force=26,1400' @@ -363,7 +388,6 @@ parameters: description: Whether to configure Neutron Distributed Virtual Routers type: string NeutronMetadataProxySharedSecret: - default: 'unset' description: Shared secret to prevent spoofing type: string hidden: true @@ -387,9 +411,8 @@ parameters: NeutronMechanismDrivers: default: 'openvswitch' description: | - The mechanism drivers for the Neutron tenant network. To specify multiple - values, use a comma separated string, like so: 'openvswitch,l2_population' - type: string + The mechanism drivers for the Neutron tenant network. + type: comma_delimited_list NeutronAllowL3AgentFailover: default: 'True' description: Allow automatic l3-agent failover @@ -407,7 +430,7 @@ parameters: Enable/disable the L2 population feature in the Neutron agents. default: "False" NeutronFlatNetworks: - type: string + type: comma_delimited_list default: 'datacentre' description: If set, flat networks to configure in neutron plugins. NeutronL3HA: @@ -416,8 +439,8 @@ parameters: type: string NeutronNetworkType: default: 'vxlan' - description: The tenant network type for Neutron, either gre or vxlan. - type: string + description: The tenant network type for Neutron. + type: comma_delimited_list NeutronNetworkVLANRanges: default: 'datacentre' description: > @@ -426,7 +449,6 @@ parameters: VLAN on the 'datacentre' physical network (See NeutronBridgeMappings). type: comma_delimited_list NeutronPassword: - default: unset description: The password for the neutron service and db account, used by neutron agents. type: string hidden: true @@ -459,9 +481,8 @@ parameters: NeutronTunnelTypes: default: 'vxlan' description: | - The tunnel types for the Neutron tenant network. 
To specify multiple - values, use a comma separated string, like so: 'gre,vxlan' - type: string + The tunnel types for the Neutron tenant network. + type: comma_delimited_list NeutronTunnelIdRanges: description: | Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges @@ -477,11 +498,18 @@ parameters: NovaApiVirtualIP: type: string default: '' + NeutronWorkers: + default: 0 + description: Number of workers for Neutron service. + type: number NovaPassword: - default: unset description: The password for the nova service and db account, used by nova-api. type: string hidden: true + NovaWorkers: + default: 0 + description: Number of workers for Nova service. + type: number MongoDbNoJournal: default: false description: Should MongoDb journaling be disabled @@ -538,12 +566,10 @@ parameters: description: The user name for SNMPd with readonly rights running on all Overcloud nodes type: string SnmpdReadonlyUserPassword: - default: unset description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true SwiftHashSuffix: - default: unset description: A random string to be used as a salt when hashing to determine mappings in the ring. hidden: true @@ -561,7 +587,6 @@ parameters: description: Partition Power to use when building Swift rings type: number SwiftPassword: - default: unset description: The password for the swift service account, used by the swift proxy services. hidden: true @@ -573,6 +598,10 @@ parameters: type: number default: 3 description: How many replicas to use in the swift rings. + SwiftWorkers: + default: 0 + description: Number of workers for Swift service. + type: number VirtualIP: # DEPRECATED: use per service settings instead type: string default: '' # Has to be here because of the ignored empty value bug @@ -628,12 +657,29 @@ parameters: NodeIndex: type: number default: 0 + SoftwareConfigTransport: + default: POLL_SERVER_CFN + description: | + How the server should receive the metadata required for software configuration. + type: string + constraints: + - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE] CloudDomain: default: '' type: string description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. 
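
A recurring change in compute.yaml and controller.yaml is that the Neutron list-style parameters (NeutronNetworkType, NeutronTunnelTypes, NeutronMechanismDrivers, NeutronFlatNetworks, NeutronBridgeMappings) switch from string to comma_delimited_list, and the hand-built "['a','b']" strings previously assembled with list_join are replaced by plain str_replace pass-throughs feeding the neutron::plugins::ml2 and neutron::agents::ml2::ovs class parameters. From the operator side, either input style below should parse to the same list (values are illustrative):

parameter_defaults:
  NeutronNetworkType: 'vxlan,vlan'    # comma-separated string, as before
  NeutronMechanismDrivers:            # ...or an explicit YAML list
    - openvswitch
    - l2population
  NeutronBridgeMappings:
    - 'datacentre:br-ex'
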
+ type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -649,6 +695,9 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + software_config_transport: {get_param: SoftwareConfigTransport} + metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -673,26 +722,41 @@ resources: ExternalPort: type: OS::TripleO::Controller::Ports::ExternalPort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} InternalApiPort: type: OS::TripleO::Controller::Ports::InternalApiPort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} StoragePort: type: OS::TripleO::Controller::Ports::StoragePort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} StorageMgmtPort: type: OS::TripleO::Controller::Ports::StorageMgmtPort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} TenantPort: type: OS::TripleO::Controller::Ports::TenantPort properties: + IPPool: {get_param: ControllerIPs} + NodeIndex: {get_param: NodeIndex} + ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} + + ManagementPort: + type: OS::TripleO::Controller::Ports::ManagementPort + properties: ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]} NetIpMap: @@ -704,6 +768,7 @@ resources: StorageIp: {get_attr: [StoragePort, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} TenantIp: {get_attr: [TenantPort, ip_address]} + ManagementIp: {get_attr: [ManagementPort, ip_address]} NetIpSubnetMap: type: OS::TripleO::Network::Ports::NetIpSubnetMap @@ -714,6 +779,7 @@ resources: StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetworkConfig: type: OS::TripleO::Controller::Net::SoftwareConfig @@ -724,6 +790,7 @@ resources: StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -758,6 +825,14 @@ resources: server: {get_resource: Controller} input_values: bootstack_nodeid: {get_attr: [Controller, name]} + ceilometer_workers: {get_param: CeilometerWorkers} + cinder_workers: {get_param: CinderWorkers} + glance_workers: {get_param: GlanceWorkers} + heat_workers: {get_param: HeatWorkers} + keystone_workers: {get_param: KeystoneWorkers} + nova_workers: {get_param: NovaWorkers} + neutron_workers: {get_param: NeutronWorkers} + swift_workers: {get_param: SwiftWorkers} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} neutron_enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata} @@ -793,12 +868,9 @@ resources: cinder_nfs_mount_options: {get_param: CinderNfsMountOptions} cinder_nfs_servers: str_replace: - template: "['SERVERS']" + 
template: SERVERS params: - SERVERS: - list_join: - - "','" - - {get_param: CinderNfsServers} + SERVERS: {get_param: CinderNfsServers} cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize} cinder_password: {get_param: CinderPassword} cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend} @@ -874,67 +946,72 @@ resources: template: tripleo-CLUSTER params: CLUSTER: {get_param: MysqlClusterUniquePart} - neutron_flat_networks: {get_param: NeutronFlatNetworks} + neutron_flat_networks: + str_replace: + template: NETWORKS + params: + NETWORKS: {get_param: NeutronFlatNetworks} neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} neutron_agent_mode: {get_param: NeutronAgentMode} neutron_router_distributed: {get_param: NeutronDVR} neutron_core_plugin: {get_param: NeutronCorePlugin} neutron_service_plugins: str_replace: - template: "['PLUGINS']" + template: PLUGINS params: - PLUGINS: - list_join: - - "','" - - {get_param: NeutronServicePlugins} + PLUGINS: {get_param: NeutronServicePlugins} neutron_type_drivers: str_replace: - template: "['DRIVERS']" + template: DRIVERS params: - DRIVERS: - list_join: - - "','" - - {get_param: NeutronTypeDrivers} + DRIVERS: {get_param: NeutronTypeDrivers} neutron_enable_dhcp_agent: {get_param: NeutronEnableDHCPAgent} neutron_enable_l3_agent: {get_param: NeutronEnableL3Agent} neutron_enable_metadata_agent: {get_param: NeutronEnableMetadataAgent} neutron_enable_ovs_agent: {get_param: NeutronEnableOVSAgent} - neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers} + neutron_mechanism_drivers: + str_replace: + template: MECHANISMS + params: + MECHANISMS: {get_param: NeutronMechanismDrivers} neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} neutron_l3_ha: {get_param: NeutronL3HA} neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} neutron_network_vlan_ranges: str_replace: - template: "['RANGES']" + template: RANGES params: - RANGES: - list_join: - - "','" - - {get_param: NeutronNetworkVLANRanges} - neutron_bridge_mappings: {get_param: NeutronBridgeMappings} + RANGES: {get_param: NeutronNetworkVLANRanges} + neutron_bridge_mappings: + str_replace: + template: MAPPINGS + params: + MAPPINGS: {get_param: NeutronBridgeMappings} neutron_external_network_bridge: {get_param: NeutronExternalNetworkBridge} neutron_public_interface: {get_param: NeutronPublicInterface} neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} neutron_public_interface_default_route: {get_param: NeutronPublicInterfaceDefaultRoute} neutron_public_interface_tag: {get_param: NeutronPublicInterfaceTag} - neutron_tenant_network_type: {get_param: NeutronNetworkType} - neutron_tunnel_types: {get_param: NeutronTunnelTypes} neutron_tunnel_id_ranges: str_replace: - template: "['RANGES']" + template: RANGES params: - RANGES: - list_join: - - "','" - - {get_param: NeutronTunnelIdRanges} + RANGES: {get_param: NeutronTunnelIdRanges} neutron_vni_ranges: str_replace: - template: "['RANGES']" + template: RANGES + params: + RANGES: {get_param: NeutronVniRanges} + neutron_tenant_network_types: + str_replace: + template: TYPES params: - RANGES: - list_join: - - "','" - - {get_param: NeutronVniRanges} + TYPES: {get_param: NeutronNetworkType} + neutron_tunnel_types: + str_replace: + template: TYPES + params: + TYPES: {get_param: NeutronTunnelTypes} neutron_password: {get_param: NeutronPassword} neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions} neutron_dsn: @@ -978,6 +1055,7 @@ resources: 
- '@' - {get_param: MysqlVirtualIP} - '/nova' + instance_name_template: {get_param: InstanceNameTemplate} fencing_config: {get_param: FencingConfig} pcsd_password: {get_param: PcsdPassword} rabbit_username: {get_param: RabbitUserName} @@ -1062,6 +1140,7 @@ resources: - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre - neutron_nuage_data # Optionally provided by ControllerExtraConfigPre + - midonet_data #Optionally provided by AllNodesExtraConfig datafiles: controller_extraconfig: mapped_data: {get_param: ControllerExtraConfig} @@ -1097,6 +1176,7 @@ resources: swift::storage::all::storage_local_net_ip: {get_input: swift_management_network} swift::swift_hash_suffix: {get_input: swift_hash_suffix} swift::proxy::authtoken::admin_password: {get_input: swift_password} + swift::proxy::workers: {get_input: swift_workers} tripleo::ringbuilder::part_power: {get_input: swift_part_power} tripleo::ringbuilder::replicas: {get_input: swift_replicas} tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours} @@ -1137,6 +1217,7 @@ resources: glance::api::registry_host: {get_input: glance_registry_host} glance::api::keystone_password: {get_input: glance_password} glance::api::debug: {get_input: debug} + glance::api::workers: {get_input: glance_workers} glance_notifier_strategy: {get_input: glance_notifier_strategy} glance_log_file: {get_input: glance_log_file} glance_log_file: {get_input: glance_log_file} @@ -1148,6 +1229,7 @@ resources: glance::registry::identity_uri: {get_input: keystone_identity_uri} glance::registry::debug: {get_input: debug} glance::backend::swift::swift_store_auth_address: {get_input: keystone_auth_uri} + glance::registry::workers: {get_input: glance_workers} glance::backend::swift::swift_store_user: service:glance glance::backend::swift::swift_store_key: {get_input: glance_password} glance_backend: {get_input: glance_backend} @@ -1172,8 +1254,11 @@ resources: heat::identity_uri: {get_input: keystone_identity_uri} heat::keystone_password: {get_input: heat_password} heat::api::bind_host: {get_input: heat_api_network} + heat::api::workers: {get_input: heat_workers} heat::api_cloudwatch::bind_host: {get_input: heat_api_network} + heat::api_cloudwatch::workers: {get_input: heat_workers} heat::api_cfn::bind_host: {get_input: heat_api_network} + heat::api_cfn::workers: {get_input: heat_workers} heat::database_connection: {get_input: heat_dsn} heat::debug: {get_input: debug} heat::db::mysql::password: {get_input: heat_password} @@ -1202,6 +1287,9 @@ resources: keystone::endpoint::internal_url: {get_input: keystone_internal_url} keystone::endpoint::admin_url: {get_input: keystone_identity_uri} keystone::endpoint::region: {get_input: keystone_region} + keystone::admin_workers: {get_input: keystone_workers} + keystone::public_workers: {get_input: keystone_workers} + # MongoDB mongodb::server::bind_ip: {get_input: mongo_db_network} mongodb::server::nojournal: {get_input: mongodb_no_journal} @@ -1227,14 +1315,16 @@ resources: neutron::server::auth_uri: {get_input: keystone_auth_uri} neutron::server::identity_uri: {get_input: keystone_identity_uri} neutron::server::database_connection: {get_input: neutron_dsn} + neutron::server::api_workers: {get_input: neutron_workers} neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} neutron::agents::ml2::ovs::l2_population: {get_input: 
neutron_enable_l2pop} neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} - neutron_flat_networks: {get_input: neutron_flat_networks} + neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks} neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret} neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network} + neutron::agents::metadata::metadata_workers: {get_input: neutron_workers} neutron_agent_mode: {get_input: neutron_agent_mode} neutron_router_distributed: {get_input: neutron_router_distributed} neutron::core_plugin: {get_input: neutron_core_plugin} @@ -1244,20 +1334,20 @@ resources: neutron::enable_metadata_agent: {get_input: neutron_enable_metadata_agent} neutron::enable_ovs_agent: {get_input: neutron_enable_ovs_agent} neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers} - neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers} + neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers} neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover} neutron::server::l3_ha: {get_input: neutron_l3_ha} neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network} neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges} - neutron_bridge_mappings: {get_input: neutron_bridge_mappings} + neutron::agents::ml2::ovs:bridge_mappings: {get_input: neutron_bridge_mappings} neutron_public_interface: {get_input: neutron_public_interface} neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device} neutron_public_interface_default_route: {get_input: neutron_public_interface_default_route} neutron_public_interface_tag: {get_input: neutron_public_interface_tag} - neutron_tenant_network_type: {get_input: neutron_tenant_network_type} - neutron_tunnel_types: {get_input: neutron_tunnel_types} + neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types} + neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types} neutron::server::auth_password: {get_input: neutron_password} neutron::agents::metadata::auth_password: {get_input: neutron_password} neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options} @@ -1305,9 +1395,13 @@ resources: nova::api::api_bind_address: {get_input: nova_api_network} nova::api::metadata_listen: {get_input: nova_metadata_network} nova::api::admin_password: {get_input: nova_password} + nova::api::osapi_compute_workers: {get_input: nova_workers} + nova::api::ec2_workers: {get_input: nova_workers} + nova::api::metadata_workers: {get_input: nova_workers} nova::database_connection: {get_input: nova_dsn} nova::glance_api_servers: {get_input: glance_api_servers} nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} + nova::api::instance_name_template: {get_input: instance_name_template} nova::network::neutron::neutron_admin_password: {get_input: neutron_password} nova::network::neutron::neutron_url: {get_input: neutron_internal_url} nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} @@ -1393,6 +1487,9 @@ outputs: tenant_ip_address: description: IP address of the server in the tenant network value: 
{get_attr: [TenantPort, ip_address]} + management_ip_address: + description: IP address of the server in the management network + value: {get_attr: [ManagementPort, ip_address]} hostname: description: Hostname of the server value: {get_attr: [Controller, name]} diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml new file mode 100644 index 00000000..26ce7138 --- /dev/null +++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml @@ -0,0 +1,119 @@ +heat_template_version: 2015-10-15 + +description: Configure hieradata for all MidoNet nodes + +parameters: + # Parameters passed from the parent template + controller_servers: + type: json + compute_servers: + type: json + blockstorage_servers: + type: json + objectstorage_servers: + type: json + cephstorage_servers: + type: json + + EnableZookeeperOnController: + label: Enable Zookeeper On Controller + description: 'Whether enable Zookeeper cluster on Controller' + type: boolean + default: false + EnableCassandraOnController: + label: Enable Cassandra On Controller + description: 'Whether enable Cassandra cluster on Controller' + type: boolean + default: false + CassandraStoragePort: + label: Cassandra Storage Port + description: 'The Cassandra port for inter-node communication' + type: string + default: '7000' + CassandraSslStoragePort: + label: Cassandra SSL Storage Port + description: 'The SSL port for encrypted communication. Unused unless enabled in encryption_options' + type: string + default: '7001' + CassandraClientPort: + label: Cassandra Client Port + description: 'Native Transport Port' + type: string + default: '9042' + CassandraClientPortThrift: + label: Cassandra Client Thrift Port + description: 'The port for the Thrift RPC service, which is used for client connections' + type: string + default: '9160' + TunnelZoneName: + label: Name of the Tunnelzone + description: 'Name of the tunnel zone used to tunnel packages' + type: string + default: 'tunnelzone_tripleo' + TunnelZoneType: + label: Type of the Tunnel + description: 'Type of the tunnels on the overlay. 
Choose between `gre` and `vxlan`' + type: string + default: 'vxlan' + +resources: + + NetworkMidoNetConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + midonet_data: + mapped_data: + enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController} + enable_cassandra_on_controller: {get_param: EnableCassandraOnController} + midonet_tunnelzone_name: {get_param: TunnelZoneName} + midonet_tunnelzone_type: {get_param: TunnelZoneType} + midonet_libvirt_qemu_data: | + user = "root" + group = "root" + cgroup_device_acl = [ + "/dev/null", "/dev/full", "/dev/zero", + "/dev/random", "/dev/urandom", + "/dev/ptmx", "/dev/kvm", "/dev/kqemu", + "/dev/rtc","/dev/hpet", "/dev/vfio/vfio", + "/dev/net/tun" + ] + tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort} + tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort} + tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort} + tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift} + tripleo::loadbalancer::midonet_api: true + # Missed Neutron Puppet data + neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver' + neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver' + neutron::plugins::midonet::midonet_api_port: 8081 + neutron::params::midonet_server_package: 'python-networking-midonet' + + # Make sure the l3 agent does not run + l3_agent_service: false + neutron::agents::l3::manage_service: false + neutron::agents::l3::enabled: false + + + NetworkMidonetDeploymentControllers: + type: OS::Heat::StructuredDeploymentGroup + properties: + config: {get_resource: NetworkMidoNetConfig} + servers: {get_param: controller_servers} + + NetworkMidonetDeploymentComputes: + type: OS::Heat::StructuredDeploymentGroup + properties: + config: {get_resource: NetworkMidoNetConfig} + servers: {get_param: compute_servers} + +outputs: + config_identifier: + value: + list_join: + - ' ' + - - {get_attr: [NetworkMidonetDeploymentControllers, deploy_stdouts]} + - {get_attr: [NetworkMidonetDeploymentComputes, deploy_stdouts]} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml index 60f02bf8..8378d2fc 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml @@ -43,6 +43,11 @@ parameters: description: Cloud Management System ID (CMS ID) to distinguish between OS instances on the same VSD type: string + UseForwardedFor: + description: Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. 
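
The new neutron-midonet-all-nodes.yaml template above distributes the MidoNet hieradata (Zookeeper/Cassandra toggles, tunnel zone, Cassandra ports) to controllers and computes, matching the midonet_data hieradata file now listed in the compute and controller hierarchies. A sketch of an environment that could wire it in, assuming it is registered as the AllNodesExtraConfig hook (the actual environment file and registry entry are not shown in these hunks, and the relative path depends on where the environment file lives):

resource_registry:
  OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml

parameter_defaults:
  NeutronCorePlugin: 'midonet.neutron.plugin_v1.MidonetPluginV2'
  EnableZookeeperOnController: true
  EnableCassandraOnController: true
  TunnelZoneType: 'vxlan'
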
+ type: boolean + default: false + resources: NeutronNuageConfig: type: OS::Heat::StructuredConfig @@ -61,6 +66,7 @@ resources: neutron::plugins::nuage::nuage_vsd_organization: {get_input: NuageVSDOrganization} neutron::plugins::nuage::nuage_base_uri_version: {get_input: NuageBaseURIVersion} neutron::plugins::nuage::nuage_cms_id: {get_input: NuageCMSId} + nova::api::use_forwarded_for: {get_input: NovaUseForwardedFor} NeutronNuageDeployment: type: OS::Heat::StructuredDeployment @@ -76,6 +82,7 @@ resources: NuageVSDOrganization: {get_param: NeutronNuageVSDOrganization} NuageBaseURIVersion: {get_param: NeutronNuageBaseURIVersion} NuageCMSId: {get_param: NeutronNuageCMSId} + NovaUseForwardedFor: {get_param: UseForwardedFor} outputs: deploy_stdout: diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index 95f5ccb8..b4b51abf 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -7,6 +7,7 @@ ceilometer::agent::auth::auth_region: 'regionOne' # changes in the tripleo-incubator keystone role setup ceilometer::agent::auth::auth_tenant_name: 'admin' +nova::api::admin_tenant_name: 'service' nova::network::neutron::neutron_admin_tenant_name: 'service' nova::network::neutron::neutron_admin_username: 'neutron' nova::network::neutron::dhcp_domain: '' diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index f42ddf6c..b0e6ae96 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -30,7 +30,6 @@ redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}" redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh' # service tenant -nova::api::admin_tenant_name: 'service' glance::api::keystone_tenant: 'service' glance::registry::keystone_tenant: 'service' neutron::server::auth_tenant: 'service' diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index f3a02eba..79a6abbb 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -68,11 +68,19 @@ if hiera('cinder_enable_nfs_backend', false) { } include ::nova::compute::libvirt +if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + file {'/etc/libvirt/qemu.conf': + ensure => present, + content => hiera('midonet_libvirt_qemu_data') + } +} include ::nova::network::neutron include ::neutron # If the value of core plugin is set to 'nuage', # include nuage agent, +# If the value of core plugin is set to 'midonet', +# include midonet agent, # else use the default value of 'ml2' if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { include ::nuage::vrs @@ -84,18 +92,24 @@ if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { nova_metadata_ip => hiera('nova_metadata_node_ips'), nova_auth_ip => hiera('keystone_public_api_virtual_ip'), } -} else { - class { '::neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), - tenant_network_types => [hiera('neutron_tenant_network_type')], - } +} +elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - class { '::neutron::agents::ml2::ovs': - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), + # TODO(devvesa) provide non-controller ips for these services + $zookeeper_node_ips = hiera('neutron_api_node_ips') + $cassandra_node_ips = hiera('neutron_api_node_ips') + + class {'::tripleo::network::midonet::agent': + 
zookeeper_servers => $zookeeper_node_ips, + cassandra_seeds => $cassandra_node_ips } +} +else { + + include ::neutron::plugins::ml2 + include ::neutron::agents::ml2::ovs - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { class { '::neutron::agents::n1kv_vem': n1kv_source => hiera('n1kv_vem_source', undef), n1kv_version => hiera('n1kv_vem_version', undef), diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index a8abbb77..a84a534b 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -230,13 +230,61 @@ if hiera('step') >= 3 { include ::nova::scheduler include ::nova::scheduler::filter - include ::neutron + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + + # TODO(devvesa) provide non-controller ips for these services + $zookeeper_node_ips = hiera('neutron_api_node_ips') + $cassandra_node_ips = hiera('neutron_api_node_ips') + + # Run zookeeper in the controller if configured + if hiera('enable_zookeeper_on_controller') { + class {'::tripleo::cluster::zookeeper': + zookeeper_server_ips => $zookeeper_node_ips, + zookeeper_client_ip => $ipaddress, + zookeeper_hostnames => hiera('controller_node_names') + } + } + + # Run cassandra in the controller if configured + if hiera('enable_cassandra_on_controller') { + class {'::tripleo::cluster::cassandra': + cassandra_servers => $cassandra_node_ips, + cassandra_ip => $ipaddress + } + } + + class {'::tripleo::network::midonet::agent': + zookeeper_servers => $zookeeper_node_ips, + cassandra_seeds => $cassandra_node_ips + } + + class {'::tripleo::network::midonet::api': + zookeeper_servers => $zookeeper_node_ips, + vip => $ipaddress, + keystone_ip => $ipaddress, + keystone_admin_token => hiera('keystone::admin_token'), + bind_address => $ipaddress, + admin_password => hiera('admin_password') + } + + # TODO: find a way to get an empty list from hiera + class {'::neutron': + service_plugins => [] + } + + } + else { + + # ML2 plugin + include ::neutron + } + include ::neutron::server include ::neutron::server::notifications # If the value of core plugin is set to 'nuage', - # include nuage core plugin, - # else use the default value of 'ml2' + # include nuage core plugin, and it does not + # need the l3, dhcp and metadata agents if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { include ::neutron::plugins::nuage } else { @@ -252,51 +300,57 @@ if hiera('step') >= 3 { require => Package['neutron'], } - class { '::neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), - tenant_network_types => [hiera('neutron_tenant_network_type')], - mechanism_drivers => [hiera('neutron_mechanism_drivers')], - } - class { '::neutron::agents::ml2::ovs': - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), - } - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::nexus1000v + # If the value of core plugin is set to 'midonet', + # skip all the ML2 configuration + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - class { '::neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), + class {'::neutron::plugins::midonet': + midonet_api_ip => $ipaddress, + keystone_tenant => 
hiera('neutron::server::auth_tenant'), + keystone_password => hiera('neutron::server::auth_password') } + } else { + + include ::neutron::plugins::ml2 + include ::neutron::agents::ml2::ovs + + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus1000v - class { '::n1k_vsm': - n1kv_source => hiera('n1kv_vsm_source', undef), - n1kv_version => hiera('n1kv_vsm_version', undef), - pacemaker_control => false, + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), + } + + class { '::n1k_vsm': + n1kv_source => hiera('n1kv_vsm_source', undef), + n1kv_version => hiera('n1kv_vsm_version', undef), + pacemaker_control => false, + } } - } - if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::ucsm - } - if 'cisco_nexus' in hiera('neutron_mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::nexus - include ::neutron::plugins::ml2::cisco::type_nexus_vxlan - } + if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::ucsm + } + if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus + include ::neutron::plugins::ml2::cisco::type_nexus_vxlan + } - if hiera('neutron_enable_bigswitch_ml2', false) { - include ::neutron::plugins::ml2::bigswitch::restproxy - } - neutron_l3_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); - } - neutron_dhcp_agent_config { - 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); + if hiera('neutron_enable_bigswitch_ml2', false) { + include ::neutron::plugins::ml2::bigswitch::restproxy + } + neutron_l3_agent_config { + 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); + } + neutron_dhcp_agent_config { + 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); + } + Service['neutron-server'] -> Service['neutron-ovs-agent-service'] } Service['neutron-server'] -> Service['neutron-dhcp-service'] Service['neutron-server'] -> Service['neutron-l3'] - Service['neutron-server'] -> Service['neutron-ovs-agent-service'] Service['neutron-server'] -> Service['neutron-metadata'] } @@ -450,8 +504,6 @@ if hiera('step') >= 3 { include ::ceilometer::api include ::ceilometer::agent::notification include ::ceilometer::agent::central - include ::ceilometer::alarm::notifier - include ::ceilometer::alarm::evaluator include ::ceilometer::expirer include ::ceilometer::collector include ::ceilometer::agent::auth @@ -469,7 +521,7 @@ if hiera('step') >= 3 { include ::heat::engine # Horizon - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { $_profile_support = 'cisco' } else { $_profile_support = 'None' diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 8459f1a3..d0d46e2d 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -592,8 +592,54 @@ if hiera('step') >= 3 { } include ::nova::network::neutron - # Neutron class definitions - include ::neutron + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + + # TODO(devvesa) provide non-controller ips for these services + $zookeeper_node_ips = hiera('neutron_api_node_ips') + $cassandra_node_ips = 
hiera('neutron_api_node_ips') + + # Run zookeeper in the controller if configured + if hiera('enable_zookeeper_on_controller') { + class {'::tripleo::cluster::zookeeper': + zookeeper_server_ips => $zookeeper_node_ips, + zookeeper_client_ip => $ipaddress, + zookeeper_hostnames => hiera('controller_node_names') + } + } + + # Run cassandra in the controller if configured + if hiera('enable_cassandra_on_controller') { + class {'::tripleo::cluster::cassandra': + cassandra_servers => $cassandra_node_ips, + cassandra_ip => $ipaddress + } + } + + class {'::tripleo::network::midonet::agent': + zookeeper_servers => $zookeeper_node_ips, + cassandra_seeds => $cassandra_node_ips + } + + class {'::tripleo::network::midonet::api': + zookeeper_servers => hiera('neutron_api_node_ips'), + vip => $public_vip, + keystone_ip => $public_vip, + keystone_admin_token => hiera('keystone::admin_token'), + bind_address => $ipaddress, + admin_password => hiera('admin_password') + } + + # Configure Neutron + class {'::neutron': + service_plugins => [] + } + + } + else { + # Neutron class definitions + include ::neutron + } + class { '::neutron::server' : sync_db => $sync_db, manage_service => false, @@ -603,6 +649,13 @@ if hiera('step') >= 3 { if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { include ::neutron::plugins::nuage } + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + class {'::neutron::plugins::midonet': + midonet_api_ip => $public_vip, + keystone_tenant => hiera('neutron::server::auth_tenant'), + keystone_password => hiera('neutron::server::auth_password') + } + } if hiera('neutron::enable_dhcp_agent',true) { class { '::neutron::agents::dhcp' : manage_service => false, @@ -628,27 +681,20 @@ if hiera('step') >= 3 { enabled => false, } } - if hiera('neutron::core_plugin') == 'ml2' { - class { '::neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), - tenant_network_types => [hiera('neutron_tenant_network_type')], - mechanism_drivers => [hiera('neutron_mechanism_drivers')], - } - class { '::neutron::agents::ml2::ovs': - manage_service => false, - enabled => false, - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), - } + include ::neutron::plugins::ml2 + class { '::neutron::agents::ml2::ovs': + manage_service => false, + enabled => false, } - if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') { + + if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::cisco::ucsm } - if 'cisco_nexus' in hiera('neutron_mechanism_drivers') { + if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::cisco::nexus include ::neutron::plugins::ml2::cisco::type_nexus_vxlan } - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { include ::neutron::plugins::ml2::cisco::nexus1000v class { '::neutron::agents::n1kv_vem': @@ -855,14 +901,6 @@ if hiera('step') >= 3 { manage_service => false, enabled => false, } - class { '::ceilometer::alarm::notifier' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::alarm::evaluator' : - manage_service => false, - enabled => false, - } class { '::ceilometer::collector' : manage_service => false, enabled => false, @@ -904,7 +942,7 @@ if hiera('step') >= 3 { # service_manage => false, # <-- not supported with horizon&apache mod_wsgi? 
} include ::apache::mod::status - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { $_profile_support = 'cisco' } else { $_profile_support = 'None' @@ -1068,15 +1106,32 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::glance::params::api_service_name]], } - # Neutron - # NOTE(gfidente): Neutron will try to populate the database with some data - # as soon as neutron-server is started; to avoid races we want to make this - # happen only on one node, before normal Pacemaker initialization - # https://bugzilla.redhat.com/show_bug.cgi?id=1233061 - exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } -> - pacemaker::resource::service { $::neutron::params::server_service: - clone_params => 'interleave=true', - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + if hiera('step') == 4 { + # Neutron + # NOTE(gfidente): Neutron will try to populate the database with some data + # as soon as neutron-server is started; to avoid races we want to make this + # happen only on one node, before normal Pacemaker initialization + # https://bugzilla.redhat.com/show_bug.cgi?id=1233061 + # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec + # will try to start the service while it's already started by Pacemaker + # It would result to a deployment failure since systemd would return 1 to Puppet + # and the overcloud would fail to deploy (6 would be returned). + # This conditional prevents from a race condition during the deployment. + # https://bugzilla.redhat.com/show_bug.cgi?id=1290582 + exec { 'neutron-server-systemd-start-sleep' : + command => 'systemctl start neutron-server && /usr/bin/sleep 5', + path => '/usr/bin', + unless => '/sbin/pcs resource show neutron-server', + } -> + pacemaker::resource::service { $::neutron::params::server_service: + clone_params => 'interleave=true', + require => Pacemaker::Resource::Service[$::keystone::params::service_name] + } + } else { + pacemaker::resource::service { $::neutron::params::server_service: + clone_params => 'interleave=true', + require => Pacemaker::Resource::Service[$::keystone::params::service_name] + } } if hiera('neutron::enable_l3_agent', true) { pacemaker::resource::service { $::neutron::params::l3_agent_service: @@ -1093,6 +1148,11 @@ if hiera('step') >= 4 { clone_params => 'interleave=true', } } + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + pacemaker::resource::service {'tomcat': + clone_params => 'interleave=true', + } + } if hiera('neutron::enable_metadata_agent', true) { pacemaker::resource::service { $::neutron::params::metadata_agent_service: clone_params => 'interleave=true', @@ -1143,7 +1203,6 @@ if hiera('step') >= 4 { } } - #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3 pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': constraint_type => 'order', first_resource => "${::keystone::params::service_name}-clone", @@ -1219,6 +1278,43 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]] } } + if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { + #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat + pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::server_service}-clone", + second_resource => 
"${::neutron::params::dhcp_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::server_service], + Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], + } + pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::dhcp_agent_service}-clone", + second_resource => "${::neutron::params::metadata_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], + Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], + } + pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::metadata_agent_service}-clone", + second_resource => 'tomcat-clone', + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service], + Pacemaker::Resource::Service['tomcat']], + } + pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation': + source => "${::neutron::params::metadata_agent_service}-clone", + target => "${::neutron::params::dhcp_agent_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], + Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], + } + } # Nova pacemaker::resource::service { $::nova::params::api_service_name : @@ -1339,12 +1435,6 @@ if hiera('step') >= 4 { pacemaker::resource::service { $::ceilometer::params::api_service_name : clone_params => 'interleave=true', } - pacemaker::resource::service { $::ceilometer::params::alarm_evaluator_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::ceilometer::params::alarm_notifier_service_name : - clone_params => 'interleave=true', - } pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name : clone_params => 'interleave=true', } @@ -1419,54 +1509,6 @@ if hiera('step') >= 4 { require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], Pacemaker::Resource::Ocf['delay']], } - pacemaker::constraint::base { 'ceilometer-delay-then-ceilometer-alarm-evaluator-constraint': - constraint_type => 'order', - first_resource => 'delay-clone', - second_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::colocation { 'ceilometer-alarm-evaluator-with-ceilometer-delay-colocation': - source => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - target => 'delay-clone', - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Ocf['delay']], - } - pacemaker::constraint::base { 'ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - second_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => 
[Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } - pacemaker::constraint::colocation { 'ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation': - source => "${::ceilometer::params::alarm_notifier_service_name}-clone", - target => "${::ceilometer::params::alarm_evaluator_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } - pacemaker::constraint::base { 'ceilometer-alarm-notifier-then-ceilometer-notification-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone", - second_resource => "${::ceilometer::params::agent_notification_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } - pacemaker::constraint::colocation { 'ceilometer-notification-with-ceilometer-alarm-notifier-colocation': - source => "${::ceilometer::params::agent_notification_service_name}-clone", - target => "${::ceilometer::params::alarm_notifier_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]], - } if downcase(hiera('ceilometer_backend')) == 'mongodb' { pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint': constraint_type => 'order', @@ -1565,7 +1607,7 @@ if hiera('step') >= 4 { } #VSM - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { pacemaker::resource::ocf { 'vsm-p' : ocf_agent_name => 'heartbeat:VirtualDomain', resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml', diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp index 4296208b..2d880d33 100644 --- a/puppet/manifests/ringbuilder.pp +++ b/puppet/manifests/ringbuilder.pp @@ -70,7 +70,7 @@ class tripleo::ringbuilder ( # create local rings swift::ringbuilder::create{ ['object', 'account', 'container']: part_power => $part_power, - replicas => $replicas, + replicas => min(count($device_array), $replicas), min_part_hours => $min_part_hours, } -> diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml index fbb2b878..b60664a1 100644 --- a/puppet/swift-storage.yaml +++ b/puppet/swift-storage.yaml @@ -7,7 +7,6 @@ parameters: constraints: - custom_constraint: nova.flavor HashSuffix: - default: unset description: A random string to be used as a salt when hashing to determine mappings in the ring. 
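    # NOTE (assumption, added for clarity): with the 'unset' default removed,
    # this salt presumably must now be provided explicitly at deploy time
    # instead of silently falling back to 'unset', e.g. via a parameter_defaults
    # entry in an environment file (hypothetical illustration only):
    #   parameter_defaults:
    #     HashSuffix: <randomly generated string, kept secret>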
hidden: true @@ -17,7 +16,7 @@ parameters: type: string KeyName: default: default - description: Name of an existing EC2 KeyPair to enable SSH access to the instances + description: Name of an existing Nova key pair to enable SSH access to the instances type: string MountCheck: default: 'false' @@ -40,7 +39,6 @@ parameters: description: The user name for SNMPd with readonly rights running on all Overcloud nodes type: string SnmpdReadonlyUserPassword: - default: unset description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true @@ -82,13 +80,29 @@ parameters: description: > Heat action when to apply network configuration changes default: ['CREATE'] + SoftwareConfigTransport: + default: POLL_SERVER_CFN + description: | + How the server should receive the metadata required for software configuration. + type: string + constraints: + - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE] CloudDomain: default: '' type: string description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. - + ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. + type: json + SchedulerHints: + type: json + description: Optional scheduler hints to pass to nova + default: {} resources: @@ -103,6 +117,9 @@ resources: user_data_format: SOFTWARE_CONFIG user_data: {get_resource: UserData} name: {get_param: Hostname} + software_config_transport: {get_param: SoftwareConfigTransport} + metadata: {get_param: ServerMetadata} + scheduler_hints: {get_param: SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives UserData: @@ -124,6 +141,11 @@ resources: NodeUserData: type: OS::TripleO::NodeUserData + ExternalPort: + type: OS::TripleO::SwiftStorage::Ports::ExternalPort + properties: + ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + InternalApiPort: type: OS::TripleO::SwiftStorage::Ports::InternalApiPort properties: @@ -139,21 +161,37 @@ resources: properties: ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + TenantPort: + type: OS::TripleO::SwiftStorage::Ports::TenantPort + properties: + ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + + ManagementPort: + type: OS::TripleO::SwiftStorage::Ports::ManagementPort + properties: + ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + NetworkConfig: type: OS::TripleO::ObjectStorage::Net::SoftwareConfig properties: ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} + ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]} NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + ExternalIp: {get_attr: [ExternalPort, ip_address]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + TenantIp: {get_attr: [TenantPort, ip_address]} + ManagementIp: {get_attr: [ManagementPort, ip_address]} NetworkDeployment: type: 
OS::TripleO::SoftwareDeployment @@ -272,6 +310,9 @@ outputs: template: 'r1z1-IP:%PORT%/d1' params: IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} + external_ip_address: + description: IP address of the server in the external network + value: {get_attr: [ExternalPort, ip_address]} internal_api_ip_address: description: IP address of the server in the internal_api network value: {get_attr: [InternalApiPort, ip_address]} @@ -281,6 +322,12 @@ outputs: storage_mgmt_ip_address: description: IP address of the server in the storage_mgmt network value: {get_attr: [StorageMgmtPort, ip_address]} + tenant_ip_address: + description: IP address of the server in the tenant network + value: {get_attr: [TenantPort, ip_address]} + management_ip_address: + description: IP address of the server in the management network + value: {get_attr: [ManagementPort, ip_address]} config_identifier: description: identifier which changes if the node configuration may need re-applying value: |