-rw-r--r--  capabilities_map.yaml | 226
-rw-r--r--  environments/puppet-pacemaker.yaml | 2
-rw-r--r--  extraconfig/tasks/noop.yaml | 10
-rwxr-xr-x  extraconfig/tasks/pacemaker_resource_restart.sh | 63
-rw-r--r--  extraconfig/tasks/post_puppet_pacemaker.yaml | 44
-rw-r--r--  extraconfig/tasks/pre_puppet_pacemaker.yaml | 30
-rw-r--r--  firstboot/userdata_heat_admin.yaml | 2
-rw-r--r--  network/external.yaml | 2
-rw-r--r--  network/internal_api.yaml | 2
-rw-r--r--  network/ports/ctlplane_vip.yaml | 2
-rw-r--r--  network/ports/external.yaml | 2
-rw-r--r--  network/ports/internal_api.yaml | 2
-rw-r--r--  network/ports/noop.yaml | 2
-rw-r--r--  network/ports/storage.yaml | 2
-rw-r--r--  network/ports/storage_mgmt.yaml | 2
-rw-r--r--  network/ports/tenant.yaml | 2
-rw-r--r--  network/ports/vip.yaml | 2
-rw-r--r--  network/storage.yaml | 2
-rw-r--r--  network/storage_mgmt.yaml | 2
-rw-r--r--  network/tenant.yaml | 2
-rw-r--r--  overcloud-resource-registry-puppet.yaml | 4
-rw-r--r--  overcloud-resource-registry.yaml | 81
-rw-r--r--  overcloud.yaml | 56
-rw-r--r--  puppet/all-nodes-config.yaml | 20
-rw-r--r--  puppet/ceph-storage.yaml | 8
-rw-r--r--  puppet/cinder-storage.yaml | 7
-rw-r--r--  puppet/compute.yaml | 103
-rw-r--r--  puppet/controller-post.yaml | 18
-rw-r--r--  puppet/controller.yaml | 163
-rw-r--r--  puppet/manifests/overcloud_compute.pp | 13
-rw-r--r--  puppet/manifests/overcloud_controller.pp | 20
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp | 27
-rw-r--r--  puppet/manifests/ringbuilder.pp | 2
-rw-r--r--  puppet/swift-storage.yaml | 8
34 files changed, 679 insertions, 254 deletions
diff --git a/capabilities_map.yaml b/capabilities_map.yaml
new file mode 100644
index 00000000..30ee211e
--- /dev/null
+++ b/capabilities_map.yaml
@@ -0,0 +1,226 @@
+# This file holds metadata about the capabilities of the tripleo-heat-templates
+# repository for deployment using puppet. It groups configuration by topic,
+# and describes possible combinations of environments and resource capabilities.
+
+# root_template: identifies the repository's root template
+# root_environment: identifies the root environment. This one is special in terms of
+# the order in which the environments are merged before deploying: it serves as
+# a base, and its parameters/resource_registry get overridden by other environments
+# if used.
+
+# topics:
+# High-level grouping of environments by purpose
+# Attributes:
+# title: (required)
+# description: (optional)
+# environment_groups: (required)
+
+# environment_groups:
+# Identifies an environment choice. If a group includes multiple environments,
+# they are mutually exclusive.
+# Attributes:
+# title: (optional)
+# description: (optional)
+# tags: a list of tags providing additional information, e.g. for filtering (optional)
+# environments: (required)
+
+# environments:
+# List of environments in the environment group
+# Attributes:
+# file: a file name, including its path within the repository (required)
+# title: (required)
+# description: (optional)
+# requires: an array of environments which are required by this environment (optional)
+# resource_registry: [tbd] (optional)
+
+# resource_registry:
+# [tbd] Each environment can provide options at the resource_registry level that apply
+# only when that environment is used. (The resource_type of that environment can
+# be implemented using multiple templates.)
+
+root_template: overcloud.yaml
+root_environment: overcloud-resource-registry-puppet.yaml
+topics:
+ - title: Basic Configuration
+ description:
+ environment_groups:
+ - title:
+ description: Enable basic configuration required for OpenStack Deployment
+ environments:
+ - file: overcloud-resource-registry-puppet.yaml
+ title: Default Configuration
+ description:
+
+ - title: Deployment options
+ description:
+ environment_groups:
+ - title: High Availability
+ description: Enables configuration of an Overcloud controller with Pacemaker
+ environments:
+ - file: environments/puppet-pacemaker.yaml
+ title: Pacemaker
+ description: Enable configuration of an Overcloud controller with Pacemaker
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Docker RDO
+ description: >
+ Docker container with heat agents for containerized compute node
+ environments:
+ - file: environments/docker-rdo.yaml
+ title: Docker RDO
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+
+ # - title: Network Interface Configuration
+ # description:
+ # environment_groups:
+
+ - title: Overlay network Configuration
+ description:
+ environment_groups:
+ - title: Network Isolation
+ description: >
+ Enable the creation of Neutron networks for
+ isolated Overcloud traffic and configure each role to assign ports
+ (related to that role) on these networks.
+ environments:
+ - file: environments/network-isolation.yaml
+ title: Network Isolation
+ description: Enable Network Isolation
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Single nic or Bonding
+ description: >
+ Configure roles to use a pair of bonded nics or to use Vlans on a
+ single nic. This option assumes use of Network Isolation.
+ environments:
+ - file: environments/net-bond-with-vlans.yaml
+ title: Bond with Vlans
+ description: >
+ Configure each role to use a pair of bonded nics (nic2 and
+ nic3) and configure an IP address on each relevant isolated network
+ for each role. This option assumes use of Network Isolation.
+ requires:
+ - environments/network-isolation.yaml
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/net-single-nic-with-vlans.yaml
+ title: Single nic with Vlans
+ description: >
+ Configure each role to use Vlans on a single nic for
+ each isolated network. This option assumes use of Network Isolation.
+ requires:
+ - environments/network-isolation.yaml
+ - overcloud-resource-registry-puppet.yaml
+
+ - title: Neutron Plugin Configuration
+ description:
+ environment_groups:
+ - title: BigSwitch extensions or Cisco N1KV backend
+ description:
+ environments:
+ - file: environments/neutron-ml2-bigswitch.yaml
+ title: BigSwitch extensions
+ description: >
+ Enable Big Switch extensions, configured via puppet
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-ml2-cisco-n1kv.yaml
+ title: Cisco N1KV backend
+ description: >
+ Enable a Cisco N1KV backend, configured via puppet
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Cisco Neutron plugin
+ description: >
+ Enable a Cisco Neutron plugin
+ environments:
+ - file: environments/neutron-ml2-cisco-nexus-ucsm.yaml
+ title: Cisco Neutron plugin
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+
+ - title: Storage
+ description:
+ environment_groups:
+ - title: Cinder NetApp backend
+ description: >
+ Enable a Cinder NetApp backend, configured via puppet
+ environments:
+ - file: environments/cinder-netapp-config.yaml
+ title: Cinder NetApp backend
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Externally managed Ceph
+ description: >
+ Enable the use of an externally managed Ceph cluster
+ environments:
+ - file: environments/puppet-ceph-external.yaml
+ title: Externally managed Ceph
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Ceph Devel
+ description: >
+ Enable a Ceph storage cluster using the controller and 2 ceph nodes.
+ RBD backends are enabled for Cinder, Glance, and Nova.
+ environments:
+ - file: environments/puppet-ceph-devel.yaml
+ title: Ceph Devel
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Storage Environment
+ description: >
+ Can be used to set up storage backends. Defaults to Ceph as the
+ backend for Cinder, Glance and Nova ephemeral storage. It configures,
+ for example, which services will use Ceph and whether any of the
+ services will use NFS instead. Usually needs to be edited by the user first.
+ tags:
+ - no-gui
+ environments:
+ - file: environments/storage-environment.yaml
+ title: Storage Environment
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+
+ - title: Utilities
+ description:
+ environment_groups:
+ - title: Config Debug
+ description: Enable config management (e.g. Puppet) debugging
+ environments:
+ - file: environments/config-debug.yaml
+ title: Config Debug
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Disable journal in MongoDb
+ description: >
+ When journaling is enabled, MongoDb creates a large journal file,
+ which can take time. In a CI environment, for example, journaling is
+ not necessary.
+ environments:
+ - file: environments/mongodb-nojournal.yaml
+ title: Disable journal in MongoDb
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Overcloud Steps
+ description: >
+ Specifies hooks/breakpoints where overcloud deployment should stop,
+ allowing operator validation between steps and/or more granular control.
+ Note: the wildcards relate to the naming convention for some resource suffixes,
+ e.g. see puppet/*-post.yaml; enabling this means we wait for
+ a user signal on every *Deployment_StepN resource defined in those files.
+ tags:
+ - no-gui
+ environments:
+ - file: environments/overcloud-steps.yaml
+ title: Overcloud Steps
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
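
As a minimal sketch of the schema described in the header comments above, a hypothetical new entry under topics could look like the following (the file name, titles, and descriptions are made up for illustration and are not part of the repository):

  - title: Example Topic
    description:
    environment_groups:
      - title: Example Feature
        description: Enables a hypothetical optional feature
        tags:
          - no-gui
        environments:
          - file: environments/example-feature.yaml   # hypothetical file
            title: Example Feature
            description:
            requires:
              - overcloud-resource-registry-puppet.yaml
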
diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml
index f235cf8f..8986e35f 100644
--- a/environments/puppet-pacemaker.yaml
+++ b/environments/puppet-pacemaker.yaml
@@ -2,3 +2,5 @@
# Overcloud controller with Pacemaker.
resource_registry:
OS::TripleO::ControllerConfig: ../puppet/controller-config-pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
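
Note: these two task hooks default to extraconfig/tasks/noop.yaml (see the resource registry change further down) and are only remapped to the pre/post Pacemaker tasks when this environment is included. As a hedged sketch, an operator could point the same interfaces at their own template from a custom environment file (my_post_tasks.yaml is hypothetical):

resource_registry:
  OS::TripleO::Tasks::ControllerPostPuppet: /home/stack/my_post_tasks.yaml
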
diff --git a/extraconfig/tasks/noop.yaml b/extraconfig/tasks/noop.yaml
new file mode 100644
index 00000000..0cff7469
--- /dev/null
+++ b/extraconfig/tasks/noop.yaml
@@ -0,0 +1,10 @@
+heat_template_version: 2014-10-16
+description: 'No-op task'
+
+parameters:
+ servers:
+ type: json
+ input_values:
+ type: json
+ default: {}
+ description: input values for the software deployments
diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh
new file mode 100755
index 00000000..ad3c3701
--- /dev/null
+++ b/extraconfig/tasks/pacemaker_resource_restart.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+set -eux
+
+pacemaker_status=$(systemctl is-active pacemaker)
+check_interval=3
+
+function check_resource {
+
+ service=$1
+ state=$2
+ timeout=$3
+ tstart=$(date +%s)
+ tend=$(( $tstart + $timeout ))
+
+ if [ "$state" = "stopped" ]; then
+ match_for_incomplete='Started'
+ else # started
+ match_for_incomplete='Stopped'
+ fi
+
+ while (( $(date +%s) < $tend )); do
+ node_states=$(pcs status --full | grep "$service" | grep -v Clone)
+ if echo "$node_states" | grep -q "$match_for_incomplete"; then
+ echo "$service not yet $state, sleeping $check_interval seconds."
+ sleep $check_interval
+ else
+ echo "$service has $state"
+ return
+ fi
+ done
+
+ echo "$service never $state after $timeout seconds" | tee /dev/fd/2
+ exit 1
+
+}
+
+# Run if pacemaker is running, we're the bootstrap node,
+# and we're updating the deployment (not creating).
+if [ "$pacemaker_status" = "active" -a \
+ "$(hiera bootstrap_nodeid)" = "$(facter hostname)" -a \
+ "$(hiera update_identifier)" != "nil" ]; then
+
+ pcs resource disable httpd
+ check_resource httpd stopped 300
+ pcs resource disable openstack-keystone
+ check_resource openstack-keystone stopped 1200
+
+ if pcs status | grep haproxy-clone; then
+ pcs resource restart haproxy-clone
+ fi
+ pcs resource restart redis-master
+ pcs resource restart mongod-clone
+ pcs resource restart rabbitmq-clone
+ pcs resource restart memcached-clone
+ pcs resource restart galera-master
+
+ pcs resource enable openstack-keystone
+ check_resource openstack-keystone started 300
+ pcs resource enable httpd
+ check_resource httpd started 800
+
+fi
diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.yaml
new file mode 100644
index 00000000..7de41d94
--- /dev/null
+++ b/extraconfig/tasks/post_puppet_pacemaker.yaml
@@ -0,0 +1,44 @@
+heat_template_version: 2014-10-16
+description: 'Post-Puppet Config for Pacemaker deployments'
+
+parameters:
+ servers:
+ type: json
+ input_values:
+ type: json
+ description: input values for the software deployments
+
+resources:
+
+ ControllerPostPuppetMaintenanceModeConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ pacemaker_status=$(systemctl is-active pacemaker)
+
+ if [ "$pacemaker_status" = "active" ]; then
+ pcs property set maintenance-mode=false
+ fi
+
+ ControllerPostPuppetMaintenanceModeDeployment:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: ControllerPostPuppetMaintenanceModeConfig}
+ input_values: {get_param: input_values}
+
+ ControllerPostPuppetRestartConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: pacemaker_resource_restart.sh}
+
+ ControllerPostPuppetRestartDeployment:
+ type: OS::Heat::SoftwareDeployments
+ depends_on: ControllerPostPuppetMaintenanceModeDeployment
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: ControllerPostPuppetRestartConfig}
+ input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/pre_puppet_pacemaker.yaml b/extraconfig/tasks/pre_puppet_pacemaker.yaml
new file mode 100644
index 00000000..2cfe92a7
--- /dev/null
+++ b/extraconfig/tasks/pre_puppet_pacemaker.yaml
@@ -0,0 +1,30 @@
+heat_template_version: 2014-10-16
+description: 'Pre-Puppet Config for Pacemaker deployments'
+
+parameters:
+ servers:
+ type: json
+ input_values:
+ type: json
+ description: input values for the software deployments
+
+resources:
+
+ ControllerPrePuppetMaintenanceModeConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ pacemaker_status=$(systemctl is-active pacemaker)
+
+ if [ "$pacemaker_status" = "active" ]; then
+ pcs property set maintenance-mode=true
+ fi
+
+ ControllerPrePuppetMaintenanceModeDeployment:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: ControllerPrePuppetMaintenanceModeConfig}
+ input_values: {get_param: input_values}
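
Any template plugged into these task interfaces needs to accept the same inputs as the templates above (servers plus input_values). A minimal custom replacement would mirror the noop/maintenance-mode templates; the resource names and script body below are illustrative only:

heat_template_version: 2014-10-16
description: 'Custom post-puppet task (illustrative)'

parameters:
  servers:
    type: json
  input_values:
    type: json
    default: {}
    description: input values for the software deployments

resources:
  CustomTaskConfig:
    type: OS::Heat::SoftwareConfig
    properties:
      group: script
      config: |
        #!/bin/bash
        echo "custom post-puppet step"   # placeholder for operator logic

  CustomTaskDeployment:
    type: OS::Heat::SoftwareDeployments
    properties:
      servers: {get_param: servers}
      config: {get_resource: CustomTaskConfig}
      input_values: {get_param: input_values}
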
diff --git a/firstboot/userdata_heat_admin.yaml b/firstboot/userdata_heat_admin.yaml
index 73481c63..f8891b29 100644
--- a/firstboot/userdata_heat_admin.yaml
+++ b/firstboot/userdata_heat_admin.yaml
@@ -1,7 +1,7 @@
heat_template_version: 2014-10-16
parameters:
- # Can be overriden via parameter_defaults in the environment
+ # Can be overridden via parameter_defaults in the environment
node_admin_username:
type: string
default: heat-admin
diff --git a/network/external.yaml b/network/external.yaml
index e8f92a5e..3b24da7e 100644
--- a/network/external.yaml
+++ b/network/external.yaml
@@ -15,7 +15,7 @@ parameters:
type: json
ExternalNetAdminStateUp:
default: false
- description: This admin state of of the network.
+ description: The admin state of the network.
type: boolean
ExternalNetEnableDHCP:
default: false
diff --git a/network/internal_api.yaml b/network/internal_api.yaml
index 69154bef..6f8aa3a8 100644
--- a/network/internal_api.yaml
+++ b/network/internal_api.yaml
@@ -15,7 +15,7 @@ parameters:
type: json
InternalApiNetAdminStateUp:
default: false
- description: This admin state of of the network.
+ description: The admin state of the network.
type: boolean
InternalApiNetEnableDHCP:
default: false
diff --git a/network/ports/ctlplane_vip.yaml b/network/ports/ctlplane_vip.yaml
index 3e949f41..ab6b18f8 100644
--- a/network/ports/ctlplane_vip.yaml
+++ b/network/ports/ctlplane_vip.yaml
@@ -13,7 +13,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
diff --git a/network/ports/external.yaml b/network/ports/external.yaml
index 1e2fff68..4180a223 100644
--- a/network/ports/external.yaml
+++ b/network/ports/external.yaml
@@ -13,7 +13,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
diff --git a/network/ports/internal_api.yaml b/network/ports/internal_api.yaml
index d528b327..01cdfe9b 100644
--- a/network/ports/internal_api.yaml
+++ b/network/ports/internal_api.yaml
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
diff --git a/network/ports/noop.yaml b/network/ports/noop.yaml
index 31ee6f3c..028624fd 100644
--- a/network/ports/noop.yaml
+++ b/network/ports/noop.yaml
@@ -16,7 +16,7 @@ parameters:
default: ''
type: string
NetworkName:
- description: # Here for compatability with vip.yaml
+ description: # Here for compatibility with vip.yaml
default: ''
type: string
FixedIPs:
diff --git a/network/ports/storage.yaml b/network/ports/storage.yaml
index 88fb537c..1d2384c5 100644
--- a/network/ports/storage.yaml
+++ b/network/ports/storage.yaml
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
diff --git a/network/ports/storage_mgmt.yaml b/network/ports/storage_mgmt.yaml
index c98a21ef..f10e3582 100644
--- a/network/ports/storage_mgmt.yaml
+++ b/network/ports/storage_mgmt.yaml
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
diff --git a/network/ports/tenant.yaml b/network/ports/tenant.yaml
index 94408ca2..ccdc57ee 100644
--- a/network/ports/tenant.yaml
+++ b/network/ports/tenant.yaml
@@ -12,7 +12,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
diff --git a/network/ports/vip.yaml b/network/ports/vip.yaml
index 56efc178..ab6cd2c0 100644
--- a/network/ports/vip.yaml
+++ b/network/ports/vip.yaml
@@ -13,7 +13,7 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
default: ''
type: string
diff --git a/network/storage.yaml b/network/storage.yaml
index 60b779e0..dc9f35ea 100644
--- a/network/storage.yaml
+++ b/network/storage.yaml
@@ -15,7 +15,7 @@ parameters:
type: json
StorageNetAdminStateUp:
default: false
- description: This admin state of of the network.
+ description: The admin state of the network.
type: boolean
StorageNetEnableDHCP:
default: false
diff --git a/network/storage_mgmt.yaml b/network/storage_mgmt.yaml
index 043bc87b..59933c8c 100644
--- a/network/storage_mgmt.yaml
+++ b/network/storage_mgmt.yaml
@@ -15,7 +15,7 @@ parameters:
type: json
StorageMgmtNetAdminStateUp:
default: false
- description: This admin state of of the network.
+ description: The admin state of the network.
type: boolean
StorageMgmtNetEnableDHCP:
default: false
diff --git a/network/tenant.yaml b/network/tenant.yaml
index daf5cb75..6fe96121 100644
--- a/network/tenant.yaml
+++ b/network/tenant.yaml
@@ -15,7 +15,7 @@ parameters:
type: json
TenantNetAdminStateUp:
default: false
- description: This admin state of of the network.
+ description: The admin state of the network.
type: boolean
TenantNetEnableDHCP:
default: false
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index c072c292..77368d0a 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.yaml
@@ -21,7 +21,11 @@ resource_registry:
OS::TripleO::CephClusterConfig::SoftwareConfig: puppet/ceph-cluster-config.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
OS::TripleO::BootstrapNode::SoftwareConfig: puppet/bootstrap-config.yaml
+
+ # Tasks (for internal TripleO usage)
OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
+ OS::TripleO::Tasks::ControllerPrePuppet: extraconfig/tasks/noop.yaml
+ OS::TripleO::Tasks::ControllerPostPuppet: extraconfig/tasks/noop.yaml
# This creates the "heat-admin" user for all OS images by default
# To disable, replace with firstboot/userdata_default.yaml
diff --git a/overcloud-resource-registry.yaml b/overcloud-resource-registry.yaml
deleted file mode 100644
index 11a33599..00000000
--- a/overcloud-resource-registry.yaml
+++ /dev/null
@@ -1,81 +0,0 @@
-resource_registry:
- OS::TripleO::BlockStorage: os-apply-config/cinder-storage.yaml
- OS::TripleO::BlockStorage::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::Compute: os-apply-config/compute.yaml
- OS::TripleO::Compute::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment
- OS::TripleO::Controller: os-apply-config/controller.yaml
- OS::TripleO::Controller::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::ObjectStorage: os-apply-config/swift-storage.yaml
- OS::TripleO::ObjectStorage::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::CephStorage: os-apply-config/ceph-storage.yaml
- OS::TripleO::CephStorage::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::ControllerPostDeployment: os-apply-config/controller-post.yaml
- OS::TripleO::ComputePostDeployment: os-apply-config/compute-post.yaml
- OS::TripleO::ObjectStoragePostDeployment: os-apply-config/swift-storage-post.yaml
- OS::TripleO::BlockStoragePostDeployment: os-apply-config/cinder-storage-post.yaml
- OS::TripleO::CephStoragePostDeployment: os-apply-config/ceph-storage-post.yaml
- OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig: os-apply-config/swift-devices-and-proxy-config.yaml
- OS::TripleO::CephClusterConfig::SoftwareConfig: os-apply-config/ceph-cluster-config.yaml
- OS::TripleO::AllNodes::SoftwareConfig: os-apply-config/all-nodes-config.yaml
- OS::TripleO::BootstrapNode::SoftwareConfig: bootstrap-config.yaml
- OS::TripleO::NodeUserData: firstboot/userdata_default.yaml
- OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
- OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
-
- # "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
- # phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
- # configuration with knowledge of all nodes in the cluster is required vs single
- # node configuration in the pre_deploy step.
- OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/default.yaml
-
- # TripleO overcloud networks
- OS::TripleO::Network: network/networks.yaml
- OS::TripleO::VipConfig: os-apply-config/vip-config.yaml
-
- OS::TripleO::Network::External: network/noop.yaml
- OS::TripleO::Network::InternalApi: network/noop.yaml
- OS::TripleO::Network::StorageMgmt: network/noop.yaml
- OS::TripleO::Network::Storage: network/noop.yaml
- OS::TripleO::Network::Tenant: network/noop.yaml
-
- OS::TripleO::Network::Ports::NetVipMap: network/ports/net_ip_map.yaml
- OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml
- OS::TripleO::Network::Ports::NetIpSubnetMap: network/ports/net_ip_subnet_map.yaml
- OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml
-
- # Port assignments for the controller role
- OS::TripleO::Controller::Ports::ExternalPort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::StorageMgmtPort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::TenantPort: network/ports/noop.yaml
-
- # Port assignments for the compute role
- OS::TripleO::Compute::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::Compute::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::Compute::Ports::TenantPort: network/ports/noop.yaml
-
- # Port assignments for the ceph storage role
- OS::TripleO::CephStorage::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
-
- # Port assignments for the swift storage role
- OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
-
- # Port assignments for the block storage role
- OS::TripleO::BlockStorage::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::BlockStorage::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::BlockStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
-
- # Port assignments for service virtual IPs for the controller role
- OS::TripleO::Controller::Ports::RedisVipPort: network/ports/noop.yaml
-
- # Service Endpoint Mappings
- OS::TripleO::Endpoint: network/endpoints/endpoint.yaml
- OS::TripleO::EndpointMap: network/endpoints/endpoint_map.yaml
-
- # validation resources
- OS::TripleO::AllNodes::Validation: os-apply-config/all-nodes-validation.yaml
diff --git a/overcloud.yaml b/overcloud.yaml
index faec16dd..1a571120 100644
--- a/overcloud.yaml
+++ b/overcloud.yaml
@@ -93,7 +93,7 @@ parameters:
type: string
KeyName:
default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+ description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
constraints:
- custom_constraint: nova.keypair
@@ -109,7 +109,7 @@ parameters:
to create provider networks (and we use this for the default floating
network) - if changing this either use different post-install network
scripts or be sure to keep 'datacentre' as a mapping network name.
- type: string
+ type: comma_delimited_list
default: "datacentre:br-ex"
NeutronControlPlaneID:
default: 'ctlplane'
@@ -128,15 +128,15 @@ parameters:
Enable/disable the L2 population feature in the Neutron agents.
default: "False"
NeutronFlatNetworks:
- type: string
+ type: comma_delimited_list
default: 'datacentre'
description: >
If set, flat networks to configure in neutron plugins. Defaults to
'datacentre' to permit external network creation.
NeutronNetworkType:
default: 'vxlan'
- description: The tenant network type for Neutron, either gre or vxlan.
- type: string
+ description: The tenant network type for Neutron.
+ type: comma_delimited_list
NeutronPassword:
default: unset
description: The password for the neutron service account, used by neutron agents.
@@ -176,9 +176,8 @@ parameters:
NeutronTunnelTypes:
default: 'vxlan'
description: |
- The tunnel types for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'gre,vxlan'
- type: string
+ The tunnel types for the Neutron tenant network.
+ type: comma_delimited_list
NeutronTunnelIdRanges:
description: |
Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
@@ -211,9 +210,8 @@ parameters:
NeutronMechanismDrivers:
default: 'openvswitch'
description: |
- The mechanism drivers for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'openvswitch,l2_population'
- type: string
+ The mechanism drivers for the Neutron tenant network.
+ type: comma_delimited_list
NeutronAllowL3AgentFailover:
default: 'False'
description: Allow automatic l3-agent failover
@@ -368,6 +366,10 @@ parameters:
default: true
description: Whether to enable Swift Storage on the Controller
type: boolean
+ ControllerSchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
ExtraConfig:
default: {}
description: |
@@ -585,6 +587,10 @@ parameters:
default: ''
description: Libvirt VIF driver configuration for the network
type: string
+ NovaComputeSchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
NovaEnableRbdBackend:
default: false
description: Whether to enable or not the Rbd backend for Nova
@@ -659,6 +665,11 @@ parameters:
BlockStorage specific configuration to inject into the cluster. Same
structure as ExtraConfig.
type: json
+ BlockStorageSchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
+
# Object storage specific parameters
ObjectStorageCount:
@@ -678,7 +689,10 @@ parameters:
ObjectStorage specific configuration to inject into the cluster. Same
structure as ExtraConfig.
type: json
-
+ ObjectStorageSchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
# Ceph storage specific parameters
CephStorageCount:
@@ -699,6 +713,11 @@ parameters:
CephStorage specific configuration to inject into the cluster. Same
structure as ExtraConfig.
type: json
+ CephStorageSchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
+
# Hostname format for each role
# Note %index% is translated into the index of the node, e.g 0/1/2 etc
@@ -936,6 +955,7 @@ resources:
'%stackname%': {get_param: 'OS::stack_name'}
NodeIndex: '%index%'
ServerMetadata: {get_param: ServerMetadata}
+ SchedulerHints: {get_param: ControllerSchedulerHints}
Compute:
type: OS::Heat::ResourceGroup
@@ -1011,6 +1031,7 @@ resources:
'%stackname%': {get_param: 'OS::stack_name'}
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
+ SchedulerHints: {get_param: NovaComputeSchedulerHints}
BlockStorage:
type: OS::Heat::ResourceGroup
@@ -1050,6 +1071,7 @@ resources:
BlockStorageExtraConfig: {get_param: BlockStorageExtraConfig}
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
+ SchedulerHints: {get_param: BlockStorageSchedulerHints}
ObjectStorage:
type: OS::Heat::ResourceGroup
@@ -1080,6 +1102,7 @@ resources:
ObjectStorageExtraConfig: {get_param: ObjectStorageExtraConfig}
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
+ SchedulerHints: {get_param: ObjectStorageSchedulerHints}
CephStorage:
type: OS::Heat::ResourceGroup
@@ -1105,6 +1128,7 @@ resources:
CephStorageExtraConfig: {get_param: CephStorageExtraConfig}
CloudDomain: {get_param: CloudDomain}
ServerMetadata: {get_param: ServerMetadata}
+ SchedulerHints: {get_param: CephStorageSchedulerHints}
ControllerIpListMap:
type: OS::TripleO::Network::Ports::NetIpListMap
@@ -1143,6 +1167,8 @@ resources:
neutron_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
keystone_public_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
keystone_admin_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
+ DeployIdentifier: {get_param: DeployIdentifier}
+ UpdateIdentifier: {get_param: UpdateIdentifier}
MysqlRootPassword:
type: OS::Heat::RandomString
@@ -1512,3 +1538,9 @@ outputs:
SwiftInternalVip:
description: VIP for Swift Proxy internal endpoint
value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
+ HostsEntry:
+ description: |
+ The content that should be appended to your /etc/hosts if you want to get
+ hostname-based access to the deployed nodes (useful for testing without
+ setting up a DNS).
+ value: {get_attr: [allNodesConfig, hosts_entries]}
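
The new *SchedulerHints parameters added above are opaque json maps; overcloud.yaml simply forwards them to the role templates, which pass them to each role's server resource as scheduler_hints (see the role template changes below). A minimal, hypothetical environment snippet showing only the shape of the data (the hint key and value are placeholders, not a recommendation):

parameter_defaults:
  ControllerSchedulerHints:
    example_hint: example_value    # placeholder scheduler hint
  NovaComputeSchedulerHints:
    example_hint: example_value
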
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 2bc519bb..895ddc3d 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -51,6 +51,17 @@ parameters:
keystone_admin_api_node_ips:
type: comma_delimited_list
+ DeployIdentifier:
+ type: string
+ description: >
+ Setting this to a unique value will re-run any deployment tasks which
+ perform configuration on a Heat stack-update.
+ UpdateIdentifier:
+ type: string
+ description: >
+ Setting to a previously unused value during stack-update will trigger
+ package update on all nodes
+
resources:
allNodesConfigImpl:
@@ -240,8 +251,17 @@ resources:
nova::rabbit_hosts: *rabbit_nodes_array
keystone::rabbit_hosts: *rabbit_nodes_array
+ deploy_identifier: {get_param: DeployIdentifier}
+ update_identifier: {get_param: UpdateIdentifier}
+
outputs:
config_id:
description: The ID of the allNodesConfigImpl resource.
value:
{get_resource: allNodesConfigImpl}
+ hosts_entries:
+ description: |
+ The content that should be appended to your /etc/hosts if you want to get
+ hostname-based access to the deployed nodes (useful for testing without
+ setting up a DNS).
+ value: {get_attr: [allNodesConfigImpl, config, hosts]}
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index b34d2c02..b6a1007a 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -16,7 +16,7 @@ parameters:
description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
type: string
KeyName:
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+ description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
default: default
constraints:
@@ -71,7 +71,10 @@ parameters:
Extra properties or metadata passed to Nova for the created nodes in
the overcloud. It's accessible via the Nova metadata API.
type: json
-
+ SchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
resources:
CephStorage:
@@ -87,6 +90,7 @@ resources:
user_data: {get_resource: UserData}
name: {get_param: Hostname}
metadata: {get_param: ServerMetadata}
+ scheduler_hints: {get_param: SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index 82c0e814..fc197059 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -46,7 +46,7 @@ parameters:
- custom_constraint: nova.flavor
KeyName:
default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+ description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
RabbitPassword:
default: 'guest'
@@ -124,6 +124,10 @@ parameters:
Extra properties or metadata passed to Nova for the created nodes in
the overcloud. It's accessible via the Nova metadata API.
type: json
+ SchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
resources:
@@ -140,6 +144,7 @@ resources:
user_data: {get_resource: UserData}
name: {get_param: Hostname}
metadata: {get_param: ServerMetadata}
+ scheduler_hints: {get_param: SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 42c6e276..43ef5820 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
OpenStack hypervisor node configured via Puppet.
@@ -61,7 +61,7 @@ parameters:
description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
type: string
KeyName:
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+ description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
default: default
constraints:
@@ -80,7 +80,7 @@ parameters:
to create provider networks (and we use this for the default floating
network) - if changing this either use different post-install network
scripts or be sure to keep 'datacentre' as a mapping network name.
- type: string
+ type: comma_delimited_list
default: "datacentre:br-ex"
NeutronEnableTunnelling:
type: string
@@ -91,7 +91,7 @@ parameters:
Enable/disable the L2 population feature in the Neutron agents.
default: "False"
NeutronFlatNetworks:
- type: string
+ type: comma_delimited_list
default: 'datacentre'
description: >
If set, flat networks to configure in neutron plugins.
@@ -99,8 +99,8 @@ parameters:
type: string
default: '' # Has to be here because of the ignored empty value bug
NeutronNetworkType:
- type: string
- description: The tenant network type for Neutron, either gre or vxlan.
+ type: comma_delimited_list
+ description: The tenant network type for Neutron.
default: 'vxlan'
NeutronNetworkVLANRanges:
default: 'datacentre'
@@ -123,10 +123,9 @@ parameters:
description: A port to add to the NeutronPhysicalBridge.
type: string
NeutronTunnelTypes:
- type: string
+ type: comma_delimited_list
description: |
- The tunnel types for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'gre,vxlan'
+ The tunnel types for the Neutron tenant network.
default: 'vxlan'
NeutronTunnelIdRanges:
description: |
@@ -171,9 +170,8 @@ parameters:
NeutronMechanismDrivers:
default: 'openvswitch'
description: |
- The mechanism drivers for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'openvswitch,l2_population'
- type: string
+ The mechanism drivers for the Neutron tenant network.
+ type: comma_delimited_list
# Not relevant for Computes, should be removed
NeutronAllowL3AgentFailover:
default: 'True'
@@ -296,13 +294,16 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
-
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
the overcloud. It's accessible via the Nova metadata API.
type: json
+ SchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
resources:
@@ -321,6 +322,7 @@ resources:
user_data: {get_resource: UserData}
name: {get_param: Hostname}
metadata: {get_param: ServerMetadata}
+ scheduler_hints: {get_param: SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -448,16 +450,16 @@ resources:
neutron::rabbit_user: {get_input: rabbit_username}
neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
neutron::rabbit_port: {get_input: rabbit_client_port}
- neutron_flat_networks: {get_input: neutron_flat_networks}
+ neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks}
neutron_host: {get_input: neutron_host}
neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
- neutron_tenant_network_type: {get_input: neutron_tenant_network_type}
- neutron_tunnel_types: {get_input: neutron_tunnel_types}
+ neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types}
+ neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
- neutron_bridge_mappings: {get_input: neutron_bridge_mappings}
+ neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings}
neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling}
neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
neutron_physical_bridge: {get_input: neutron_physical_bridge}
@@ -471,7 +473,7 @@ resources:
neutron::core_plugin: {get_input: neutron_core_plugin}
neutron::service_plugins: {get_input: neutron_service_plugins}
neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers}
- neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers}
+ neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers}
neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device}
keystone_public_api_virtual_ip: {get_input: keystone_vip}
admin_password: {get_input: admin_password}
@@ -506,36 +508,43 @@ resources:
snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
- neutron_flat_networks: {get_param: NeutronFlatNetworks}
+ neutron_flat_networks:
+ str_replace:
+ template: NETWORKS
+ params:
+ NETWORKS: {get_param: NeutronFlatNetworks}
neutron_host: {get_param: NeutronHost}
neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]}
- neutron_tenant_network_type: {get_param: NeutronNetworkType}
- neutron_tunnel_types: {get_param: NeutronTunnelTypes}
neutron_tunnel_id_ranges:
str_replace:
- template: "['RANGES']"
+ template: RANGES
params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronTunnelIdRanges}
+ RANGES: {get_param: NeutronTunnelIdRanges}
neutron_vni_ranges:
str_replace:
- template: "['RANGES']"
+ template: RANGES
params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronVniRanges}
+ RANGES: {get_param: NeutronVniRanges}
+ neutron_tenant_network_types:
+ str_replace:
+ template: TYPES
+ params:
+ TYPES: {get_param: NeutronNetworkType}
+ neutron_tunnel_types:
+ str_replace:
+ template: TYPES
+ params:
+ TYPES: {get_param: NeutronTunnelTypes}
neutron_network_vlan_ranges:
str_replace:
- template: "['RANGES']"
+ template: RANGES
+ params:
+ RANGES: {get_param: NeutronNetworkVLANRanges}
+ neutron_bridge_mappings:
+ str_replace:
+ template: MAPPINGS
params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronNetworkVLANRanges}
- neutron_bridge_mappings: {get_param: NeutronBridgeMappings}
+ MAPPINGS: {get_param: NeutronBridgeMappings}
neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
neutron_physical_bridge: {get_param: NeutronPhysicalBridge}
@@ -547,21 +556,19 @@ resources:
neutron_core_plugin: {get_param: NeutronCorePlugin}
neutron_service_plugins:
str_replace:
- template: "['PLUGINS']"
+ template: PLUGINS
params:
- PLUGINS:
- list_join:
- - "','"
- - {get_param: NeutronServicePlugins}
+ PLUGINS: {get_param: NeutronServicePlugins}
neutron_type_drivers:
str_replace:
- template: "['DRIVERS']"
+ template: DRIVERS
+ params:
+ DRIVERS: {get_param: NeutronTypeDrivers}
+ neutron_mechanism_drivers:
+ str_replace:
+ template: MECHANISMS
params:
- DRIVERS:
- list_join:
- - "','"
- - {get_param: NeutronTypeDrivers}
- neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers}
+ MECHANISMS: {get_param: NeutronMechanismDrivers}
neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]}
neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri]}
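
The repeated str_replace rewrites in this file go hand in hand with the heat_template_version bump to 2015-10-15 at the top of the diff: with the newer template version a comma_delimited_list value can be fed directly into str_replace params, so the old list_join quoting boilerplate is dropped and the hieradata keys move to the fully qualified puppet class parameters. A stripped-down sketch of the new pattern, with illustrative parameter and key names (exactly how the list is serialized into the resulting string is left as an assumption here, not something this diff spells out):

heat_template_version: 2015-10-15
parameters:
  ExampleNetworks:                  # illustrative name, not a real template parameter
    type: comma_delimited_list
    default: 'datacentre,storage'
resources:
  ExampleConfig:
    type: OS::Heat::StructuredConfig
    properties:
      config:
        example::networks:          # illustrative hieradata-style key
          str_replace:
            template: NETWORKS
            params:
              NETWORKS: {get_param: ExampleNetworks}
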
diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml
index 941e1ac5..ed8129e7 100644
--- a/puppet/controller-post.yaml
+++ b/puppet/controller-post.yaml
@@ -17,6 +17,13 @@ parameters:
resources:
+ ControllerPrePuppet:
+ type: OS::TripleO::Tasks::ControllerPrePuppet
+ properties:
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
+
ControllerPuppetConfig:
type: OS::TripleO::ControllerConfig
@@ -26,6 +33,7 @@ resources:
# e.g all Deployment resources should have a *Deployment_StepN suffix
ControllerLoadBalancerDeployment_Step1:
type: OS::Heat::StructuredDeployments
+ depends_on: ControllerPrePuppet
properties:
servers: {get_param: servers}
config: {get_resource: ControllerPuppetConfig}
@@ -98,10 +106,18 @@ resources:
step: 5
update_identifier: {get_param: NodeConfigIdentifiers}
+ ControllerPostPuppet:
+ type: OS::TripleO::Tasks::ControllerPostPuppet
+ depends_on: ControllerOvercloudServicesDeployment_Step6
+ properties:
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
+
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
ExtraConfig:
- depends_on: ControllerOvercloudServicesDeployment_Step5
+ depends_on: ControllerPostPuppet
type: OS::TripleO::NodeExtraConfigPost
properties:
servers: {get_param: servers}
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index 97b5456b..7089f60b 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
OpenStack controller node configured by Puppet.
@@ -39,6 +39,10 @@ parameters:
CinderApiVirtualIP:
type: string
default: ''
+ CeilometerWorkers:
+ default: 0
+ description: Number of workers for Ceilometer service.
+ type: number
CinderEnableNfsBackend:
default: false
description: Whether to enable or not the NFS backend for Cinder
@@ -81,6 +85,10 @@ parameters:
description: Contains parameters to configure Cinder backends. Typically
set via parameter_defaults in the resource registry.
type: json
+ CinderWorkers:
+ default: 0
+ description: Number of workers for Cinder service.
+ type: number
CloudName:
default: ''
description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
@@ -209,6 +217,10 @@ parameters:
default: /dev/log
description: Syslog address where HAproxy will send its log
type: string
+ GlanceWorkers:
+ default: 0
+ description: Number of workers for Glance service.
+ type: number
HeatPassword:
default: unset
description: The password for the Heat service and db account, used by the Heat services.
@@ -227,6 +239,10 @@ parameters:
default: '*'
description: A list of IP/Hostname allowed to connect to horizon
type: comma_delimited_list
+ HeatWorkers:
+ default: 0
+ description: Number of workers for Heat service.
+ type: number
HorizonSecret:
description: Secret key for Django
type: string
@@ -246,7 +262,7 @@ parameters:
type: string
KeyName:
default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+ description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
constraints:
- custom_constraint: nova.keypair
@@ -294,6 +310,10 @@ parameters:
default: false
description: Whether IPtables rules should be purged before setting up the new ones.
type: boolean
+ KeystoneWorkers:
+ default: 0
+ description: Number of workers for Keystone service.
+ type: number
MysqlClusterUniquePart:
description: A unique identifier of the MySQL cluster the controller is in.
type: string
@@ -328,7 +348,7 @@ parameters:
to create provider networks (and we use this for the default floating
network) - if changing this either use different post-install network
scripts or be sure to keep 'datacentre' as a mapping network name.
- type: string
+ type: comma_delimited_list
default: "datacentre:br-ex"
NeutronDnsmasqOptions:
default: 'dhcp-option-force=26,1400'
@@ -391,9 +411,8 @@ parameters:
NeutronMechanismDrivers:
default: 'openvswitch'
description: |
- The mechanism drivers for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'openvswitch,l2_population'
- type: string
+ The mechanism drivers for the Neutron tenant network.
+ type: comma_delimited_list
NeutronAllowL3AgentFailover:
default: 'True'
description: Allow automatic l3-agent failover
@@ -411,7 +430,7 @@ parameters:
Enable/disable the L2 population feature in the Neutron agents.
default: "False"
NeutronFlatNetworks:
- type: string
+ type: comma_delimited_list
default: 'datacentre'
description: If set, flat networks to configure in neutron plugins.
NeutronL3HA:
@@ -420,8 +439,8 @@ parameters:
type: string
NeutronNetworkType:
default: 'vxlan'
- description: The tenant network type for Neutron, either gre or vxlan.
- type: string
+ description: The tenant network type for Neutron.
+ type: comma_delimited_list
NeutronNetworkVLANRanges:
default: 'datacentre'
description: >
@@ -463,9 +482,8 @@ parameters:
NeutronTunnelTypes:
default: 'vxlan'
description: |
- The tunnel types for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'gre,vxlan'
- type: string
+ The tunnel types for the Neutron tenant network.
+ type: comma_delimited_list
NeutronTunnelIdRanges:
description: |
Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
@@ -481,11 +499,19 @@ parameters:
NovaApiVirtualIP:
type: string
default: ''
+ NeutronWorkers:
+ default: 0
+ description: Number of workers for Neutron service.
+ type: number
NovaPassword:
default: unset
description: The password for the nova service and db account, used by nova-api.
type: string
hidden: true
+ NovaWorkers:
+ default: 0
+ description: Number of workers for Nova service.
+ type: number
MongoDbNoJournal:
default: false
description: Should MongoDb journaling be disabled
@@ -577,6 +603,10 @@ parameters:
type: number
default: 3
description: How many replicas to use in the swift rings.
+ SwiftWorkers:
+ default: 0
+ description: Number of workers for Swift service.
+ type: number
VirtualIP: # DEPRECATED: use per service settings instead
type: string
default: '' # Has to be here because of the ignored empty value bug
@@ -644,6 +674,10 @@ parameters:
Extra properties or metadata passed to Nova for the created nodes in
the overcloud. It's accessible via the Nova metadata API.
type: json
+ SchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
resources:
@@ -660,6 +694,7 @@ resources:
user_data: {get_resource: UserData}
name: {get_param: Hostname}
metadata: {get_param: ServerMetadata}
+ scheduler_hints: {get_param: SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -769,6 +804,14 @@ resources:
server: {get_resource: Controller}
input_values:
bootstack_nodeid: {get_attr: [Controller, name]}
+ ceilometer_workers: {get_param: CeilometerWorkers}
+ cinder_workers: {get_param: CinderWorkers}
+ glance_workers: {get_param: GlanceWorkers}
+ heat_workers: {get_param: HeatWorkers}
+ keystone_workers: {get_param: KeystoneWorkers}
+ nova_workers: {get_param: NovaWorkers}
+ neutron_workers: {get_param: NeutronWorkers}
+ swift_workers: {get_param: SwiftWorkers}
neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
neutron_enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
@@ -804,12 +847,9 @@ resources:
cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
cinder_nfs_servers:
str_replace:
- template: "['SERVERS']"
+ template: SERVERS
params:
- SERVERS:
- list_join:
- - "','"
- - {get_param: CinderNfsServers}
+ SERVERS: {get_param: CinderNfsServers}
cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
cinder_password: {get_param: CinderPassword}
cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
@@ -885,67 +925,72 @@ resources:
template: tripleo-CLUSTER
params:
CLUSTER: {get_param: MysqlClusterUniquePart}
- neutron_flat_networks: {get_param: NeutronFlatNetworks}
+ neutron_flat_networks:
+ str_replace:
+ template: NETWORKS
+ params:
+ NETWORKS: {get_param: NeutronFlatNetworks}
neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
neutron_agent_mode: {get_param: NeutronAgentMode}
neutron_router_distributed: {get_param: NeutronDVR}
neutron_core_plugin: {get_param: NeutronCorePlugin}
neutron_service_plugins:
str_replace:
- template: "['PLUGINS']"
+ template: PLUGINS
params:
- PLUGINS:
- list_join:
- - "','"
- - {get_param: NeutronServicePlugins}
+ PLUGINS: {get_param: NeutronServicePlugins}
neutron_type_drivers:
str_replace:
- template: "['DRIVERS']"
+ template: DRIVERS
params:
- DRIVERS:
- list_join:
- - "','"
- - {get_param: NeutronTypeDrivers}
+ DRIVERS: {get_param: NeutronTypeDrivers}
neutron_enable_dhcp_agent: {get_param: NeutronEnableDHCPAgent}
neutron_enable_l3_agent: {get_param: NeutronEnableL3Agent}
neutron_enable_metadata_agent: {get_param: NeutronEnableMetadataAgent}
neutron_enable_ovs_agent: {get_param: NeutronEnableOVSAgent}
- neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers}
+ neutron_mechanism_drivers:
+ str_replace:
+ template: MECHANISMS
+ params:
+ MECHANISMS: {get_param: NeutronMechanismDrivers}
neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
neutron_l3_ha: {get_param: NeutronL3HA}
neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
neutron_network_vlan_ranges:
str_replace:
- template: "['RANGES']"
+ template: RANGES
params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronNetworkVLANRanges}
- neutron_bridge_mappings: {get_param: NeutronBridgeMappings}
+ RANGES: {get_param: NeutronNetworkVLANRanges}
+ neutron_bridge_mappings:
+ str_replace:
+ template: MAPPINGS
+ params:
+ MAPPINGS: {get_param: NeutronBridgeMappings}
neutron_external_network_bridge: {get_param: NeutronExternalNetworkBridge}
neutron_public_interface: {get_param: NeutronPublicInterface}
neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
neutron_public_interface_default_route: {get_param: NeutronPublicInterfaceDefaultRoute}
neutron_public_interface_tag: {get_param: NeutronPublicInterfaceTag}
- neutron_tenant_network_type: {get_param: NeutronNetworkType}
- neutron_tunnel_types: {get_param: NeutronTunnelTypes}
neutron_tunnel_id_ranges:
str_replace:
- template: "['RANGES']"
+ template: RANGES
params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronTunnelIdRanges}
+ RANGES: {get_param: NeutronTunnelIdRanges}
neutron_vni_ranges:
str_replace:
- template: "['RANGES']"
+ template: RANGES
params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronVniRanges}
+ RANGES: {get_param: NeutronVniRanges}
+ neutron_tenant_network_types:
+ str_replace:
+ template: TYPES
+ params:
+ TYPES: {get_param: NeutronNetworkType}
+ neutron_tunnel_types:
+ str_replace:
+ template: TYPES
+ params:
+ TYPES: {get_param: NeutronTunnelTypes}
neutron_password: {get_param: NeutronPassword}
neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions}
neutron_dsn:
@@ -1109,6 +1154,7 @@ resources:
swift::storage::all::storage_local_net_ip: {get_input: swift_management_network}
swift::swift_hash_suffix: {get_input: swift_hash_suffix}
swift::proxy::authtoken::admin_password: {get_input: swift_password}
+ swift::proxy::workers: {get_input: swift_workers}
tripleo::ringbuilder::part_power: {get_input: swift_part_power}
tripleo::ringbuilder::replicas: {get_input: swift_replicas}
tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours}
@@ -1149,6 +1195,7 @@ resources:
glance::api::registry_host: {get_input: glance_registry_host}
glance::api::keystone_password: {get_input: glance_password}
glance::api::debug: {get_input: debug}
+ glance::api::workers: {get_input: glance_workers}
glance_notifier_strategy: {get_input: glance_notifier_strategy}
glance_log_file: {get_input: glance_log_file}
glance_log_file: {get_input: glance_log_file}
@@ -1160,6 +1207,7 @@ resources:
glance::registry::identity_uri: {get_input: keystone_identity_uri}
glance::registry::debug: {get_input: debug}
glance::backend::swift::swift_store_auth_address: {get_input: keystone_auth_uri}
+ glance::registry::workers: {get_input: glance_workers}
glance::backend::swift::swift_store_user: service:glance
glance::backend::swift::swift_store_key: {get_input: glance_password}
glance_backend: {get_input: glance_backend}
@@ -1184,8 +1232,11 @@ resources:
heat::identity_uri: {get_input: keystone_identity_uri}
heat::keystone_password: {get_input: heat_password}
heat::api::bind_host: {get_input: heat_api_network}
+ heat::api::workers: {get_input: heat_workers}
heat::api_cloudwatch::bind_host: {get_input: heat_api_network}
+ heat::api_cloudwatch::workers: {get_input: heat_workers}
heat::api_cfn::bind_host: {get_input: heat_api_network}
+ heat::api_cfn::workers: {get_input: heat_workers}
heat::database_connection: {get_input: heat_dsn}
heat::debug: {get_input: debug}
heat::db::mysql::password: {get_input: heat_password}
@@ -1214,6 +1265,9 @@ resources:
keystone::endpoint::internal_url: {get_input: keystone_internal_url}
keystone::endpoint::admin_url: {get_input: keystone_identity_uri}
keystone::endpoint::region: {get_input: keystone_region}
+ keystone::admin_workers: {get_input: keystone_workers}
+ keystone::public_workers: {get_input: keystone_workers}
+
# MongoDB
mongodb::server::bind_ip: {get_input: mongo_db_network}
mongodb::server::nojournal: {get_input: mongodb_no_journal}
@@ -1239,14 +1293,16 @@ resources:
neutron::server::auth_uri: {get_input: keystone_auth_uri}
neutron::server::identity_uri: {get_input: keystone_identity_uri}
neutron::server::database_connection: {get_input: neutron_dsn}
+ neutron::server::api_workers: {get_input: neutron_workers}
neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge}
neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling}
neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata}
neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
- neutron_flat_networks: {get_input: neutron_flat_networks}
+ neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks}
neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network}
+ neutron::agents::metadata::metadata_workers: {get_input: neutron_workers}
neutron_agent_mode: {get_input: neutron_agent_mode}
neutron_router_distributed: {get_input: neutron_router_distributed}
neutron::core_plugin: {get_input: neutron_core_plugin}
@@ -1256,20 +1312,20 @@ resources:
neutron::enable_metadata_agent: {get_input: neutron_enable_metadata_agent}
neutron::enable_ovs_agent: {get_input: neutron_enable_ovs_agent}
neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers}
- neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers}
+ neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers}
neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover}
neutron::server::l3_ha: {get_input: neutron_l3_ha}
neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network}
neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
- neutron_bridge_mappings: {get_input: neutron_bridge_mappings}
+ neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings}
neutron_public_interface: {get_input: neutron_public_interface}
neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device}
neutron_public_interface_default_route: {get_input: neutron_public_interface_default_route}
neutron_public_interface_tag: {get_input: neutron_public_interface_tag}
- neutron_tenant_network_type: {get_input: neutron_tenant_network_type}
- neutron_tunnel_types: {get_input: neutron_tunnel_types}
+ neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types}
+ neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
neutron::server::auth_password: {get_input: neutron_password}
neutron::agents::metadata::auth_password: {get_input: neutron_password}
neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options}
@@ -1317,6 +1373,9 @@ resources:
nova::api::api_bind_address: {get_input: nova_api_network}
nova::api::metadata_listen: {get_input: nova_metadata_network}
nova::api::admin_password: {get_input: nova_password}
+ nova::api::osapi_compute_workers: {get_input: nova_workers}
+ nova::api::ec2_workers: {get_input: nova_workers}
+ nova::api::metadata_workers: {get_input: nova_workers}
nova::database_connection: {get_input: nova_dsn}
nova::glance_api_servers: {get_input: glance_api_servers}
nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
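Note: the controller.yaml hunks above do two things at once: they switch the composite neutron_* hiera keys over to the puppet module parameter names, and they thread per-service worker counts (swift_workers, glance_workers, heat_workers, keystone_workers, neutron_workers, nova_workers) into the hieradata. As a rough sketch, with illustrative values only, the generated controller hieradata would now carry entries such as:

  swift::proxy::workers: 4
  glance::api::workers: 4
  glance::registry::workers: 4
  heat::api::workers: 4
  keystone::admin_workers: 4
  keystone::public_workers: 4
  neutron::server::api_workers: 4
  nova::api::osapi_compute_workers: 4

The key names are taken from the mappings above; the value 4 is an assumption for illustration, not a default defined by these templates.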
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index f3a02eba..e0566ac1 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -85,17 +85,10 @@ if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
nova_auth_ip => hiera('keystone_public_api_virtual_ip'),
}
} else {
- class { '::neutron::plugins::ml2':
- flat_networks => split(hiera('neutron_flat_networks'), ','),
- tenant_network_types => [hiera('neutron_tenant_network_type')],
- }
-
- class { '::neutron::agents::ml2::ovs':
- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
- tunnel_types => split(hiera('neutron_tunnel_types'), ','),
- }
+ include ::neutron::plugins::ml2
+ include ::neutron::agents::ml2::ovs
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
class { '::neutron::agents::n1kv_vem':
n1kv_source => hiera('n1kv_vem_source', undef),
n1kv_version => hiera('n1kv_vem_version', undef),
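Note: replacing the explicit class declarations with bare includes works because Puppet resolves any class parameter named <class>::<parameter> from hiera via automatic parameter lookup. A minimal sketch of the hieradata a compute node would need for the two includes above, with illustrative values (the exact value shapes depend on what the puppet-neutron classes accept):

  neutron::plugins::ml2::flat_networks:
    - datacentre
  neutron::plugins::ml2::tenant_network_types:
    - vxlan
  neutron::agents::ml2::ovs::bridge_mappings:
    - datacentre:br-ex
  neutron::agents::ml2::ovs::tunnel_types:
    - vxlan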
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 683c1213..7d3012e5 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -252,16 +252,10 @@ if hiera('step') >= 3 {
require => Package['neutron'],
}
- class { '::neutron::plugins::ml2':
- flat_networks => split(hiera('neutron_flat_networks'), ','),
- tenant_network_types => [hiera('neutron_tenant_network_type')],
- mechanism_drivers => [hiera('neutron_mechanism_drivers')],
- }
- class { '::neutron::agents::ml2::ovs':
- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
- tunnel_types => split(hiera('neutron_tunnel_types'), ','),
- }
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+ include ::neutron::plugins::ml2
+ include ::neutron::agents::ml2::ovs
+
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
include ::neutron::plugins::ml2::cisco::nexus1000v
class { '::neutron::agents::n1kv_vem':
@@ -276,10 +270,10 @@ if hiera('step') >= 3 {
}
}
- if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
include ::neutron::plugins::ml2::cisco::ucsm
}
- if 'cisco_nexus' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
include ::neutron::plugins::ml2::cisco::nexus
include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
}
@@ -467,7 +461,7 @@ if hiera('step') >= 3 {
include ::heat::engine
# Horizon
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
$_profile_support = 'cisco'
} else {
$_profile_support = 'None'
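Note: the cisco_* guards now read the mechanism driver list through the same hiera key that feeds the ML2 class. Puppet's in operator matches an array element when the value is a list and a substring when it is a string, so either representation satisfies a guard like 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers'). Illustrative hieradata (example values, not a template default):

  neutron::plugins::ml2::mechanism_drivers:
    - openvswitch
    - cisco_n1kv

A comma-separated string such as openvswitch,cisco_n1kv would also satisfy the guard via substring matching.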
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index f4f7a4ea..ad356e33 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -628,27 +628,20 @@ if hiera('step') >= 3 {
enabled => false,
}
}
- if hiera('neutron::core_plugin') == 'ml2' {
- class { '::neutron::plugins::ml2':
- flat_networks => split(hiera('neutron_flat_networks'), ','),
- tenant_network_types => [hiera('neutron_tenant_network_type')],
- mechanism_drivers => [hiera('neutron_mechanism_drivers')],
- }
- class { '::neutron::agents::ml2::ovs':
- manage_service => false,
- enabled => false,
- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
- tunnel_types => split(hiera('neutron_tunnel_types'), ','),
- }
+ include ::neutron::plugins::ml2
+ class { '::neutron::agents::ml2::ovs':
+ manage_service => false,
+ enabled => false,
}
- if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') {
+
+ if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
include ::neutron::plugins::ml2::cisco::ucsm
}
- if 'cisco_nexus' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
include ::neutron::plugins::ml2::cisco::nexus
include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
}
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
include ::neutron::plugins::ml2::cisco::nexus1000v
class { '::neutron::agents::n1kv_vem':
@@ -896,7 +889,7 @@ if hiera('step') >= 3 {
# service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
}
include ::apache::mod::status
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
$_profile_support = 'cisco'
} else {
$_profile_support = 'None'
@@ -1520,7 +1513,7 @@ if hiera('step') >= 4 {
}
#VSM
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
pacemaker::resource::ocf { 'vsm-p' :
ocf_agent_name => 'heartbeat:VirtualDomain',
resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
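Note: the pacemaker variant keeps a resource-style declaration for the OVS agent only to pin manage_service and enabled to false, because pacemaker owns the service lifecycle on these nodes. The remaining agent parameters are still filled in by automatic hiera lookup, exactly as with the plain include, so hieradata such as the following (illustrative values) continues to apply:

  neutron::agents::ml2::ovs::tunnel_types:
    - vxlan
  neutron::agents::ml2::ovs::bridge_mappings:
    - datacentre:br-ex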
diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp
index 4296208b..2d880d33 100644
--- a/puppet/manifests/ringbuilder.pp
+++ b/puppet/manifests/ringbuilder.pp
@@ -70,7 +70,7 @@ class tripleo::ringbuilder (
# create local rings
swift::ringbuilder::create{ ['object', 'account', 'container']:
part_power => $part_power,
- replicas => $replicas,
+ replicas => min(count($device_array), $replicas),
min_part_hours => $min_part_hours,
} ->
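Note: capping the replica count at the number of available devices lets ring building succeed on small test deployments that have fewer disks than the requested replica count, while leaving larger deployments unchanged. A worked example with illustrative hieradata:

  tripleo::ringbuilder::replicas: 3
  # with only two devices in $device_array, count($device_array) == 2,
  # so the rings are built with min(2, 3) = 2 replicas; deployments with
  # three or more devices still get the requested 3.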
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index a8183f76..721dcba4 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -17,7 +17,7 @@ parameters:
type: string
KeyName:
default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+ description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
MountCheck:
default: 'false'
@@ -94,7 +94,10 @@ parameters:
Extra properties or metadata passed to Nova for the created nodes in
the overcloud. It's accessible via the Nova metadata API.
type: json
-
+ SchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
resources:
@@ -110,6 +113,7 @@ resources:
user_data: {get_resource: UserData}
name: {get_param: Hostname}
metadata: {get_param: ServerMetadata}
+ scheduler_hints: {get_param: SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData: