Diffstat:
-rw-r--r--  .gitignore | 11
-rw-r--r--  Gemfile | 24
-rw-r--r--  Makefile | 49
-rw-r--r--  README.rst | 11
-rw-r--r--  Rakefile | 6
-rw-r--r--  capabilities_map.yaml | 226
-rw-r--r--  deprecated/block-storage.yaml | 92
-rw-r--r--  deprecated/nfs-source.yaml | 36
-rw-r--r--  deprecated/nova-compute-config.yaml | 69
-rw-r--r--  deprecated/nova-compute-instance.yaml | 273
-rw-r--r--  deprecated/overcloud-source.yaml | 952
-rw-r--r--  deprecated/overcloud-vlan-port.yaml | 39
-rw-r--r--  deprecated/ssl-source.yaml | 54
-rw-r--r--  deprecated/swift-deploy.yaml | 69
-rw-r--r--  deprecated/swift-source.yaml | 15
-rw-r--r--  deprecated/swift-storage-source.yaml | 77
-rw-r--r--  deprecated/undercloud-bm-nova-config.yaml | 23
-rw-r--r--  deprecated/undercloud-bm-nova-deploy.yaml | 37
-rw-r--r--  deprecated/undercloud-source.yaml | 412
-rw-r--r--  deprecated/undercloud-vlan-port.yaml | 37
-rw-r--r--  deprecated/undercloud-vm-ironic-config.yaml | 27
-rw-r--r--  deprecated/undercloud-vm-ironic-deploy.yaml | 44
-rw-r--r--  deprecated/undercloud-vm-nova-config.yaml | 29
-rw-r--r--  deprecated/undercloud-vm-nova-deploy.yaml | 52
-rw-r--r--  docker/README-containers.md | 9
-rw-r--r--  docker/compute-post.yaml | 201
-rw-r--r--  docker/firstboot/install_docker_agents.yaml | 8
-rw-r--r--  docker/firstboot/start_docker_agents.sh | 14
-rw-r--r--  environments/docker-network-isolation.yaml | 4
-rw-r--r--  environments/docker-network.yaml | 2
-rw-r--r--  environments/docker-rdo.yaml | 17
-rw-r--r--  environments/docker.yaml | 22
-rw-r--r--  environments/enable-tls.yaml | 41
-rw-r--r--  environments/external-loadbalancer-vip.yaml | 37
-rw-r--r--  environments/inject-trust-anchor.yaml | 6
-rw-r--r--  environments/ips-from-pool.yaml | 20
-rw-r--r--  environments/manage-firewall.yaml | 2
-rw-r--r--  environments/net-bond-with-vlans-no-external.yaml | 26
-rw-r--r--  environments/net-single-nic-with-vlans-no-external.yaml | 25
-rw-r--r--  environments/network-isolation-no-tunneling.yaml | 37
-rw-r--r--  environments/network-isolation.yaml | 25
-rw-r--r--  environments/network-management.yaml | 24
-rw-r--r--  environments/neutron-midonet.yaml | 20
-rw-r--r--  environments/neutron-nuage-config.yaml | 15
-rw-r--r--  environments/nova-nuage-config.yaml | 8
-rw-r--r--  environments/puppet-ceph-external.yaml | 9
-rw-r--r--  environments/puppet-pacemaker.yaml | 2
-rw-r--r--  environments/updates/README.md | 9
-rw-r--r--  environments/updates/update-from-keystone-admin-internal-api.yaml | 33
-rw-r--r--  extraconfig/tasks/noop.yaml | 10
-rwxr-xr-x  extraconfig/tasks/pacemaker_resource_restart.sh | 70
-rw-r--r--  extraconfig/tasks/post_puppet_pacemaker.yaml | 44
-rw-r--r--  extraconfig/tasks/pre_puppet_pacemaker.yaml | 30
-rwxr-xr-x  extraconfig/tasks/yum_update.sh | 117
-rw-r--r--  firstboot/userdata_heat_admin.yaml | 2
-rw-r--r--  net-config-bond.yaml | 4
-rw-r--r--  net-config-bridge.yaml | 4
-rw-r--r--  net-config-linux-bridge.yaml | 73
-rw-r--r--  net-config-noop.yaml | 4
-rw-r--r--  network/config/bond-with-vlans/README.md | 34
-rw-r--r--  network/config/bond-with-vlans/ceph-storage.yaml | 16
-rw-r--r--  network/config/bond-with-vlans/cinder-storage.yaml | 16
-rw-r--r--  network/config/bond-with-vlans/compute.yaml | 16
-rw-r--r--  network/config/bond-with-vlans/controller-no-external.yaml | 131
-rw-r--r--  network/config/bond-with-vlans/controller.yaml | 21
-rw-r--r--  network/config/bond-with-vlans/swift-storage.yaml | 16
-rw-r--r--  network/config/multiple-nics/README.md | 18
-rw-r--r--  network/config/multiple-nics/ceph-storage.yaml | 23
-rw-r--r--  network/config/multiple-nics/cinder-storage.yaml | 23
-rw-r--r--  network/config/multiple-nics/compute.yaml | 35
-rw-r--r--  network/config/multiple-nics/controller.yaml | 23
-rw-r--r--  network/config/multiple-nics/swift-storage.yaml | 23
-rw-r--r--  network/config/single-nic-vlans/README.md | 32
-rw-r--r--  network/config/single-nic-vlans/ceph-storage.yaml | 15
-rw-r--r--  network/config/single-nic-vlans/cinder-storage.yaml | 15
-rw-r--r--  network/config/single-nic-vlans/compute.yaml | 15
-rw-r--r--  network/config/single-nic-vlans/controller-no-external.yaml | 114
-rw-r--r--  network/config/single-nic-vlans/controller.yaml | 14
-rw-r--r--  network/config/single-nic-vlans/swift-storage.yaml | 15
-rw-r--r--  network/endpoints/endpoint.yaml | 60
-rw-r--r--  network/endpoints/endpoint_map.yaml | 450
-rw-r--r--  network/external.yaml | 2
-rw-r--r--  network/internal_api.yaml | 2
-rw-r--r--  network/management.yaml | 64
-rw-r--r--  network/networks.yaml | 3
-rw-r--r--  network/ports/ctlplane_vip.yaml | 7
-rw-r--r--  network/ports/external.yaml | 9
-rw-r--r--  network/ports/external_from_pool.yaml | 45
-rw-r--r--  network/ports/from_service.yaml | 34
-rw-r--r--  network/ports/internal_api.yaml | 16
-rw-r--r--  network/ports/internal_api_from_pool.yaml | 45
-rw-r--r--  network/ports/management.yaml | 42
-rw-r--r--  network/ports/net_ip_list_map.yaml | 4
-rw-r--r--  network/ports/net_ip_map.yaml | 4
-rw-r--r--  network/ports/net_ip_subnet_map.yaml | 4
-rw-r--r--  network/ports/net_vip_map_external.yaml | 50
-rw-r--r--  network/ports/noop.yaml | 14
-rw-r--r--  network/ports/storage.yaml | 17
-rw-r--r--  network/ports/storage_from_pool.yaml | 45
-rw-r--r--  network/ports/storage_mgmt.yaml | 16
-rw-r--r--  network/ports/storage_mgmt_from_pool.yaml | 45
-rw-r--r--  network/ports/tenant.yaml | 16
-rw-r--r--  network/ports/tenant_from_pool.yaml | 45
-rw-r--r--  network/ports/vip.yaml | 7
-rw-r--r--  network/storage.yaml | 2
-rw-r--r--  network/storage_mgmt.yaml | 2
-rw-r--r--  network/tenant.yaml | 2
-rw-r--r--  os-apply-config/all-nodes-config.yaml | 93
-rw-r--r--  os-apply-config/ceph-cluster-config.yaml | 57
-rw-r--r--  os-apply-config/ceph-storage-post.yaml | 21
-rw-r--r--  os-apply-config/ceph-storage.yaml | 172
-rw-r--r--  os-apply-config/cinder-storage-post.yaml | 21
-rw-r--r--  os-apply-config/cinder-storage.yaml | 234
-rw-r--r--  os-apply-config/compute-post.yaml | 22
-rw-r--r--  os-apply-config/compute.yaml | 562
-rw-r--r--  os-apply-config/controller-post.yaml | 20
-rw-r--r--  os-apply-config/controller.yaml | 1213
-rw-r--r--  os-apply-config/swift-devices-and-proxy-config.yaml | 38
-rw-r--r--  os-apply-config/swift-storage-post.yaml | 21
-rw-r--r--  os-apply-config/swift-storage.yaml | 209
-rw-r--r--  os-apply-config/vip-config.yaml | 16
-rw-r--r--  overcloud-resource-registry-puppet.yaml | 34
-rw-r--r--  overcloud-resource-registry.yaml | 76
l--------- [-rw-r--r--]  overcloud-without-mergepy.yaml | 1453
-rw-r--r--  overcloud.yaml | 1538
-rw-r--r--  puppet/all-nodes-config.yaml | 20
-rw-r--r--  puppet/ceph-cluster-config.yaml | 37
-rw-r--r--  puppet/ceph-storage.yaml | 101
-rw-r--r--  puppet/cinder-storage.yaml | 111
-rw-r--r--  puppet/compute.yaml | 266
-rw-r--r--  puppet/controller-post.yaml | 18
-rw-r--r--  puppet/controller.yaml | 693
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml | 119
-rw-r--r--  puppet/extraconfig/ceph/ceph-external-config.yaml | 36
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml | 92
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml | 5
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml | 90
-rw-r--r--  puppet/extraconfig/tls/ca-inject.yaml | 66
-rw-r--r--  puppet/extraconfig/tls/no-ca.yaml | 17
-rw-r--r--  puppet/extraconfig/tls/no-tls.yaml | 34
-rw-r--r--  puppet/extraconfig/tls/tls-cert-inject.yaml | 93
-rw-r--r--  puppet/hieradata/ceph.yaml | 9
-rw-r--r--  puppet/hieradata/common.yaml | 3
-rw-r--r--  puppet/hieradata/compute.yaml | 9
-rw-r--r--  puppet/hieradata/controller.yaml | 122
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp | 9
-rw-r--r--  puppet/manifests/overcloud_compute.pp | 69
-rw-r--r--  puppet/manifests/overcloud_controller.pp | 271
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp | 1219
-rw-r--r--  puppet/manifests/overcloud_object.pp | 9
-rw-r--r--  puppet/manifests/overcloud_volume.pp | 5
-rw-r--r--  puppet/manifests/ringbuilder.pp | 24
-rw-r--r--  puppet/swift-storage.yaml | 89
-rw-r--r--  setup.cfg | 7
-rwxr-xr-x  test_merge.bash | 43
-rw-r--r--  tripleo_heat_merge/__init__.py | 0
-rw-r--r--  tripleo_heat_merge/merge.py | 436
157 files changed, 6718 insertions(+), 8509 deletions(-)
diff --git a/.gitignore b/.gitignore
index 3035c9e5..3d7aded8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,12 +1,3 @@
-# Built via Makefile
-overcloud.yaml
-overcloud-with-block-storage.yaml
-overcloud-with-block-storage-nfs.yaml
-undercloud-bm.yaml
-undercloud-vm.yaml
-undercloud-vm-ironic.yaml
-undercloud-vm-ironic-vlan.yaml
-
*.py[cod]
# C extensions
@@ -45,6 +36,8 @@ nosetests.xml
*~
*.swp
+*.bundle
+Gemfile.lock
doc/_build
diff --git a/Gemfile b/Gemfile
new file mode 100644
index 00000000..302ef415
--- /dev/null
+++ b/Gemfile
@@ -0,0 +1,24 @@
+source 'https://rubygems.org'
+
+group :development, :test do
+ gem 'puppetlabs_spec_helper', :require => false
+
+ gem 'puppet-lint', '~> 1.1'
+ gem 'puppet-lint-absolute_classname-check'
+ gem 'puppet-lint-absolute_template_path'
+ gem 'puppet-lint-trailing_newline-check'
+
+ # Puppet 4.x related lint checks
+ gem 'puppet-lint-unquoted_string-check'
+ gem 'puppet-lint-leading_zero-check'
+ gem 'puppet-lint-variable_contains_upcase'
+ gem 'puppet-lint-numericvariable'
+end
+
+if puppetversion = ENV['PUPPET_GEM_VERSION']
+ gem 'puppet', puppetversion, :require => false
+else
+ gem 'puppet', :require => false
+end
+
+# vim:ft=ruby
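
The Gemfile above installs the puppet gem conditionally: when PUPPET_GEM_VERSION is set in the environment, bundler pins puppet to that version, otherwise it picks up whatever release it can resolve. A rough sketch of how that is exercised from a shell (the version string is purely illustrative):

    # default resolution of the puppet gem
    bundle install

    # pin puppet for a lint/test run; the exact version here is only an example
    PUPPET_GEM_VERSION='3.8.7' bundle install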
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 131e1b9e..00000000
--- a/Makefile
+++ /dev/null
@@ -1,49 +0,0 @@
-generated_templates = \
- overcloud.yaml \
- overcloud-with-block-storage-nfs.yaml \
- undercloud-vm.yaml \
- undercloud-bm.yaml \
- undercloud-vm-ironic.yaml \
- undercloud-vm-ironic-vlan.yaml
-
-# Files included in deprecated/overcloud-source.yaml via FileInclude
-overcloud_source_deps = deprecated/nova-compute-instance.yaml
-
-all: $(generated_templates)
-VALIDATE := $(patsubst %,validate-%,$(generated_templates))
-validate-all: $(VALIDATE)
-$(VALIDATE):
- heat template-validate -f $(subst validate-,,$@)
-
-# You can define in CONTROLEXTRA one or more additional YAML files to further extend the template, some additions could be:
-# - overcloud-vlan-port.yaml to activate the VLAN auto-assignment from Neutron
-# - nfs-source.yaml to configure Cinder with NFS
-overcloud.yaml: deprecated/overcloud-source.yaml deprecated/block-storage.yaml deprecated/swift-deploy.yaml deprecated/swift-source.yaml deprecated/swift-storage-source.yaml deprecated/ssl-source.yaml deprecated/nova-compute-config.yaml $(overcloud_source_deps)
- python ./tripleo_heat_merge/merge.py --hot --scale NovaCompute=$${COMPUTESCALE:-'1'} --scale controller=$${CONTROLSCALE:-'1'} --scale SwiftStorage=$${SWIFTSTORAGESCALE:-'0'} --scale BlockStorage=$${BLOCKSTORAGESCALE:-'0'} --scale CephStorage=$${CEPHSTORAGESCALE:-'0'} deprecated/overcloud-source.yaml deprecated/block-storage.yaml deprecated/swift-source.yaml deprecated/swift-storage-source.yaml deprecated/ssl-source.yaml deprecated/swift-deploy.yaml deprecated/nova-compute-config.yaml ${CONTROLEXTRA} > $@.tmp
- mv $@.tmp $@
-
-undercloud-vm.yaml: deprecated/undercloud-source.yaml deprecated/undercloud-vm-nova-config.yaml deprecated/undercloud-vm-nova-deploy.yaml
- python ./tripleo_heat_merge/merge.py --hot $^ > $@.tmp
- mv $@.tmp $@
-
-undercloud-bm.yaml: deprecated/undercloud-source.yaml deprecated/undercloud-bm-nova-config.yaml deprecated/undercloud-bm-nova-deploy.yaml
- python ./tripleo_heat_merge/merge.py --hot $^ > $@.tmp
- mv $@.tmp $@
-
-undercloud-vm-ironic.yaml: deprecated/undercloud-source.yaml deprecated/undercloud-vm-ironic-config.yaml deprecated/undercloud-vm-ironic-deploy.yaml
- python ./tripleo_heat_merge/merge.py --hot $^ > $@.tmp
- mv $@.tmp $@
-
-undercloud-vm-ironic-vlan.yaml: deprecated/undercloud-source.yaml deprecated/undercloud-vm-ironic-config.yaml deprecated/undercloud-vm-ironic-deploy.yaml deprecated/undercloud-vlan-port.yaml
- python ./tripleo_heat_merge/merge.py --hot $^ > $@.tmp
- mv $@.tmp $@
-
-check: test
-
-test:
- @bash test_merge.bash
-
-clean:
- rm -f $(generated_templates)
-
-.PHONY: clean overcloud.yaml check
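
Removing the Makefile drops the merge.py-based generation of overcloud.yaml and the undercloud-* templates along with the validate-% targets. The validation step it wrapped can still be run by hand against the static top-level template, roughly like this (assuming an authenticated heat CLI, just as the old target did):

    # equivalent of the old validate-overcloud.yaml target, minus the merge step
    heat template-validate -f overcloud.yaml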
diff --git a/README.rst b/README.rst
index 148a741f..36f9fba0 100644
--- a/README.rst
+++ b/README.rst
@@ -15,8 +15,7 @@ Features
The ability to deploy a multi-node, role based OpenStack deployment using
OpenStack Heat. Notable features include:
- * Choice of deployment/configuration tooling: puppet, os-apply-config, and
- (soon) docker
+ * Choice of deployment/configuration tooling: puppet and (soon) docker
* Role based deployment: roles for the controller, compute, ceph, swift,
and cinder storage
@@ -29,8 +28,6 @@ Directories
A description of the directory layout in TripleO Heat Templates.
- * deprecated: contains templates that have been deprecated
-
* environments: contains heat environment files that can be used with -e
on the command like to enable features, etc.
@@ -45,11 +42,5 @@ A description of the directory layout in TripleO Heat Templates.
* puppet: templates mostly driven by configuration with puppet. To use these
templates you can use the overcloud-resource-registry-puppet.yaml.
- * os-apply-config: templates mostly driven by configuration w/
- os-collect-config and bash based
- elements (which use the Heat os-apply-config group).
- These will soon be deprecated and are no longer part
- of the upstream CI testing efforts.
-
* validation-scripts: validation scripts useful to all deployment
configurations
diff --git a/Rakefile b/Rakefile
new file mode 100644
index 00000000..bca6a6c2
--- /dev/null
+++ b/Rakefile
@@ -0,0 +1,6 @@
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+
+PuppetLint.configuration.fail_on_warnings = true
+PuppetLint.configuration.send('disable_80chars')
+PuppetLint.configuration.send('disable_autoloader_layout')
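
Together with the Gemfile, this Rakefile hooks the puppet manifests under puppet/manifests into puppet-lint through the standard puppetlabs_spec_helper rake tasks, treating warnings as failures and disabling the 80-character and autoloader-layout checks. A minimal sketch of running the checks locally, assuming bundler is available:

    # install the gems declared in the Gemfile
    bundle install

    # run puppet-lint with the settings configured above
    bundle exec rake lint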
diff --git a/capabilities_map.yaml b/capabilities_map.yaml
new file mode 100644
index 00000000..30ee211e
--- /dev/null
+++ b/capabilities_map.yaml
@@ -0,0 +1,226 @@
+# This file holds metadata about the capabilities of the tripleo-heat-templates
+# repository for deployment using puppet. It groups configuration by topic and
+# describes possible combinations of environments and resource capabilities.
+
+# root_template: identifies the repository's root template
+# root_environment: identifies the root environment, which is special in terms of
+# the order in which the environments are merged before deploying. It serves as
+# a base, and its parameters/resource_registry gets overridden by other environments
+# if they are used.
+
+# topics:
+# High Level grouping by purpose of environments
+# Attributes:
+# title: (required)
+# description: (optional)
+# environment_groups: (required)
+
+# environment_groups:
+# Identifies an environment choice. If a group includes multiple environments,
+# the environments in that group are mutually exclusive.
+# Attributes:
+# title: (optional)
+# description: (optional)
+# tags: a list of tags providing additional information, e.g. for filtering (optional)
+# environments: (required)
+
+# environments:
+# List of environments in environment group
+# Attributes:
+# file: a file name including path within repository (required)
+# title: (required)
+# description: (optional)
+# requires: an array of environments which are required by this environment (optional)
+# resource_registry: [tbd] (optional)
+
+# resource_registry:
+# [tbd] Each environment can provide options at the resource_registry level that apply
+# only when that environment is used. (The resource_type of that environment can
+# be implemented using multiple templates.)
+
+root_template: overcloud.yaml
+root_environment: overcloud-resource-registry-puppet.yaml
+topics:
+ - title: Basic Configuration
+ description:
+ environment_groups:
+ - title:
+ description: Enable basic configuration required for OpenStack Deployment
+ environments:
+ - file: overcloud-resource-registry-puppet.yaml
+ title: Default Configuration
+ description:
+
+ - title: Deployment options
+ description:
+ environment_groups:
+ - title: High Availability
+ description: Enables configuration of an Overcloud controller with Pacemaker
+ environments:
+ - file: environments/puppet-pacemaker.yaml
+ title: Pacemaker
+ description: Enable configuration of an Overcloud controller with Pacemaker
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Docker RDO
+ description: >
+ Docker container with heat agents for containerized compute node
+ environments:
+ - file: environments/docker-rdo.yaml
+ title: Docker RDO
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+
+ # - title: Network Interface Configuration
+ # description:
+ # environment_groups:
+
+ - title: Overlay network Configuration
+ description:
+ environment_groups:
+ - title: Network Isolation
+ description: >
+ Enable the creation of Neutron networks for
+ isolated Overcloud traffic and configure each role to assign ports
+ (related to that role) on these networks.
+ environments:
+ - file: environments/network-isolation.yaml
+ title: Network Isolation
+ description: Enable Network Isolation
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Single nic or Bonding
+ description: >
+ Configure roles to use a pair of bonded nics or to use Vlans on a
+ single nic. This option assumes use of Network Isolation.
+ environments:
+ - file: environments/net-bond-with-vlans.yaml
+ title: Bond with Vlans
+ description: >
+ Configure each role to use a pair of bonded nics (nic2 and
+ nic3) and configure an IP address on each relevant isolated network
+ for each role. This option assumes use of Network Isolation.
+ requires:
+ - environments/network-isolation.yaml
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/net-single-nic-with-vlans.yaml
+ title: Single nic with Vlans
+ description: >
+ Configure each role to use Vlans on a single nic for
+ each isolated network. This option assumes use of Network Isolation.
+ requires:
+ - environments/network-isolation.yaml
+ - overcloud-resource-registry-puppet.yaml
+
+ - title: Neutron Plugin Configuration
+ description:
+ environment_groups:
+ - title: BigSwitch extensions or Cisco N1KV backend
+ description:
+ environments:
+ - file: environments/neutron-ml2-bigswitch.yaml
+ title: BigSwitch extensions
+ description: >
+ Enable Big Switch extensions, configured via puppet
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-ml2-cisco-n1kv.yaml
+ title: Cisco N1KV backend
+ description: >
+ Enable a Cisco N1KV backend, configured via puppet
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Cisco Neutron plugin
+ description: >
+ Enable a Cisco Neutron plugin
+ environments:
+ - file: environments/neutron-ml2-cisco-nexus-ucsm.yaml
+ title: Cisco Neutron plugin
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+
+ - title: Storage
+ description:
+ environment_groups:
+ - title: Cinder NetApp backend
+ description: >
+ Enable a Cinder NetApp backend, configured via puppet
+ environments:
+ - file: environments/cinder-netapp-config.yaml
+ title: Cinder NetApp backend
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Externally managed Ceph
+ description: >
+ Enable the use of an externally managed Ceph cluster
+ environments:
+ - file: environments/puppet-ceph-external.yaml
+ title: Externally managed Ceph
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Ceph Devel
+ description: >
+ Enable a Ceph storage cluster using the controller and 2 ceph nodes.
+ Rbd backends are enabled for Cinder, Glance, and Nova.
+ environments:
+ - file: environments/puppet-ceph-devel.yaml
+ title: Ceph Devel
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Storage Environment
+ description: >
+ Can be used to set up storage backends. Defaults to Ceph as the
+ backend for Cinder, Glance and Nova ephemeral storage. It configures,
+ for example, which services will use Ceph, or whether any of the
+ services will use NFS. Usually needs to be edited by the user first.
+ tags:
+ - no-gui
+ environments:
+ - file: environments/storage-environment.yaml
+ title: Storage Environment
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+
+ - title: Utilities
+ description:
+ environment_groups:
+ - title: Config Debug
+ description: Enable config management (e.g. Puppet) debugging
+ environments:
+ - file: environments/config-debug.yaml
+ title: Config Debug
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Disable journal in MongoDb
+ description: >
+ When journaling is enabled, MongoDb creates a large journal file, which
+ can take time. In a CI environment, for example, journaling is not
+ necessary.
+ environments:
+ - file: environments/mongodb-nojournal.yaml
+ title: Disable journal in MongoDb
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - title: Overcloud Steps
+ description: >
+ Specifies hooks/breakpoints where the overcloud deployment should stop,
+ allowing operator validation between steps and/or more granular control.
+ Note: the wildcards relate to the naming convention for some resource suffixes,
+ e.g. see puppet/*-post.yaml; enabling this means we wait for
+ a user signal on every *Deployment_StepN resource defined in those files.
+ tags:
+ - no-gui
+ environments:
+ - file: environments/overcloud-steps.yaml
+ title: Overcloud Steps
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
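
The capabilities map is metadata for deployment tooling rather than something Heat consumes directly: the root_environment is applied first and each selected environment is layered on top together with the files listed under its requires key, with later environments overriding earlier ones. A hedged sketch of the equivalent manual invocation for the "Bond with Vlans" choice, using the heat CLI the repository's tooling already relies on (stack name and option order are illustrative):

    heat stack-create overcloud \
      -f overcloud.yaml \
      -e overcloud-resource-registry-puppet.yaml \
      -e environments/network-isolation.yaml \
      -e environments/net-bond-with-vlans.yaml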
diff --git a/deprecated/block-storage.yaml b/deprecated/block-storage.yaml
deleted file mode 100644
index c288044a..00000000
--- a/deprecated/block-storage.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'Common Block Storage Configuration'
-parameters:
- BlockStorageImage:
- type: string
- default: overcloud-cinder-volume
- OvercloudBlockStorageFlavor:
- description: Flavor for block storage nodes to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- BlockStorageExtraConfig:
- default: {}
- description: |
- Controller specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
-resources:
- BlockStorage0:
- type: OS::Nova::Server
- properties:
- image:
- {get_param: BlockStorageImage}
- flavor: {get_param: OvercloudBlockStorageFlavor}
- key_name: {get_param: KeyName}
- user_data_format: SOFTWARE_CONFIG
- BlockStorage0AllNodesDeployment:
- depends_on: [BlockStorage0Deployment,BlockStorage0PassthroughSpecific]
- type: OS::Heat::StructuredDeployment
- properties:
- signal_transport: {get_param: DefaultSignalTransport}
- config: {get_resource: allNodesConfig}
- server: {get_resource: BlockStorage0}
- BlockStorage0Deployment:
- type: OS::Heat::StructuredDeployment
- properties:
- server: {get_resource: BlockStorage0}
- config: {get_resource: BlockStorageConfig}
- input_values:
- controller_virtual_ip: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- cinder_dsn: {"Fn::Join": ['', ['mysql://cinder:unset@', {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} , '/cinder']]}
- signal_transport: NO_SIGNAL
- BlockStorage0Passthrough:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: BlockStoragePassthrough}
- server: {get_resource: BlockStorage0}
- signal_transport: NO_SIGNAL
- input_values:
- passthrough_config: {get_param: ExtraConfig}
- BlockStorage0PassthroughSpecific:
- depends_on: [BlockStorage0Passthrough]
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: BlockStoragePassthroughSpecific}
- server: {get_resource: BlockStorage0}
- signal_transport: NO_SIGNAL
- input_values:
- passthrough_config_specific: {get_param: BlockStorageExtraConfig}
- BlockStorageConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- admin-password: {get_param: AdminPassword}
- keystone:
- host: {get_input: controller_virtual_ip}
- cinder:
- db: {get_input: cinder_dsn}
- volume_size_mb:
- get_param: CinderLVMLoopDeviceSize
- service-password:
- get_param: CinderPassword
- iscsi-helper:
- get_param: CinderISCSIHelper
- rabbit:
- host: {get_input: controller_virtual_ip}
- username: {get_param: RabbitUserName}
- password: {get_param: RabbitPassword}
- glance:
- host: {get_input: controller_virtual_ip}
- port: {get_param: GlancePort}
- BlockStoragePassthrough:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config: {get_input: passthrough_config}
- BlockStoragePassthroughSpecific:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config: {get_input: passthrough_config_specific}
diff --git a/deprecated/nfs-source.yaml b/deprecated/nfs-source.yaml
deleted file mode 100644
index 5d865ea7..00000000
--- a/deprecated/nfs-source.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-resources:
- controllerNfsServerConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- nfs_server:
- shares:
- - name: cinder
- clients:
- - machine: 192.0.2.0/24
- options: rw,async,all_squash,anonuid=0,anongid=0
- controllerCinderNfsConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- cinder:
- include_nfs_backend: true
- nfs_shares:
- Fn::Join:
- - ':'
- - - {get_attr: [controller0, networks, ctlplane, 0]}
- - /mnt/state/var/lib/nfs/cinder
- controllerNfsServerDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: controllerNfsServerConfig}
- server: {get_resource: controller0}
- signal_transport: NO_SIGNAL
- controller0CinderNfsDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: controllerCinderNfsConfig}
- server: {get_resource: controller0}
- signal_transport: NO_SIGNAL
diff --git a/deprecated/nova-compute-config.yaml b/deprecated/nova-compute-config.yaml
deleted file mode 100644
index 1fe787e3..00000000
--- a/deprecated/nova-compute-config.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-resources:
- NovaComputeConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- nova:
- compute_driver: { get_input: nova_compute_driver }
- compute_libvirt_type: { get_input: nova_compute_libvirt_type }
- debug: {get_param: Debug}
- host: {get_input: nova_api_host}
- public_ip: {get_input: nova_public_ip}
- service-password: {get_input: nova_password}
- ceilometer:
- debug: {get_param: Debug}
- metering_secret: {get_input: ceilometer_metering_secret}
- service-password: {get_input: ceilometer_password}
- compute_agent: {get_input: ceilometer_compute_agent}
- snmpd:
- export_MIB: UCD-SNMP-MIB
- readonly_user_name: {get_input: snmpd_readonly_user_name}
- readonly_user_password: {get_input: snmpd_readonly_user_password}
- glance:
- debug: {get_param: Debug}
- host: {get_input: glance_host}
- port: {get_input: glance_port}
- protocol: {get_input: glance_protocol}
- keystone:
- debug: {get_param: Debug}
- host: {get_input: keystone_host}
- neutron:
- debug: {get_param: Debug}
- flat-networks: {get_input: neutron_flat_networks}
- host: {get_input: neutron_host}
- router_distributed: {get_input: neutron_router_distributed}
- agent_mode: {get_input: neutron_agent_mode}
- metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
- mechanism_drivers: {get_input: neutron_mechanism_drivers}
- allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover}
- l3_ha: {get_input: neutron_l3_ha}
- ovs:
- local_ip: {get_input: neutron_local_ip}
- tenant_network_type: {get_input: neutron_tenant_network_type}
- tunnel_types: {get_input: neutron_tunnel_types}
- network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
- bridge_mappings: {get_input: neutron_bridge_mappings}
- enable_tunneling: {get_input: neutron_enable_tunneling}
- physical_bridge: {get_input: neutron_physical_bridge}
- public_interface: {get_input: neutron_public_interface}
- public_interface_raw_device: {get_input: neutron_public_interface_raw_device}
- service-password: {get_input: neutron_password}
- admin-password: {get_input: admin_password}
- rabbit:
- host: {get_input: rabbit_host}
- username: {get_input: rabbit_username}
- password: {get_input: rabbit_password}
- ntp:
- servers:
- - {server: {get_input: ntp_server}}
- NovaComputePassthrough:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config: {get_input: passthrough_config}
- NovaComputePassthroughSpecific:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config: {get_input: passthrough_config_specific}
diff --git a/deprecated/nova-compute-instance.yaml b/deprecated/nova-compute-instance.yaml
deleted file mode 100644
index 811c0fc3..00000000
--- a/deprecated/nova-compute-instance.yaml
+++ /dev/null
@@ -1,273 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'Nova Compute'
-parameters:
- AdminPassword:
- default: unset
- description: The password for the keystone admin account, used for monitoring, querying neutron etc.
- type: string
- hidden: true
- AllNodesConfig:
- type: string
- description: OS::Heat::Config to use for all nodes deployment
- CeilometerComputeAgent:
- description: Indicates whether the Compute agent is present and expects nova-compute to be configured accordingly
- type: string
- default: ''
- constraints:
- - allowed_values: ['', Present]
- CeilometerMeteringSecret:
- default: unset
- description: Secret shared by the ceilometer services.
- type: string
- hidden: true
- CeilometerPassword:
- default: unset
- description: The password for the ceilometer service account.
- type: string
- hidden: true
- DefaultSignalTransport:
- default: CFN_SIGNAL
- description: Transport to use for software-config signals.
- type: string
- constraints:
- - allowed_values: [ CFN_SIGNAL, HEAT_SIGNAL, NO_SIGNAL ]
- ExtraConfig:
- default: {}
- description: |
- Additional configuration to inject into the cluster. The JSON should have
- the following structure:
- {"FILEKEY":
- {"config":
- [{"section": "SECTIONNAME",
- "values":
- [{"option": "OPTIONNAME",
- "value": "VALUENAME"
- }
- ]
- }
- ]
- }
- }
- For instance:
- {"nova":
- {"config":
- [{"section": "default",
- "values":
- [{"option": "force_config_drive",
- "value": "always"
- }
- ]
- },
- {"section": "cells",
- "values":
- [{"option": "driver",
- "value": "nova.cells.rpc_driver.CellsRPCDriver"
- }
- ]
- }
- ]
- }
- }
- type: json
- GlanceHost:
- type: string
- GlancePort:
- default: 9292
- description: Glance port.
- type: string
- GlanceProtocol:
- default: http
- description: Protocol to use when connecting to glance, set to https for SSL.
- type: string
- ImageUpdatePolicy:
- default: 'REBUILD_PRESERVE_EPHEMERAL'
- description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
- type: string
- KeyName:
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
- type: string
- default: default
- KeystoneHost:
- type: string
- NeutronBridgeMappings:
- type: string
- NeutronEnableTunnelling:
- type: string
- NeutronFlatNetworks:
- type: string
- default: ''
- description: If set, flat networks to configure in neutron plugins.
- NeutronHost:
- type: string
- NeutronNetworkType:
- default: 'vxlan'
- description: The tenant network type for Neutron, either gre or vxlan.
- type: string
- NeutronNetworkVLANRanges:
- type: string
- NeutronPhysicalBridge:
- default: ''
- description: An OVS bridge to create for accessing external networks.
- type: string
- NeutronPublicInterface:
- default: ''
- description: A port to add to the NeutronPhysicalBridge.
- type: string
- NeutronTunnelTypes:
- default: 'vxlan'
- description: |
- The tunnel types for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'gre,vxlan'
- type: string
- NeutronPublicInterfaceRawDevice:
- default: ''
- type: string
- NeutronDVR:
- default: 'False'
- type: string
- NeutronMechanismDrivers:
- default: 'openvswitch'
- description: |
- The mechanism drivers for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'openvswitch,l2_population'
- type: string
- NeutronAllowL3AgentFailover:
- default: 'True'
- description: Allow automatic l3-agent failover
- type: string
- NeutronL3HA:
- default: 'False'
- description: Whether to enable l3-agent HA
- type: string
- NovaApiHost:
- type: string
- NovaComputeDriver:
- type: string
- default: libvirt.LibvirtDriver
- NovaComputeExtraConfig:
- default: {}
- description: |
- NovaCompute specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
- NovaComputeLibvirtType:
- type: string
- default: ''
- NovaImage:
- type: string
- default: overcloud-compute
- NovaPassword:
- default: unset
- description: The password for the nova service account, used by nova-api.
- type: string
- hidden: true
- NovaPublicIP:
- type: string
- NtpServer:
- type: string
- default: ''
- OvercloudComputeFlavor:
- description: Use this flavor
- type: string
- constraints:
- - custom_constraint: nova.flavor
- RabbitHost:
- type: string
- RabbitUserName:
- type: string
- RabbitPassword:
- type: string
- hidden: true
- SnmpdReadonlyUserName:
- default: ro_snmp_user
- description: The user name for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- SnmpdReadonlyUserPassword:
- default: unset
- description: The user password for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- hidden: true
-resources:
- NovaCompute0:
- type: OS::Nova::Server
- properties:
- image:
- {get_param: NovaImage}
- image_update_policy:
- get_param: ImageUpdatePolicy
- flavor: {get_param: OvercloudComputeFlavor}
- key_name: {get_param: KeyName}
- networks:
- - network: ctlplane
- user_data_format: SOFTWARE_CONFIG
- NovaCompute0Deployment:
- type: OS::Heat::StructuredDeployment
- properties:
- signal_transport: NO_SIGNAL
- config: {get_resource: NovaComputeConfig}
- server: {get_resource: NovaCompute0}
- input_values:
- nova_compute_driver: {get_param: NovaComputeDriver}
- nova_compute_libvirt_type: {get_param: NovaComputeLibvirtType}
- nova_public_ip: {get_param: NovaPublicIP}
- nova_api_host: {get_param: NovaApiHost}
- nova_password: {get_param: NovaPassword}
- ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
- ceilometer_password: {get_param: CeilometerPassword}
- ceilometer_compute_agent: {get_param: CeilometerComputeAgent}
- snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
- snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
- glance_host: {get_param: GlanceHost}
- glance_port: {get_param: GlancePort}
- glance_protocol: {get_param: GlanceProtocol}
- keystone_host: {get_param: KeystoneHost}
- neutron_flat_networks: {get_param: NeutronFlatNetworks}
- neutron_host: {get_param: NeutronHost}
- neutron_local_ip: {get_attr: [NovaCompute0, networks, ctlplane, 0]}
- neutron_tenant_network_type: {get_param: NeutronNetworkType}
- neutron_tunnel_types: {get_param: NeutronTunnelTypes}
- neutron_network_vlan_ranges: {get_param: NeutronNetworkVLANRanges}
- neutron_bridge_mappings: {get_param: NeutronBridgeMappings}
- neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
- neutron_physical_bridge: {get_param: NeutronPhysicalBridge}
- neutron_public_interface: {get_param: NeutronPublicInterface}
- neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
- neutron_password: {get_param: NeutronPassword}
- neutron_agent_mode: {get_param: NeutronComputeAgentMode}
- neutron_router_distributed: {get_param: NeutronDVR}
- neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
- neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers}
- neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
- neutron_l3_ha: {get_param: NeutronL3HA}
- admin_password: {get_param: AdminPassword}
- rabbit_host: {get_param: RabbitHost}
- rabbit_username: {get_param: RabbitUserName}
- rabbit_password: {get_param: RabbitPassword}
- ntp_server: {get_param: NtpServer}
- NovaCompute0AllNodesDeployment:
- depends_on:
- - controller0AllNodesDeployment
- - NovaCompute0Deployment
- - NovaCompute0PassthroughSpecific
- type: OS::Heat::StructuredDeployment
- properties:
- signal_transport: {get_param: DefaultSignalTransport}
- config: {get_param: AllNodesConfig}
- server: {get_resource: NovaCompute0}
- NovaCompute0Passthrough:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: NovaComputePassthrough}
- server: {get_resource: NovaCompute0}
- signal_transport: NO_SIGNAL
- input_values:
- passthrough_config: {get_param: ExtraConfig}
- NovaCompute0PassthroughSpecific:
- depends_on: [NovaCompute0Passthrough]
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: NovaComputePassthroughSpecific}
- server: {get_resource: NovaCompute0}
- signal_transport: NO_SIGNAL
- input_values:
- passthrough_config_specific: {get_param: NovaComputeExtraConfig}
diff --git a/deprecated/overcloud-source.yaml b/deprecated/overcloud-source.yaml
deleted file mode 100644
index 0729b338..00000000
--- a/deprecated/overcloud-source.yaml
+++ /dev/null
@@ -1,952 +0,0 @@
-description: Deprecated. Please migrate to use overcloud-without-mergepy instead.
-heat_template_version: 2013-05-23
-parameters:
- AdminPassword:
- default: unset
- description: The password for the keystone admin account, used for monitoring, querying neutron etc.
- type: string
- hidden: true
- AdminToken:
- default: unset
- description: The keystone auth secret.
- type: string
- hidden: true
- CeilometerComputeAgent:
- description: Indicates whether the Compute agent is present and expects nova-compute to be configured accordingly
- type: string
- default: ''
- constraints:
- - allowed_values: ['', Present]
- CeilometerMeteringSecret:
- default: unset
- description: Secret shared by the ceilometer services.
- type: string
- hidden: true
- CeilometerPassword:
- default: unset
- description: The password for the ceilometer service account.
- type: string
- hidden: true
- CinderISCSIHelper:
- default: tgtadm
- description: The iSCSI helper to use with cinder.
- type: string
- CinderLVMLoopDeviceSize:
- default: 5000
- description: The size of the loopback file used by the cinder LVM driver.
- type: number
- CinderPassword:
- default: unset
- description: The password for the cinder service account, used by cinder-api.
- type: string
- hidden: true
- CloudName:
- default: ''
- description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
- type: string
- ControlFixedIPs:
- default: []
- description: Should be used for arbitrary ips.
- type: json
- controllerExtraConfig:
- default: {}
- description: |
- Controller specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
- controllerImage:
- type: string
- default: overcloud-control
- constraints:
- - custom_constraint: glance.image
- ControlVirtualInterface:
- default: 'br-ex'
- description: Interface where virtual ip will be assigned.
- type: string
- Debug:
- default: ''
- description: Set to True to enable debugging on all services.
- type: string
- DefaultSignalTransport:
- default: CFN_SIGNAL
- description: Transport to use for software-config signals.
- type: string
- constraints:
- - allowed_values: [ CFN_SIGNAL, HEAT_SIGNAL, NO_SIGNAL ]
- ExtraConfig:
- default: {}
- description: |
- Additional configuration to inject into the cluster. The JSON should have
- the following structure:
- {"FILEKEY":
- {"config":
- [{"section": "SECTIONNAME",
- "values":
- [{"option": "OPTIONNAME",
- "value": "VALUENAME"
- }
- ]
- }
- ]
- }
- }
- For instance:
- {"nova":
- {"config":
- [{"section": "default",
- "values":
- [{"option": "force_config_drive",
- "value": "always"
- }
- ]
- },
- {"section": "cells",
- "values":
- [{"option": "driver",
- "value": "nova.cells.rpc_driver.CellsRPCDriver"
- }
- ]
- }
- ]
- }
- }
- type: json
- GlanceLogFile:
- description: The filepath of the file to use for logging messages from Glance.
- type: string
- default: ''
- HorizonPort:
- type: number
- default: 80
- description: Horizon web server port.
- GlancePassword:
- default: unset
- description: The password for the glance service account, used by the glance services.
- type: string
- hidden: true
- GlancePort:
- default: 9292
- description: Glance port.
- type: string
- GlanceProtocol:
- default: http
- description: Protocol to use when connecting to glance, set to https for SSL.
- type: string
- GlanceNotifierStrategy:
- description: Strategy to use for Glance notification queue
- type: string
- default: noop
- HeatPassword:
- default: unset
- description: The password for the Heat service account, used by the Heat services.
- type: string
- hidden: true
- HeatStackDomainAdminPassword:
- description: Password for heat_domain_admin user.
- type: string
- default: ''
- hidden: true
- HypervisorNeutronPhysicalBridge:
- default: 'br-ex'
- description: >
- An OVS bridge to create on each hypervisor. This defaults to br-ex the
- same as the control plane nodes, as we have a uniform configuration of
- the openvswitch agent. Typically should not need to be changed.
- type: string
- HypervisorNeutronPublicInterface:
- default: nic1
- description: What interface to add to the HypervisorNeutronPhysicalBridge.
- type: string
- ImageUpdatePolicy:
- default: 'REBUILD_PRESERVE_EPHEMERAL'
- description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
- type: string
- KeyName:
- default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
- type: string
- constraints:
- - custom_constraint: nova.keypair
- KeystoneCACertificate:
- default: ''
- description: Keystone self-signed certificate authority certificate.
- type: string
- KeystoneSigningCertificate:
- default: ''
- description: Keystone certificate for verifying token validity.
- type: string
- KeystoneSigningKey:
- default: ''
- description: Keystone key for signing tokens.
- type: string
- hidden: true
- KeystoneSSLCertificate:
- default: ''
- description: Keystone certificate for verifying token validity.
- type: string
- KeystoneSSLCertificateKey:
- default: ''
- description: Keystone key for signing tokens.
- type: string
- hidden: true
- MysqlInnodbBufferPoolSize:
- description: >
- Specifies the size of the buffer pool in megabytes. Setting to
- zero should be interpreted as "no value" and will defer to the
- lower level default.
- type: number
- default: 0
- NeutronBridgeMappings:
- description: >
- The OVS logical->physical bridge mappings to use. See the Neutron
- documentation for details. Defaults to mapping br-ex - the external
- bridge on hosts - to a physical name 'datacentre' which can be used
- to create provider networks (and we use this for the default floating
- network) - if changing this either use different post-install network
- scripts or be sure to keep 'datacentre' as a mapping network name.
- type: string
- default: "datacentre:br-ex"
- NeutronControlPlaneID:
- default: ''
- type: string
- description: Neutron ID for ctlplane network.
- NeutronDnsmasqOptions:
- default: 'dhcp-option-force=26,1400'
- description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the tunnel overhead.
- type: string
- NeutronFlatNetworks:
- type: string
- default: 'datacentre'
- description: >
- If set, flat networks to configure in neutron plugins. Defaults to
- 'datacentre' to permit external network creation.
- NeutronNetworkType:
- default: 'vxlan'
- description: The tenant network type for Neutron, either gre or vxlan.
- type: string
- NeutronNetworkVLANRanges:
- default: 'datacentre'
- description: >
- The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
- Neutron documentation for permitted values. Defaults to permitting any
- VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
- type: string
- NeutronPassword:
- default: unset
- description: The password for the neutron service account, used by neutron agents.
- type: string
- hidden: true
- NeutronPublicInterface:
- default: nic1
- description: What interface to bridge onto br-ex for network nodes.
- type: string
- NeutronPublicInterfaceDefaultRoute:
- default: ''
- description: A custom default route for the NeutronPublicInterface.
- type: string
- NeutronPublicInterfaceIP:
- default: ''
- description: A custom IP address to put onto the NeutronPublicInterface.
- type: string
- NeutronPublicInterfaceRawDevice:
- default: ''
- description: If set, the public interface is a vlan with this device as the raw device.
- type: string
- NeutronPublicInterfaceTag:
- default: ''
- description: >
- VLAN tag for creating a public VLAN. The tag will be used to
- create an access port on the exterior bridge for each control plane node,
- and that port will be given the IP address returned by neutron from the
- public network. Set CONTROLEXTRA=overcloud-vlan-port.yaml when compiling
- overcloud.yaml to include the deployment of VLAN ports to the control
- plane.
- type: string
- NeutronComputeAgentMode:
- default: 'dvr'
- description: Agent mode for the neutron-l3-agent on the compute hosts
- type: string
- NeutronAgentMode:
- default: 'dvr_snat'
- description: Agent mode for the neutron-l3-agent on the controller hosts
- type: string
- NeutronDVR:
- default: 'False'
- description: Whether to configure Neutron Distributed Virtual Routers
- type: string
- NeutronMetadataProxySharedSecret:
- default: 'unset'
- description: Shared secret to prevent spoofing
- type: string
- NeutronTunnelTypes:
- default: 'vxlan'
- description: |
- The tunnel types for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'gre,vxlan'
- type: string
- NeutronMechanismDrivers:
- default: 'openvswitch'
- description: |
- The mechanism drivers for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'openvswitch,l2_population'
- type: string
- NeutronAllowL3AgentFailover:
- default: 'True'
- description: Allow automatic l3-agent failover
- type: string
- NeutronL3HA:
- default: 'False'
- description: Whether to enable l3-agent HA
- type: string
- NovaComputeDriver:
- default: libvirt.LibvirtDriver
- type: string
- NovaComputeExtraConfig:
- default: {}
- description: |
- NovaCompute specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
- NovaComputeLibvirtType:
- default: ''
- type: string
- NovaImage:
- type: string
- default: overcloud-compute
- constraints:
- - custom_constraint: glance.image
- NovaPassword:
- default: unset
- description: The password for the nova service account, used by nova-api.
- type: string
- hidden: true
- NtpServer:
- type: string
- default: ''
- OvercloudComputeFlavor:
- description: Flavor for compute nodes to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- OvercloudControlFlavor:
- description: Flavor for control nodes to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- PublicVirtualFixedIPs:
- default: []
- description: >
- Control the IP allocation for the PublicVirtualInterface port. E.g.
- [{'ip_address':'1.2.3.4'}]
- type: json
- PublicVirtualInterface:
- default: 'br-ex'
- description: >
- Specifies the interface where the public-facing virtual ip will be assigned.
- This should be int_public when a VLAN is being used.
- type: string
- PublicVirtualNetwork:
- default: 'ctlplane'
- type: string
- description: >
- Neutron network to allocate public virtual IP port on.
- RabbitCookieSalt:
- type: string
- default: unset
- description: Salt for the rabbit cookie, change this to force the randomly generated rabbit cookie to change.
- RabbitPassword:
- default: guest
- description: The password for RabbitMQ
- type: string
- hidden: true
- RabbitUserName:
- default: guest
- description: The username for RabbitMQ
- type: string
- RabbitClientUseSSL:
- default: false
- description: >
- Rabbit client subscriber parameter to specify
- an SSL connection to the RabbitMQ host.
- type: string
- RabbitClientPort:
- default: 5672
- description: Set rabbit subscriber port, change this if using SSL
- type: number
- SnmpdReadonlyUserName:
- default: ro_snmp_user
- description: The user name for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- SnmpdReadonlyUserPassword:
- default: unset
- description: The user password for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- hidden: true
-resources:
- ControlVirtualIP:
- type: OS::Neutron::Port
- properties:
- name: control_virtual_ip
- network_id: {get_param: NeutronControlPlaneID}
- fixed_ips:
- get_param: ControlFixedIPs
- replacement_policy: AUTO
- MysqlClusterUniquePart:
- type: OS::Heat::RandomString
- properties:
- length: 10
- MysqlRootPassword:
- type: OS::Heat::RandomString
- properties:
- length: 10
- PublicVirtualIP:
- type: OS::Neutron::Port
- properties:
- name: public_virtual_ip
- network: {get_param: PublicVirtualNetwork}
- fixed_ips:
- get_param: PublicVirtualFixedIPs
- replacement_policy: AUTO
- RabbitCookie:
- type: OS::Heat::RandomString
- properties:
- length: 20
- salt:
- get_param: RabbitCookieSalt
- NovaCompute0Deployment:
- type: FileInclude
- Path: deprecated/nova-compute-instance.yaml
- SubKey: resources.NovaCompute0Deployment
- parameters:
- DefaultSignalTransport:
- get_param: DefaultSignalTransport
- NovaApiHost: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- KeystoneHost: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- NeutronHost: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- GlanceHost: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- RabbitHost: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- NovaPublicIP: {get_attr: [PublicVirtualIP, fixed_ips, 0, ip_address]}
- NeutronNetworkType:
- get_param: NeutronNetworkType
- NeutronTunnelTypes:
- get_param: NeutronTunnelTypes
- NeutronEnableTunnelling: "True"
- NeutronFlatNetworks:
- get_param: NeutronFlatNetworks
- NeutronNetworkVLANRanges:
- get_param: NeutronNetworkVLANRanges
- NeutronPhysicalBridge:
- get_param: HypervisorNeutronPhysicalBridge
- NeutronPublicInterface:
- get_param: HypervisorNeutronPublicInterface
- NeutronBridgeMappings:
- get_param: NeutronBridgeMappings
- NeutronDVR:
- get_param: NeutronDVR
- NeutronAgentMode:
- get_param: NeutronComputeAgentMode
- NeutronPublicInterfaceRawDevice:
- get_param: NeutronPublicInterfaceRawDevice
- NeutronMechanismDrivers:
- get_param: NeutronMechanismDrivers
- NeutronAllowL3AgentFailover:
- get_param: NeutronAllowL3AgentFailover
- NeutronL3HA:
- get_param: NeutronL3HA
- NovaCompute0AllNodesDeployment:
- type: FileInclude
- Path: deprecated/nova-compute-instance.yaml
- SubKey: resources.NovaCompute0AllNodesDeployment
- parameters:
- AllNodesConfig: {get_resource: allNodesConfig}
- NovaCompute0:
- type: FileInclude
- Path: deprecated/nova-compute-instance.yaml
- SubKey: resources.NovaCompute0
- NovaCompute0Passthrough:
- type: FileInclude
- Path: deprecated/nova-compute-instance.yaml
- SubKey: resources.NovaCompute0Passthrough
- parameters:
- passthrough_config: {get_param: ExtraConfig}
- NovaCompute0PassthroughSpecific:
- type: FileInclude
- Path: deprecated/nova-compute-instance.yaml
- SubKey: resources.NovaCompute0PassthroughSpecific
- parameters:
- passthrough_config_specific: {get_param: NovaComputeExtraConfig}
- HeatAuthEncryptionKey:
- type: OS::Heat::RandomString
- controllerConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- admin-password:
- get_param: AdminPassword
- admin-token:
- get_param: AdminToken
- bootstack:
- public_interface_ip:
- get_param: NeutronPublicInterfaceIP
- bootstrap_host:
- bootstrap_nodeid:
- Fn::Select:
- - 0
- - Fn::Select:
- - 0
- - Merge::Map:
- controller0:
- - get_attr:
- - controller0
- - name
- nodeid: {get_input: bootstack_nodeid}
- database:
- host: &database_host
- {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- cinder:
- db:
- Fn::Join:
- - ''
- - - mysql://cinder:unset@
- - *database_host
- - /cinder
- debug: {get_param: Debug}
- volume_size_mb:
- get_param: CinderLVMLoopDeviceSize
- service-password:
- get_param: CinderPassword
- iscsi-helper:
- get_param: CinderISCSIHelper
- controller-address:
- get_input: controller_host
- corosync:
- bindnetaddr: {get_input: controller_host}
- mcastport: 5577
- nodes:
- Merge::Map:
- controller0:
- ip: {get_attr: [controller0, networks, ctlplane, 0]}
- pacemaker:
- stonith_enabled : false
- recheck_interval : 5
- quorum_policy : ignore
- db-password: unset
- glance:
- registry:
- host: {get_input: controller_virtual_ip}
- backend: swift
- db:
- Fn::Join:
- - ''
- - - mysql://glance:unset@
- - *database_host
- - /glance
- debug: {get_param: Debug}
- host:
- get_input: controller_virtual_ip
- port:
- get_param: GlancePort
- protocol:
- get_param: GlanceProtocol
- service-password:
- get_param: GlancePassword
- swift-store-user: service:glance
- swift-store-key:
- get_param: GlancePassword
- notifier-strategy:
- get_param: GlanceNotifierStrategy
- log-file:
- get_param: GlanceLogFile
- heat:
- admin_password:
- get_param: HeatPassword
- admin_tenant_name: service
- admin_user: heat
- auth_encryption_key:
- get_resource: HeatAuthEncryptionKey
- db:
- Fn::Join:
- - ''
- - - mysql://heat:unset@
- - *database_host
- - /heat
- debug: {get_param: Debug}
- stack_domain_admin_password: {get_param: HeatStackDomainAdminPassword}
- watch_server_url: {get_input: heat.watch_server_url}
- metadata_server_url: {get_input: heat.metadata_server_url}
- waitcondition_server_url: {get_input: heat.waitcondition_server_url}
- horizon:
- port: {get_param: HorizonPort}
- caches:
- memcached:
- nodes:
- Merge::Map:
- controller0:
- {get_attr: [controller0, name]}
- keystone:
- db:
- Fn::Join:
- - ''
- - - mysql://keystone:unset@
- - *database_host
- - /keystone
- debug: {get_param: Debug}
- host:
- get_input: controller_virtual_ip
- ca_certificate: {get_param: KeystoneCACertificate}
- signing_key: {get_param: KeystoneSigningKey}
- signing_certificate: {get_param: KeystoneSigningCertificate}
- ssl:
- certificate: {get_param: KeystoneSSLCertificate}
- certificate_key: {get_param: KeystoneSSLCertificateKey}
- mysql:
- innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize}
- local_bind: true
- root-password: {get_resource: MysqlRootPassword}
- nodes:
- Merge::Map:
- controller0:
- ip: {get_attr: [controller0, networks, ctlplane, 0]}
- cluster_name:
- Fn::Join:
- - '-'
- - - 'tripleo'
- - {get_resource: MysqlClusterUniquePart}
- neutron:
- debug: {get_param: Debug}
- flat-networks: {get_param: NeutronFlatNetworks}
- host: {get_input: controller_virtual_ip}
- metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
- agent_mode: {get_param: NeutronAgentMode}
- router_distributed: {get_param: NeutronDVR}
- mechanism_drivers: {get_param: NeutronMechanismDrivers}
- allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
- l3_ha: {get_param: NeutronL3HA}
- ovs:
- enable_tunneling: 'True'
- local_ip:
- get_input: controller_host
- network_vlan_ranges: {get_param: NeutronNetworkVLANRanges}
- bridge_mappings: {get_param: NeutronBridgeMappings}
- public_interface:
- get_param: NeutronPublicInterface
- public_interface_raw_device:
- get_param: NeutronPublicInterfaceRawDevice
- public_interface_route:
- get_param: NeutronPublicInterfaceDefaultRoute
- public_interface_tag:
- get_param: NeutronPublicInterfaceTag
- physical_bridge: br-ex
- tenant_network_type:
- get_param: NeutronNetworkType
- tunnel_types:
- get_param: NeutronTunnelTypes
- ovs_db:
- Fn::Join:
- - ''
- - - mysql://neutron:unset@
- - *database_host
- - /ovs_neutron?charset=utf8
- service-password:
- get_param: NeutronPassword
- dnsmasq-options:
- get_param: NeutronDnsmasqOptions
- ceilometer:
- db:
- Fn::Join:
- - ''
- - - mysql://ceilometer:unset@
- - *database_host
- - /ceilometer
- debug: {get_param: Debug}
- metering_secret: {get_param: CeilometerMeteringSecret}
- service-password:
- get_param: CeilometerPassword
- snmpd:
- export_MIB: UCD-SNMP-MIB
- readonly_user_name:
- get_param: SnmpdReadonlyUserName
- readonly_user_password:
- get_param: SnmpdReadonlyUserPassword
- nova:
- compute_driver: libvirt.LibvirtDriver
- db:
- Fn::Join:
- - ''
- - - mysql://nova:unset@
- - *database_host
- - /nova
- default_floating_pool:
- ext-net
- host: {get_input: controller_virtual_ip}
- metadata-proxy: true
- service-password:
- get_param: NovaPassword
- rabbit:
- host: {get_input: controller_virtual_ip}
- username:
- get_param: RabbitUserName
- password:
- get_param: RabbitPassword
- cookie:
- get_attr:
- - RabbitCookie
- - value
- rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
- rabbit_port: {get_param: RabbitClientPort}
- ntp:
- servers:
- - {server: {get_param: NtpServer}}
- virtual_interfaces:
- instances:
- - vrrp_instance_name: VI_CONTROL
- virtual_router_id: 51
- keepalive_interface:
- get_param: ControlVirtualInterface
- priority: 101
- virtual_ips:
- - ip: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- interface:
- get_param: ControlVirtualInterface
- - vrrp_instance_name: VI_PUBLIC
- virtual_router_id: 52
- keepalive_interface:
- get_param: PublicVirtualInterface
- priority: 101
- virtual_ips:
- - ip: {get_attr: [PublicVirtualIP, fixed_ips, 0, ip_address]}
- interface:
- get_param: PublicVirtualInterface
- vrrp_sync_groups:
- - name: VG1
- members:
- - VI_CONTROL
- - VI_PUBLIC
- keepalived:
- keepalive_interface:
- get_param: PublicVirtualInterface
- priority: 101
- virtual_ips:
- -
- ip: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- interface:
- get_param: ControlVirtualInterface
- -
- ip: {get_attr: [PublicVirtualIP, fixed_ips, 0, ip_address]}
- interface:
- get_param: PublicVirtualInterface
- haproxy:
- nodes:
- Merge::Map:
- controller0:
- ip: {get_attr: [controller0, networks, ctlplane, 0]}
- name: {get_attr: [controller0, name]}
- net_binds:
- - &control_vip {ip: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}}
- - &public_vip {ip: {get_attr: [PublicVirtualIP, fixed_ips, 0, ip_address]}}
- options:
- - option httpchk GET /
- services:
- - name: keystone_admin
- port: 35357
- - name: keystone_public
- port: 5000
- - name: horizon
- port: 80
- - name: neutron
- port: 9696
- - name: cinder
- port: 8776
- - name: glance_api
- port: 9292
- - name: glance_registry
- port: 9191
-              options: # overwrite options as glance_registry needs auth for http req
- - name: heat_api
- port: 8004
- - name: heat_cloudwatch
- port: 8003
- - name: heat_cfn
- port: 8000
- - name: mysql
- port: 3306
- net_binds:
- - *control_vip
- extra_server_params:
- - backup
- options:
- - timeout client 0
- - timeout server 0
- - name: nova_ec2
- port: 8773
- - name: nova_osapi
- port: 8774
- - name: nova_metadata
- port: 8775
- - name: nova_novncproxy
- port: 6080
- - name: ceilometer
- port: 8777
-              options: # overwrite options as ceilometer needs auth for http req
- - name: swift_proxy_server
- port: 8080
- options:
- - option httpchk GET /info
- - name: rabbitmq
- port: 5672
- net_binds:
- - *control_vip
- options:
- - timeout client 0
- - timeout server 0
- - maxconn 1500
- controllerPassthrough:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config: {get_input: passthrough_config}
- controllerPassthroughSpecific:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config: {get_input: passthrough_config_specific}
- controller0:
- type: OS::Nova::Server
- properties:
- image:
- get_param: controllerImage
- image_update_policy:
- get_param: ImageUpdatePolicy
- flavor:
- get_param: OvercloudControlFlavor
- key_name:
- get_param: KeyName
- networks:
- - network: ctlplane
- user_data_format: SOFTWARE_CONFIG
- controller0AllNodesDeployment:
- depends_on: [controller0Deployment,controller0SSLDeployment,controller0Swift,controller0PassthroughSpecific]
- type: OS::Heat::StructuredDeployment
- properties:
- signal_transport: {get_param: DefaultSignalTransport}
- config: {get_resource: allNodesConfig}
- server: {get_resource: controller0}
- controller0Deployment:
- type: OS::Heat::StructuredDeployment
- properties:
- signal_transport: NO_SIGNAL
- config: {get_resource: controllerConfig}
- server: {get_resource: controller0}
- input_values:
- bootstack_nodeid: {get_attr: [controller0, name]}
- controller_host: {get_attr: [controller0, networks, ctlplane, 0]}
- controller_virtual_ip:
- {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- heat.watch_server_url:
- Fn::Join:
- - ''
- - - 'http://'
- - {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- - ':8003'
- heat.metadata_server_url:
- Fn::Join:
- - ''
- - - 'http://'
- - {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- - ':8000'
- heat.waitcondition_server_url:
- Fn::Join:
- - ''
- - - 'http://'
- - {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- - ':8000/v1/waitcondition'
- allNodesConfig:
- type: OS::Heat::StructuredConfig
- properties:
- config:
- completion-signal: {get_input: deploy_signal_id}
- hosts:
- Fn::Join:
- - "\n"
- - - Fn::Join:
- - "\n"
- - Merge::Map:
- NovaCompute0:
- Fn::Join:
- - ' '
- - - {get_attr: [NovaCompute0, networks, ctlplane, 0]}
- - {get_attr: [NovaCompute0, name]}
- - Fn::Join:
- - "\n"
- - Merge::Map:
- BlockStorage0:
- Fn::Join:
- - ' '
- - - {get_attr: [BlockStorage0, networks, ctlplane, 0]}
- - {get_attr: [BlockStorage0, name]}
- - Fn::Join:
- - "\n"
- - Merge::Map:
- SwiftStorage0:
- Fn::Join:
- - ' '
- - - {get_attr: [SwiftStorage0, networks, ctlplane, 0]}
- - {get_attr: [SwiftStorage0, name]}
- - Fn::Join:
- - "\n"
- - Merge::Map:
- controller0:
- Fn::Join:
- - ' '
- - - {get_attr: [controller0, networks, ctlplane, 0]}
- - {get_attr: [controller0, name]}
- - {get_param: CloudName}
- rabbit:
- nodes:
- Fn::Join:
- - ','
- - Merge::Map:
- controller0:
- {get_attr: [controller0, name]}
- sysctl:
- net.ipv4.tcp_keepalive_time: 5
- net.ipv4.tcp_keepalive_probes: 5
- net.ipv4.tcp_keepalive_intvl: 1
- controller0SSLDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: SSLConfig}
- server: {get_resource: controller0}
- signal_transport: NO_SIGNAL
- input_values:
- controller_host: {get_attr: [controller0, networks, ctlplane, 0]}
- ssl_certificate: {get_param: SSLCertificate}
- ssl_key: {get_param: SSLKey}
- ssl_ca_certificate: {get_param: SSLCACertificate}
- controller0Passthrough:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: controllerPassthrough}
- server: {get_resource: controller0}
- signal_transport: NO_SIGNAL
- input_values:
- passthrough_config: {get_param: ExtraConfig}
- controller0PassthroughSpecific:
- depends_on: [controller0Passthrough]
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: controllerPassthroughSpecific}
- server: {get_resource: controller0}
- signal_transport: NO_SIGNAL
- input_values:
- passthrough_config_specific: {get_param: controllerExtraConfig}
-outputs:
- KeystoneURL:
- description: URL for the Overcloud Keystone service
- value:
- Fn::Join:
- - ''
- - - http://
- - {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- - :5000/v2.0/
diff --git a/deprecated/overcloud-vlan-port.yaml b/deprecated/overcloud-vlan-port.yaml
deleted file mode 100644
index 8f6f6937..00000000
--- a/deprecated/overcloud-vlan-port.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-outputs:
- controller0PublicIP:
- description: Address for registering endpoints in the cloud.
- value: {get_attr: [controller0_VLANPort, fixed_ips, 0, ip_address]}
-resources:
- # Override the main template which can also supply a static route.
- controller0_99_VLANPort:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: ControllerVLANPortConfig}
- server: {get_resource: controller0}
- signal_transport: NO_SIGNAL
- input_values:
- vlan_port:
- list_join:
- - '/'
- - - {get_attr: [controller0_VLANPort, fixed_ips, 0, ip_address]}
- # This should also be pulled out of the subnet. May need a
- # neutron fix too - XXX make into a parameter and feed it
- # in via _overcloud.sh for now.
- - '24'
- # Tell the instance to apply the default route.
- # Reinstate when https://bugs.launchpad.net/heat/+bug/1336656 is
- # sorted
- # public_interface_route:
- # get_attr: [controller0_VLANPort, fixed_ips, 0, subnet, gateway_ip]
- ControllerVLANPortConfig:
- type: OS::Heat::StructuredConfig
- properties:
- config:
- neutron:
- ovs:
- public_interface_tag_ip: {get_input: vlan_port}
- controller0_VLANPort:
- type: OS::Neutron::Port
- properties:
- name: controller0_vlan
- network: public
- replacement_policy: AUTO
diff --git a/deprecated/ssl-source.yaml b/deprecated/ssl-source.yaml
deleted file mode 100644
index a9357323..00000000
--- a/deprecated/ssl-source.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-description: 'ssl-source: SSL endpoint metadata for openstack'
-parameters:
- SSLCACertificate:
- default: ''
- description: If set, the contents of an SSL certificate authority file.
- type: string
- SSLCertificate:
- default: ''
- description: If set, the contents of an SSL certificate .crt file for encrypting SSL endpoints.
- type: string
- hidden: true
- SSLKey:
- default: ''
- description: If set, the contents of an SSL certificate .key file for encrypting SSL endpoints.
- type: string
- hidden: true
-resources:
- SSLConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- ssl:
- ca_certificate: {get_input: ssl_ca_certificate}
- stunnel:
- cert: {get_input: ssl_certificate}
- key: {get_input: ssl_key}
- cacert: {get_input: ssl_ca_certificate}
- connect_host: {get_input: controller_host}
- ports:
- - name: 'ec2'
- accept: 13773
- connect: 8773
- - name: 'image'
- accept: 13292
- connect: 9292
- - name: 'identity'
- accept: 13000
- connect: 5000
- - name: 'network'
- accept: 13696
- connect: 9696
- - name: 'compute'
- accept: 13774
- connect: 8774
- - name: 'swift-proxy'
- accept: 13080
- connect: 8080
- - name: 'cinder'
- accept: 13776
- connect: 8776
- - name: 'ceilometer'
- accept: 13777
- connect: 8777
diff --git a/deprecated/swift-deploy.yaml b/deprecated/swift-deploy.yaml
deleted file mode 100644
index d4d32cbd..00000000
--- a/deprecated/swift-deploy.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-description: 'Swift-proxy: OpenStack object storage proxy'
-parameters:
- SwiftHashSuffix:
- default: unset
- description: A random string to be used as a salt when hashing to determine mappings in the ring.
- type: string
- hidden: true
- SwiftMountCheck:
- default: 'false'
-    description: Value of mount_check in Swift account/container/object-server.conf
- type: boolean
- SwiftMinPartHours:
- type: number
- default: 1
- description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance.
- SwiftPartPower:
- default: 10
- description: Partition Power to use when building Swift rings
- type: number
- SwiftPassword:
- default: unset
- description: The password for the swift service account, used by the swift proxy services.
- type: string
- hidden: true
- SwiftReplicas:
- type: number
- default: 3
- description: How many replicas to use in the swift rings.
-resources:
- controller0Swift:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: SwiftConfig}
- server: {get_resource: controller0}
- signal_transport: NO_SIGNAL
- input_values:
- swift_hash_suffix: {get_param: SwiftHashSuffix}
- swift_mount_check: {get_param: SwiftMountCheck}
- swift_password: {get_param: SwiftPassword}
- swift_part_power: {get_param: SwiftPartPower}
- swift_devices:
- Fn::Join:
- - ', '
- - Merge::Map:
- controller0:
- Fn::Join:
- - ''
- - - 'r1z1-'
- - {get_attr: [controller0, networks, ctlplane, 0]}
- - ':%PORT%/d1'
- SwiftStorage0:
- Fn::Join:
- - ''
- - - 'r1z1-'
- - {get_attr: [SwiftStorage0, networks, ctlplane, 0]}
- - ':%PORT%/d1'
- swift_proxy_memcache:
- Fn::Join:
- - ','
- - Merge::Map:
- controller0:
- Fn::Join:
- - ', '
- - - Fn::Join:
- - ''
- - - {get_attr: [controller0, networks, ctlplane, 0]}
- - ':11211'
- swift_replicas: { get_param: SwiftReplicas}
- swift_min_part_hours: { get_param: SwiftMinPartHours}
diff --git a/deprecated/swift-source.yaml b/deprecated/swift-source.yaml
deleted file mode 100644
index e6fd951e..00000000
--- a/deprecated/swift-source.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-resources:
- SwiftConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- swift:
- devices: { get_input: swift_devices }
- hash: { get_input: swift_hash_suffix }
- mount-check: { get_input: swift_mount_check }
- part-power: { get_input: swift_part_power }
- proxy-memcache: { get_input: swift_proxy_memcache }
- replicas: {get_input: swift_replicas }
- min-part-hours: {get_input: swift_min_part_hours }
- service-password: { get_input: swift_password }
diff --git a/deprecated/swift-storage-source.yaml b/deprecated/swift-storage-source.yaml
deleted file mode 100644
index 176925b6..00000000
--- a/deprecated/swift-storage-source.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-heat_template_version: 2013-05-23
-description: 'Common Swift Storage Configuration'
-parameters:
- OvercloudSwiftStorageFlavor:
- description: Flavor for Swift storage nodes to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- SwiftReplicas:
- type: number
- default: 1
- description: How many replicas to use in the swift rings.
- SwiftStorageImage:
- type: string
- default: overcloud-swift-storage
-resources:
- SwiftStorage0:
- type: OS::Nova::Server
- properties:
- image: {get_param: SwiftStorageImage}
- flavor: {get_param: OvercloudSwiftStorageFlavor}
- key_name: {get_param: KeyName}
- user_data_format: SOFTWARE_CONFIG
- SwiftKeystoneConfig:
- type: OS::Heat::StructuredConfig
- properties:
- config:
- keystone:
- host: {get_input: keystone_host}
- SwiftStorage0Keystone:
- type: OS::Heat::StructuredDeployment
- properties:
- server: {get_resource: SwiftStorage0}
- config: {get_resource: SwiftKeystoneConfig}
- signal_transport: NO_SIGNAL
- input_values:
- keystone_host: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- SwiftStorage0Deploy:
- type: OS::Heat::StructuredDeployment
- properties:
- server: {get_resource: SwiftStorage0}
- config: {get_resource: SwiftConfig}
- signal_transport: NO_SIGNAL
- input_values:
- swift_hash_suffix: {get_param: SwiftHashSuffix}
- swift_mount_check: {get_param: SwiftMountCheck}
- swift_password: {get_param: SwiftPassword}
- swift_part_power: {get_param: SwiftPartPower}
- swift_devices:
- Fn::Join:
- - ', '
- - Merge::Map:
- controller0:
- Fn::Join:
- - ''
- - - 'r1z1-'
- - {get_attr: [controller0, networks, ctlplane, 0]}
- - ':%PORT%/d1'
- SwiftStorage0:
- Fn::Join:
- - ''
- - - 'r1z1-'
- - {get_attr: [SwiftStorage0, networks, ctlplane, 0]}
- - ':%PORT%/d1'
- swift_proxy_memcache:
- Fn::Join:
- - ','
- - Merge::Map:
- controller0:
- Fn::Join:
- - ', '
- - - Fn::Join:
- - ''
- - - {get_attr: [controller0, networks, ctlplane, 0]}
- - ':11211'
- swift_replicas: { get_param: SwiftReplicas}
- swift_min_part_hours: { get_param: SwiftMinPartHours}
diff --git a/deprecated/undercloud-bm-nova-config.yaml b/deprecated/undercloud-bm-nova-config.yaml
deleted file mode 100644
index 306dc0a1..00000000
--- a/deprecated/undercloud-bm-nova-config.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-resources:
- undercloudNovaConfig:
- type: OS::Heat::StructuredConfig
- properties:
- config:
- nova:
- compute_hostname: undercloud
- compute_driver: {get_param: NovaComputeDriver}
- compute_manager: {get_param: NovaComputeManager}
- scheduler_host_manager: {get_param: NovaSchedulerHostManager}
- db: {list_join: ['', ['mysql://nova:', {get_param: NovaPassword}, '@localhost/nova']]}
- default_ephemeral_format: ext4
- host: 127.0.0.1
- metadata-proxy: false
- tuning:
- ram_allocation_ratio: 1.0
- reserved_host_memory_mb: 0
- baremetal:
- arch: {get_input: nova_arch}
- db: {list_join: ['', ['mysql://nova:', {get_param: NovaPassword}, '@localhost/nova_bm']]}
- power_manager: {get_input: power_manager}
- pxe_deploy_timeout: {get_input: pxe_deploy_timeout}
- service-password: {get_input: nova_service_password}
diff --git a/deprecated/undercloud-bm-nova-deploy.yaml b/deprecated/undercloud-bm-nova-deploy.yaml
deleted file mode 100644
index dca68329..00000000
--- a/deprecated/undercloud-bm-nova-deploy.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-parameters:
- NeutronPublicInterface:
- default: nic1
- description: What interface to bridge onto br-ex for network nodes.
- type: string
- NovaComputeDriver:
- default: baremetal.driver.BareMetalDriver
- description: Full class name for the Nova compute driver
- type: string
- NovaComputeManager:
- default: nova.compute.manager.ComputeManager
- description: Full class name for the Nova compute manager
- type: string
- NovaSchedulerHostManager:
- default: nova.scheduler.host_manager.HostManager
- description: Full class name for the Nova scheduler host manager
- type: string
- PowerManager:
- default: nova.virt.baremetal.ipmi.IPMI
- description: Bare metal power manager driver.
- type: string
- PxeDeployTimeout:
- default: 2400
- description: Timeout for PXE deployment of baremetal nodes
- type: number
-resources:
- undercloudNovaDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: undercloudNovaConfig}
- server: {get_resource: undercloud}
- signal_transport: NO_SIGNAL
- input_values:
- nova_arch: {get_param: BaremetalArch}
- power_manager: {get_param: PowerManager}
- pxe_deploy_timeout: {get_param: PxeDeployTimeout}
- nova_service_password: {get_param: NovaPassword}
diff --git a/deprecated/undercloud-source.yaml b/deprecated/undercloud-source.yaml
deleted file mode 100644
index 317896d9..00000000
--- a/deprecated/undercloud-source.yaml
+++ /dev/null
@@ -1,412 +0,0 @@
-description: Deprecated. Use instack-undercloud instead. All-in-one baremetal OpenStack and all dependencies.
-heat_template_version: 2013-05-23
-parameters:
- AdminPassword:
- default: unset
- description: The password for the keystone admin account, used for monitoring, querying neutron etc.
- type: string
- hidden: true
- AdminToken:
- default: unset
- description: The keystone auth secret.
- type: string
- hidden: true
- BaremetalArch:
- default: i386
- description: The architecture to use in Nova-BM - i386 or amd64.
- type: string
- CeilometerMeteringSecret:
- default: unset
- description: Secret shared by the ceilometer services.
- type: string
- hidden: true
- CeilometerPassword:
- default: unset
- description: The password for the ceilometer service account.
- type: string
- hidden: true
- Debug:
- default: ''
- description: Set to True to enable debugging on all services.
- type: string
- DefaultSignalTransport:
- default: CFN_SIGNAL
- description: Transport to use for software-config signals.
- type: string
- constraints:
- - allowed_values: [ CFN_SIGNAL, HEAT_SIGNAL, NO_SIGNAL ]
- ExtraConfig:
- default: {}
- description: |
- Additional configuration to inject into the cluster. The JSON should have
- the following structure:
- {"FILEKEY":
-      {"config":
- [{"section": "SECTIONNAME",
- "values":
- [{"option": "OPTIONNAME",
- "value": "VALUENAME"
- }
- ]
- }
- ]
- }
- }
- For instance:
- {"nova":
- {"config":
- [{"section": "default",
- "values":
- [{"option": "compute_manager",
- "value": "ironic.nova.compute.manager.ClusterComputeManager"
- }
- ]
- },
- {"section": "cells",
- "values":
- [{"option": "driver",
- "value": "nova.cells.rpc_driver.CellsRPCDriver"
- }
- ]
- }
- ]
- }
- }
- type: json
- Flavor:
- description: Flavor to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- GlanceLogFile:
- description: The filepath of the file to use for logging messages from Glance.
- type: string
- default: ''
- GlancePassword:
- default: unset
- description: The password for the glance service account, used by the glance services.
- type: string
- hidden: true
- GlancePort:
- default: 9292
- description: Glance port.
- type: string
- GlanceProtocol:
- default: http
- description: Protocol to use when connecting to glance, set to https for SSL.
- type: string
- GlanceNotifierStrategy:
- description: Strategy to use for Glance notification queue
- type: string
- default: noop
- KeyName:
- default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
- type: string
- KeystoneCACertificate:
- default: ''
- description: Keystone self-signed certificate authority certificate.
- type: string
- KeystoneSigningCertificate:
- default: ''
- description: Keystone certificate for verifying token validity.
- type: string
- KeystoneSigningKey:
- default: ''
- description: Keystone key for signing tokens.
- type: string
- hidden: true
- KeystoneSSLCertificate:
- default: ''
- description: Keystone certificate for verifying token validity.
- type: string
- KeystoneSSLCertificateKey:
- default: ''
- description: Keystone key for signing tokens.
- type: string
- hidden: true
- HeatPassword:
- default: unset
- description: The password for the Heat service account, used by the Heat services.
- type: string
- hidden: true
- HeatStackDomainAdminPassword:
- description: Password for heat_domain_admin user.
- type: string
- default: ''
- hidden: true
- ImageUpdatePolicy:
- default: REBUILD_PRESERVE_EPHEMERAL
- description: What policy to use when reconstructing instances. REBUILD for rebuilds,
- REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
- type: string
- MysqlInnodbBufferPoolSize:
- description: >
- Specifies the size of the buffer pool in megabytes. Setting to
- zero should be interpreted as "no value" and will defer to the
- lower level default.
- type: number
- default: 0
- NeutronPassword:
- default: unset
- description: The password for the neutron service account, used by neutron agents.
- type: string
- hidden: true
- NeutronPublicInterfaceDefaultRoute:
- default: ''
- description: A custom default route for the NeutronPublicInterface.
- type: string
- NeutronPublicInterfaceIP:
- default: ''
- description: >
- A custom IP address to put onto the NeutronPublicInterface bridge.
- See also NeutronPublicInterfaceTagIP for adding a VLAN tagging IP.
- NeutronPublicInterfaceIP is deprecated in the context of deploying
-      underclouds - it's only needed for the seed bootstrap process.
- type: string
- NeutronPublicInterfaceRawDevice:
- default: ''
- description: If set, the public interface is a vlan with this device as the raw device.
- type: string
- NeutronPublicInterfaceTag:
- default: ''
- description: >
- VLAN tag for creating a public VLAN. The tag will be used to
- create an access port on the exterior bridge, and that port will be
- given the IP address returned by neutron from the public network.
- type: string
- NovaPassword:
- default: unset
- description: The password for the nova service account, used by nova-api.
- type: string
- hidden: true
- NeutronDVR:
- default: 'False'
- type: string
- NtpServer:
- type: string
- default: ''
- RabbitCookieSalt:
- type: string
- default: unset
- description: Salt for the rabbit cookie, change this to force the randomly generated rabbit cookie to change.
- RabbitUserName:
- default: guest
- description: The username for RabbitMQ
- type: string
- RabbitPassword:
- default: guest
- description: The password for RabbitMQ
- type: string
- hidden: true
- SnmpdReadonlyUserName:
- default: ro_snmp_user
- description: The user name for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- SnmpdReadonlyUserPassword:
- default: unset
- description: The user password for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- hidden: true
- undercloudImage:
- default: undercloud
- type: string
-resources:
- RabbitCookie:
- type: OS::Heat::RandomString
- properties:
- length: 20
- salt:
- get_param: RabbitCookieSalt
- MysqlRootPassword:
- type: OS::Heat::RandomString
- properties:
- length: 10
- undercloudConfig:
- type: OS::Heat::StructuredConfig
- properties:
- config:
- completion-signal: {get_input: deploy_signal_id}
- admin-password:
- get_param: AdminPassword
- admin-token:
- get_param: AdminToken
- bootstrap_host:
- bootstrap_nodeid:
- Fn::Select:
- - 0
- - Fn::Select:
- - 0
- - Merge::Map:
- undercloud:
- - get_attr:
- - undercloud
- - name
- nodeid: {get_input: bootstack_nodeid}
- bootstack:
- public_interface_ip:
- get_param: NeutronPublicInterfaceIP
- controller-address:
- get_input: controller_host
- corosync:
- bindnetaddr: {get_input: controller_host}
- mcastport: 5577
- nodes:
- Merge::Map:
- controller0:
- ip: {get_attr: [undercloud, networks, ctlplane, 0]}
- pacemaker:
- stonith_enabled : false
- recheck_interval : 5
- quorum_policy : ignore
- ceilometer:
- db: {list_join: ['', ['mysql://ceilometer:', {get_param: CeilometerPassword}, '@localhost/ceilometer']]}
- debug: {get_param: Debug}
- metering_secret: {get_param: CeilometerMeteringSecret}
- snmpd_readonly_user_name:
- get_param: SnmpdReadonlyUserName
- snmpd_readonly_user_password:
- get_param: SnmpdReadonlyUserPassword
- service-password:
- get_param: CeilometerPassword
- db-password: unset
- glance:
- backend: file
- db: {list_join: ['', ['mysql://glance:', {get_param: GlancePassword}, '@localhost/glance']]}
- debug: {get_param: Debug}
- host: 127.0.0.1
- port:
- get_param: GlancePort
- protocol:
- get_param: GlanceProtocol
- service-password:
- get_param: GlancePassword
- notifier-strategy:
- get_param: GlanceNotifierStrategy
- log-file:
- get_param: GlanceLogFile
- heat:
- admin_password:
- get_param: HeatPassword
- admin_tenant_name: service
- admin_user: heat
- auth_encryption_key: unset___________
- db: {list_join: ['', ['mysql://heat:', {get_param: HeatPassword}, '@localhost/heat']]}
- debug: {get_param: Debug}
- stack_domain_admin_password: {get_param: HeatStackDomainAdminPassword}
- watch_server_url: {get_input: heat.watch_server_url}
- metadata_server_url: {get_input: heat.metadata_server_url}
- waitcondition_server_url: {get_input: heat.waitcondition_server_url}
- keystone:
- db: {list_join: ['', ['mysql://keystone:', {get_param: AdminToken}, '@localhost/keystone']]}
- debug: {get_param: Debug}
- host: 127.0.0.1
- ca_certificate: {get_param: KeystoneCACertificate}
- signing_key: {get_param: KeystoneSigningKey}
- signing_certificate: {get_param: KeystoneSigningCertificate}
- ssl:
- certificate: {get_param: KeystoneSSLCertificate}
- certificate_key: {get_param: KeystoneSSLCertificateKey}
- mysql:
- innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize}
- root-password: {get_resource: MysqlRootPassword}
- bind_address: 127.0.0.1
- neutron:
- debug: {get_param: Debug}
- host: 127.0.0.1
- ovs_db: {list_join: ['', ['mysql://neutron:', {get_param: NeutronPassword}, '@localhost/ovs_neutron?charset=utf8']]}
- ovs:
- local_ip:
- get_input: controller_host
- public_interface:
- get_param: NeutronPublicInterface
- public_interface_raw_device:
- get_param: NeutronPublicInterfaceRawDevice
- public_interface_route:
- get_param: NeutronPublicInterfaceDefaultRoute
- public_interface_tag:
- get_param: NeutronPublicInterfaceTag
- physical_bridge: br-ctlplane
- physical_network: ctlplane
- network_vlan_ranges: ctlplane
- bridge_mappings: ctlplane:br-ctlplane
- tenant_network_type: vlan
- enable_tunneling: 'False'
- service-password:
- get_param: NeutronPassword
- rabbit:
- host: 127.0.0.1
- username:
- get_param: RabbitUserName
- password:
- get_param: RabbitPassword
- cookie:
- get_attr:
- - RabbitCookie
- - value
- ntp:
- servers:
- - {server: {get_param: NtpServer}}
- undercloudPassthroughConfig:
- type: OS::Heat::StructuredConfig
- properties:
- config: {get_input: passthrough_config}
- undercloud:
- type: OS::Nova::Server
- properties:
- image:
- get_param: undercloudImage
- flavor:
- get_param: Flavor
- key_name:
- get_param: KeyName
- image_update_policy:
- get_param: ImageUpdatePolicy
- networks:
- - network: ctlplane
- user_data_format: SOFTWARE_CONFIG
- undercloudDeployment:
- depends_on: [undercloudPassthroughDeployment]
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: undercloudConfig}
- server: {get_resource: undercloud}
- signal_transport: {get_param: DefaultSignalTransport}
- input_values:
- bootstack_nodeid:
- get_attr:
- - undercloud
- - name
- controller_host:
- get_attr:
- - undercloud
- - networks
- - ctlplane
- - 0
- heat.watch_server_url:
- Fn::Join:
- - ''
- - - 'http://'
- - get_attr: [undercloud, networks, ctlplane, 0]
- - ':8003'
- heat.metadata_server_url:
- Fn::Join:
- - ''
- - - 'http://'
- - {get_attr: [undercloud, networks, ctlplane, 0]}
- - ':8000'
- heat.waitcondition_server_url:
- Fn::Join:
- - ''
- - - 'http://'
- - {get_attr: [undercloud, networks, ctlplane, 0]}
- - ':8000/v1/waitcondition'
- undercloudPassthroughDeployment:
- depends_on: [undercloudNovaDeployment]
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: undercloudPassthroughConfig}
- server: {get_resource: undercloud}
- signal_transport: NO_SIGNAL
- input_values:
- passthrough_config: {get_param: ExtraConfig}
diff --git a/deprecated/undercloud-vlan-port.yaml b/deprecated/undercloud-vlan-port.yaml
deleted file mode 100644
index 7e39f5fc..00000000
--- a/deprecated/undercloud-vlan-port.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-outputs:
- PublicIP:
- description: Address for registering endpoints in the cloud.
- value: {get_attr: [undercloud_VLANPort, fixed_ips, 0, ip_address]}
-resources:
- # Override the main template which can also supply a static route.
- undercloud_99VLANPort:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: undercloudVLANPortConfig}
- server: {get_resource: undercloud}
- signal_transport: NO_SIGNAL
- undercloudVLANPortConfig:
- type: OS::Heat::StructuredConfig
- properties:
- config:
- neutron:
- ovs:
- public_interface_tag_ip:
- Fn::Join:
- - '/'
- - - {get_attr: [undercloud_VLANPort, fixed_ips, 0, ip_address]}
- - '24'
- # This should also be pulled out of the subnet. May need a
- # neutron fix too - XXX make into a parameter and feed it
- # in via _undercloud.sh for now.
- # Tell the instance to apply the default route.
- # Reinstate when https://bugs.launchpad.net/heat/+bug/1336656 is
- # sorted
- # public_interface_route:
- # get_attr: [undercloud_VLANPort, fixed_ips, 0, subnet, gateway_ip]
- undercloud_VLANPort:
- type: OS::Neutron::Port
- properties:
- name: undercloud_vlan
- network: public
-      replacement_policy: AUTO
\ No newline at end of file
diff --git a/deprecated/undercloud-vm-ironic-config.yaml b/deprecated/undercloud-vm-ironic-config.yaml
deleted file mode 100644
index cc0dafb6..00000000
--- a/deprecated/undercloud-vm-ironic-config.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-resources:
- undercloudNovaConfig:
- type: OS::Heat::StructuredConfig
- properties:
- config:
- nova:
- compute_hostname: undercloud
- compute_driver: {get_param: NovaComputeDriver}
- compute_manager: {get_param: NovaComputeManager}
- scheduler_host_manager: {get_param: NovaSchedulerHostManager}
- db: {list_join: ['', ['mysql://nova:', {get_param: NovaPassword}, '@localhost/nova']]}
- debug: {get_param: Debug}
- default_ephemeral_format: ext4
- host: 127.0.0.1
- metadata-proxy: false
- tuning:
- ram_allocation_ratio: 1.0
- reserved_host_memory_mb: 0
- service-password: {get_input: nova_service_password}
- undercloudIronicConfig:
- type: OS::Heat::StructuredConfig
- properties:
- config:
- ironic:
- db: {list_join: ['', ['mysql://ironic:', {get_param: IronicPassword}, '@localhost/ironic']]}
- service-password: {get_input: ironic_service_password}
- virtual_power_ssh_key: {get_input: virtual_power_ssh_key}
diff --git a/deprecated/undercloud-vm-ironic-deploy.yaml b/deprecated/undercloud-vm-ironic-deploy.yaml
deleted file mode 100644
index 5d23495c..00000000
--- a/deprecated/undercloud-vm-ironic-deploy.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-parameters:
- IronicPassword:
- type: string
- description: Ironic password for keystone access
- hidden: true
- NeutronPublicInterface:
- default: nic1
- description: What interface to bridge onto br-ex for network nodes.
- type: string
- NovaComputeDriver:
- default: nova.virt.ironic.driver.IronicDriver
- description: Full class name for the Nova compute driver
- type: string
- NovaComputeManager:
- default: ironic.nova.compute.manager.ClusteredComputeManager
- description: Full class name for the Nova compute manager
- type: string
- NovaSchedulerHostManager:
- default: nova.scheduler.ironic_host_manager.IronicHostManager
- description: Full class name for the Nova scheduler host manager
- type: string
- PowerSSHPrivateKey:
-    description: Private key used to ssh to a virtual power host.
- type: string
- hidden: true
-resources:
- undercloudNovaDeployment:
- depends_on: [undercloudIronicDeployment]
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: undercloudNovaConfig}
- server: {get_resource: undercloud}
- signal_transport: NO_SIGNAL
- input_values:
- nova_service_password: {get_param: NovaPassword}
- undercloudIronicDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: undercloudIronicConfig}
- server: {get_resource: undercloud}
- signal_transport: NO_SIGNAL
- input_values:
- ironic_service_password: {get_param: IronicPassword}
- virtual_power_ssh_key: {get_param: PowerSSHPrivateKey}
diff --git a/deprecated/undercloud-vm-nova-config.yaml b/deprecated/undercloud-vm-nova-config.yaml
deleted file mode 100644
index 1fb8abb3..00000000
--- a/deprecated/undercloud-vm-nova-config.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-resources:
- undercloudNovaConfig:
- type: OS::Heat::StructuredConfig
- properties:
- config:
- nova:
- compute_hostname: undercloud
- compute_driver: {get_param: NovaComputeDriver}
- compute_manager: {get_param: NovaComputeManager}
- scheduler_host_manager: {get_param: NovaSchedulerHostManager}
- db: {list_join: ['', ['mysql://nova:', {get_param: NovaPassword}, '@localhost/nova']]}
- default_ephemeral_format: ext4
- host: 127.0.0.1
- metadata-proxy: false
- tuning:
- ram_allocation_ratio: 1.0
- reserved_host_memory_mb: 0
- baremetal:
- arch: {get_input: nova_arch}
- db: {list_join: ['', ['mysql://nova:', {get_param: NovaPassword}, '@localhost/nova_bm']]}
- power_manager: {get_input: power_manager}
- pxe_deploy_timeout: {get_input: pxe_deploy_timeout}
- virtual_power:
- user: {get_input: user}
- ssh_host: {get_input: ssh_host}
- ssh_key: {get_input: ssh_key}
- type: virsh
- service-password: {get_input: nova_service_password}
-
diff --git a/deprecated/undercloud-vm-nova-deploy.yaml b/deprecated/undercloud-vm-nova-deploy.yaml
deleted file mode 100644
index da15b46d..00000000
--- a/deprecated/undercloud-vm-nova-deploy.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-parameters:
- NeutronPublicInterface:
- default: nic1
- description: What interface to bridge onto br-ex for network nodes.
- type: string
- NovaComputeDriver:
- default: baremetal.driver.BareMetalDriver
- description: Full class name for the Nova compute driver
- type: string
- NovaComputeManager:
- default: nova.compute.manager.ComputeManager
- description: Full class name for the Nova compute manager
- type: string
- NovaSchedulerHostManager:
- default: nova.scheduler.host_manager.HostManager
- description: Full class name for the Nova scheduler host manager
- type: string
- PowerManager:
- default: nova.virt.baremetal.virtual_power_driver.VirtualPowerManager
- description: Bare metal power manager driver.
- type: string
- PowerSSHHost:
- default: 192.168.122.1
- description: SSH host to ssh to for power management operations.
- type: string
- PowerSSHPrivateKey:
-    description: Private key used to ssh to a virtual power host.
- type: string
- hidden: true
- PowerUserName:
- default: stack
- description: What username to ssh to the virtual power host with.
- type: string
- PxeDeployTimeout:
- default: 2400
- description: Timeout for PXE deployment of baremetal nodes
- type: number
-resources:
- undercloudNovaDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: undercloudNovaConfig}
- server: {get_resource: undercloud}
- signal_transport: NO_SIGNAL
- input_values:
- nova_arch: {get_param: BaremetalArch}
- power_manager: {get_param: PowerManager}
- pxe_deploy_timeout: {get_param: PxeDeployTimeout}
- nova_service_password: {get_param: NovaPassword}
- user: {get_param: PowerUserName}
- ssh_host: {get_param: PowerSSHHost}
- ssh_key: {get_param: PowerSSHPrivateKey}
diff --git a/docker/README-containers.md b/docker/README-containers.md
index 0e67c183..ff062a93 100644
--- a/docker/README-containers.md
+++ b/docker/README-containers.md
@@ -12,7 +12,7 @@ Download the fedora atomic image into glance:
```
wget https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Atomic-22-20150521.x86_64.qcow2
-glance image-create --name fedora-atomic --file Fedora-Cloud-Atomic-22-20150521.x86_64.qcow2 --disk-format qcow2 --container-format bare
+glance image-create --name atomic-image --file Fedora-Cloud-Atomic-22-20150521.x86_64.qcow2 --disk-format qcow2 --container-format bare
```
## Configuring TripleO
@@ -22,7 +22,12 @@ https://github.com/openstack/tripleo-common/blob/master/scripts/tripleo.sh
Create the Overcloud:
```
-$ openstack overcloud deploy --templates=tripleo-heat-templates -e tripleo-heat-templates/environments/docker-rdo.yaml --libvirt-type=qemu
+$ openstack overcloud deploy --templates=tripleo-heat-templates -e tripleo-heat-templates/environments/docker.yaml -e tripleo-heat-templates/environments/docker-network.yaml --libvirt-type=qemu
+```
+
+Using Network Isolation in the Overcloud:
+```
+$ openstack overcloud deploy --templates=tripleo-heat-templates -e tripleo-heat-templates/environments/docker.yaml -e tripleo-heat-templates/environments/docker-network-isolation.yaml --libvirt-type=qemu
```
Source the overcloudrc and then you can use the overcloud.
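A minimal sketch of that last step, assuming the deploy command wrote the usual overcloudrc credentials file into the current directory:
```
$ source overcloudrc
$ openstack server list    # any OpenStack CLI command now targets the overcloud APIs
```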
diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml
index 0d049ebc..8f786f72 100644
--- a/docker/compute-post.yaml
+++ b/docker/compute-post.yaml
@@ -1,5 +1,4 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: 2015-10-15
description: >
OpenStack compute node post deployment for Docker.
@@ -9,6 +8,9 @@ parameters:
NodeConfigIdentifiers:
type: json
description: Value which changes if the node configuration may need to be re-applied
+ DockerNamespace:
+ type: string
+ default: tripleoupstream
DockerComputeImage:
type: string
DockerComputeDataImage:
@@ -23,6 +25,26 @@ parameters:
type: string
DockerOpenvswitchDBImage:
type: string
+ LibvirtConfig:
+ type: string
+ default: "/etc/libvirt/libvirtd.conf"
+ NovaConfig:
+ type: string
+ default: "/etc/nova/nova.conf"
+ NeutronOpenvswitchAgentConfig:
+ type: string
+ default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/ml2/ml2_conf.ini"
+ NeutronAgentConfig:
+ type: string
+ default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
+ NeutronAgentPluginVolume:
+ type: string
+ description: The neutron agent plugin to mount into the neutron-agents container
+ default: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/ovs_neutron_plugin.ini:ro"
+ NeutronAgentOvsVolume:
+ type: string
+    description: The neutron ovs agent volume to mount into the neutron-agents container
+ default: " "
resources:
@@ -67,8 +89,90 @@ resources:
config: {get_resource: CopyEtcConfig}
servers: {get_param: servers}
+ CopyJsonConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: libvirt_config
+ - name: nova_config
+ - name: neutron_openvswitch_agent_config
+ - name: neutron_agent_config
+ config: |
+ #!/bin/python
+ import json
+ import os
+
+ data = {}
+ file_perms = '600'
+ libvirt_perms = '644'
+
+ libvirt_config = os.getenv('libvirt_config').split(',')
+ nova_config = os.getenv('nova_config').split(',')
+ neutron_openvswitch_agent_config = os.getenv('neutron_openvswitch_agent_config').split(',')
+ neutron_agent_config = os.getenv('neutron_agent_config').split(',')
+
+ # Command, Config_files, Owner, Perms
+ services = {'nova-libvirt': ['/usr/sbin/libvirtd', libvirt_config, 'root', libvirt_perms],
+ 'nova-compute': ['/usr/bin/nova-compute', nova_config, 'nova', file_perms],
+ 'neutron-openvswitch-agent': ['/usr/bin/neutron-openvswitch-agent', neutron_openvswitch_agent_config, 'neutron', file_perms],
+ 'neutron-agent': ['/usr/bin/neutron-openvswitch-agent', neutron_agent_config, 'neutron', file_perms],
+ 'ovs-vswitchd': ['/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/openvswitch/ovs-vswitchd.log'],
+ 'ovsdb-server': ['/usr/sbin/ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --log-file=/var/log/openvswitch/ovsdb-server.log']
+ }
+
+
+ def build_config_files(config, owner, perms):
+ config_source = '/var/lib/kolla/config_files/'
+ config_files_dict = {}
+ source = os.path.basename(config)
+ dest = config
+ config_files_dict.update({'source': config_source + source,
+ 'dest': dest,
+ 'owner': owner,
+ 'perm': perms})
+ return config_files_dict
+
+
+ for service in services:
+ if service != 'ovs-vswitchd' and service != 'ovsdb-server':
+ command = services.get(service)[0]
+ config_files = services.get(service)[1]
+ owner = services.get(service)[2]
+ perms = services.get(service)[3]
+ config_files_list = []
+ for config_file in config_files:
+ if service == 'nova-libvirt':
+ command = command + ' --config ' + config_file
+ else:
+ command = command + ' --config-file ' + config_file
+ data['command'] = command
+ config_files_dict = build_config_files(config_file, owner, perms)
+ config_files_list.append(config_files_dict)
+ data['config_files'] = config_files_list
+ else:
+ data['command'] = services.get(service)[0]
+ data['config_files'] = []
+
+ json_config_dir = '/var/lib/etc-data/json-config/'
+ with open(json_config_dir + service + '.json', 'w') as json_file:
+ json.dump(data, json_file, sort_keys=True, indent=4, separators=(',', ': '))
+
+ CopyJsonDeployment:
+ type: OS::Heat::SoftwareDeployments
+ depends_on: CopyEtcDeployment
+ properties:
+ config: {get_resource: CopyJsonConfig}
+ servers: {get_param: servers}
+ input_values:
+ libvirt_config: {get_param: LibvirtConfig}
+ nova_config: {get_param: NovaConfig}
+ neutron_openvswitch_agent_config: {get_param: NeutronOpenvswitchAgentConfig}
+ neutron_agent_config: {get_param: NeutronAgentConfig}
+
NovaComputeContainersDeploymentOVS:
type: OS::Heat::StructuredDeployments
+ depends_on: CopyJsonDeployment
properties:
config: {get_resource: NovaComputeContainersConfigOVS}
servers: {get_param: servers}
@@ -79,7 +183,10 @@ resources:
group: docker-compose
config:
ovsvswitchd:
- image: {get_param: DockerOvsVswitchdImage}
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOvsVswitchdImage} ]
container_name: ovs-vswitchd
net: host
privileged: true
@@ -87,18 +194,23 @@ resources:
volumes:
- /run:/run
- /lib/modules:/lib/modules:ro
+ - /var/lib/etc-data/json-config/ovs-vswitchd.json:/var/lib/kolla/config_files/config.json
environment:
- - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
openvswitchdb:
- image: {get_param: DockerOpenvswitchDBImage}
- container_name: ovs-db-server
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchDBImage} ]
+ container_name: ovsdb-server
net: host
restart: always
volumes:
- /run:/run
+ - /var/lib/etc-data/json-config/ovsdb-server.json:/var/lib/kolla/config_files/config.json
environment:
- - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
NovaComputeContainersDeploymentNetconfig:
type: OS::Heat::SoftwareDeployments
@@ -122,7 +234,7 @@ resources:
LibvirtContainersDeployment:
type: OS::Heat::StructuredDeployments
- depends_on: [CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig]
+ depends_on: [CopyJsonDeployment, CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig]
properties:
config: {get_resource: LibvirtContainersConfig}
servers: {get_param: servers}
@@ -133,11 +245,20 @@ resources:
group: docker-compose
config:
computedata:
- image: {get_param: DockerComputeDataImage}
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerComputeDataImage} ]
container_name: computedata
+ volumes:
+ - /var/lib/nova/instances
+ - /var/lib/libvirt
libvirt:
- image: {get_param: DockerLibvirtImage}
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
container_name: libvirt
net: host
pid: host
@@ -146,16 +267,17 @@ resources:
volumes:
- /run:/run
- /lib/modules:/lib/modules:ro
- - /var/lib/etc-data/libvirt/libvirtd.conf:/opt/kolla/libvirtd/libvirtd.conf
- - /var/lib/nova/instances:/var/lib/nova/instances
+ - /sys/fs/cgroup:/sys/fs/cgroup
+ - /var/lib/etc-data/json-config/nova-libvirt.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/etc-data/libvirt/libvirtd.conf:/var/lib/kolla/config_files/libvirtd.conf
environment:
- - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
volumes_from:
- computedata
NovaComputeContainersDeployment:
type: OS::Heat::StructuredDeployments
- depends_on: [CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig, LibvirtContainersDeployment]
+ depends_on: [CopyJsonDeployment, CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig, LibvirtContainersDeployment]
properties:
config: {get_resource: NovaComputeContainersConfig}
servers: {get_param: servers}
@@ -166,7 +288,10 @@ resources:
group: docker-compose
config:
openvswitch:
- image: {get_param: DockerOpenvswitchImage}
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
container_name: openvswitch
net: host
privileged: true
@@ -174,52 +299,58 @@ resources:
volumes:
- /run:/run
- /lib/modules:/lib/modules:ro
+ - /var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json
- /var/lib/etc-data/neutron/neutron.conf:/etc/kolla/neutron-openvswitch-agent/:ro
- - /var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/etc/kolla/neutron-openvswitch-agent/:ro
+ - /var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro
+ - /var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro
environment:
- - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
volumes_from:
- computedata
- # FIXME: Kolla now uses a JSON model to run custom commands. We rebuilt a custom container to read in KOLLA_COMMAND_ARGS
- # FIXME: Here we're subjugating kolla's start scripts because we want our custom run command
neutronagent:
- image: {get_param: DockerOpenvswitchImage}
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
container_name: neutronagent
net: host
pid: host
privileged: true
restart: always
volumes:
- - /run:/run
- - /lib/modules:/lib/modules:ro
- - /var/lib/etc-data/neutron/neutron.conf:/etc/neutron/neutron.conf:ro
- - /var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini:ro
+ str_split:
+ - ","
+ - list_join:
+ - ","
+ - [ "/run:/run", "/lib/modules:/lib/modules:ro",
+ "/var/lib/etc-data/json-config/neutron-agent.json:/var/lib/kolla/config_files/config.json",
+ "/var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro",
+ {get_param: NeutronAgentPluginVolume},
+ {get_param: NeutronAgentOvsVolume} ]
environment:
- - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS
- # FIXME: Kolla now uses a JSON model to run custom commands. We rebuilt a custom container to read in KOLLA_COMMAND_ARGS
- - KOLLA_COMMAND_ARGS=--config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
volumes_from:
- computedata
novacompute:
- image: {get_param: DockerComputeImage}
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerComputeImage} ]
container_name: novacompute
net: host
privileged: true
restart: always
volumes:
- /run:/run
- - /sys/fs/cgroup:/sys/fs/cgroup
- /lib/modules:/lib/modules:ro
- - /var/lib/etc-data/:/etc/:ro
- - /var/lib/nova/instances:/var/lib/nova/instances
+ - /var/lib/etc-data/json-config/nova-compute.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/etc-data/nova/nova.conf:/var/lib/kolla/config_files/nova.conf:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
volumes_from:
- computedata
- # FIXME: this skips the kolla start.sh script and just starts Nova
- # Ideally we'd have an environment that switched the kolla container
- # to be externally configured.
- command: /usr/bin/nova-compute
ExtraConfig:
depends_on: NovaComputeContainersDeployment
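For reference, the per-service JSON emitted by the CopyJsonConfig script above lands in /var/lib/etc-data/json-config/ on the compute node and is bind-mounted into the matching container as /var/lib/kolla/config_files/config.json. A sketch of what nova-compute.json would contain with the default NovaConfig value (derived from the script, not captured output):
```
$ cat /var/lib/etc-data/json-config/nova-compute.json
# Expected shape, roughly:
# {
#     "command": "/usr/bin/nova-compute --config-file /etc/nova/nova.conf",
#     "config_files": [
#         {
#             "dest": "/etc/nova/nova.conf",
#             "owner": "nova",
#             "perm": "600",
#             "source": "/var/lib/kolla/config_files/nova.conf"
#         }
#     ]
# }
```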
diff --git a/docker/firstboot/install_docker_agents.yaml b/docker/firstboot/install_docker_agents.yaml
index 8adc8939..22a8ff92 100644
--- a/docker/firstboot/install_docker_agents.yaml
+++ b/docker/firstboot/install_docker_agents.yaml
@@ -4,6 +4,12 @@ parameters:
DockerAgentImage:
type: string
default: dprince/heat-docker-agents-centos
+ DockerNamespace:
+ type: string
+ default: kollaglue
+ DockerNamespaceIsRegistry:
+ type: boolean
+ default: false
resources:
@@ -21,6 +27,8 @@ resources:
str_replace:
params:
$agent_image: {get_param: DockerAgentImage}
+ $docker_registry: {get_param: DockerNamespace}
+ $docker_namespace_is_registry: {get_param: DockerNamespaceIsRegistry}
template: {get_file: ./start_docker_agents.sh}
outputs:
diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh
index caf511bd..a0e95d11 100644
--- a/docker/firstboot/start_docker_agents.sh
+++ b/docker/firstboot/start_docker_agents.sh
@@ -7,7 +7,7 @@ if ! hostname | grep compute &>/dev/null; then
exit 0
fi
-mkdir -p /var/lib/etc-data/ #FIXME: this should be a docker data container
+mkdir -p /var/lib/etc-data/json-config #FIXME: this should be a docker data container
# heat-docker-agents service
cat <<EOF > /etc/systemd/system/heat-docker-agents.service
@@ -38,23 +38,23 @@ EOF
#echo "ADD_REGISTRY='--registry-mirror $docker_registry'" >> /etc/sysconfig/docker
# Local docker registry 1.8
-#/bin/sed -i s/ADD_REGISTRY/#ADD_REGISTRY/ /etc/sysconfig/docker
+if [ $docker_namespace_is_registry ]; then
+ /bin/sed -i "s/# INSECURE_REGISTRY='--insecure-registry '/INSECURE_REGISTRY='--insecure-registry $docker_registry'/g" /etc/sysconfig/docker
+fi
/sbin/setenforce 0
/sbin/modprobe ebtables
-# Create /var/lib/etc-data for now. FIXME: This should go into a data container.
-#mkdir -p /var/lib/etc-data
-
echo nameserver 8.8.8.8 > /etc/resolv.conf
# We need hostname -f to return in a centos container for the puppet hook
HOSTNAME=$(hostname)
echo "127.0.0.1 $HOSTNAME.localdomain $HOSTNAME" >> /etc/hosts
-# Another hack.. we need latest docker..
+# Another hack.. we need a different docker version
+# (should obviously be dropped once the atomic image contains docker 1.8.2)
/usr/bin/systemctl stop docker.service
-/bin/curl -o /tmp/docker https://get.docker.com/builds/Linux/x86_64/docker-latest
+/bin/curl -o /tmp/docker https://get.docker.com/builds/Linux/x86_64/docker-1.8.2
/bin/mount -o remount,rw /usr
/bin/rm /bin/docker
/bin/cp /tmp/docker /bin/docker
diff --git a/environments/docker-network-isolation.yaml b/environments/docker-network-isolation.yaml
new file mode 100644
index 00000000..257d03dc
--- /dev/null
+++ b/environments/docker-network-isolation.yaml
@@ -0,0 +1,4 @@
+parameter_defaults:
+ NeutronAgentConfig: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/openvswitch_agent.ini"
+ NeutronAgentPluginVolume: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro"
+ NeutronAgentOvsVolume: "/var/lib/etc-data/neutron/conf.d/neutron-openvswitch-agent:/etc/neutron/conf.d/neutron-openvswitch-agent:ro"
diff --git a/environments/docker-network.yaml b/environments/docker-network.yaml
new file mode 100644
index 00000000..f10ec389
--- /dev/null
+++ b/environments/docker-network.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Compute::Net::SoftwareConfig: ../net-config-bridge.yaml
diff --git a/environments/docker-rdo.yaml b/environments/docker-rdo.yaml
deleted file mode 100644
index d5791369..00000000
--- a/environments/docker-rdo.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-resource_registry:
- # Docker container with heat agents for containerized compute node.
- OS::TripleO::ComputePostDeployment: ../docker/compute-post.yaml
- OS::TripleO::NodeUserData: ../docker/firstboot/install_docker_agents.yaml
- OS::TripleO::Compute::Net::SoftwareConfig: ../net-config-bridge.yaml
-
-parameters:
- NovaImage: fedora-atomic
-
-parameter_defaults:
- DockerComputeImage: rthallisey/centos-binary-nova-compute:liberty
- DockerComputeDataImage: kollaglue/centos-rdo-nova-compute-data:liberty2
- DockerLibvirtImage: kollaglue/centos-rdo-nova-libvirt:liberty2
- DockerNeutronAgentImage: kollaglue/centos-rdo-neutron-agents:liberty2
- DockerOpenvswitchImage: rthallisey/centos-rdo-neutron-openvswitch-agent:latest
- DockerOvsVswitchdImage: kollaglue/centos-rdo-ovs-vswitchd:liberty2
- DockerOpenvswitchDBImage: kollaglue/centos-rdo-ovs-db-server:liberty2
diff --git a/environments/docker.yaml b/environments/docker.yaml
new file mode 100644
index 00000000..6376b749
--- /dev/null
+++ b/environments/docker.yaml
@@ -0,0 +1,22 @@
+resource_registry:
+ # Docker container with heat agents for containerized compute node.
+ OS::TripleO::ComputePostDeployment: ../docker/compute-post.yaml
+ OS::TripleO::NodeUserData: ../docker/firstboot/install_docker_agents.yaml
+
+parameters:
+ NovaImage: atomic-image
+
+parameter_defaults:
+ # Defaults to 'tripleoupstream'. Specify a local docker registry
+ # Example: 192.168.122.131:8787
+ DockerNamespace: tripleoupstream
+ # Enable local Docker registry
+ DockerNamespaceIsRegistry: false
+ # Compute Node Images
+ DockerComputeImage: centos-binary-nova-compute:latest
+ DockerComputeDataImage: centos-binary-data:latest
+ DockerLibvirtImage: centos-binary-nova-libvirt:latest
+ DockerNeutronAgentImage: centos-binary-neutron-agents:latest
+ DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:latest
+ DockerOvsVswitchdImage: centos-binary-openvswitch-vswitchd:latest
+ DockerOpenvswitchDBImage: centos-binary-openvswitch-db-server:latest
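To pull the images from a local registry instead of the tripleoupstream namespace, one option is to layer a small override environment on top of docker.yaml; a sketch, using the example registry address from the comment above and a hypothetical file name:
```
$ cat > docker-local-registry.yaml <<'EOF'
parameter_defaults:
  DockerNamespace: 192.168.122.131:8787
  DockerNamespaceIsRegistry: true
EOF
$ openstack overcloud deploy --templates=tripleo-heat-templates \
    -e tripleo-heat-templates/environments/docker.yaml \
    -e tripleo-heat-templates/environments/docker-network.yaml \
    -e docker-local-registry.yaml --libvirt-type=qemu
```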
diff --git a/environments/enable-tls.yaml b/environments/enable-tls.yaml
new file mode 100644
index 00000000..bc4d1bef
--- /dev/null
+++ b/environments/enable-tls.yaml
@@ -0,0 +1,41 @@
+parameter_defaults:
+ SSLCertificate: |
+ The contents of your certificate go here
+ SSLIntermediateCertificate: ''
+ SSLKey: |
+ The contents of the private key go here
+ EndpointMap:
+ CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+ CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+ GlanceRegistryAdmin: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
+ GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
+ GlanceRegistryPublic: {protocol: 'https', port: '9191', host: 'IP_ADDRESS'} # Not set on the loadbalancer yet.
+ HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+ KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+ KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+ NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+ NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
+ NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
+ NovaEC2Public: {protocol: 'https', port: '13773', host: 'CLOUDNAME'}
+ SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+
+resource_registry:
+ OS::TripleO::NodeTLSData: ../puppet/extraconfig/tls/tls-cert-inject.yaml
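Usage follows the same -e pattern as the other environment files; a sketch, assuming the SSLCertificate and SSLKey placeholders above have first been replaced with real PEM contents in a local copy of the file:
```
$ openstack overcloud deploy --templates=tripleo-heat-templates \
    -e tripleo-heat-templates/environments/enable-tls.yaml
```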
diff --git a/environments/external-loadbalancer-vip.yaml b/environments/external-loadbalancer-vip.yaml
new file mode 100644
index 00000000..1cf59825
--- /dev/null
+++ b/environments/external-loadbalancer-vip.yaml
@@ -0,0 +1,37 @@
+resource_registry:
+ OS::TripleO::Network::Ports::NetVipMap: ../network/ports/net_vip_map_external.yaml
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/noop.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/noop.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/noop.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/noop.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/from_service.yaml
+ OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
+ OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+
+parameter_defaults:
+ # When using an external load balancer, set the following in parameter_defaults
+ # to control your VIPs (currently one per network)
+ # NOTE: we will eventually move to one VIP per service
+ #
+ ControlPlaneIP: 192.0.2.251
+ ExternalNetworkVip: 10.0.0.251
+ InternalApiNetworkVip: 172.16.2.251
+ StorageNetworkVip: 172.16.1.251
+ StorageMgmtNetworkVip: 172.16.3.251
+ ServiceVips:
+ redis: 172.16.2.252
+ ControllerIPs:
+ external:
+ - 10.0.0.253
+ internal_api:
+ - 172.16.2.253
+ storage:
+ - 172.16.1.253
+ storage_mgmt:
+ - 172.16.3.253
+ tenant:
+ - 172.16.0.253
+ EnableLoadBalancer: false
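A rough deploy sketch, assuming network isolation is also in use and that the VIP and per-controller addresses above have been adjusted to match the external load balancer:

    # Hypothetical invocation; EnableLoadBalancer: false above keeps the
    # overcloud from managing its own haproxy-based VIPs.
    openstack overcloud deploy --templates \
      -e environments/network-isolation.yaml \
      -e environments/external-loadbalancer-vip.yaml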
diff --git a/environments/inject-trust-anchor.yaml b/environments/inject-trust-anchor.yaml
new file mode 100644
index 00000000..3ecb0d27
--- /dev/null
+++ b/environments/inject-trust-anchor.yaml
@@ -0,0 +1,6 @@
+parameter_defaults:
+ SSLRootCertificate: |
+ The contents of your root CA certificate go here
+
+resource_registry:
+ OS::TripleO::NodeTLSCAData: ../puppet/extraconfig/tls/ca-inject.yaml
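A small helper sketch (hypothetical path and output file) for splicing a real root CA into a copy of this environment; the certificate body has to be indented under the block scalar:

    ca=/etc/pki/ca-trust/source/anchors/my-root-ca.pem   # example input path
    {
      echo 'parameter_defaults:'
      echo '  SSLRootCertificate: |'
      sed 's/^/    /' "$ca"       # indent the PEM body under the block scalar
      echo 'resource_registry:'
      echo '  OS::TripleO::NodeTLSCAData: ../puppet/extraconfig/tls/ca-inject.yaml'
    } > ~/inject-trust-anchor.yaml
    # NOTE: the relative registry path assumes the file lives under environments/;
    # adjust it if the generated file is kept elsewhere.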
diff --git a/environments/ips-from-pool.yaml b/environments/ips-from-pool.yaml
new file mode 100644
index 00000000..8c27fe4e
--- /dev/null
+++ b/environments/ips-from-pool.yaml
@@ -0,0 +1,20 @@
+resource_registry:
+ OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
+ OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+ OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+ OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+ OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+
+parameter_defaults:
+ ControllerIPs:
+ # Each controller will get an IP from the lists below (first controller, first IP)
+ external:
+ - 10.0.0.251
+ internal_api:
+ - 172.16.2.251
+ storage:
+ - 172.16.1.251
+ storage_mgmt:
+ - 172.16.3.251
+ tenant:
+ - 172.16.0.251
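A sketch of how the lists are expected to grow with the controller count (one extra address per list per additional controller, consumed in index order):

    # Illustrative override for a two-controller deployment; the addresses are
    # placeholders and must fall inside the corresponding network ranges.
    cat > ~/controller-ips.yaml <<'EOF'
    parameter_defaults:
      ControllerIPs:
        external:
          - 10.0.0.251
          - 10.0.0.252
        internal_api:
          - 172.16.2.251
          - 172.16.2.252
    EOF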
diff --git a/environments/manage-firewall.yaml b/environments/manage-firewall.yaml
new file mode 100644
index 00000000..071f4108
--- /dev/null
+++ b/environments/manage-firewall.yaml
@@ -0,0 +1,2 @@
+parameters:
+ ManageFirewall: true
diff --git a/environments/net-bond-with-vlans-no-external.yaml b/environments/net-bond-with-vlans-no-external.yaml
new file mode 100644
index 00000000..0da119d9
--- /dev/null
+++ b/environments/net-bond-with-vlans-no-external.yaml
@@ -0,0 +1,26 @@
+# This template configures each role to use a pair of bonded nics (nic2 and
+# nic3) and configures an IP address on each relevant isolated network
+# for each role.
+
+# This template assumes use of network-isolation.yaml and should be specified
+# last on the CLI as a Heat environment so as to override specific
+# registry settings in the network-isolation registry.
+#
+# FIXME: if/when we add functionality to heatclient to include heat
+# environment files we should think about using it here to automatically
+# include network-isolation.yaml.
+resource_registry:
+
+ # Set external ports to noop
+ OS::TripleO::Network::External: ../network/noop.yaml
+ OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/noop.yaml
+
+ OS::TripleO::BlockStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/cinder-storage.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/bond-with-vlans/compute.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller-no-external.yaml
+ OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml
+ OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml
+
+# NOTE: with no external interface we should be able to use the
+# default Neutron l3_agent.ini setting for the external bridge (br-ex)
+# i.e. No need to set: NeutronExternalNetworkBridge: "''"
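Per the ordering note above, a deploy sketch would list network-isolation.yaml first and this file last so its registry entries take precedence:

    # Hypothetical invocation; later -e files override earlier ones.
    openstack overcloud deploy --templates \
      -e environments/network-isolation.yaml \
      -e environments/net-bond-with-vlans-no-external.yaml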
diff --git a/environments/net-single-nic-with-vlans-no-external.yaml b/environments/net-single-nic-with-vlans-no-external.yaml
new file mode 100644
index 00000000..a173df4e
--- /dev/null
+++ b/environments/net-single-nic-with-vlans-no-external.yaml
@@ -0,0 +1,25 @@
+# This template configures each role to use Vlans on a single nic for
+# each isolated network.
+# This template assumes use of network-isolation.yaml and should be specified
+# last on the CLI as a Heat environment so as to override specific
+# registry settings in the network-isolation registry.
+#
+# FIXME: if/when we add functionality to heatclient to include heat
+# environment files we should think about using it here to automatically
+# include network-isolation.yaml.
+resource_registry:
+
+ # Set external ports to noop
+ OS::TripleO::Network::External: ../network/noop.yaml
+ OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/noop.yaml
+
+ # Configure other ports as normal
+ OS::TripleO::BlockStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/cinder-storage.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/single-nic-vlans/compute.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller-no-external.yaml
+ OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml
+ OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml
+
+# NOTE: with no external interface we should be able to use the
+# default Neutron l3_agent.ini setting for the external bridge (br-ex)
+# i.e. No need to set: NeutronExternalNetworkBridge: "''"
diff --git a/environments/network-isolation-no-tunneling.yaml b/environments/network-isolation-no-tunneling.yaml
new file mode 100644
index 00000000..5d2a915b
--- /dev/null
+++ b/environments/network-isolation-no-tunneling.yaml
@@ -0,0 +1,37 @@
+# Enable the creation of Neutron networks for isolated Overcloud
+# traffic and configure each role to assign ports (related
+# to that role) on these networks. This version of the environment
+# has no dedicated VLAN for tunneling; it is intended for deployments
+# that use VLAN mode, flat provider networks, etc.
+resource_registry:
+ OS::TripleO::Network::External: ../network/external.yaml
+ OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
+ OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml
+ OS::TripleO::Network::Storage: ../network/storage.yaml
+
+ # Port assignments for the controller role
+ OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
+ OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml
+ OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml
+ OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+
+ # Port assignments for the compute role
+ OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
+ OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
+
+ # Port assignments for the ceph storage role
+ OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml
+ OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+
+ # Port assignments for the swift storage role
+ OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
+ OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml
+ OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+
+ # Port assignments for the block storage role
+ OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
+ OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml
+ OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+
+ # Port assignments for service virtual IPs for the controller role
+ OS::TripleO::Controller::Ports::RedisVipPort: ../network/ports/vip.yaml
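To see what this variant leaves out relative to the full isolation environment (chiefly the Tenant network and the per-role tenant ports), a quick comparison from the templates directory:

    diff -u environments/network-isolation.yaml \
            environments/network-isolation-no-tunneling.yaml | grep -i tenant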
diff --git a/environments/network-isolation.yaml b/environments/network-isolation.yaml
index 937931d1..87fc22f5 100644
--- a/environments/network-isolation.yaml
+++ b/environments/network-isolation.yaml
@@ -1,12 +1,23 @@
# Enable the creation of Neutron networks for isolated Overcloud
# traffic and configure each role to assign ports (related
# to that role) on these networks.
+# Many networks are disabled by default because they are not used
+# in a typical configuration. Enable them by overriding the relevant
+# resource_registry entries (see environments/network-management.yaml).
resource_registry:
OS::TripleO::Network::External: ../network/external.yaml
OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml
OS::TripleO::Network::Storage: ../network/storage.yaml
OS::TripleO::Network::Tenant: ../network/tenant.yaml
+ # Management network is optional and disabled by default
+ OS::TripleO::Network::Management: ../network/noop.yaml
+
+ # Port assignments for the VIPs
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
# Port assignments for the controller role
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
@@ -14,25 +25,39 @@ resource_registry:
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml
+ OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/noop.yaml
# Port assignments for the compute role
+ OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
+ OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml
+ OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/noop.yaml
# Port assignments for the ceph storage role
+ OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/noop.yaml
# Port assignments for the swift storage role
+ OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
+ OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/noop.yaml
# Port assignments for the block storage role
+ OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
+ OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/noop.yaml
# Port assignments for service virtual IPs for the controller role
OS::TripleO::Controller::Ports::RedisVipPort: ../network/ports/vip.yaml
diff --git a/environments/network-management.yaml b/environments/network-management.yaml
new file mode 100644
index 00000000..2f0cff8b
--- /dev/null
+++ b/environments/network-management.yaml
@@ -0,0 +1,24 @@
+# Enable the creation of a system management network. This
+# creates a Neutron network for isolated Overcloud
+# system management traffic and configures each role to
+# assign a port (related to that role) on that network.
+# Note that the basic sample NIC configuration templates
+# do not include the management network; see the
+# single-nic-vlans-mgmt templates for an example.
+resource_registry:
+ OS::TripleO::Network::Management: ../network/management.yaml
+
+ # Port assignments for the controller role
+ OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
+
+ # Port assignments for the compute role
+ OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
+
+ # Port assignments for the ceph storage role
+ OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
+
+ # Port assignments for the swift storage role
+ OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
+
+ # Port assignments for the block storage role
+ OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
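As noted above, the stock NIC templates ship with the management stanzas commented out, so a deploy sketch combines this file with a NIC configuration that has those stanzas enabled (hypothetical command, environment names as shipped in this tree):

    openstack overcloud deploy --templates \
      -e environments/network-isolation.yaml \
      -e environments/net-single-nic-with-vlans.yaml \
      -e environments/network-management.yaml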
diff --git a/environments/neutron-midonet.yaml b/environments/neutron-midonet.yaml
new file mode 100644
index 00000000..726852a0
--- /dev/null
+++ b/environments/neutron-midonet.yaml
@@ -0,0 +1,20 @@
+# A Heat environment that can be used to deploy MidoNet Services
+resource_registry:
+ OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../net-config-linux-bridge.yaml # We have to avoid any ovs bridge. MidoNet is incompatible with its datapath
+
+parameter_defaults:
+ EnableZookeeperOnController: true
+ EnableCassandraOnController: true
+ NeutronCorePlugin: 'midonet.neutron.plugin_v1.MidonetPluginV2' # Overriding default core_plugin in Neutron. Don't touch it
+ NeutronEnableIsolatedMetadata: true # MidoNet 1.9 needs this one to work. Don't change it
+ NeutronEnableL3Agent: false
+ NeutronEnableOVSAgent: false
+
+ # Other available options for MidoNet Services
+ # TunnelZoneName: 'tunnelname'
+ # TunnelZoneType: 'gre'
+ # CassandraStoragePort: 7000
+ # CassandraSslStoragePort: 7009
+ # CassandraClientPort: 9042
+ # CassandraClientPortThrift: 9160
diff --git a/environments/neutron-nuage-config.yaml b/environments/neutron-nuage-config.yaml
new file mode 100644
index 00000000..50ba8f53
--- /dev/null
+++ b/environments/neutron-nuage-config.yaml
@@ -0,0 +1,15 @@
+# A Heat environment file which can be used to enable a
+# Neutron Nuage backend on the controller, configured via puppet
+resource_registry:
+ OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml
+
+parameter_defaults:
+ NeutronNuageOSControllerIp: '0.0.0.0'
+ NeutronNuageNetPartitionName: 'default_name'
+ NeutronNuageVSDIp: '0.0.0.0:0'
+ NeutronNuageVSDUsername: 'username'
+ NeutronNuageVSDPassword: 'password'
+ NeutronNuageVSDOrganization: 'organization'
+ NeutronNuageBaseURIVersion: 'default_uri_version'
+ NeutronNuageCMSId: ''
+ UseForwardedFor: true
diff --git a/environments/nova-nuage-config.yaml b/environments/nova-nuage-config.yaml
new file mode 100644
index 00000000..56c64d15
--- /dev/null
+++ b/environments/nova-nuage-config.yaml
@@ -0,0 +1,8 @@
+# A Heat environment file which can be used to enable
+# Nuage backend on the compute, configured via puppet
+resource_registry:
+ OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
+
+parameter_defaults:
+ NuageActiveController: '0.0.0.0'
+ NuageStandbyController: '0.0.0.0'
diff --git a/environments/puppet-ceph-external.yaml b/environments/puppet-ceph-external.yaml
index 3c7901cc..7f5b5080 100644
--- a/environments/puppet-ceph-external.yaml
+++ b/environments/puppet-ceph-external.yaml
@@ -3,7 +3,7 @@
resource_registry:
OS::TripleO::CephClusterConfig::SoftwareConfig: ../puppet/extraconfig/ceph/ceph-external-config.yaml
-parameters:
+parameter_defaults:
# NOTE: These example parameters are required when using Ceph External
#CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
#CephClientKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
@@ -13,6 +13,13 @@ parameters:
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
GlanceBackend: rbd
+ # If the Ceph pools which host VMs, Volumes and Images do not match these
+ # names OR the client keyring to use is not named 'openstack', edit the
+ # following as needed.
+ NovaRbdPoolName: vms
+ CinderRbdPoolName: volumes
+ GlanceRbdPoolName: images
+ CephClientUserName: openstack
# finally we disable the Cinder LVM backend
CinderEnableIscsiBackend: false
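A sketch of the site-specific overrides this environment expects, reusing the example values shown above (the FSID and client key must come from the external Ceph cluster):

    cat > ~/ceph-external-params.yaml <<'EOF'
    parameter_defaults:
      CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
      CephClientKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
      # Only needed when the pools or keyring name differ from the defaults:
      NovaRbdPoolName: vms
      CinderRbdPoolName: volumes
      GlanceRbdPoolName: images
      CephClientUserName: openstack
    EOF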
diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml
index f235cf8f..8986e35f 100644
--- a/environments/puppet-pacemaker.yaml
+++ b/environments/puppet-pacemaker.yaml
@@ -2,3 +2,5 @@
# Overcloud controller with Pacemaker.
resource_registry:
OS::TripleO::ControllerConfig: ../puppet/controller-config-pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
diff --git a/environments/updates/README.md b/environments/updates/README.md
new file mode 100644
index 00000000..8c03411d
--- /dev/null
+++ b/environments/updates/README.md
@@ -0,0 +1,9 @@
+This directory contains Heat environment file snippets which can
+be used to ensure smooth updates of the Overcloud.
+
+Contents
+--------
+
+**update-from-keystone-admin-internal-api.yaml**
+ To be used if the Keystone Admin API was originally deployed on the
+ Internal API network.
diff --git a/environments/updates/update-from-keystone-admin-internal-api.yaml b/environments/updates/update-from-keystone-admin-internal-api.yaml
new file mode 100644
index 00000000..3c71ef1b
--- /dev/null
+++ b/environments/updates/update-from-keystone-admin-internal-api.yaml
@@ -0,0 +1,33 @@
+# This environment file provides a default value for ServiceNetMap where
+# Keystone Admin API service is running on the Internal API network
+
+parameters:
+ ServiceNetMap:
+ NeutronTenantNetwork: tenant
+ CeilometerApiNetwork: internal_api
+ MongoDbNetwork: internal_api
+ CinderApiNetwork: internal_api
+ CinderIscsiNetwork: storage
+ GlanceApiNetwork: storage
+ GlanceRegistryNetwork: internal_api
+ KeystoneAdminApiNetwork: internal_api
+ KeystonePublicApiNetwork: internal_api
+ NeutronApiNetwork: internal_api
+ HeatApiNetwork: internal_api
+ NovaApiNetwork: internal_api
+ NovaMetadataNetwork: internal_api
+ NovaVncProxyNetwork: internal_api
+ SwiftMgmtNetwork: storage_mgmt
+ SwiftProxyNetwork: storage
+ HorizonNetwork: internal_api
+ MemcachedNetwork: internal_api
+ RabbitMqNetwork: internal_api
+ RedisNetwork: internal_api
+ MysqlNetwork: internal_api
+ CephClusterNetwork: storage_mgmt
+ CephPublicNetwork: storage
+ ControllerHostnameResolveNetwork: internal_api
+ ComputeHostnameResolveNetwork: internal_api
+ BlockStorageHostnameResolveNetwork: internal_api
+ ObjectStorageHostnameResolveNetwork: internal_api
+ CephStorageHostnameResolveNetwork: storage
diff --git a/extraconfig/tasks/noop.yaml b/extraconfig/tasks/noop.yaml
new file mode 100644
index 00000000..0cff7469
--- /dev/null
+++ b/extraconfig/tasks/noop.yaml
@@ -0,0 +1,10 @@
+heat_template_version: 2014-10-16
+description: 'No-op task'
+
+parameters:
+ servers:
+ type: json
+ input_values:
+ type: json
+ default: {}
+ description: input values for the software deployments
diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh
new file mode 100755
index 00000000..12201097
--- /dev/null
+++ b/extraconfig/tasks/pacemaker_resource_restart.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+set -eux
+
+pacemaker_status=$(systemctl is-active pacemaker)
+check_interval=3
+
+function check_resource {
+
+ service=$1
+ state=$2
+ timeout=$3
+ tstart=$(date +%s)
+ tend=$(( $tstart + $timeout ))
+
+ if [ "$state" = "stopped" ]; then
+ match_for_incomplete='Started'
+ else # started
+ match_for_incomplete='Stopped'
+ fi
+
+ while (( $(date +%s) < $tend )); do
+ node_states=$(pcs status --full | grep "$service" | grep -v Clone)
+ if echo "$node_states" | grep -q "$match_for_incomplete"; then
+ echo "$service not yet $state, sleeping $check_interval seconds."
+ sleep $check_interval
+ else
+ echo "$service has $state"
+ timeout -k 10 $timeout crm_resource --wait
+ return
+ fi
+ done
+
+ echo "$service never $state after $timeout seconds" | tee /dev/fd/2
+ exit 1
+
+}
+
+# Run if pacemaker is running, we're the bootstrap node,
+# and we're updating the deployment (not creating).
+if [ "$pacemaker_status" = "active" -a \
+ "$(hiera bootstrap_nodeid)" = "$(facter hostname)" -a \
+ "$(hiera update_identifier)" != "nil" ]; then
+
+ #ensure neutron constraints like
+ #https://review.openstack.org/#/c/245093/
+ if pcs constraint order show | grep "start neutron-server-clone then start neutron-ovs-cleanup-clone"; then
+ pcs constraint remove order-neutron-server-clone-neutron-ovs-cleanup-clone-mandatory
+ fi
+
+ pcs resource disable httpd
+ check_resource httpd stopped 300
+ pcs resource disable openstack-keystone
+ check_resource openstack-keystone stopped 1800
+
+ if pcs status | grep haproxy-clone; then
+ pcs resource restart haproxy-clone
+ fi
+ pcs resource restart redis-master
+ pcs resource restart mongod-clone
+ pcs resource restart rabbitmq-clone
+ pcs resource restart memcached-clone
+ pcs resource restart galera-master
+
+ pcs resource enable openstack-keystone
+ check_resource openstack-keystone started 1800
+ pcs resource enable httpd
+ check_resource httpd started 800
+
+fi
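The script's stop/verify/start pattern can be reproduced by hand when debugging a single resource; a rough equivalent for the httpd clone, assuming a running Pacemaker cluster:

    pcs resource disable httpd
    # poll until no node still reports the clone as Started (what check_resource does)
    while pcs status --full | grep httpd | grep -q Started; do sleep 3; done
    pcs resource enable httpd
    timeout -k 10 300 crm_resource --wait   # block until the cluster settles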
diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.yaml
new file mode 100644
index 00000000..7de41d94
--- /dev/null
+++ b/extraconfig/tasks/post_puppet_pacemaker.yaml
@@ -0,0 +1,44 @@
+heat_template_version: 2014-10-16
+description: 'Post-Puppet Config for Pacemaker deployments'
+
+parameters:
+ servers:
+ type: json
+ input_values:
+ type: json
+ description: input values for the software deployments
+
+resources:
+
+ ControllerPostPuppetMaintenanceModeConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ pacemaker_status=$(systemctl is-active pacemaker)
+
+ if [ "$pacemaker_status" = "active" ]; then
+ pcs property set maintenance-mode=false
+ fi
+
+ ControllerPostPuppetMaintenanceModeDeployment:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: ControllerPostPuppetMaintenanceModeConfig}
+ input_values: {get_param: input_values}
+
+ ControllerPostPuppetRestartConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: pacemaker_resource_restart.sh}
+
+ ControllerPostPuppetRestartDeployment:
+ type: OS::Heat::SoftwareDeployments
+ depends_on: ControllerPostPuppetMaintenanceModeDeployment
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: ControllerPostPuppetRestartConfig}
+ input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/pre_puppet_pacemaker.yaml b/extraconfig/tasks/pre_puppet_pacemaker.yaml
new file mode 100644
index 00000000..2cfe92a7
--- /dev/null
+++ b/extraconfig/tasks/pre_puppet_pacemaker.yaml
@@ -0,0 +1,30 @@
+heat_template_version: 2014-10-16
+description: 'Pre-Puppet Config for Pacemaker deployments'
+
+parameters:
+ servers:
+ type: json
+ input_values:
+ type: json
+ description: input values for the software deployments
+
+resources:
+
+ ControllerPrePuppetMaintenanceModeConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ pacemaker_status=$(systemctl is-active pacemaker)
+
+ if [ "$pacemaker_status" = "active" ]; then
+ pcs property set maintenance-mode=true
+ fi
+
+ ControllerPrePuppetMaintenanceModeDeployment:
+ type: OS::Heat::SoftwareDeployments
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: ControllerPrePuppetMaintenanceModeConfig}
+ input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index 9125ca07..c6313d9d 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -22,7 +22,9 @@ mkdir -p $timestamp_dir
update_identifier=${update_identifier//[^a-zA-Z0-9-_]/}
# seconds to wait for this node to rejoin the cluster after update
-cluster_start_timeout=360
+cluster_start_timeout=600
+galera_sync_timeout=360
+cluster_settle_timeout=1800
timestamp_file="$timestamp_dir/$update_identifier"
if [[ -a "$timestamp_file" ]]; then
@@ -41,8 +43,97 @@ if [[ "$list_updates" == "" ]]; then
fi
pacemaker_status=$(systemctl is-active pacemaker)
+pacemaker_dumpfile=$(mktemp)
if [[ "$pacemaker_status" == "active" ]] ; then
+SERVICES="memcached
+httpd
+neutron-dhcp-agent
+neutron-l3-agent
+neutron-metadata-agent
+neutron-openvswitch-agent
+neutron-server
+openstack-ceilometer-alarm-evaluator
+openstack-ceilometer-alarm-notifier
+openstack-ceilometer-api
+openstack-ceilometer-central
+openstack-ceilometer-collector
+openstack-ceilometer-notification
+openstack-cinder-api
+openstack-cinder-scheduler
+openstack-cinder-volume
+openstack-glance-api
+openstack-glance-registry
+openstack-heat-api
+openstack-heat-api-cfn
+openstack-heat-api-cloudwatch
+openstack-heat-engine
+openstack-keystone
+openstack-nova-api
+openstack-nova-conductor
+openstack-nova-consoleauth
+openstack-nova-novncproxy
+openstack-nova-scheduler"
+
+ echo "Dumping Pacemaker config"
+ pcs cluster cib $pacemaker_dumpfile
+
+ echo "Checking for missing constraints"
+
+ if ! pcs constraint order show | grep "start openstack-nova-novncproxy-clone then start openstack-nova-api-clone"; then
+ pcs -f $pacemaker_dumpfile constraint order start openstack-nova-novncproxy-clone then openstack-nova-api-clone
+ fi
+
+ if ! pcs constraint order show | grep "start rabbitmq-clone then start openstack-keystone-clone"; then
+ pcs -f $pacemaker_dumpfile constraint order start rabbitmq-clone then openstack-keystone-clone
+ fi
+
+ if ! pcs constraint order show | grep "promote galera-master then start openstack-keystone-clone"; then
+ pcs -f $pacemaker_dumpfile constraint order promote galera-master then openstack-keystone-clone
+ fi
+
+ if pcs resource | grep "haproxy-clone"; then
+ SERVICES="$SERVICES haproxy"
+ if ! pcs constraint order show | grep "start haproxy-clone then start openstack-keystone-clone"; then
+ pcs -f $pacemaker_dumpfile constraint order start haproxy-clone then openstack-keystone-clone
+ fi
+ fi
+
+ if ! pcs constraint order show | grep "start memcached-clone then start openstack-keystone-clone"; then
+ pcs -f $pacemaker_dumpfile constraint order start memcached-clone then openstack-keystone-clone
+ fi
+
+ if ! pcs constraint order show | grep "promote redis-master then start openstack-ceilometer-central-clone"; then
+ pcs -f $pacemaker_dumpfile constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false
+ fi
+
+ # ensure neutron constraints https://review.openstack.org/#/c/229466
+ # remove ovs-cleanup after server and add openvswitch-agent instead
+ if pcs constraint order show | grep "start neutron-server-clone then start neutron-ovs-cleanup-clone"; then
+ pcs -f $pacemaker_dumpfile constraint remove order-neutron-server-clone-neutron-ovs-cleanup-clone-mandatory
+ fi
+ if ! pcs constraint order show | grep "start neutron-server-clone then start neutron-openvswitch-agent-clone"; then
+ pcs -f $pacemaker_dumpfile constraint order start neutron-server-clone then neutron-openvswitch-agent-clone
+ fi
+
+
+ if ! pcs resource defaults | grep "resource-stickiness: INFINITY"; then
+ pcs -f $pacemaker_dumpfile resource defaults resource-stickiness=INFINITY
+ fi
+
+ echo "Setting resource start/stop timeouts"
+ for service in $SERVICES; do
+ pcs -f $pacemaker_dumpfile resource update $service op start timeout=200s op stop timeout=200s
+ done
+ # mongod start timeout is higher, setting only stop timeout
+ pcs -f $pacemaker_dumpfile resource update mongod op start timeout=370s op stop timeout=200s
+
+ echo "Applying new Pacemaker config"
+ if ! pcs cluster cib-push $pacemaker_dumpfile; then
+ echo "ERROR failed to apply new pacemaker config"
+ exit 1
+ fi
+
echo "Pacemaker running, stopping cluster node and doing full package update"
node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
if [[ "$node_count" == "1" ]] ; then
@@ -51,6 +142,13 @@ if [[ "$pacemaker_status" == "active" ]] ; then
else
pcs cluster stop
fi
+
+ # clean leftover keepalived and radvd instances from neutron
+ # (can be removed when we remove neutron-netns-cleanup from cluster services)
+ # see https://review.gerrithub.io/#/c/248931/1/neutron-netns-cleanup.init
+ killall neutron-keepalived-state-change 2>/dev/null || :
+ kill $(ps ax | grep -e "keepalived.*\.pid-vrrp" | awk '{print $1}') 2>/dev/null || :
+ kill $(ps ax | grep -e "radvd.*\.pid\.radvd" | awk '{print $1}') 2>/dev/null || :
else
echo "Excluding upgrading packages that are handled by config management tooling"
command_arguments="$command_arguments --skip-broken"
@@ -83,6 +181,23 @@ if [[ "$pacemaker_status" == "active" ]] ; then
exit 1
fi
done
+
+ tstart=$(date +%s)
+ while ! clustercheck; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > galera_sync_timeout )) ; then
+ echo "ERROR galera sync timed out"
+ exit 1
+ fi
+ done
+
+ echo "Waiting for pacemaker cluster to settle"
+ if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
+ echo "ERROR timed out while waiting for the cluster to settle"
+ exit 1
+ fi
+
pcs status
else
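The constraint and timeout changes above all follow the same offline-edit pattern: dump the CIB once, stage every change against the dump with pcs -f, then push the result in a single operation. A minimal standalone sketch:

    dump=$(mktemp)
    pcs cluster cib "$dump"        # dump the current CIB to a file
    pcs -f "$dump" resource defaults resource-stickiness=INFINITY
    pcs cluster cib-push "$dump"   # apply all staged changes at once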
diff --git a/firstboot/userdata_heat_admin.yaml b/firstboot/userdata_heat_admin.yaml
index 73481c63..f8891b29 100644
--- a/firstboot/userdata_heat_admin.yaml
+++ b/firstboot/userdata_heat_admin.yaml
@@ -1,7 +1,7 @@
heat_template_version: 2014-10-16
parameters:
- # Can be overriden via parameter_defaults in the environment
+ # Can be overridden via parameter_defaults in the environment
node_admin_username:
type: string
default: heat-admin
diff --git a/net-config-bond.yaml b/net-config-bond.yaml
index 797df4bf..b624563f 100644
--- a/net-config-bond.yaml
+++ b/net-config-bond.yaml
@@ -28,6 +28,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet:
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
resources:
OsNetConfigImpl:
diff --git a/net-config-bridge.yaml b/net-config-bridge.yaml
index ad16ef0b..4f7a19dc 100644
--- a/net-config-bridge.yaml
+++ b/net-config-bridge.yaml
@@ -28,6 +28,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet:
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
resources:
OsNetConfigImpl:
diff --git a/net-config-linux-bridge.yaml b/net-config-linux-bridge.yaml
new file mode 100644
index 00000000..0646ffab
--- /dev/null
+++ b/net-config-linux-bridge.yaml
@@ -0,0 +1,73 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to drive os-net-config for a simple bridge.
+
+parameters:
+ ControlPlaneIp:
+ default: ''
+ description: IP address/subnet on the ctlplane network
+ type: string
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ControlPlaneDefaultRoute: # Override this via parameter_defaults
+ description: The default route of the control plane network.
+ type: string
+ default: '192.0.2.1'
+ EC2MetadataIp: # Override this via parameter_defaults
+ description: The IP address of the EC2 metadata server.
+ type: string
+ default: '169.254.169.254/32'
+
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: linux_bridge
+ name: {get_input: bridge_name}
+ addresses:
+ -
+ ip_netmask: {get_param: ControlPlaneIp}
+ members:
+ -
+ type: interface
+ name: {get_input: interface_name}
+ # force the MAC address of the bridge to this interface
+ primary: true
+ routes:
+ -
+ ip_netmask: 0.0.0.0/0
+ next_hop: {get_param: ControlPlaneDefaultRoute}
+ default: true
+ -
+ ip_netmask: {get_param: EC2MetadataIp}
+ next_hop: {get_param: ControlPlaneDefaultRoute}
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
diff --git a/net-config-noop.yaml b/net-config-noop.yaml
index 30de5846..94c492c6 100644
--- a/net-config-noop.yaml
+++ b/net-config-noop.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet:
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
resources:
OsNetConfigImpl:
diff --git a/network/config/bond-with-vlans/README.md b/network/config/bond-with-vlans/README.md
index 1679df3c..afe71776 100644
--- a/network/config/bond-with-vlans/README.md
+++ b/network/config/bond-with-vlans/README.md
@@ -1,6 +1,11 @@
This directory contains Heat templates to help configure
Vlans on a bonded pair of NICs for each Overcloud role.
+There are two versions of the controller role template, one with
+an external network interface, and another without. If the
+external network interface is not configured, the ctlplane address
+ranges will be used for external (public) network traffic.
+
Configuration
-------------
@@ -13,3 +18,32 @@ something like this:
OS::TripleO::Controller::Net::SoftwareConfig: network/config/bond-with-vlans/controller.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: network/config/bond-with-vlans/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: network/config/bond-with-vlans/ceph-storage.yaml
+
+Or use this Heat environment file:
+
+ environments/net-bond-with-vlans.yaml
+
+Configuration with no External Network
+--------------------------------------
+
+Same as above except set the following value for the controller role:
+
+ OS::TripleO::Controller::Net::SoftwareConfig: network/config/bond-with-vlans/controller-no-external.yaml
+
+Configuration with System Management Network
+--------------------------------------------
+
+To enable the optional System Management network, create a Heat environment
+that looks something like this:
+
+ resource\_registry:
+ OS::TripleO::Network::Management: ../network/management.yaml
+ OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
+ OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
+ OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
+ OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
+ OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
+
+Or use this Heat environment file:
+
+ environments/network-management.yaml
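Following the README above, a deploy sketch that uses the bonded-VLAN templates and overrides the bond options via parameter_defaults (the bond_mode value is just an example taken from the parameter description):

    cat > ~/bond-opts.yaml <<'EOF'
    parameter_defaults:
      BondInterfaceOvsOptions: 'bond_mode=balance-slb'
    EOF
    openstack overcloud deploy --templates \
      -e environments/network-isolation.yaml \
      -e environments/net-bond-with-vlans.yaml \
      -e ~/bond-opts.yaml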
diff --git a/network/config/bond-with-vlans/ceph-storage.yaml b/network/config/bond-with-vlans/ceph-storage.yaml
index 620d1f7a..93db8666 100644
--- a/network/config/bond-with-vlans/ceph-storage.yaml
+++ b/network/config/bond-with-vlans/ceph-storage.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
BondInterfaceOvsOptions:
default: ''
description: The ovs_options string for the bond interface. Set things like
@@ -42,6 +46,10 @@ parameters:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
@@ -114,6 +122,14 @@ resources:
addresses:
-
ip_netmask: {get_param: StorageMgmtIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: vlan
+ # device: bond1
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/bond-with-vlans/cinder-storage.yaml b/network/config/bond-with-vlans/cinder-storage.yaml
index f4c6de8f..bea98c19 100644
--- a/network/config/bond-with-vlans/cinder-storage.yaml
+++ b/network/config/bond-with-vlans/cinder-storage.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
BondInterfaceOvsOptions:
default: ''
description: The ovs_options string for the bond interface. Set things like
@@ -46,6 +50,10 @@ parameters:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
@@ -125,6 +133,14 @@ resources:
addresses:
-
ip_netmask: {get_param: StorageMgmtIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: vlan
+ # device: bond1
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/bond-with-vlans/compute.yaml b/network/config/bond-with-vlans/compute.yaml
index 8cb3705b..774bf02d 100644
--- a/network/config/bond-with-vlans/compute.yaml
+++ b/network/config/bond-with-vlans/compute.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
BondInterfaceOvsOptions:
default: ''
description: The ovs_options string for the bond interface. Set things like
@@ -46,6 +50,10 @@ parameters:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
@@ -125,6 +133,14 @@ resources:
addresses:
-
ip_netmask: {get_param: TenantIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: vlan
+ # device: bond1
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/bond-with-vlans/controller-no-external.yaml b/network/config/bond-with-vlans/controller-no-external.yaml
new file mode 100644
index 00000000..375d40be
--- /dev/null
+++ b/network/config/bond-with-vlans/controller-no-external.yaml
@@ -0,0 +1,131 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to drive os-net-config with 2 bonded nics on a bridge
+ with VLANs attached for the controller role.
+
+parameters:
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ BondInterfaceOvsOptions:
+ default: ''
+ description: The ovs_options string for the bond interface. Set things like
+ lacp=active and/or bond_mode=balance-slb using this option.
+ type: string
+ ExternalNetworkVlanID:
+ default: 10
+ description: Vlan ID for the external network traffic.
+ type: number
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ TenantNetworkVlanID:
+ default: 50
+ description: Vlan ID for the tenant network traffic.
+ type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
+ ExternalInterfaceDefaultRoute:
+ default: '10.0.0.1'
+ description: default route for the external network
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: ovs_bridge
+ name: {get_input: bridge_name}
+ use_dhcp: true
+ members:
+ -
+ type: ovs_bond
+ name: bond1
+ ovs_options: {get_param: BondInterfaceOvsOptions}
+ members:
+ -
+ type: interface
+ name: nic2
+ primary: true
+ -
+ type: interface
+ name: nic3
+ -
+ type: vlan
+ device: bond1
+ vlan_id: {get_param: InternalApiNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: InternalApiIpSubnet}
+ -
+ type: vlan
+ device: bond1
+ vlan_id: {get_param: StorageNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ -
+ type: vlan
+ device: bond1
+ vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageMgmtIpSubnet}
+ -
+ type: vlan
+ device: bond1
+ vlan_id: {get_param: TenantNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: TenantIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: vlan
+ # device: bond1
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
diff --git a/network/config/bond-with-vlans/controller.yaml b/network/config/bond-with-vlans/controller.yaml
index 4290be20..d3627ead 100644
--- a/network/config/bond-with-vlans/controller.yaml
+++ b/network/config/bond-with-vlans/controller.yaml
@@ -29,11 +29,14 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
BondInterfaceOvsOptions:
- default: 'bond_mode=balance-tcp lacp=active other-config:lacp-fallback-ab=true'
+ default: 'bond_mode=active-backup'
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
- Default wil attempt LACP, but will fall back to active-backup.
type: string
ExternalNetworkVlanID:
default: 10
@@ -55,6 +58,10 @@ parameters:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
@@ -120,7 +127,7 @@ resources:
ip_netmask: {get_param: ExternalIpSubnet}
routes:
-
- ip_netmask: 0.0.0.0/0
+ default: true
next_hop: {get_param: ExternalInterfaceDefaultRoute}
-
type: vlan
@@ -150,6 +157,14 @@ resources:
addresses:
-
ip_netmask: {get_param: TenantIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: vlan
+ # device: bond1
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/bond-with-vlans/swift-storage.yaml b/network/config/bond-with-vlans/swift-storage.yaml
index f6b2a699..de9121e5 100644
--- a/network/config/bond-with-vlans/swift-storage.yaml
+++ b/network/config/bond-with-vlans/swift-storage.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
BondInterfaceOvsOptions:
default: ''
description: The ovs_options string for the bond interface. Set things like
@@ -46,6 +50,10 @@ parameters:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
@@ -125,6 +133,14 @@ resources:
addresses:
-
ip_netmask: {get_param: StorageMgmtIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: vlan
+ # device: bond1
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/multiple-nics/README.md b/network/config/multiple-nics/README.md
index 3d81f0be..0d8a0f03 100644
--- a/network/config/multiple-nics/README.md
+++ b/network/config/multiple-nics/README.md
@@ -19,3 +19,21 @@ something like this:
Or use this Heat environment file:
environments/net-multiple-nics.yaml
+
+Configuration with System Management Network
+--------------------------------------------
+
+To enable the optional System Management network, create a Heat environment
+that looks something like this:
+
+ resource\_registry:
+ OS::TripleO::Network::Management: ../network/management.yaml
+ OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
+ OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
+ OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
+ OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
+ OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
+
+Or use this Heat environment file:
+
+ environments/network-management.yaml
diff --git a/network/config/multiple-nics/ceph-storage.yaml b/network/config/multiple-nics/ceph-storage.yaml
index a0508583..a2a6b40d 100644
--- a/network/config/multiple-nics/ceph-storage.yaml
+++ b/network/config/multiple-nics/ceph-storage.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
@@ -49,6 +53,10 @@ parameters:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
@@ -58,12 +66,12 @@ parameters:
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
- description: The subnet CIDR of the control plane network.
+ description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
- type: json
+ type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
@@ -92,6 +100,9 @@ resources:
-
ip_netmask: 169.254.169.254/32
next_hop: {get_param: EC2MetadataIp}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
-
type: interface
name: nic2
@@ -106,6 +117,14 @@ resources:
addresses:
-
ip_netmask: {get_param: StorageMgmtIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: interface
+ # name: nic7
+ # use_dhcp: false
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/multiple-nics/cinder-storage.yaml b/network/config/multiple-nics/cinder-storage.yaml
index c84586bb..06b4b83f 100644
--- a/network/config/multiple-nics/cinder-storage.yaml
+++ b/network/config/multiple-nics/cinder-storage.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
@@ -49,6 +53,10 @@ parameters:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
@@ -58,12 +66,12 @@ parameters:
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
- description: The subnet CIDR of the control plane network.
+ description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
- type: json
+ type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
@@ -92,6 +100,9 @@ resources:
-
ip_netmask: 169.254.169.254/32
next_hop: {get_param: EC2MetadataIp}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
-
type: interface
name: nic2
@@ -113,6 +124,14 @@ resources:
addresses:
-
ip_netmask: {get_param: InternalApiIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: interface
+ # name: nic7
+ # use_dhcp: false
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/multiple-nics/compute.yaml b/network/config/multiple-nics/compute.yaml
index 70a18081..97eef52b 100644
--- a/network/config/multiple-nics/compute.yaml
+++ b/network/config/multiple-nics/compute.yaml
@@ -29,6 +29,14 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ExternalNetworkVlanID:
+ default: 10
+ description: Vlan ID for the external network traffic.
+ type: number
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
@@ -37,21 +45,33 @@ parameters:
default: 30
description: Vlan ID for the storage network traffic.
type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
TenantNetworkVlanID:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
+ ExternalInterfaceDefaultRoute:
+ default: '10.0.0.1'
+ description: default route for the external network
+ type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
- description: The subnet CIDR of the control plane network.
+ description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
- type: json
+ type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
@@ -80,6 +100,9 @@ resources:
-
ip_netmask: 169.254.169.254/32
next_hop: {get_param: EC2MetadataIp}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
-
type: interface
name: nic2
@@ -109,6 +132,14 @@ resources:
use_dhcp: false
# force the MAC address of the bridge to this interface
primary: true
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: interface
+ # name: nic7
+ # use_dhcp: false
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/multiple-nics/controller.yaml b/network/config/multiple-nics/controller.yaml
index 63f53a1f..32851cfb 100644
--- a/network/config/multiple-nics/controller.yaml
+++ b/network/config/multiple-nics/controller.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
@@ -49,6 +53,10 @@ parameters:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
@@ -58,12 +66,12 @@ parameters:
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
- description: The subnet CIDR of the control plane network.
+ description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
- type: json
+ type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
@@ -131,13 +139,14 @@ resources:
-
type: ovs_bridge
name: {get_input: bridge_name}
+ dns_servers: {get_param: DnsServers}
use_dhcp: false
addresses:
-
ip_netmask: {get_param: ExternalIpSubnet}
routes:
-
- ip_netmask: 0.0.0.0/0
+ default: true
next_hop: {get_param: ExternalInterfaceDefaultRoute}
members:
-
@@ -145,6 +154,14 @@ resources:
name: nic6
# force the MAC address of the bridge to this interface
primary: true
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: interface
+ # name: nic7
+ # use_dhcp: false
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/multiple-nics/swift-storage.yaml b/network/config/multiple-nics/swift-storage.yaml
index 25ac75f2..4d5a7b99 100644
--- a/network/config/multiple-nics/swift-storage.yaml
+++ b/network/config/multiple-nics/swift-storage.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
@@ -49,6 +53,10 @@ parameters:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
@@ -58,12 +66,12 @@ parameters:
description: The subnet CIDR of the control plane network.
type: string
ControlPlaneDefaultRoute: # Override this via parameter_defaults
- description: The subnet CIDR of the control plane network.
+ description: The default route of the control plane network.
type: string
DnsServers: # Override this via parameter_defaults
default: []
description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
- type: json
+ type: comma_delimited_list
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
@@ -92,6 +100,9 @@ resources:
-
ip_netmask: 169.254.169.254/32
next_hop: {get_param: EC2MetadataIp}
+ -
+ default: true
+ next_hop: {get_param: ControlPlaneDefaultRoute}
-
type: interface
name: nic2
@@ -113,6 +124,14 @@ resources:
addresses:
-
ip_netmask: {get_param: InternalApiIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: interface
+ # name: nic7
+ # use_dhcp: false
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/single-nic-vlans/README.md b/network/config/single-nic-vlans/README.md
index e3e16574..f9c2e512 100644
--- a/network/config/single-nic-vlans/README.md
+++ b/network/config/single-nic-vlans/README.md
@@ -1,5 +1,10 @@
This directory contains Heat templates to help configure
-Vlans on a single NICs for each Overcloud role.
+Vlans on a single NIC for each Overcloud role.
+
+There are two versions of the controller role template, one with
+an external network interface, and another without. If the
+external network interface is not configured, the ctlplane address
+ranges will be used for external (public) network traffic.
Configuration
-------------
@@ -17,3 +22,28 @@ something like this:
Or use this Heat environment file:
environments/net-single-nic-with-vlans.yaml
+
+Configuration with no External Network
+--------------------------------------
+
+Same as above except set the following value for the controller role:
+
+ OS::TripleO::Controller::Net::SoftwareConfig: network/config/single-nic-vlans/controller-no-external.yaml
+
+Configuration with System Management Network
+--------------------------------------------
+
+To enable the optional System Management network, create a Heat environment
+that looks something like this:
+
+ resource\_registry:
+ OS::TripleO::Network::Management: ../network/management.yaml
+ OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
+ OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
+ OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
+ OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
+ OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
+
+Or use this Heat environment file:
+
+ environments/network-management.yaml
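
As a hedged sketch of the workflow described above (the parameter names are the ones introduced elsewhere in this change; the subnet, pool and VLAN values are placeholders), the registry entries and the related defaults can live in one environment file. The management interface/VLAN stanzas in the NIC templates still need to be uncommented, as noted inline in those templates:

    resource_registry:
      OS::TripleO::Network::Management: ../network/management.yaml
      OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml

    parameter_defaults:
      # Placeholder values; match them to the local management subnet and VLAN.
      ManagementNetCidr: 10.0.1.0/24
      ManagementAllocationPools: [{'start': '10.0.1.4', 'end': '10.0.1.250'}]
      ManagementNetworkVlanID: 60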
diff --git a/network/config/single-nic-vlans/ceph-storage.yaml b/network/config/single-nic-vlans/ceph-storage.yaml
index 5148c520..80bc32d3 100644
--- a/network/config/single-nic-vlans/ceph-storage.yaml
+++ b/network/config/single-nic-vlans/ceph-storage.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
StorageNetworkVlanID:
default: 30
description: Vlan ID for the storage network traffic.
@@ -37,6 +41,10 @@ parameters:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
@@ -97,6 +105,13 @@ resources:
addresses:
-
ip_netmask: {get_param: StorageMgmtIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: vlan
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/single-nic-vlans/cinder-storage.yaml b/network/config/single-nic-vlans/cinder-storage.yaml
index e79a9f4b..e509443a 100644
--- a/network/config/single-nic-vlans/cinder-storage.yaml
+++ b/network/config/single-nic-vlans/cinder-storage.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
@@ -41,6 +45,10 @@ parameters:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
@@ -107,6 +115,13 @@ resources:
addresses:
-
ip_netmask: {get_param: StorageMgmtIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: vlan
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/single-nic-vlans/compute.yaml b/network/config/single-nic-vlans/compute.yaml
index 4e93b31c..8cf6825d 100644
--- a/network/config/single-nic-vlans/compute.yaml
+++ b/network/config/single-nic-vlans/compute.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
@@ -41,6 +45,10 @@ parameters:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
@@ -107,6 +115,13 @@ resources:
addresses:
-
ip_netmask: {get_param: TenantIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: vlan
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/single-nic-vlans/controller-no-external.yaml b/network/config/single-nic-vlans/controller-no-external.yaml
new file mode 100644
index 00000000..eb5e1e5a
--- /dev/null
+++ b/network/config/single-nic-vlans/controller-no-external.yaml
@@ -0,0 +1,114 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Software Config to drive os-net-config to configure VLANs for the
+ controller role. No external IP is configured.
+
+parameters:
+ ExternalIpSubnet:
+ default: ''
+ description: IP address/subnet on the external network
+ type: string
+ InternalApiIpSubnet:
+ default: ''
+ description: IP address/subnet on the internal API network
+ type: string
+ StorageIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage network
+ type: string
+ StorageMgmtIpSubnet:
+ default: ''
+ description: IP address/subnet on the storage mgmt network
+ type: string
+ TenantIpSubnet:
+ default: ''
+ description: IP address/subnet on the tenant network
+ type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
+ ExternalNetworkVlanID:
+ default: 10
+ description: Vlan ID for the external network traffic.
+ type: number
+ InternalApiNetworkVlanID:
+ default: 20
+ description: Vlan ID for the internal_api network traffic.
+ type: number
+ StorageNetworkVlanID:
+ default: 30
+ description: Vlan ID for the storage network traffic.
+ type: number
+ StorageMgmtNetworkVlanID:
+ default: 40
+ description: Vlan ID for the storage mgmt network traffic.
+ type: number
+ TenantNetworkVlanID:
+ default: 50
+ description: Vlan ID for the tenant network traffic.
+ type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
+ ExternalInterfaceDefaultRoute:
+ default: '10.0.0.1'
+ description: default route for the external network
+ type: string
+
+resources:
+ OsNetConfigImpl:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ os_net_config:
+ network_config:
+ -
+ type: ovs_bridge
+ name: {get_input: bridge_name}
+ use_dhcp: true
+ members:
+ -
+ type: interface
+ name: nic1
+ # force the MAC address of the bridge to this interface
+ primary: true
+ -
+ type: vlan
+ vlan_id: {get_param: InternalApiNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: InternalApiIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: StorageMgmtNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: StorageMgmtIpSubnet}
+ -
+ type: vlan
+ vlan_id: {get_param: TenantNetworkVlanID}
+ addresses:
+ -
+ ip_netmask: {get_param: TenantIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: vlan
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
+
+outputs:
+ OS::stack_id:
+ description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl}
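
A minimal usage sketch for the template above, assuming the stock single-nic-vlans templates stay in place for the other roles (the VLAN overrides are shown only as an example and repeat the defaults from this file):

    resource_registry:
      OS::TripleO::Controller::Net::SoftwareConfig: network/config/single-nic-vlans/controller-no-external.yaml

    parameter_defaults:
      # Optional; only needed when the switch uses different tags than the defaults above.
      InternalApiNetworkVlanID: 20
      StorageNetworkVlanID: 30

With no external interface configured, the bridge keeps its DHCP-assigned ctlplane address, which then carries the public traffic as the README above notes.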
diff --git a/network/config/single-nic-vlans/controller.yaml b/network/config/single-nic-vlans/controller.yaml
index 3c536d67..3b22b36b 100644
--- a/network/config/single-nic-vlans/controller.yaml
+++ b/network/config/single-nic-vlans/controller.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
@@ -49,6 +53,10 @@ parameters:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: default route for the external network
@@ -129,6 +137,12 @@ resources:
addresses:
-
ip_netmask: {get_param: TenantIpSubnet}
+ #- # Uncomment when including environments/network-management.yaml
+ # type: vlan
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/config/single-nic-vlans/swift-storage.yaml b/network/config/single-nic-vlans/swift-storage.yaml
index 83b3304f..efc03393 100644
--- a/network/config/single-nic-vlans/swift-storage.yaml
+++ b/network/config/single-nic-vlans/swift-storage.yaml
@@ -29,6 +29,10 @@ parameters:
default: ''
description: IP address/subnet on the tenant network
type: string
+ ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+ default: ''
+ description: IP address/subnet on the management network
+ type: string
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
@@ -41,6 +45,10 @@ parameters:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
+ ManagementNetworkVlanID:
+ default: 60
+ description: Vlan ID for the management network traffic.
+ type: number
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
@@ -107,6 +115,13 @@ resources:
addresses:
-
ip_netmask: {get_param: StorageMgmtIpSubnet}
+ # Uncomment when including environments/network-management.yaml
+ #-
+ # type: vlan
+ # vlan_id: {get_param: ManagementNetworkVlanID}
+ # addresses:
+ # -
+ # ip_netmask: {get_param: ManagementIpSubnet}
outputs:
OS::stack_id:
diff --git a/network/endpoints/endpoint.yaml b/network/endpoints/endpoint.yaml
new file mode 100644
index 00000000..6246cfdd
--- /dev/null
+++ b/network/endpoints/endpoint.yaml
@@ -0,0 +1,60 @@
+heat_template_version: 2015-04-30
+
+description: >
+ OpenStack Endpoint
+
+parameters:
+ EndpointName:
+ type: string
+ description: The name of the Endpoint being evaluated
+ EndpointMap:
+ type: json
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ IP:
+ type: string
+ description: The IP address of the Neutron Port that the endpoint is attached to
+ UriSuffix:
+ type: string
+ default: ''
+ description: A suffix attached to the URL
+ CloudName:
+ type: string
+ default: ''
+ description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
+
+outputs:
+ endpoint:
+ description: >
+ A Hash containing a mapping of service endpoints to ports, protocols, uris,
+ assigned IPs, and hostnames for a specific endpoint
+ value:
+ port: {get_param: [EndpointMap, {get_param: EndpointName }, port] }
+ protocol: {get_param: [EndpointMap, {get_param: EndpointName }, protocol] }
+ ip: {get_param: IP}
+ host:
+ str_replace:
+ template: {get_param: [EndpointMap, {get_param: EndpointName }, host]}
+ params: {IP_ADDRESS: {get_param: IP}, CLOUDNAME: {get_param: CloudName}}
+ uri:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, {get_param: EndpointName }, protocol] }
+ - '://'
+ - str_replace:
+ template: {get_param: [EndpointMap, {get_param: EndpointName }, host]}
+ params: {IP_ADDRESS: {get_param: IP}, CLOUDNAME: {get_param: CloudName }}
+ - ':'
+ - {get_param: [EndpointMap, {get_param: EndpointName }, port] }
+ - {get_param: UriSuffix }
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, {get_param: EndpointName }, protocol] }
+ - '://'
+ - str_replace:
+ template: {get_param: [EndpointMap, {get_param: EndpointName }, host]}
+ params: {IP_ADDRESS: {get_param: IP}, CLOUDNAME: {get_param: CloudName} }
+ - ':'
+ - {get_param: [EndpointMap, {get_param: EndpointName }, port] }
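
A hedged worked example of how the outputs above are assembled, using the GlancePublic defaults from the endpoint map added later in this change and an invented IP:

    # Inputs (illustrative):
    #   EndpointName: GlancePublic
    #   EndpointMap:  {GlancePublic: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}}
    #   IP:           192.0.2.10
    #   UriSuffix:    ''
    # Resulting 'endpoint' output:
    port: '9292'
    protocol: 'http'
    ip: 192.0.2.10
    host: 192.0.2.10    # IP_ADDRESS is substituted; CLOUDNAME would be used if the host template named it
    uri: http://192.0.2.10:9292
    uri_no_suffix: http://192.0.2.10:9292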
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
new file mode 100644
index 00000000..0ff0a8e5
--- /dev/null
+++ b/network/endpoints/endpoint_map.yaml
@@ -0,0 +1,450 @@
+heat_template_version: 2015-04-30
+
+description: >
+ A Map of OpenStack Endpoints
+
+parameters:
+ CeilometerApiVirtualIP:
+ type: string
+ default: ''
+ CinderApiVirtualIP:
+ type: string
+ default: ''
+ GlanceApiVirtualIP:
+ type: string
+ default: ''
+ GlanceRegistryVirtualIP:
+ type: string
+ default: ''
+ HeatApiVirtualIP:
+ type: string
+ default: ''
+ KeystoneAdminApiVirtualIP:
+ type: string
+ default: ''
+ KeystonePublicApiVirtualIP:
+ type: string
+ default: ''
+ MysqlVirtualIP:
+ type: string
+ default: ''
+ NeutronApiVirtualIP:
+ type: string
+ default: ''
+ NovaApiVirtualIP:
+ type: string
+ default: ''
+ PublicVirtualIP:
+ type: string
+ default: ''
+ SwiftProxyVirtualIP:
+ type: string
+ default: ''
+ EndpointMap:
+ type: json
+ default:
+ CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerPublic: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderPublic: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlancePublic: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlanceRegistryAdmin: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
+ GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
+ GlanceRegistryPublic: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
+ HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatPublic: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HorizonPublic: {protocol: 'http', port: '80', host: 'IP_ADDRESS'}
+ KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+ KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+ KeystonePublic: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+ NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronPublic: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaPublic: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
+ NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
+ NovaEC2Public: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
+ SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ CloudName:
+ type: string
+ default: overcloud
+ description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
+
+resources:
+
+ CeilometerInternal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: CeilometerInternal
+ EndpointMap: { get_param: EndpointMap }
+ CloudName: {get_param: CloudName}
+ IP: {get_param: CeilometerApiVirtualIP}
+ CeilometerPublic:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: CeilometerPublic
+ EndpointMap: { get_param: EndpointMap }
+ CloudName: {get_param: CloudName}
+ IP: {get_param: PublicVirtualIP}
+ CeilometerAdmin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: CeilometerAdmin
+ EndpointMap: { get_param: EndpointMap }
+ CloudName: {get_param: CloudName}
+ IP: {get_param: CeilometerApiVirtualIP}
+
+ CinderInternal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: CinderInternal
+ EndpointMap: { get_param: EndpointMap }
+ CloudName: {get_param: CloudName}
+ IP: {get_param: CinderApiVirtualIP}
+ UriSuffix: '/v1/%(tenant_id)s'
+ CinderPublic:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: CinderPublic
+ EndpointMap: { get_param: EndpointMap }
+ CloudName: {get_param: CloudName}
+ IP: {get_param: PublicVirtualIP}
+ UriSuffix: '/v1/%(tenant_id)s'
+ CinderAdmin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: CinderAdmin
+ EndpointMap: { get_param: EndpointMap }
+ CloudName: {get_param: CloudName}
+ IP: {get_param: CinderApiVirtualIP}
+ UriSuffix: '/v1/%(tenant_id)s'
+
+ CinderV2Internal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: CinderInternal
+ EndpointMap: { get_param: EndpointMap }
+ CloudName: {get_param: CloudName}
+ IP: {get_param: CinderApiVirtualIP}
+ UriSuffix: '/v2/%(tenant_id)s'
+ CinderV2Public:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: CinderPublic
+ EndpointMap: { get_param: EndpointMap }
+ CloudName: {get_param: CloudName}
+ IP: {get_param: PublicVirtualIP}
+ UriSuffix: '/v2/%(tenant_id)s'
+ CinderV2Admin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: CinderAdmin
+ EndpointMap: { get_param: EndpointMap }
+ CloudName: {get_param: CloudName}
+ IP: {get_param: CinderApiVirtualIP}
+ UriSuffix: '/v2/%(tenant_id)s'
+
+ GlanceInternal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: GlanceInternal
+ EndpointMap: { get_param: EndpointMap }
+ CloudName: {get_param: CloudName}
+ IP: {get_param: GlanceApiVirtualIP}
+ GlancePublic:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: GlancePublic
+ EndpointMap: { get_param: EndpointMap }
+ CloudName: {get_param: CloudName}
+ IP: {get_param: PublicVirtualIP}
+ GlanceAdmin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: GlanceAdmin
+ EndpointMap: { get_param: EndpointMap }
+ CloudName: {get_param: CloudName}
+ IP: {get_param: GlanceApiVirtualIP}
+ GlanceRegistryInternal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: GlanceRegistryInternal
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: GlanceRegistryVirtualIP}
+ GlanceRegistryPublic:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: GlanceRegistryPublic
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: PublicVirtualIP}
+ GlanceRegistryAdmin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: GlanceRegistryAdmin
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: GlanceRegistryVirtualIP}
+
+ HeatInternal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: HeatInternal
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: HeatApiVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v1/%(tenant_id)s'
+ HeatPublic:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: HeatPublic
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: PublicVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v1/%(tenant_id)s'
+ HeatAdmin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: HeatAdmin
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: HeatApiVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v1/%(tenant_id)s'
+
+ HorizonPublic:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: HorizonPublic
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: PublicVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/dashboard'
+
+ KeystoneInternal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: KeystoneInternal
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: KeystonePublicApiVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v2.0'
+ KeystonePublic:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: KeystonePublic
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: PublicVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v2.0'
+ KeystoneAdmin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: KeystoneAdmin
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: KeystoneAdminApiVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v2.0'
+ KeystoneEC2:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: KeystoneInternal
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: KeystonePublicApiVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v2.0/ec2tokens'
+
+ NeutronInternal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: NeutronInternal
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: NeutronApiVirtualIP}
+ CloudName: {get_param: CloudName}
+ NeutronPublic:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: NeutronPublic
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: PublicVirtualIP}
+ CloudName: {get_param: CloudName}
+ NeutronAdmin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: NeutronAdmin
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: NeutronApiVirtualIP}
+ CloudName: {get_param: CloudName}
+
+ NovaInternal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: NovaInternal
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: NovaApiVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v2/%(tenant_id)s'
+ NovaPublic:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: NovaPublic
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: PublicVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v2/%(tenant_id)s'
+ NovaAdmin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: NovaAdmin
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: NovaApiVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v2/%(tenant_id)s'
+ NovaV3Internal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: NovaInternal
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: NovaApiVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v3'
+ NovaV3Public:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: NovaPublic
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: PublicVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v3'
+ NovaV3Admin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: NovaAdmin
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: NovaApiVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v3'
+
+ NovaEC2Internal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: NovaEC2Internal
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: NovaApiVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/services/Cloud'
+ NovaEC2Public:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: NovaEC2Public
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: PublicVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/services/Cloud'
+ NovaEC2Admin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: NovaEC2Admin
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: NovaApiVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/services/Admin'
+
+ SwiftInternal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: SwiftInternal
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: SwiftProxyVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v1/AUTH_%(tenant_id)s'
+ SwiftPublic:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: SwiftPublic
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: PublicVirtualIP}
+ CloudName: {get_param: CloudName}
+ UriSuffix: '/v1/AUTH_%(tenant_id)s'
+ SwiftAdmin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: SwiftAdmin
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: SwiftProxyVirtualIP}
+ CloudName: {get_param: CloudName}
+ # No Suffix for the Admin interface
+ SwiftS3Internal:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: SwiftInternal
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: SwiftProxyVirtualIP}
+ CloudName: {get_param: CloudName}
+ SwiftS3Public:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: SwiftPublic
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: PublicVirtualIP}
+ CloudName: {get_param: CloudName}
+ SwiftS3Admin:
+ type: OS::TripleO::Endpoint
+ properties:
+ EndpointName: SwiftAdmin
+ EndpointMap: { get_param: EndpointMap }
+ IP: {get_param: SwiftProxyVirtualIP}
+ CloudName: {get_param: CloudName}
+
+outputs:
+ endpoint_map:
+ value:
+ CeilometerInternal: {get_attr: [ CeilometerInternal, endpoint] }
+ CeilometerPublic: {get_attr: [ CeilometerPublic, endpoint] }
+ CeilometerAdmin: {get_attr: [ CeilometerAdmin, endpoint] }
+ CinderInternal: {get_attr: [ CinderInternal, endpoint] }
+ CinderPublic: {get_attr: [ CinderPublic, endpoint] }
+ CinderAdmin: {get_attr: [ CinderAdmin, endpoint] }
+ CinderV2Internal: {get_attr: [ CinderV2Internal, endpoint] }
+ CinderV2Public: {get_attr: [ CinderV2Public, endpoint] }
+ CinderV2Admin: {get_attr: [ CinderV2Admin, endpoint] }
+ GlanceInternal: {get_attr: [ GlanceInternal, endpoint] }
+ GlancePublic: {get_attr: [ GlancePublic, endpoint] }
+ GlanceAdmin: {get_attr: [ GlanceAdmin, endpoint] }
+ GlanceRegistryInternal: {get_attr: [ GlanceRegistryInternal, endpoint] }
+ GlanceRegistryPublic: {get_attr: [ GlanceRegistryPublic, endpoint] }
+ GlanceRegistryAdmin: {get_attr: [ GlanceRegistryAdmin, endpoint] }
+ HeatInternal: {get_attr: [ HeatInternal, endpoint] }
+ HeatPublic: {get_attr: [ HeatPublic, endpoint] }
+ HeatAdmin: {get_attr: [ HeatAdmin, endpoint] }
+ HorizonPublic: {get_attr: [ HorizonPublic, endpoint] }
+ KeystoneInternal: {get_attr: [ KeystoneInternal, endpoint] }
+ KeystonePublic: {get_attr: [ KeystonePublic, endpoint] }
+ KeystoneAdmin: {get_attr: [ KeystoneAdmin, endpoint] }
+ KeystoneEC2: {get_attr: [ KeystoneEC2, endpoint] }
+ NeutronInternal: {get_attr: [ NeutronInternal, endpoint] }
+ NeutronPublic: {get_attr: [ NeutronPublic, endpoint] }
+ NeutronAdmin: {get_attr: [ NeutronAdmin, endpoint] }
+ NovaInternal: {get_attr: [ NovaInternal, endpoint] }
+ NovaPublic: {get_attr: [ NovaPublic, endpoint] }
+ NovaAdmin: {get_attr: [ NovaAdmin, endpoint] }
+ NovaV3Internal: {get_attr: [ NovaV3Internal, endpoint] }
+ NovaV3Public: {get_attr: [ NovaV3Public, endpoint] }
+ NovaV3Admin: {get_attr: [ NovaV3Admin, endpoint] }
+ NovaEC2Internal: {get_attr: [ NovaEC2Internal, endpoint] }
+ NovaEC2Public: {get_attr: [ NovaEC2Public, endpoint] }
+ NovaEC2Admin: {get_attr: [ NovaEC2Admin, endpoint] }
+ SwiftInternal: {get_attr: [ SwiftInternal, endpoint] }
+ SwiftPublic: {get_attr: [ SwiftPublic, endpoint] }
+ SwiftAdmin: {get_attr: [ SwiftAdmin, endpoint] }
+ SwiftS3Internal: {get_attr: [ SwiftS3Internal, endpoint] }
+ SwiftS3Public: {get_attr: [ SwiftS3Public, endpoint] }
+ SwiftS3Admin: {get_attr: [ SwiftS3Admin, endpoint] }
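
The EndpointMap default above is a plain json parameter, so overriding it via parameter_defaults replaces the whole map rather than merging into it. A minimal sketch of such an override (the hostname and the chosen entries are illustrative; a real override would have to repeat every key from the default map):

    parameter_defaults:
      CloudName: ci-overcloud.tripleo.org
      EndpointMap:
        KeystonePublic: {protocol: 'http', port: '5000', host: CLOUDNAME}
        HorizonPublic: {protocol: 'http', port: '80', host: CLOUDNAME}
        # ...remaining entries repeated from the default map above...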
diff --git a/network/external.yaml b/network/external.yaml
index e8f92a5e..3b24da7e 100644
--- a/network/external.yaml
+++ b/network/external.yaml
@@ -15,7 +15,7 @@ parameters:
type: json
ExternalNetAdminStateUp:
default: false
- description: This admin state of of the network.
+ description: This admin state of the network.
type: boolean
ExternalNetEnableDHCP:
default: false
diff --git a/network/internal_api.yaml b/network/internal_api.yaml
index 69154bef..6f8aa3a8 100644
--- a/network/internal_api.yaml
+++ b/network/internal_api.yaml
@@ -15,7 +15,7 @@ parameters:
type: json
InternalApiNetAdminStateUp:
default: false
- description: This admin state of of the network.
+ description: This admin state of the network.
type: boolean
InternalApiNetEnableDHCP:
default: false
diff --git a/network/management.yaml b/network/management.yaml
new file mode 100644
index 00000000..9bfaafa2
--- /dev/null
+++ b/network/management.yaml
@@ -0,0 +1,64 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Management network. System administration, SSH, DNS, NTP, etc. This network
+ would usually be the default gateway for the non-controller nodes.
+
+parameters:
+ # the defaults here work for static IP assignment (IPAM) only
+ ManagementNetCidr:
+ default: '10.0.1.0/24'
+ description: Cidr for the management network.
+ type: string
+ ManagementNetValueSpecs:
+ default: {'provider:physical_network': 'management', 'provider:network_type': 'flat'}
+ description: Value specs for the management network.
+ type: json
+ ManagementNetAdminStateUp:
+ default: false
+ description: This admin state of the network.
+ type: boolean
+ ManagementNetEnableDHCP:
+ default: false
+ description: Whether to enable DHCP on the associated subnet.
+ type: boolean
+ ManagementNetShared:
+ default: false
+ description: Whether this network is shared across all tenants.
+ type: boolean
+ ManagementNetName:
+ default: management
+ description: The name of the management network.
+ type: string
+ ManagementSubnetName:
+ default: management_subnet
+ description: The name of the management subnet in Neutron.
+ type: string
+ ManagementAllocationPools:
+ default: [{'start': '10.0.1.4', 'end': '10.0.1.250'}]
+ description: Ip allocation pool range for the management network.
+ type: json
+
+resources:
+ ManagementNetwork:
+ type: OS::Neutron::Net
+ properties:
+ admin_state_up: {get_param: ManagementNetAdminStateUp}
+ name: {get_param: ManagementNetName}
+ shared: {get_param: ManagementNetShared}
+ value_specs: {get_param: ManagementNetValueSpecs}
+
+ ManagementSubnet:
+ type: OS::Neutron::Subnet
+ properties:
+ cidr: {get_param: ManagementNetCidr}
+ enable_dhcp: {get_param: ManagementNetEnableDHCP}
+ name: {get_param: ManagementSubnetName}
+ network: {get_resource: ManagementNetwork}
+ allocation_pools: {get_param: ManagementAllocationPools}
+
+outputs:
+ OS::stack_id:
+ description: Neutron management network
+ value: {get_resource: ManagementNetwork}
+
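
When the management network is enabled, the defaults above can be adjusted through parameter_defaults; a small sketch with placeholder values:

    parameter_defaults:
      ManagementNetCidr: 172.20.0.0/24
      ManagementAllocationPools: [{'start': '172.20.0.10', 'end': '172.20.0.200'}]
      ManagementNetName: management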
diff --git a/network/networks.yaml b/network/networks.yaml
index 6618af38..ab50ae11 100644
--- a/network/networks.yaml
+++ b/network/networks.yaml
@@ -18,3 +18,6 @@ resources:
TenantNetwork:
type: OS::TripleO::Network::Tenant
+
+ ManagementNetwork:
+ type: OS::TripleO::Network::Management
diff --git a/network/ports/ctlplane_vip.yaml b/network/ports/ctlplane_vip.yaml
index 0d2945bc..7a7043bd 100644
--- a/network/ports/ctlplane_vip.yaml
+++ b/network/ports/ctlplane_vip.yaml
@@ -5,6 +5,10 @@ description: >
The IP address will be chosen automatically if FixedIPs is empty.
parameters:
+ ServiceName: # Here for compatibility with from_service.yaml
+ description: Name of the service to lookup
+ default: ''
+ type: string
NetworkName:
description: # Here for compatibility with isolated networks
default: ctlplane
@@ -13,8 +17,9 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
ControlPlaneNetwork:
description: The name of the undercloud Neutron control plane
diff --git a/network/ports/external.yaml b/network/ports/external.yaml
index 63e3eeb3..7624eb9f 100644
--- a/network/ports/external.yaml
+++ b/network/ports/external.yaml
@@ -13,8 +13,9 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
ControlPlaneNetwork: # Here for compatibility with ctlplane_vip.yaml
description: The name of the undercloud Neutron control plane
@@ -26,6 +27,12 @@ parameters:
[{'ip_address':'1.2.3.4'}]
default: []
type: json
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ type: number
resources:
diff --git a/network/ports/external_from_pool.yaml b/network/ports/external_from_pool.yaml
new file mode 100644
index 00000000..8e9dc7c2
--- /dev/null
+++ b/network/ports/external_from_pool.yaml
@@ -0,0 +1,45 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a network mapped list of IPs
+
+parameters:
+ ExternalNetName:
+ description: Name of the external network
+ default: external
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ IPPool:
+ default: {}
+ description: A network mapped list of IPs
+ type: json
+ NodeIndex:
+ default: 0
+ description: Index of the IP to get from Pool
+ type: number
+ ExternalNetCidr:
+ default: '10.0.0.0/24'
+ description: Cidr for the external network.
+ type: string
+
+outputs:
+ ip_address:
+ description: external network IP
+ value: {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the external network IP
+ value:
+ list_join:
+ - ''
+ - - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
+ - '/'
+ - {get_param: [ExternalNetCidr, -2]}
+ - {get_param: [ExternalNetCidr, -1]}
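
A hedged worked example of the pool lookup above (pool contents invented): with

    IPPool:
      external: ['10.0.0.251', '10.0.0.252', '10.0.0.253']
    NodeIndex: 1
    ExternalNetCidr: '10.0.0.0/24'

ip_address resolves to 10.0.0.252, and ip_subnet is built by appending only the last two characters of ExternalNetCidr, giving 10.0.0.252/24; a one-digit prefix such as /8 would come out wrong, which is what the FIXME flags.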
diff --git a/network/ports/from_service.yaml b/network/ports/from_service.yaml
new file mode 100644
index 00000000..6b669f41
--- /dev/null
+++ b/network/ports/from_service.yaml
@@ -0,0 +1,34 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a service mapped list of IPs
+
+parameters:
+ ServiceName:
+ description: Name of the service to lookup
+ default: ''
+ type: string
+ NetworkName: # Here for compatibility with ctlplane_vip.yaml
+ description: Name of the network where the VIP will be created
+ default: ctlplane
+ type: string
+ PortName: # Here for compatibility with ctlplane_vip.yaml
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with ctlplane_vip.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ ControlPlaneNetwork: # Here for compatibility with ctlplane_vip.yaml
+ description: The name of the undercloud Neutron control plane
+ default: ctlplane
+ type: string
+ ServiceVips:
+ default: {}
+ type: json
+
+outputs:
+ ip_address:
+ description: network IP
+ value: {get_param: [ServiceVips, {get_param: ServiceName}]}
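
A minimal sketch of the service lookup above (the service name and address are placeholders, not values defined in this change): with

    ServiceVips:
      redis: '172.16.2.100'
    ServiceName: redis

the ip_address output resolves to 172.16.2.100.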
diff --git a/network/ports/internal_api.yaml b/network/ports/internal_api.yaml
index da1b1856..f84e8f71 100644
--- a/network/ports/internal_api.yaml
+++ b/network/ports/internal_api.yaml
@@ -12,9 +12,22 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
+ FixedIPs:
+ description: >
+ Control the IP allocation for the VIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ default: []
+ type: json
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ type: number
resources:
@@ -23,6 +36,7 @@ resources:
properties:
network: {get_param: InternalApiNetName}
name: {get_param: PortName}
+ fixed_ips: {get_param: FixedIPs}
replacement_policy: AUTO
outputs:
diff --git a/network/ports/internal_api_from_pool.yaml b/network/ports/internal_api_from_pool.yaml
new file mode 100644
index 00000000..b98e1fb1
--- /dev/null
+++ b/network/ports/internal_api_from_pool.yaml
@@ -0,0 +1,45 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a network mapped list of IPs
+
+parameters:
+ InternalApiNetName:
+ description: Name of the internal API network
+ default: internal_api
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ IPPool:
+ default: {}
+ description: A network mapped list of IPs
+ type: json
+ NodeIndex:
+ default: 0
+ description: Index of the IP to get from Pool
+ type: number
+ InternalApiNetCidr:
+ default: '172.16.2.0/24'
+ description: Cidr for the internal API network.
+ type: string
+
+outputs:
+ ip_address:
+ description: internal API network IP
+ value: {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the internal API network IP
+ value:
+ list_join:
+ - ''
+ - - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
+ - '/'
+ - {get_param: [InternalApiNetCidr, -2]}
+ - {get_param: [InternalApiNetCidr, -1]}
diff --git a/network/ports/management.yaml b/network/ports/management.yaml
new file mode 100644
index 00000000..1d15ca60
--- /dev/null
+++ b/network/ports/management.yaml
@@ -0,0 +1,42 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Creates a port on the management network. The IP address will be chosen
+ automatically if FixedIPs is empty.
+
+parameters:
+ ManagementNetName:
+ description: Name of the management neutron network
+ default: management
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ type: string
+
+resources:
+
+ ManagementPort:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: ManagementNetName}
+ name: {get_param: PortName}
+ replacement_policy: AUTO
+
+outputs:
+ ip_address:
+ description: management network IP
+ value: {get_attr: [ManagementPort, fixed_ips, 0, ip_address]}
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the management network IP
+ value:
+ list_join:
+ - ''
+ - - {get_attr: [ManagementPort, fixed_ips, 0, ip_address]}
+ - '/'
+ - {get_attr: [ManagementPort, subnets, 0, cidr, -2]}
+ - {get_attr: [ManagementPort, subnets, 0, cidr, -1]}
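
As in the other port templates, ip_subnet appends only the last two characters of the Neutron subnet CIDR to the assigned address; a hedged illustration (addresses invented):

    # fixed_ips[0].ip_address -> 10.0.1.7
    # subnets[0].cidr         -> 10.0.1.0/24
    ip_address: 10.0.1.7
    ip_subnet: 10.0.1.7/24

so the FIXME's two-digit-prefix assumption applies here as well.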
diff --git a/network/ports/net_ip_list_map.yaml b/network/ports/net_ip_list_map.yaml
index 257d3f9b..32272bd6 100644
--- a/network/ports/net_ip_list_map.yaml
+++ b/network/ports/net_ip_list_map.yaml
@@ -19,6 +19,9 @@ parameters:
TenantIpList:
default: []
type: comma_delimited_list
+ ManagementIpList:
+ default: []
+ type: comma_delimited_list
outputs:
net_ip_map:
@@ -32,3 +35,4 @@ outputs:
storage: {get_param: StorageIpList}
storage_mgmt: {get_param: StorageMgmtIpList}
tenant: {get_param: TenantIpList}
+ management: {get_param: ManagementIpList}
diff --git a/network/ports/net_ip_map.yaml b/network/ports/net_ip_map.yaml
index 7aaed160..c6386025 100644
--- a/network/ports/net_ip_map.yaml
+++ b/network/ports/net_ip_map.yaml
@@ -19,6 +19,9 @@ parameters:
TenantIp:
default: ''
type: string
+ ManagementIp:
+ default: ''
+ type: string
outputs:
net_ip_map:
@@ -32,3 +35,4 @@ outputs:
storage: {get_param: StorageIp}
storage_mgmt: {get_param: StorageMgmtIp}
tenant: {get_param: TenantIp}
+ management: {get_param: ManagementIp}
diff --git a/network/ports/net_ip_subnet_map.yaml b/network/ports/net_ip_subnet_map.yaml
index cf59adb3..2f933eaa 100644
--- a/network/ports/net_ip_subnet_map.yaml
+++ b/network/ports/net_ip_subnet_map.yaml
@@ -19,6 +19,9 @@ parameters:
TenantIpSubnet:
default: ''
type: string
+ ManagementIpSubnet:
+ default: ''
+ type: string
ControlPlaneSubnetCidr: # Override this via parameter_defaults
default: '24'
description: The subnet CIDR of the control plane network.
@@ -41,3 +44,4 @@ outputs:
storage: {get_param: StorageIpSubnet}
storage_mgmt: {get_param: StorageMgmtIpSubnet}
tenant: {get_param: TenantIpSubnet}
+ management: {get_param: ManagementIpSubnet}
diff --git a/network/ports/net_vip_map_external.yaml b/network/ports/net_vip_map_external.yaml
new file mode 100644
index 00000000..23e1f992
--- /dev/null
+++ b/network/ports/net_vip_map_external.yaml
@@ -0,0 +1,50 @@
+heat_template_version: 2015-04-30
+
+parameters:
+ # Set these via parameter defaults to configure external VIPs
+ ControlPlaneIP:
+ default: ''
+ type: string
+ ExternalNetworkVip:
+ default: ''
+ type: string
+ InternalApiNetworkVip:
+ default: ''
+ type: string
+ StorageNetworkVip:
+ default: ''
+ type: string
+ StorageMgmtNetworkVip:
+ default: ''
+ type: string
+ # The following are unused in this template
+ ControlPlaneIp:
+ default: ''
+ type: string
+ ExternalIp:
+ default: ''
+ type: string
+ InternalApiIp:
+ default: ''
+ type: string
+ StorageIp:
+ default: ''
+ type: string
+ StorageMgmtIp:
+ default: ''
+ type: string
+ TenantIp:
+ default: ''
+ type: string
+
+outputs:
+ net_ip_map:
+ description: >
+ A Hash containing a mapping of network names to assigned IPs
+ for a specific machine.
+ value:
+ ctlplane: {get_param: ControlPlaneIP}
+ external: {get_param: ExternalNetworkVip}
+ internal_api: {get_param: InternalApiNetworkVip}
+ storage: {get_param: StorageNetworkVip}
+ storage_mgmt: {get_param: StorageMgmtNetworkVip}
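
A minimal sketch of the parameter_defaults this template consumes when the VIPs are hosted on an external load balancer (all addresses invented; wiring the template into the resource registry is outside this hunk):

    parameter_defaults:
      ControlPlaneIP: 192.0.2.250
      ExternalNetworkVip: 10.0.0.250
      InternalApiNetworkVip: 172.16.2.250
      StorageNetworkVip: 172.16.1.250
      StorageMgmtNetworkVip: 172.16.3.250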
diff --git a/network/ports/noop.yaml b/network/ports/noop.yaml
index 31ee6f3c..ac946cd9 100644
--- a/network/ports/noop.yaml
+++ b/network/ports/noop.yaml
@@ -4,6 +4,10 @@ description: >
Returns the control plane port (provisioning network) as the ip_address.
parameters:
+ ServiceName: # Here for compatibility with from_service.yaml
+ description: Name of the service to lookup
+ default: ''
+ type: string
ControlPlaneIP:
description: IP address on the control plane
type: string
@@ -16,7 +20,7 @@ parameters:
default: ''
type: string
NetworkName:
- description: # Here for compatability with vip.yaml
+ description: # Here for compatibility with vip.yaml
default: ''
type: string
FixedIPs:
@@ -27,6 +31,14 @@ parameters:
default: '24'
description: The subnet CIDR of the control plane network.
type: string
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ description: A network mapped list of IPs
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ description: Index of the IP to get from Pool
+ type: number
outputs:
ip_address:
diff --git a/network/ports/storage.yaml b/network/ports/storage.yaml
index ecb20b8f..a07e5a4f 100644
--- a/network/ports/storage.yaml
+++ b/network/ports/storage.yaml
@@ -12,9 +12,22 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
+ FixedIPs:
+ description: >
+ Control the IP allocation for the VIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ default: []
+ type: json
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ type: number
resources:
@@ -23,6 +36,7 @@ resources:
properties:
network: {get_param: StorageNetName}
name: {get_param: PortName}
+ fixed_ips: {get_param: FixedIPs}
replacement_policy: AUTO
outputs:
@@ -39,4 +53,3 @@ outputs:
- '/'
- {get_attr: [StoragePort, subnets, 0, cidr, -2]}
- {get_attr: [StoragePort, subnets, 0, cidr, -1]}
-
diff --git a/network/ports/storage_from_pool.yaml b/network/ports/storage_from_pool.yaml
new file mode 100644
index 00000000..668bc6f6
--- /dev/null
+++ b/network/ports/storage_from_pool.yaml
@@ -0,0 +1,45 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a network mapped list of IPs
+
+parameters:
+ StorageNetName:
+ description: Name of the storage network
+ default: storage
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ IPPool:
+ default: {}
+ description: A network mapped list of IPs
+ type: json
+ NodeIndex:
+ default: 0
+ description: Index of the IP to get from Pool
+ type: number
+ StorageNetCidr:
+ default: '172.16.1.0/24'
+ description: Cidr for the storage network.
+ type: string
+
+outputs:
+ ip_address:
+ description: storage network IP
+ value: {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the storage network IP
+ value:
+ list_join:
+ - ''
+ - - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
+ - '/'
+ - {get_param: [StorageNetCidr, -2]}
+ - {get_param: [StorageNetCidr, -1]}
diff --git a/network/ports/storage_mgmt.yaml b/network/ports/storage_mgmt.yaml
index 2ab39f21..4890bf5a 100644
--- a/network/ports/storage_mgmt.yaml
+++ b/network/ports/storage_mgmt.yaml
@@ -12,9 +12,22 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
+ FixedIPs:
+ description: >
+ Control the IP allocation for the VIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ default: []
+ type: json
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ type: number
resources:
@@ -23,6 +36,7 @@ resources:
properties:
network: {get_param: StorageMgmtNetName}
name: {get_param: PortName}
+ fixed_ips: {get_param: FixedIPs}
replacement_policy: AUTO
outputs:
diff --git a/network/ports/storage_mgmt_from_pool.yaml b/network/ports/storage_mgmt_from_pool.yaml
new file mode 100644
index 00000000..bea87105
--- /dev/null
+++ b/network/ports/storage_mgmt_from_pool.yaml
@@ -0,0 +1,45 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a network mapped list of IPs
+
+parameters:
+ StorageMgmtNetName:
+ description: Name of the storage MGMT network
+ default: storage_mgmt
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ IPPool:
+ default: {}
+ description: A network mapped list of IPs
+ type: json
+ NodeIndex:
+ default: 0
+ description: Index of the IP to get from Pool
+ type: number
+ StorageMgmtNetCidr:
+ default: '172.16.3.0/24'
+ description: Cidr for the storage MGMT network.
+ type: string
+
+outputs:
+ ip_address:
+ description: storage MGMT network IP
+ value: {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the storage MGMT network IP
+ value:
+ list_join:
+ - ''
+ - - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
+ - '/'
+ - {get_param: [StorageMgmtNetCidr, -2]}
+ - {get_param: [StorageMgmtNetCidr, -1]}
diff --git a/network/ports/tenant.yaml b/network/ports/tenant.yaml
index aae12d46..86c58f2f 100644
--- a/network/ports/tenant.yaml
+++ b/network/ports/tenant.yaml
@@ -12,9 +12,22 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
+ FixedIPs:
+ description: >
+ Control the IP allocation for the VIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ default: []
+ type: json
+ IPPool: # Here for compatibility with from_pool.yaml
+ default: {}
+ type: json
+ NodeIndex: # Here for compatibility with from_pool.yaml
+ default: 0
+ type: number
resources:
@@ -23,6 +36,7 @@ resources:
properties:
network: {get_param: TenantNetName}
name: {get_param: PortName}
+ fixed_ips: {get_param: FixedIPs}
replacement_policy: AUTO
outputs:
diff --git a/network/ports/tenant_from_pool.yaml b/network/ports/tenant_from_pool.yaml
new file mode 100644
index 00000000..29303bb6
--- /dev/null
+++ b/network/ports/tenant_from_pool.yaml
@@ -0,0 +1,45 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Returns an IP from a network mapped list of IPs
+
+parameters:
+ TenantNetName:
+ description: Name of the tenant network
+ default: tenant
+ type: string
+ PortName:
+ description: Name of the port
+ default: ''
+ type: string
+ ControlPlaneIP: # Here for compatibility with noop.yaml
+ description: IP address on the control plane
+ default: ''
+ type: string
+ IPPool:
+ default: {}
+ description: A network mapped list of IPs
+ type: json
+ NodeIndex:
+ default: 0
+ description: Index of the IP to get from Pool
+ type: number
+ TenantNetCidr:
+ default: '172.16.0.0/24'
+ description: Cidr for the tenant network.
+ type: string
+
+outputs:
+ ip_address:
+ description: tenant network IP
+ value: {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
+ ip_subnet:
+ # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+ description: IP/Subnet CIDR for the tenant network IP
+ value:
+ list_join:
+ - ''
+ - - {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
+ - '/'
+ - {get_param: [TenantNetCidr, -2]}
+ - {get_param: [TenantNetCidr, -1]}
diff --git a/network/ports/vip.yaml b/network/ports/vip.yaml
index 299579dc..9bb6cde2 100644
--- a/network/ports/vip.yaml
+++ b/network/ports/vip.yaml
@@ -5,6 +5,10 @@ description: >
The IP address will be chosen automatically if FixedIPs is empty.
parameters:
+ ServiceName: # Here for compatibility with from_service.yaml
+ description: Name of the service to lookup
+ default: ''
+ type: string
NetworkName:
description: Name of the network where the VIP will be created
default: internal_api
@@ -13,8 +17,9 @@ parameters:
description: Name of the port
default: ''
type: string
- ControlPlaneIP: # Here for compatability with noop.yaml
+ ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
ControlPlaneNetwork:
description: The name of the undercloud Neutron control plane
diff --git a/network/storage.yaml b/network/storage.yaml
index 60b779e0..dc9f35ea 100644
--- a/network/storage.yaml
+++ b/network/storage.yaml
@@ -15,7 +15,7 @@ parameters:
type: json
StorageNetAdminStateUp:
default: false
- description: This admin state of of the network.
+ description: This admin state of the network.
type: boolean
StorageNetEnableDHCP:
default: false
diff --git a/network/storage_mgmt.yaml b/network/storage_mgmt.yaml
index 043bc87b..59933c8c 100644
--- a/network/storage_mgmt.yaml
+++ b/network/storage_mgmt.yaml
@@ -15,7 +15,7 @@ parameters:
type: json
StorageMgmtNetAdminStateUp:
default: false
- description: This admin state of of the network.
+ description: This admin state of the network.
type: boolean
StorageMgmtNetEnableDHCP:
default: false
diff --git a/network/tenant.yaml b/network/tenant.yaml
index daf5cb75..6fe96121 100644
--- a/network/tenant.yaml
+++ b/network/tenant.yaml
@@ -15,7 +15,7 @@ parameters:
type: json
TenantNetAdminStateUp:
default: false
- description: This admin state of of the network.
+ description: This admin state of the network.
type: boolean
TenantNetEnableDHCP:
default: false
diff --git a/os-apply-config/all-nodes-config.yaml b/os-apply-config/all-nodes-config.yaml
deleted file mode 100644
index 3f0bd61c..00000000
--- a/os-apply-config/all-nodes-config.yaml
+++ /dev/null
@@ -1,93 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'All Nodes Config'
-
-parameters:
- compute_hosts:
- type: comma_delimited_list
- controller_hosts:
- type: comma_delimited_list
- controller_ips:
- type: comma_delimited_list
- block_storage_hosts:
- type: comma_delimited_list
- object_storage_hosts:
- type: comma_delimited_list
- ceph_storage_hosts:
- type: comma_delimited_list
- controller_names:
- type: comma_delimited_list
- rabbit_node_ips:
- type: comma_delimited_list
- mongo_node_ips:
- type: comma_delimited_list
- redis_node_ips:
- type: comma_delimited_list
- memcache_node_ips:
- type: comma_delimited_list
- mysql_node_ips:
- type: comma_delimited_list
- horizon_node_ips:
- type: comma_delimited_list
- heat_api_node_ips:
- type: comma_delimited_list
- swift_proxy_node_ips:
- type: comma_delimited_list
- ceilometer_api_node_ips:
- type: comma_delimited_list
- nova_api_node_ips:
- type: comma_delimited_list
- nova_metadata_node_ips:
- type: comma_delimited_list
- glance_api_node_ips:
- type: comma_delimited_list
- glance_registry_node_ips:
- type: comma_delimited_list
- cinder_api_node_ips:
- type: comma_delimited_list
- neutron_api_node_ips:
- type: comma_delimited_list
- keystone_public_api_node_ips:
- type: comma_delimited_list
- keystone_admin_api_node_ips:
- type: comma_delimited_list
-
-resources:
-
- allNodesConfigImpl:
- type: OS::Heat::StructuredConfig
- properties:
- config:
- completion-signal: {get_input: deploy_signal_id}
- hosts:
- list_join:
- - "\n"
- - - list_join:
- - "\n"
- - {get_param: compute_hosts}
- - list_join:
- - "\n"
- - {get_param: controller_hosts}
- - list_join:
- - "\n"
- - {get_param: block_storage_hosts}
- - list_join:
- - "\n"
- - {get_param: object_storage_hosts}
- - list_join:
- - "\n"
- - {get_param: ceph_storage_hosts}
- rabbit:
- nodes:
- list_join:
- - ','
- - {get_param: controller_names}
- sysctl:
- net.ipv4.tcp_keepalive_time: 5
- net.ipv4.tcp_keepalive_probes: 5
- net.ipv4.tcp_keepalive_intvl: 1
-
-outputs:
- config_id:
- description: The ID of the allNodesConfigImpl resource.
- value:
- {get_resource: allNodesConfigImpl}
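The deleted all-nodes-config.yaml built its hosts blob with nested list_join calls: each per-role comma_delimited_list is joined with "\n", and the resulting per-role blocks are joined with "\n" again. An illustrative reduction with hypothetical inputs (addresses and hostnames invented for the example):

    # compute_hosts:    ["192.0.2.5 compute-0"]
    # controller_hosts: ["192.0.2.2 controller-0", "192.0.2.3 controller-1"]
    hosts:
      list_join:
        - "\n"
        - - list_join: ["\n", {get_param: compute_hosts}]
          - list_join: ["\n", {get_param: controller_hosts}]
    # resolves to:
    #   192.0.2.5 compute-0
    #   192.0.2.2 controller-0
    #   192.0.2.3 controller-1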
diff --git a/os-apply-config/ceph-cluster-config.yaml b/os-apply-config/ceph-cluster-config.yaml
deleted file mode 100644
index c3cf8e8a..00000000
--- a/os-apply-config/ceph-cluster-config.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Ceph Cluster config data'
-
-parameters:
- ceph_storage_count:
- default: 0
- type: number
- description: Number of Ceph storage nodes. Used to enable/disable managed Ceph installation.
- ceph_external_mon_ips:
- default: ''
- type: string
- description: List of external Ceph Mon host IPs.
- ceph_client_key:
- default: ''
- type: string
- description: Ceph key used to create the 'openstack' user keyring.
- ceph_fsid:
- default: ''
- type: string
- ceph_admin_key:
- default: ''
- type: string
- ceph_mon_key:
- default: ''
- type: string
- ceph_mon_names:
- type: comma_delimited_list
- ceph_mon_ips:
- type: comma_delimited_list
-
-resources:
- CephClusterConfigImpl:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- ceph_cluster:
- mon_initial_members:
- list_join:
- - ','
- - {get_param: ceph_mon_names}
- mon_host:
- list_join:
- - ','
- - {get_param: ceph_mon_ips}
- fsid: {get_param: ceph_fsid}
- admin_key: {get_param: ceph_admin_key}
- mon_key: {get_param: ceph_mon_key}
- # We would need a dedicated key for OSD bootstrap
- bootstrap_osd_key: {get_param: ceph_mon_key}
- osds: '{"/srv/data": {}}'
-
-outputs:
- config_id:
- description: The ID of the CephClusterConfigImpl resource.
- value:
- {get_resource: CephClusterConfigImpl}
diff --git a/os-apply-config/ceph-storage-post.yaml b/os-apply-config/ceph-storage-post.yaml
deleted file mode 100644
index 734f90bd..00000000
--- a/os-apply-config/ceph-storage-post.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Ceph Storage Post Deployment'
-# NOTE: this is a noop for os-apply-config style deployments because
-# post deployment ordering is controlled by tripleo-image-elements
-
-parameters:
- servers:
- type: json
- NodeConfigIdentifiers:
- type: json
- description: Value which changes if the node configuration may need to be re-applied
-
-resources:
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ExtraConfig:
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: servers}
-
diff --git a/os-apply-config/ceph-storage.yaml b/os-apply-config/ceph-storage.yaml
deleted file mode 100644
index fc321d88..00000000
--- a/os-apply-config/ceph-storage.yaml
+++ /dev/null
@@ -1,172 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Common Ceph Storage Configuration'
-parameters:
- Image:
- type: string
- default: overcloud-ceph-storage
- KeyName:
- default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
- type: string
- Flavor:
- default: baremetal
- description: Flavor for block storage nodes to request when deploying.
- type: string
- CephClusterFSID:
- default: ''
- type: string
- description: The Ceph cluster FSID. Must be a UUID.
- CephMonKey:
- default: ''
- description: The Ceph monitors key. Can be created with ceph-authtool --gen-print-key.
- type: string
- hidden: true
- CephAdminKey:
- default: ''
- description: The Ceph admin client key. Can be created with ceph-authtool --gen-print-key.
- type: string
- hidden: true
- CephMonitors:
- default: ''
- description: The list of ip/names to use as Ceph monitors
- type: json
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry.
- type: json
- UpdateIdentifier:
- default: ''
- type: string
- description: >
- Setting to a previously unused value during stack-update will trigger
- package update on all nodes
- Hostname:
- type: string
- default: '' # Defaults to Heat created hostname
- ExtraConfig:
- default: {}
- description: |
- Additional configuration to inject into the cluster. Note
- that CephStorageExtraConfig takes precedence over ExtraConfig.
- type: json
- CephStorageExtraConfig:
- default: {}
- description: |
- Role specific additional configuration to inject into the cluster.
- type: json
-
-
-resources:
- CephStorage:
- type: OS::Nova::Server
- properties:
- image:
- {get_param: Image}
- flavor: {get_param: OvercloudCephStorageFlavor}
- key_name: {get_param: KeyName}
- networks:
- - network: ctlplane
- user_data_format: SOFTWARE_CONFIG
- user_data: {get_resource: NodeUserData}
- name: {get_param: Hostname}
-
- NodeUserData:
- type: OS::TripleO::NodeUserData
-
- StoragePort:
- type: OS::TripleO::CephStorage::Ports::StoragePort
- properties:
- ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
-
- StorageMgmtPort:
- type: OS::TripleO::CephStorage::Ports::StorageMgmtPort
- properties:
- ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
-
- NetworkConfig:
- type: OS::TripleO::CephStorage::Net::SoftwareConfig
- properties:
- ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
- StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
- StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
-
- NetworkDeployment:
- type: OS::TripleO::SoftwareDeployment
- properties:
- config: {get_resource: NetworkConfig}
- server: {get_resource: CephStorage}
-
- CephStorageDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- server: {get_resource: CephStorage}
- config: {get_resource: CephStorageConfig}
- signal_transport: NO_SIGNAL
- CephStorageConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- ceph:
- fsid: {get_param: CephClusterFSID}
- keyrings:
- admin:
- key: {get_param: CephAdminKey}
- mon_nodes: {get_param: CephMonitors}
- ControllerCephDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- server: {get_resource: Controller}
- config: {get_resource: ControllerCephConfig}
- signal_transport: NO_SIGNAL
- ControllerCephConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- cinder:
- include_ceph_backend: true
- ceph:
- fsid: {get_param: CephClusterFSID}
- keyrings:
- mon:
- key: {get_param: CephMonKey}
- admin:
- key: {get_param: CephAdminKey}
- mon_nodes: {get_param: CephMonitors}
- NovaComputeCephDeployment:
- depends_on: [ControllerCephDeployment]
- type: OS::Heat::StructuredDeployment
- properties:
- server: {get_resource: NovaCompute}
- config: {get_resource: NovaComputeCephConfig}
- signal_transport: NO_SIGNAL
- NovaComputeCephConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- ceph:
- fsid: {get_param: CephClusterFSID}
- keyrings:
- admin:
- key: {get_param: CephAdminKey}
- mon_nodes: {get_param: CephMonitors}
-outputs:
- hosts_entry:
- value:
- str_replace:
- template: "IP HOST"
- params:
- IP: {get_attr: [CephStorage, networks, ctlplane, 0]}
- HOST: {get_attr: [CephStorage, name]}
- storage_ip_address:
- description: IP address of the server in the storage network
- value: {get_attr: [StoragePort, ip_address]}
- storage_mgmt_ip_address:
- description: IP address of the server in the storage_mgmt network
- value: {get_attr: [StorageMgmtPort, ip_address]}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value: "None - NO_SIGNAL"
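The hosts_entry output of the deleted ceph-storage.yaml is a plain str_replace substitution; with hypothetical values for the ctlplane address and server name it reduces as follows:

    str_replace:
      template: "IP HOST"
      params:
        IP: 192.0.2.10                    # hypothetical ctlplane address
        HOST: overcloud-cephstorage-0     # hypothetical Nova server name
    # resolves to the single line: 192.0.2.10 overcloud-cephstorage-0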
diff --git a/os-apply-config/cinder-storage-post.yaml b/os-apply-config/cinder-storage-post.yaml
deleted file mode 100644
index ad4e0460..00000000
--- a/os-apply-config/cinder-storage-post.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Common Block Storage Post Deployment'
-# NOTE: this is a noop for os-apply-config style deployments because
-# post deployment ordering is controlled by tripleo-image-elements
-
-parameters:
- servers:
- type: json
- NodeConfigIdentifiers:
- type: json
- description: Value which changes if the node configuration may need to be re-applied
-
-resources:
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ExtraConfig:
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: servers}
-
diff --git a/os-apply-config/cinder-storage.yaml b/os-apply-config/cinder-storage.yaml
deleted file mode 100644
index 7f1164c4..00000000
--- a/os-apply-config/cinder-storage.yaml
+++ /dev/null
@@ -1,234 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Common Block Storage Configuration'
-parameters:
- Image:
- default: overcloud-cinder-volume
- type: string
- CinderEnableIscsiBackend:
- default: true
- description: Whether to enable or not the Iscsi backend for Cinder
- type: boolean
- CinderISCSIHelper:
- default: tgtadm
- description: The iSCSI helper to use with cinder.
- type: string
- CinderLVMLoopDeviceSize:
- default: 5000
- description: The size of the loopback file used by the cinder LVM driver.
- type: number
- CinderPassword:
- default: unset
- description: The password for the cinder service and db account, used by cinder-api.
- type: string
- hidden: true
- VirtualIP:
- default: ''
- type: string
- ExtraConfig:
- default: {}
- description: |
- Additional configuration to inject into the cluster. The JSON should have
- the following structure:
- {"FILEKEY":
- {"config":
- [{"section": "SECTIONNAME",
- "values":
- [{"option": "OPTIONNAME",
- "value": "VALUENAME"
- }
- ]
- }
- ]
- }
- }
- For instance:
- {"nova":
- {"config":
- [{"section": "default",
- "values":
- [{"option": "force_config_drive",
- "value": "always"
- }
- ]
- },
- {"section": "cells",
- "values":
- [{"option": "driver",
- "value": "nova.cells.rpc_driver.CellsRPCDriver"
- }
- ]
- }
- ]
- }
- }
- type: json
- BlockStorageExtraConfig:
- default: {}
- description: |
- Role specific additional configuration to inject into the cluster.
- type: json
- Flavor:
- description: Flavor for block storage nodes to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- GlancePort:
- default: "9292"
- description: Glance port.
- type: string
- GlanceProtocol:
- default: http
- description: Protocol to use when connecting to glance, set to https for SSL.
- type: string
- KeyName:
- default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
- type: string
- RabbitPassword:
- default: 'guest'
- type: string
- hidden: true
- RabbitUserName:
- default: 'guest'
- type: string
- RabbitClientUseSSL:
- default: false
- description: >
- Rabbit client subscriber parameter to specify
- an SSL connection to the RabbitMQ host.
- type: string
- RabbitClientPort:
- default: 5672
- description: Set rabbit subscriber port, change this if using SSL
- type: number
- SnmpdReadonlyUserName:
- default: ro_snmp_user
- description: The user name for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- SnmpdReadonlyUserPassword:
- default: unset
- description: The user password for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- hidden: true
- UpdateIdentifier:
- default: ''
- type: string
- description: >
- Setting to a previously unused value during stack-update will trigger
- package update on all nodes
- Hostname:
- type: string
- default: '' # Defaults to Heat created hostname
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry.
- type: json
- GlanceApiVirtualIP:
- type: string
- default: ''
- MysqlVirtualIP:
- type: string
- default: ''
-
-resources:
- BlockStorage:
- type: OS::Nova::Server
- properties:
- image:
- {get_param: Image}
- flavor: {get_param: Flavor}
- key_name: {get_param: KeyName}
- networks:
- - network: ctlplane
- user_data_format: SOFTWARE_CONFIG
- user_data: {get_resource: NodeUserData}
- name: {get_param: Hostname}
-
- NodeUserData:
- type: OS::TripleO::NodeUserData
-
- InternalApiPort:
- type: OS::TripleO::BlockStorage::Ports::InternalApiPort
- properties:
- ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
-
- StoragePort:
- type: OS::TripleO::BlockStorage::Ports::StoragePort
- properties:
- ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
-
- StorageMgmtPort:
- type: OS::TripleO::BlockStorage::Ports::StorageMgmtPort
- properties:
- ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
-
- NetworkConfig:
- type: OS::TripleO::BlockStorage::Net::SoftwareConfig
- properties:
- ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
- InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
- StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
- StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
-
- NetworkDeployment:
- type: OS::TripleO::SoftwareDeployment
- properties:
- config: {get_resource: NetworkConfig}
- server: {get_resource: BlockStorage}
-
- BlockStorageDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- server: {get_resource: BlockStorage}
- config: {get_resource: BlockStorageConfig}
- input_values:
- controller_virtual_ip: {get_param: VirtualIP}
- cinder_dsn: {list_join: ['', ['mysql://cinder:', {get_param: CinderPassword}, '@', {get_param: VirtualIP} , '/cinder']]}
- snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
- snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
- signal_transport: NO_SIGNAL
- BlockStorageConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- keystone:
- host: {get_input: controller_virtual_ip}
- cinder:
- db: {get_input: cinder_dsn}
- volume_size_mb:
- get_param: CinderLVMLoopDeviceSize
- iscsi-helper:
- get_param: CinderISCSIHelper
- snmpd:
- export_MIB: UCD-SNMP-MIB
- readonly_user_name: {get_input: snmpd_readonly_user_name}
- readonly_user_password: {get_input: snmpd_readonly_user_password}
- rabbit:
- host: {get_input: controller_virtual_ip}
- username: {get_param: RabbitUserName}
- password: {get_param: RabbitPassword}
- glance:
- host: {get_input: controller_virtual_ip}
- port: {get_param: GlancePort}
-outputs:
- hosts_entry:
- value:
- str_replace:
- template: "IP HOST"
- params:
- IP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
- HOST: {get_attr: [BlockStorage, name]}
- internal_api_ip_address:
- description: IP address of the server in the internal_api network
- value: {get_attr: [InternalApiPort, ip_address]}
- storage_ip_address:
- description: IP address of the server in the storage network
- value: {get_attr: [StoragePort, ip_address]}
- storage_mgmt_ip_address:
- description: IP address of the server in the storage_mgmt network
- value: {get_attr: [StorageMgmtPort, ip_address]}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value: "None - NO_SIGNAL"
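The cinder_dsn input in the deleted cinder-storage.yaml is assembled with a zero-delimiter list_join; with hypothetical parameter values it reduces to an ordinary SQLAlchemy-style connection string:

    cinder_dsn:
      list_join:
        - ''
        - - 'mysql://cinder:'
          - {get_param: CinderPassword}   # e.g. 'secret' (hypothetical)
          - '@'
          - {get_param: VirtualIP}        # e.g. '192.0.2.20' (hypothetical)
          - '/cinder'
    # resolves to: mysql://cinder:secret@192.0.2.20/cinder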
diff --git a/os-apply-config/compute-post.yaml b/os-apply-config/compute-post.yaml
deleted file mode 100644
index 695690d4..00000000
--- a/os-apply-config/compute-post.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Compute Post Deployment'
-# NOTE: this is a noop for os-apply-config style deployments because
-# post deployment ordering is controlled by tripleo-image-elements
-
-parameters:
- servers:
- type: json
- NodeConfigIdentifiers:
- type: json
- description: Value which changes if the node configuration may need to be re-applied
-
-resources:
-
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ExtraConfig:
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: servers}
-
diff --git a/os-apply-config/compute.yaml b/os-apply-config/compute.yaml
deleted file mode 100644
index ee55c587..00000000
--- a/os-apply-config/compute.yaml
+++ /dev/null
@@ -1,562 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
- OpenStack hypervisor node. Can be wrapped in a ResourceGroup for scaling.
-
-parameters:
- AdminPassword:
- default: unset
- description: The password for the keystone admin account, used for monitoring, querying neutron etc.
- type: string
- hidden: true
- CeilometerComputeAgent:
- description: Indicates whether the Compute agent is present and expects nova-compute to be configured accordingly
- type: string
- default: ''
- constraints:
- - allowed_values: ['', Present]
- CeilometerMeteringSecret:
- default: unset
- description: Secret shared by the ceilometer services.
- type: string
- hidden: true
- CeilometerPassword:
- default: unset
- description: The password for the ceilometer service account.
- type: string
- hidden: true
- CinderEnableNfsBackend:
- default: false
- description: Whether to enable or not the NFS backend for Cinder
- type: boolean
- CinderEnableRbdBackend:
- default: false
- description: Whether to enable or not the Rbd backend for Cinder
- type: boolean
- Debug:
- default: ''
- description: Set to True to enable debugging on all services.
- type: string
- ExtraConfig:
- default: {}
- description: |
- Additional configuration to inject into the cluster. The JSON should have
- the following structure:
- {"FILEKEY":
- {"config":
- [{"section": "SECTIONNAME",
- "values":
- [{"option": "OPTIONNAME",
- "value": "VALUENAME"
- }
- ]
- }
- ]
- }
- }
- For instance:
- {"nova":
- {"config":
- [{"section": "default",
- "values":
- [{"option": "force_config_drive",
- "value": "always"
- }
- ]
- },
- {"section": "cells",
- "values":
- [{"option": "driver",
- "value": "nova.cells.rpc_driver.CellsRPCDriver"
- }
- ]
- }
- ]
- }
- }
- type: json
- Flavor:
- description: Flavor for the nova compute node
- type: string
- constraints:
- - custom_constraint: nova.flavor
- GlanceHost:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- GlancePort:
- default: "9292"
- description: Glance port.
- type: string
- GlanceProtocol:
- default: http
- description: Protocol to use when connecting to glance, set to https for SSL.
- type: string
- Image:
- type: string
- default: overcloud-compute
- constraints:
- - custom_constraint: glance.image
- ImageUpdatePolicy:
- default: 'REBUILD_PRESERVE_EPHEMERAL'
- description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
- type: string
- KeyName:
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
- type: string
- default: default
- constraints:
- - custom_constraint: nova.keypair
- KeystoneAdminApiVirtualIP:
- type: string
- default: ''
- KeystonePublicApiVirtualIP:
- type: string
- default: ''
- NeutronBridgeMappings:
- description: >
- The OVS logical->physical bridge mappings to use. See the Neutron
- documentation for details. Defaults to mapping br-ex - the external
- bridge on hosts - to a physical name 'datacentre' which can be used
- to create provider networks (and we use this for the default floating
- network) - if changing this either use different post-install network
- scripts or be sure to keep 'datacentre' as a mapping network name.
- type: string
- default: "datacentre:br-ex"
- NeutronEnableTunnelling:
- type: string
- default: "True"
- NeutronFlatNetworks:
- type: string
- default: 'datacentre'
- description: >
- If set, flat networks to configure in neutron plugins.
- NeutronHost:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- NeutronNetworkType:
- type: string
- description: The tenant network type for Neutron, either gre or vxlan.
- default: 'vxlan'
- NeutronNetworkVLANRanges:
- default: 'datacentre'
- description: >
- The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
- Neutron documentation for permitted values. Defaults to permitting any
- VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
- type: comma_delimited_list
- NeutronPassword:
- default: unset
- description: The password for the neutron service account, used by neutron agents.
- type: string
- hidden: true
- NeutronPhysicalBridge:
- default: ''
- description: An OVS bridge to create for accessing external networks.
- type: string
- NeutronPublicInterface:
- default: nic1
- description: A port to add to the NeutronPhysicalBridge.
- type: string
- NeutronTunnelTypes:
- type: string
- description: |
- The tunnel types for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'gre,vxlan'
- default: 'vxlan'
- NeutronTunnelIdRanges:
- description: |
- Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
- of GRE tunnel IDs that are available for tenant network allocation
- default: ["1:1000", ]
- type: comma_delimited_list
- NeutronVniRanges:
- description: |
- Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges
- of VXLAN VNI IDs that are available for tenant network allocation
- default: ["1:1000", ]
- type: comma_delimited_list
- NeutronPublicInterfaceRawDevice:
- default: ''
- type: string
- NeutronDVR:
- default: 'False'
- type: string
- NeutronMetadataProxySharedSecret:
- default: 'unset'
- description: Shared secret to prevent spoofing
- type: string
- hidden: true
- NeutronCorePlugin:
- default: "ml2"
- description: |
- The core plugin for Neutron. The value should be the entrypoint to be loaded
- from neutron.core_plugins namespace.
- type: string
- NeutronServicePlugins:
- default: "router"
- description: |
- Comma-separated list of service plugin entrypoints to be loaded from the
- neutron.service_plugins namespace.
- type: comma_delimited_list
- NeutronTypeDrivers:
- default: "vxlan,vlan,flat,gre"
- description: |
- Comma-separated list of network type driver entrypoints to be loaded.
- type: comma_delimited_list
- NeutronMechanismDrivers:
- default: 'openvswitch'
- description: |
- The mechanism drivers for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'openvswitch,l2_population'
- type: string
- # Not relevant for Computes, should be removed
- NeutronAllowL3AgentFailover:
- default: 'True'
- description: Allow automatic l3-agent failover
- type: string
- # Not relevant for Computes, should be removed
- NeutronL3HA:
- default: 'False'
- description: Whether to enable l3-agent HA
- type: string
- NeutronAgentMode:
- default: 'dvr_snat'
- description: Agent mode for the neutron-l3-agent on the controller hosts
- type: string
- NovaApiHost:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- NovaComputeDriver:
- type: string
- default: libvirt.LibvirtDriver
- NovaComputeExtraConfig:
- default: {}
- description: |
- NovaCompute specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
- NovaComputeLibvirtType:
- type: string
- default: ''
- NovaEnableRbdBackend:
- default: false
- description: Whether to enable or not the Rbd backend for Nova
- type: boolean
- NovaPassword:
- default: unset
- description: The password for the nova service account, used by nova-api.
- type: string
- hidden: true
- NovaPublicIP:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- NtpServer:
- type: string
- default: ''
- RabbitHost:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- RabbitPassword:
- default: guest
- description: The password for RabbitMQ
- type: string
- hidden: true
- RabbitUserName:
- default: guest
- description: The username for RabbitMQ
- type: string
- RabbitClientUseSSL:
- default: false
- description: >
- Rabbit client subscriber parameter to specify
- an SSL connection to the RabbitMQ host.
- type: string
- RabbitClientPort:
- default: 5672
- description: Set rabbit subscriber port, change this if using SSL
- type: number
- SnmpdReadonlyUserName:
- default: ro_snmp_user
- description: The user name for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- SnmpdReadonlyUserPassword:
- default: unset
- description: The user password for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- hidden: true
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry.
- type: json
- UpdateIdentifier:
- default: ''
- type: string
- description: >
- Setting to a previously unused value during stack-update will trigger
- package update on all nodes
- Hostname:
- type: string
- default: '' # Defaults to Heat created hostname
-
-resources:
-
- NovaCompute:
- type: OS::Nova::Server
- properties:
- image:
- {get_param: Image}
- image_update_policy:
- get_param: ImageUpdatePolicy
- flavor: {get_param: Flavor}
- key_name: {get_param: KeyName}
- networks:
- - network: ctlplane
- user_data_format: SOFTWARE_CONFIG
- user_data: {get_resource: NodeUserData}
- name: {get_param: Hostname}
-
- NodeUserData:
- type: OS::TripleO::NodeUserData
-
- InternalApiPort:
- type: OS::TripleO::Compute::Ports::InternalApiPort
- properties:
- ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
-
- StoragePort:
- type: OS::TripleO::Compute::Ports::StoragePort
- properties:
- ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
-
- TenantPort:
- type: OS::TripleO::Compute::Ports::TenantPort
- properties:
- ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
-
- NetworkConfig:
- type: OS::TripleO::Compute::Net::SoftwareConfig
- properties:
- ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
- InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
- StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
- TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
-
- NetworkDeployment:
- type: OS::TripleO::SoftwareDeployment
- properties:
- signal_transport: NO_SIGNAL
- config: {get_resource: NetworkConfig}
- server: {get_resource: NovaCompute}
- input_values:
- bridge_name: {get_param: NeutronPhysicalBridge}
- interface_name: {get_param: NeutronPublicInterface}
-
- NovaComputeConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- nova:
- compute_driver: { get_input: nova_compute_driver }
- compute_libvirt_type: { get_input: nova_compute_libvirt_type }
- debug: {get_input: debug}
- host: {get_input: nova_api_host}
- public_ip: {get_input: nova_public_ip}
- service-password: {get_input: nova_password}
- ceilometer:
- debug: {get_input: debug}
- metering_secret: {get_input: ceilometer_metering_secret}
- service-password: {get_input: ceilometer_password}
- compute_agent: {get_input: ceilometer_compute_agent}
- snmpd:
- export_MIB: UCD-SNMP-MIB
- readonly_user_name: {get_input: snmpd_readonly_user_name}
- readonly_user_password: {get_input: snmpd_readonly_user_password}
- glance:
- debug: {get_input: debug}
- host: {get_input: glance_host}
- port: {get_input: glance_port}
- protocol: {get_input: glance_protocol}
- keystone:
- debug: {get_input: debug}
- host: {get_input: keystone_host}
- neutron:
- debug: {get_input: debug}
- flat-networks: {get_input: neutron_flat_networks}
- host: {get_input: neutron_host}
- router_distributed: {get_input: neutron_router_distributed}
- agent_mode: {get_input: neutron_agent_mode}
- ovs_db: {get_input: neutron_dsn}
- metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
- core_plugin: {get_input: neutron_core_plugin}
- service_plugins: {get_input: neutron_service_plugins}
- type_drivers: {get_input: neutron_type_drivers}
- mechanism_drivers: {get_input: neutron_mechanism_drivers}
- allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover}
- l3_ha: {get_input: neutron_l3_ha}
- ovs:
- local_ip: {get_input: neutron_local_ip}
- tenant_network_type: {get_input: neutron_tenant_network_type}
- tunnel_types: {get_input: neutron_tunnel_types}
- network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
- tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
- vni_ranges: {get_input: neutron_vni_ranges}
- bridge_mappings: {get_input: neutron_bridge_mappings}
- enable_tunneling: {get_input: neutron_enable_tunneling}
- physical_bridge: {get_input: neutron_physical_bridge}
- public_interface: {get_input: neutron_public_interface}
- public_interface_raw_device: {get_input: neutron_public_interface_raw_device}
- service-password: {get_input: neutron_password}
- admin-password: {get_input: admin_password}
- rabbit:
- host: {get_input: rabbit_host}
- username: {get_input: rabbit_username}
- password: {get_input: rabbit_password}
- ntp:
- servers:
- - {server: {get_input: ntp_server}}
-
- NovaComputeDeployment:
- type: OS::TripleO::SoftwareDeployment
- properties:
- signal_transport: NO_SIGNAL
- config: {get_resource: NovaComputeConfig}
- server: {get_resource: NovaCompute}
- input_values:
- debug: {get_param: Debug}
- nova_compute_driver: {get_param: NovaComputeDriver}
- nova_compute_libvirt_type: {get_param: NovaComputeLibvirtType}
- nova_public_ip: {get_param: NovaPublicIP}
- nova_api_host: {get_param: NovaApiHost}
- nova_password: {get_param: NovaPassword}
- ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
- ceilometer_password: {get_param: CeilometerPassword}
- ceilometer_compute_agent: {get_param: CeilometerComputeAgent}
- snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
- snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
- glance_host: {get_param: GlanceHost}
- glance_port: {get_param: GlancePort}
- glance_protocol: {get_param: GlanceProtocol}
- keystone_host: {get_param: KeystonePublicApiVirtualIP}
- neutron_flat_networks: {get_param: NeutronFlatNetworks}
- neutron_host: {get_param: NeutronHost}
- neutron_local_ip: {get_attr: [NovaCompute, networks, ctlplane, 0]}
- neutron_tenant_network_type: {get_param: NeutronNetworkType}
- neutron_tunnel_types: {get_param: NeutronTunnelTypes}
- neutron_tunnel_id_ranges:
- str_replace:
- template: "['RANGES']"
- params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronTunnelIdRanges}
- neutron_vni_ranges:
- str_replace:
- template: "['RANGES']"
- params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronVniRanges}
- neutron_network_vlan_ranges: {get_param: NeutronNetworkVLANRanges}
- neutron_bridge_mappings: {get_param: NeutronBridgeMappings}
- neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
- neutron_physical_bridge: {get_param: NeutronPhysicalBridge}
- neutron_public_interface: {get_param: NeutronPublicInterface}
- neutron_password: {get_param: NeutronPassword}
- neutron_agent_mode: {get_param: NeutronAgentMode}
- neutron_router_distributed: {get_param: NeutronDVR}
- neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
- neutron_core_plugin: {get_param: NeutronCorePlugin}
- neutron_service_plugins:
- str_replace:
- template: "['PLUGINS']"
- params:
- PLUGINS:
- list_join:
- - "','"
- - {get_param: NeutronServicePlugins}
- neutron_type_drivers:
- str_replace:
- template: "['DRIVERS']"
- params:
- DRIVERS:
- list_join:
- - "','"
- - {get_param: NeutronTypeDrivers}
- neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers}
- neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
- neutron_l3_ha: {get_param: NeutronL3HA}
- neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
- admin_password: {get_param: AdminPassword}
- rabbit_host: {get_param: RabbitHost}
- rabbit_username: {get_param: RabbitUserName}
- rabbit_password: {get_param: RabbitPassword}
- ntp_server: {get_param: NtpServer}
-
- NovaComputePassthrough:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config: {get_input: passthrough_config}
-
- NovaComputePassthroughSpecific:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config: {get_input: passthrough_config_specific}
-
- NovaComputePassthroughDeployment:
- depends_on: [NovaComputeDeployment]
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: NovaComputePassthrough}
- server: {get_resource: NovaCompute}
- signal_transport: NO_SIGNAL
- input_values:
- passthrough_config: {get_param: ExtraConfig}
-
- NovaComputePassthroughDeploymentSpecific:
- depends_on: [NovaComputePassthroughDeployment]
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: NovaComputePassthroughSpecific}
- server: {get_resource: NovaCompute}
- signal_transport: NO_SIGNAL
- input_values:
- passthrough_config_specific: {get_param: NovaComputeExtraConfig}
-
-outputs:
- ip_address:
- description: IP address of the server in the ctlplane network
- value: {get_attr: [NovaCompute, networks, ctlplane, 0]}
- internal_api_ip_address:
- description: IP address of the server in the internal_api network
- value: {get_attr: [InternalApiPort, ip_address]}
- storage_ip_address:
- description: IP address of the server in the storage network
- value: {get_attr: [StoragePort, ip_address]}
- tenant_ip_address:
- description: IP address of the server in the tenant network
- value: {get_attr: [TenantPort, ip_address]}
- hostname:
- description: Hostname of the server
- value: {get_attr: [NovaCompute, name]}
- hosts_entry:
- description: >
- Server's IP address and hostname in the /etc/hosts format
- value:
- str_replace:
- template: "IP HOST"
- params:
- IP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
- HOST: {get_attr: [NovaCompute, name]}
- nova_server_resource:
- description: Heat resource handle for the Nova compute server
- value:
- {get_resource: NovaCompute}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value: "None - NO_SIGNAL"
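The neutron_tunnel_id_ranges, neutron_vni_ranges, neutron_service_plugins and neutron_type_drivers inputs in the deleted compute.yaml all use the same str_replace-over-list_join trick to turn a comma_delimited_list into a quoted, bracketed string. A worked reduction with a hypothetical two-element list:

    # NeutronTunnelIdRanges = ["1:1000", "2000:2100"]   (hypothetical value)
    str_replace:
      template: "['RANGES']"
      params:
        RANGES:
          list_join:
            - "','"
            - {get_param: NeutronTunnelIdRanges}
    # list_join yields: 1:1000','2000:2100
    # str_replace then yields: ['1:1000','2000:2100']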
diff --git a/os-apply-config/controller-post.yaml b/os-apply-config/controller-post.yaml
deleted file mode 100644
index aac96357..00000000
--- a/os-apply-config/controller-post.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Controller Post Deployment'
-# NOTE: this is a noop for os-apply-config style deployments because
-# post deployment ordering is controlled by tripleo-image-elements
-
-parameters:
- servers:
- type: json
- NodeConfigIdentifiers:
- type: json
- description: Value which changes if the node configuration may need to be re-applied
-
-resources:
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ExtraConfig:
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: servers}
diff --git a/os-apply-config/controller.yaml b/os-apply-config/controller.yaml
deleted file mode 100644
index f289d9b5..00000000
--- a/os-apply-config/controller.yaml
+++ /dev/null
@@ -1,1213 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
- OpenStack control plane node. Can be wrapped in a ResourceGroup for scaling.
-
-parameters:
- AdminPassword:
- default: unset
- description: The password for the keystone admin account, used for monitoring, querying neutron etc.
- type: string
- hidden: true
- AdminToken:
- default: unset
- description: The keystone auth secret and db password.
- type: string
- hidden: true
- CeilometerBackend:
- default: 'mongodb'
- description: The ceilometer backend type.
- type: string
- CeilometerMeteringSecret:
- default: unset
- description: Secret shared by the ceilometer services.
- type: string
- hidden: true
- CeilometerPassword:
- default: unset
- description: The password for the ceilometer service and db account.
- type: string
- hidden: true
- CinderEnableNfsBackend:
- default: false
- description: Whether to enable or not the NFS backend for Cinder
- type: boolean
- CinderEnableIscsiBackend:
- default: true
- description: Whether to enable or not the Iscsi backend for Cinder
- type: boolean
- CinderEnableRbdBackend:
- default: false
- description: Whether to enable or not the Rbd backend for Cinder
- type: boolean
- CinderISCSIHelper:
- default: tgtadm
- description: The iSCSI helper to use with cinder.
- type: string
- CinderLVMLoopDeviceSize:
- default: 5000
- description: The size of the loopback file used by the cinder LVM driver.
- type: number
- CinderNfsMountOptions:
- default: ''
- description: >
- Mount options for NFS mounts used by Cinder NFS backend. Effective
- when CinderEnableNfsBackend is true.
- type: string
- CinderNfsServers:
- default: ''
- description: >
- NFS servers used by Cinder NFS backend. Effective when
- CinderEnableNfsBackend is true.
- type: comma_delimited_list
- CinderPassword:
- default: unset
- description: The password for the cinder service and db account, used by cinder-api.
- type: string
- hidden: true
- CloudName:
- default: ''
- description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
- type: string
- ControllerExtraConfig:
- default: {}
- description: |
- Controller specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
- ControlVirtualInterface:
- default: 'br-ex'
- description: Interface where virtual ip will be assigned.
- type: string
- Debug:
- default: ''
- description: Set to True to enable debugging on all services.
- type: string
- EnableFencing:
- default: false
- description: Whether to enable fencing in Pacemaker or not.
- type: boolean
- EnableGalera:
- default: true
- description: Whether to use Galera instead of regular MariaDB.
- type: boolean
- EnableCephStorage:
- default: false
- description: Whether to deploy Ceph Storage (OSD) on the Controller
- type: boolean
- EnableSwiftStorage:
- default: true
- description: Whether to enable Swift Storage on the Controller
- type: boolean
- ExtraConfig:
- default: {}
- description: |
- Additional configuration to inject into the cluster. The JSON should have
- the following structure:
- {"FILEKEY":
- {"config":
- [{"section": "SECTIONNAME",
- "values":
- [{"option": "OPTIONNAME",
- "value": "VALUENAME"
- }
- ]
- }
- ]
- }
- }
- For instance:
- {"nova":
- {"config":
- [{"section": "default",
- "values":
- [{"option": "compute_manager",
- "value": "ironic.nova.compute.manager.ClusterComputeManager"
- }
- ]
- },
- {"section": "cells",
- "values":
- [{"option": "driver",
- "value": "nova.cells.rpc_driver.CellsRPCDriver"
- }
- ]
- }
- ]
- }
- }
- type: json
- FencingConfig:
- default: {}
- description: |
- Pacemaker fencing configuration. The JSON should have
- the following structure:
- {
- "devices": [
- {
- "agent": "AGENT_NAME",
- "host_mac": "HOST_MAC_ADDRESS",
- "params": {"PARAM_NAME": "PARAM_VALUE"}
- }
- ]
- }
- For instance:
- {
- "devices": [
- {
- "agent": "fence_xvm",
- "host_mac": "52:54:00:aa:bb:cc",
- "params": {
- "multicast_address": "225.0.0.12",
- "port": "baremetal_0",
- "manage_fw": true,
- "manage_key_file": true,
- "key_file": "/etc/fence_xvm.key",
- "key_file_password": "abcdef"
- }
- }
- ]
- }
- type: json
- Flavor:
- description: Flavor for control nodes to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- GlanceNotifierStrategy:
- description: Strategy to use for Glance notification queue
- type: string
- default: noop
- GlanceLogFile:
- description: The filepath of the file to use for logging messages from Glance.
- type: string
- default: ''
- GlancePassword:
- default: unset
- description: The password for the glance service and db account, used by the glance services.
- type: string
- hidden: true
- GlancePort:
- default: "9292"
- description: Glance port.
- type: string
- GlanceProtocol:
- default: http
- description: Protocol to use when connecting to glance, set to https for SSL.
- type: string
- GlanceBackend:
- default: swift
- description: The short name of the Glance backend to use. Should be one
- of swift, rbd, or file
- type: string
- constraints:
- - allowed_values: ['swift', 'file', 'rbd']
- HeatPassword:
- default: unset
- description: The password for the Heat service and db account, used by the Heat services.
- type: string
- hidden: true
- HeatStackDomainAdminPassword:
- description: Password for heat_domain_admin user.
- type: string
- default: ''
- hidden: true
- HeatAuthEncryptionKey:
- description: Auth encryption key for heat-engine
- type: string
- hidden: true
- HorizonSecret:
- description: Secret key for Django
- type: string
- hidden: true
- Image:
- type: string
- default: overcloud-control
- constraints:
- - custom_constraint: glance.image
- ImageUpdatePolicy:
- default: 'REBUILD_PRESERVE_EPHEMERAL'
- description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
- type: string
- KeyName:
- default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
- type: string
- constraints:
- - custom_constraint: nova.keypair
- KeystoneCACertificate:
- default: ''
- description: Keystone self-signed certificate authority certificate.
- type: string
- KeystoneSigningCertificate:
- default: ''
- description: Keystone certificate for verifying token validity.
- type: string
- KeystoneSigningKey:
- default: ''
- description: Keystone key for signing tokens.
- type: string
- hidden: true
- KeystoneSSLCertificate:
- default: ''
- description: Keystone certificate for verifying token validity.
- type: string
- KeystoneSSLCertificateKey:
- default: ''
- description: Keystone key for signing tokens.
- type: string
- hidden: true
- KeystoneNotificationDriver:
- description: Comma-separated list of Oslo notification drivers used by Keystone
- default: ['messaging']
- type: comma_delimited_list
- KeystoneNotificationFormat:
- description: The Keystone notification format
- default: 'basic'
- type: string
- constraints:
- - allowed_values: [ 'basic', 'cadf' ]
- MysqlClusterUniquePart:
- description: A unique identifier of the MySQL cluster the controller is in.
- type: string
- default: 'unset' # Has to be here because of the ignored empty value bug
- # Drop the validation: https://bugs.launchpad.net/tripleo/+bug/1405446
- # constraints:
- # - length: {min: 4, max: 10}
- MysqlInnodbBufferPoolSize:
- description: >
- Specifies the size of the buffer pool in megabytes. Setting to
- zero should be interpreted as "no value" and will defer to the
- lower level default.
- type: number
- default: 0
- MysqlMaxConnections:
- description: Configures MySQL max_connections config setting
- type: number
- default: 4096
- MysqlRootPassword:
- type: string
- hidden: true
- default: '' # Has to be here because of the ignored empty value bug
- NeutronExternalNetworkBridge:
- description: Name of bridge used for external network traffic.
- type: string
- default: 'br-ex'
- NeutronBridgeMappings:
- description: >
- The OVS logical->physical bridge mappings to use. See the Neutron
- documentation for details. Defaults to mapping br-ex - the external
- bridge on hosts - to a physical name 'datacentre' which can be used
- to create provider networks (and we use this for the default floating
- network) - if changing this either use different post-install network
- scripts or be sure to keep 'datacentre' as a mapping network name.
- type: string
- default: "datacentre:br-ex"
- NeutronDnsmasqOptions:
- default: 'dhcp-option-force=26,1400'
- description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the gre tunnel overhead.
- type: string
- NeutronAgentMode:
- default: 'dvr_snat'
- description: Agent mode for the neutron-l3-agent on the controller hosts
- type: string
- NeutronDVR:
- default: 'False'
- description: Whether to configure Neutron Distributed Virtual Routers
- type: string
- NeutronMetadataProxySharedSecret:
- default: 'unset'
- description: Shared secret to prevent spoofing
- type: string
- hidden: true
- NeutronCorePlugin:
- default: 'ml2'
- description: |
- The core plugin for Neutron. The value should be the entrypoint to be loaded
- from neutron.core_plugins namespace.
- type: string
- NeutronServicePlugins:
- default: "router"
- description: |
- Comma-separated list of service plugin entrypoints to be loaded from the
- neutron.service_plugins namespace.
- type: comma_delimited_list
- NeutronTypeDrivers:
- default: "vxlan,vlan,flat,gre"
- description: |
- Comma-separated list of network type driver entrypoints to be loaded.
- type: comma_delimited_list
- NeutronMechanismDrivers:
- default: 'openvswitch'
- description: |
- The mechanism drivers for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'openvswitch,l2_population'
- type: string
- NeutronAllowL3AgentFailover:
- default: 'True'
- description: Allow automatic l3-agent failover
- type: string
- NeutronL3HA:
- default: 'False'
- description: Whether to enable l3-agent HA
- type: string
- NeutronDhcpAgentsPerNetwork:
- type: number
- default: 3
- description: The number of neutron dhcp agents to schedule per network
- NeutronEnableTunnelling:
- type: string
- default: "True"
- NeutronFlatNetworks:
- type: string
- default: 'datacentre'
- description: If set, flat networks to configure in neutron plugins.
- NeutronNetworkType:
- default: 'vxlan'
- description: The tenant network type for Neutron, either gre or vxlan.
- type: string
- NeutronNetworkVLANRanges:
- default: 'datacentre'
- description: >
- The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
- Neutron documentation for permitted values. Defaults to permitting any
- VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
- type: comma_delimited_list
- NeutronPassword:
- default: unset
- description: The password for the neutron service and db account, used by neutron agents.
- type: string
- hidden: true
- NeutronPublicInterface:
- default: nic1
- description: What interface to bridge onto br-ex for network nodes.
- type: string
- NeutronPublicInterfaceTag:
- default: ''
- description: >
- VLAN tag for creating a public VLAN. The tag will be used to
- create an access port on the exterior bridge for each control plane node,
- and that port will be given the IP address returned by neutron from the
- public network. Set CONTROLEXTRA=overcloud-vlan-port.yaml when compiling
- overcloud.yaml to include the deployment of VLAN ports to the control
- plane.
- type: string
- NeutronPublicInterfaceDefaultRoute:
- default: ''
- description: A custom default route for the NeutronPublicInterface.
- type: string
- NeutronPublicInterfaceIP:
- default: ''
- description: A custom IP address to put onto the NeutronPublicInterface.
- type: string
- NeutronPublicInterfaceRawDevice:
- default: ''
- description: If set, the public interface is a vlan with this device as the raw device.
- type: string
- NeutronTunnelTypes:
- default: 'vxlan'
- description: |
- The tunnel types for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'gre,vxlan'
- type: string
- NeutronTunnelIdRanges:
- description: |
- Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
- of GRE tunnel IDs that are available for tenant network allocation
- default: ["1:1000", ]
- type: comma_delimited_list
- NeutronVniRanges:
- description: |
- Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges
- of VXLAN VNI IDs that are available for tenant network allocation
- default: ["1:1000", ]
- type: comma_delimited_list
- NovaPassword:
- default: unset
- description: The password for the nova service and db account, used by nova-api.
- type: string
- hidden: true
- MongoDbNoJournal:
- default: false
- description: Should MongoDb journaling be disabled
- type: boolean
- NtpServer:
- type: string
- default: ''
- PcsdPassword:
- type: string
- description: The password for the 'pcsd' user.
- hidden: true
- PublicVirtualInterface:
- default: 'br-ex'
- description: >
- Specifies the interface where the public-facing virtual ip will be assigned.
- This should be int_public when a VLAN is being used.
- type: string
- PublicVirtualIP:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- RabbitCookie:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- hidden: true
- RabbitPassword:
- default: guest
- description: The password for RabbitMQ
- type: string
- hidden: true
- RabbitUserName:
- default: guest
- description: The username for RabbitMQ
- type: string
- RabbitClientUseSSL:
- default: false
- description: >
- Rabbit client subscriber parameter to specify
- an SSL connection to the RabbitMQ host.
- type: string
- RabbitClientPort:
- default: 5672
- description: Set rabbit subscriber port, change this if using SSL
- type: number
- RabbitFDLimit:
- default: 16384
- description: Configures RabbitMQ FD limit
- type: string
- RedisVirtualIP:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- SnmpdReadonlyUserName:
- default: ro_snmp_user
- description: The user name for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- SnmpdReadonlyUserPassword:
- default: unset
- description: The user password for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- hidden: true
- SSLCACertificate:
- default: ''
- description: If set, the contents of an SSL certificate authority file.
- type: string
- SSLCertificate:
- default: ''
- description: If set, the contents of an SSL certificate .crt file for encrypting SSL endpoints.
- type: string
- hidden: true
- SSLKey:
- default: ''
- description: If set, the contents of an SSL certificate .key file for encrypting SSL endpoints.
- type: string
- hidden: true
- SwiftHashSuffix:
- default: unset
- description: A random string to be used as a salt when hashing to determine mappings
- in the ring.
- hidden: true
- type: string
- SwiftMountCheck:
- default: 'false'
- description: Value of mount_check in Swift account/container/object -server.conf
- type: boolean
- SwiftMinPartHours:
- type: number
- default: 1
- description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance.
- SwiftPartPower:
- default: 10
- description: Partition Power to use when building Swift rings
- type: number
- SwiftPassword:
- default: unset
- description: The password for the swift service account, used by the swift proxy
- services.
- hidden: true
- type: string
- SwiftReplicas:
- type: number
- default: 3
- description: How many replicas to use in the swift rings.
- VirtualIP:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
- HeatApiVirtualIP:
- type: string
- default: ''
- GlanceApiVirtualIP:
- type: string
- default: ''
- MysqlVirtualIP:
- type: string
- default: ''
- KeystoneAdminApiVirtualIP:
- type: string
- default: ''
- KeystonePublicApiVirtualIP:
- type: string
- default: ''
- NeutronApiVirtualIP:
- type: string
- default: ''
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry.
- type: json
- UpdateIdentifier:
- default: ''
- type: string
- description: >
- Setting to a previously unused value during stack-update will trigger
- package update on all nodes
- Hostname:
- type: string
- default: '' # Defaults to Heat created hostname
-
-resources:
-
- Controller:
- type: OS::Nova::Server
- properties:
- image: {get_param: Image}
- image_update_policy: {get_param: ImageUpdatePolicy}
- flavor: {get_param: Flavor}
- key_name: {get_param: KeyName}
- networks:
- - network: ctlplane
- user_data_format: SOFTWARE_CONFIG
- user_data: {get_resource: NodeUserData}
- name: {get_param: Hostname}
-
- NodeUserData:
- type: OS::TripleO::NodeUserData
-
- ExternalPort:
- type: OS::TripleO::Controller::Ports::ExternalPort
- properties:
- ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
-
- InternalApiPort:
- type: OS::TripleO::Controller::Ports::InternalApiPort
- properties:
- ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
-
- StoragePort:
- type: OS::TripleO::Controller::Ports::StoragePort
- properties:
- ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
-
- StorageMgmtPort:
- type: OS::TripleO::Controller::Ports::StorageMgmtPort
- properties:
- ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
-
- TenantPort:
- type: OS::TripleO::Controller::Ports::TenantPort
- properties:
- ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
-
- NetworkConfig:
- type: OS::TripleO::Controller::Net::SoftwareConfig
- properties:
- ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]}
- ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
- InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
- StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
- StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
- TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
-
- NetworkDeployment:
- type: OS::TripleO::SoftwareDeployment
- properties:
- signal_transport: NO_SIGNAL
- config: {get_resource: NetworkConfig}
- server: {get_resource: Controller}
- input_values:
- bridge_name: br-ex
- interface_name: {get_param: NeutronPublicInterface}
-
- ControllerPassthroughConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config: {get_input: passthrough_config}
-
- ControllerPassthroughConfigSpecific:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config: {get_input: passthrough_config_specific}
-
- ControllerConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- admin-password: {get_input: admin_password}
- admin-token: {get_input: admin_token}
- bootstack:
- public_interface_ip: {get_input: neutron_public_interface_ip}
- bootstrap_host:
- nodeid: {get_input: bootstack_nodeid}
- cinder:
- db: {get_input: cinder_dsn}
- debug: {get_input: debug}
- volume_size_mb: {get_input: cinder_lvm_loop_device_size}
- service-password: {get_input: cinder_password}
- iscsi-helper: {get_input: CinderISCSIHelper}
- controller-address: {get_input: controller_host}
- corosync:
- bindnetaddr: {get_input: controller_host}
- mcastport: 5577
- pacemaker:
- stonith_enabled : false
- recheck_interval : 5
- quorum_policy : ignore
- db-password: unset
- glance:
- registry:
- host: {get_input: controller_virtual_ip}
- backend: swift
- db: {get_input: glance_dsn}
- debug: {get_input: debug}
- host: {get_input: controller_virtual_ip}
- port: {get_input: glance_port}
- protocol: {get_input: glance_protocol}
- service-password: {get_input: glance_password}
- swift-store-user: service:glance
- swift-store-key: {get_input: glance_password}
- notifier-strategy: {get_input: glance_notifier_strategy}
- log-file: {get_input: glance_log_file}
- heat:
- admin_password: {get_input: heat_password}
- admin_tenant_name: service
- admin_user: heat
- auth_encryption_key: {get_input: heat_auth_encryption_key}
- db: {get_input: heat_dsn}
- debug: {get_input: debug}
- stack_domain_admin_password: {get_input: heat_stack_domain_admin_password}
- watch_server_url: {get_input: heat.watch_server_url}
- metadata_server_url: {get_input: heat.metadata_server_url}
- waitcondition_server_url: {get_input: heat.waitcondition_server_url}
- keystone:
- db: {get_input: keystone_dsn}
- debug: {get_input: debug}
- host: {get_input: controller_virtual_ip}
- ca_certificate: {get_input: keystone_ca_certificate}
- signing_key: {get_input: keystone_signing_key}
- signing_certificate: {get_input: keystone_signing_certificate}
- ssl:
- certificate: {get_input: keystone_ssl_certificate}
- certificate_key: {get_input: keystone_ssl_certificate_key}
- mysql:
- innodb_buffer_pool_size: {get_input: mysql_innodb_buffer_pool_size}
- local_bind: true
- root-password: {get_input: mysql_root_password}
- cluster_name: {get_input: mysql_cluster_name}
- neutron:
- debug: {get_input: debug}
- flat-networks: {get_input: neutron_flat_networks}
- host: {get_input: controller_virtual_ip}
- metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
- agent_mode: {get_input: neutron_agent_mode}
- router_distributed: {get_input: neutron_router_distributed}
- core_plugin: {get_input: neutron_core_plugin}
- service_plugins: {get_input: neutron_service_plugins}
- type_drivers: {get_input: neutron_type_drivers}
- mechanism_drivers: {get_input: neutron_mechanism_drivers}
- allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover}
- l3_ha: {get_input: neutron_l3_ha}
- dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network}
- ovs:
- enable_tunneling: {get_input: neutron_enable_tunneling}
- local_ip: {get_input: controller_host}
- network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
- bridge_mappings: {get_input: neutron_bridge_mappings}
- public_interface: {get_input: neutron_public_interface}
- public_interface_raw_device: {get_input: neutron_public_interface_raw_device}
- public_interface_route: {get_input: neutron_public_interface_default_route}
- public_interface_tag: {get_input: neutron_public_interface_tag}
- physical_bridge: br-ex
- tenant_network_type: {get_input: neutron_tenant_network_type}
- tunnel_types: {get_input: neutron_tunnel_types}
- tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
- vni_ranges: {get_input: neutron_vni_ranges}
- ovs_db: {get_input: neutron_dsn}
- service-password: {get_input: neutron_password}
- dnsmasq-options: {get_input: neutron_dnsmasq_options}
- ceilometer:
- db: {get_input: ceilometer_dsn}
- debug: {get_input: debug}
- metering_secret: {get_input: ceilometer_metering_secret}
- service-password: {get_input: ceilometer_password}
- snmpd:
- export_MIB: UCD-SNMP-MIB
- readonly_user_name: {get_input: snmpd_readonly_user_name}
- readonly_user_password: {get_input: snmpd_readonly_user_password}
- nova:
- compute_driver: libvirt.LibvirtDriver
- db: {get_input: nova_dsn}
- default_floating_pool:
- ext-net
- host: {get_input: controller_virtual_ip}
- metadata-proxy: true
- service-password: {get_input: nova_password}
- mongodb:
- nojournal: {get_input: mongodb_no_journal}
- rabbit:
- host: {get_input: controller_virtual_ip}
- username: {get_input: rabbit_username}
- password: {get_input: rabbit_password}
- cookie: {get_input: rabbit_cookie}
- rabbit_client_use_ssl: {get_input: rabbit_client_use_ssl}
- rabbit_port: {get_input: rabbit_client_port}
- ntp:
- servers:
- - {server: {get_input: ntp_server}}
- virtual_interfaces:
- instances:
- - vrrp_instance_name: VI_CONTROL
- virtual_router_id: 51
- keepalive_interface: {get_input: control_virtual_interface}
- priority: 101
- virtual_ips:
- - ip: {get_input: controller_virtual_ip}
- interface: {get_input: control_virtual_interface}
- - vrrp_instance_name: VI_PUBLIC
- virtual_router_id: 52
- keepalive_interface: {get_input: public_virtual_interface}
- priority: 101
- virtual_ips:
- - ip: {get_input: public_virtual_ip}
- interface: {get_input: public_virtual_interface}
- vrrp_sync_groups:
- - name: VG1
- members:
- - VI_CONTROL
- - VI_PUBLIC
- keepalived:
- keepalive_interface: {get_input: public_virtual_interface}
- priority: 101
- virtual_ips:
- -
- ip: {get_input: controller_virtual_ip}
- interface: {get_input: control_virtual_interface}
- -
- ip: {get_input: public_virtual_ip}
- interface: {get_input: public_virtual_interface}
- haproxy:
- net_binds:
- - ip: {get_input: controller_virtual_ip}
- options:
- - option httpchk GET /
- services:
- - name: keystone_admin
- port: 35357
- net_binds: &public_binds
- - ip: {get_input: controller_virtual_ip}
- - ip: {get_input: public_virtual_ip}
- - name: keystone_public
- port: 5000
- net_binds: *public_binds
- - name: horizon
- port: 80
- net_binds: *public_binds
- - name: neutron
- port: 9696
- net_binds: *public_binds
- - name: cinder
- port: 8776
- net_binds: *public_binds
- - name: glance_api
- port: 9292
- net_binds: *public_binds
- - name: glance_registry
- port: 9191
- net_binds: *public_binds
- options: # overwrite options as glance_registry needs auth for http requests
- - name: heat_api
- port: 8004
- net_binds: *public_binds
- - name: heat_cloudwatch
- port: 8003
- net_binds: *public_binds
- - name: heat_cfn
- port: 8000
- net_binds: *public_binds
- - name: mysql
- port: 3306
- extra_server_params:
- - backup
- options:
- - timeout client 0
- - timeout server 0
- - name: nova_ec2
- port: 8773
- - name: nova_osapi
- port: 8774
- net_binds: *public_binds
- - name: nova_metadata
- port: 8775
- net_binds: *public_binds
- - name: nova_novncproxy
- port: 6080
- net_binds: *public_binds
- - name: ceilometer
- port: 8777
- net_binds: *public_binds
- options: # overwrite options as ceilometer needs auth for http requests
- - name: swift_proxy_server
- port: 8080
- net_binds: *public_binds
- options:
- - option httpchk GET /info
- - name: rabbitmq
- port: 5672
- options:
- - timeout client 0
- - timeout server 0
- - maxconn 1500
-
- ControllerDeployment:
- type: OS::TripleO::SoftwareDeployment
- properties:
- signal_transport: NO_SIGNAL
- config: {get_resource: ControllerConfig}
- server: {get_resource: Controller}
- input_values:
- bootstack_nodeid: {get_attr: [Controller, name]}
- controller_host: {get_attr: [Controller, networks, ctlplane, 0]}
- controller_virtual_ip: {get_param: VirtualIP}
- neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
- heat.watch_server_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: VirtualIP}
- - ':8003'
- heat.metadata_server_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: VirtualIP}
- - ':8000'
- heat.waitcondition_server_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: VirtualIP}
- - ':8000/v1/waitcondition'
- admin_password: {get_param: AdminPassword}
- admin_token: {get_param: AdminToken}
- neutron_public_interface_ip: {get_param: NeutronPublicInterfaceIP}
- debug: {get_param: Debug}
- cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
- cinder_password: {get_param: CinderPassword}
- cinder_iscsi_helper: {get_param: CinderISCSIHelper}
- cinder_dsn:
- list_join:
- - ''
- - - 'mysql://cinder:'
- - {get_param: CinderPassword}
- - '@'
- - {get_param: VirtualIP}
- - '/cinder'
- glance_port: {get_param: GlancePort}
- glance_protocol: {get_param: GlanceProtocol}
- glance_password: {get_param: GlancePassword}
- glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
- glance_log_file: {get_param: GlanceLogFile}
- glance_dsn:
- list_join:
- - ''
- - - 'mysql://glance:'
- - {get_param: GlancePassword}
- - '@'
- - {get_param: VirtualIP}
- - '/glance'
- heat_password: {get_param: HeatPassword}
- heat_stack_domain_admin_password: {get_param: HeatStackDomainAdminPassword}
- heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey}
- heat_dsn:
- list_join:
- - ''
- - - 'mysql://heat:'
- - {get_param: HeatPassword}
- - '@'
- - {get_param: VirtualIP}
- - '/heat'
- keystone_ca_certificate: {get_param: KeystoneCACertificate}
- keystone_signing_key: {get_param: KeystoneSigningKey}
- keystone_signing_certificate: {get_param: KeystoneSigningCertificate}
- keystone_ssl_certificate: {get_param: KeystoneSSLCertificate}
- keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
- keystone_dsn:
- list_join:
- - ''
- - - 'mysql://keystone:'
- - {get_param: AdminToken}
- - '@'
- - {get_param: VirtualIP}
- - '/keystone'
- mongodb_no_journal: {get_param: MongoDbNoJournal}
- mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize}
- mysql_root_password: {get_param: MysqlRootPassword}
- mysql_cluster_name:
- str_replace:
- template: tripleo-CLUSTER
- params:
- CLUSTER: {get_param: MysqlClusterUniquePart}
- neutron_flat_networks: {get_param: NeutronFlatNetworks}
- neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
- neutron_agent_mode: {get_param: NeutronAgentMode}
- neutron_router_distributed: {get_param: NeutronDVR}
- neutron_core_plugin: {get_param: NeutronCorePlugin}
- neutron_service_plugins:
- str_replace:
- template: "['PLUGINS']"
- params:
- PLUGINS:
- list_join:
- - "','"
- - {get_param: NeutronServicePlugins}
- neutron_type_drivers:
- str_replace:
- template: "['DRIVERS']"
- params:
- DRIVERS:
- list_join:
- - "','"
- - {get_param: NeutronTypeDrivers}
- neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers}
- neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
- neutron_l3_ha: {get_param: NeutronL3HA}
- neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
- neutron_network_vlan_ranges: {get_param: NeutronNetworkVLANRanges}
- neutron_bridge_mappings: {get_param: NeutronBridgeMappings}
- neutron_public_interface: {get_param: NeutronPublicInterface}
- neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
- neutron_public_interface_default_route: {get_param: NeutronPublicInterfaceDefaultRoute}
- neutron_public_interface_tag: {get_param: NeutronPublicInterfaceTag}
- neutron_tenant_network_type: {get_param: NeutronNetworkType}
- neutron_tunnel_types: {get_param: NeutronTunnelTypes}
- neutron_tunnel_id_ranges:
- str_replace:
- template: "['RANGES']"
- params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronTunnelIdRanges}
- neutron_vni_ranges:
- str_replace:
- template: "['RANGES']"
- params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronVniRanges}
- neutron_password: {get_param: NeutronPassword}
- neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions}
- neutron_dsn:
- list_join:
- - ''
- - - 'mysql://neutron:'
- - {get_param: NeutronPassword}
- - '@'
- - {get_param: VirtualIP}
- - '/ovs_neutron?charset=utf8'
- ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
- ceilometer_password: {get_param: CeilometerPassword}
- ceilometer_dsn:
- list_join:
- - ''
- - - 'mysql://ceilometer:'
- - {get_param: CeilometerPassword}
- - '@'
- - {get_param: VirtualIP}
- - '/ceilometer'
- snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
- snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
- nova_password: {get_param: NovaPassword}
- nova_dsn:
- list_join:
- - ''
- - - 'mysql://nova:'
- - {get_param: NovaPassword}
- - '@'
- - {get_param: VirtualIP}
- - '/nova'
- rabbit_username: {get_param: RabbitUserName}
- rabbit_password: {get_param: RabbitPassword}
- rabbit_cookie: {get_param: RabbitCookie}
- rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
- rabbit_client_port: {get_param: RabbitClientPort}
- ntp_server: {get_param: NtpServer}
- control_virtual_interface: {get_param: ControlVirtualInterface}
- public_virtual_interface: {get_param: PublicVirtualInterface}
- public_virtual_ip: {get_param: PublicVirtualIP}
-
- SSLConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- ssl:
- ca_certificate: {get_input: ssl_ca_certificate}
- stunnel:
- cert: {get_input: ssl_certificate}
- key: {get_input: ssl_key}
- cacert: {get_input: ssl_ca_certificate}
- ports:
- - name: 'ec2'
- accept: 13773
- connect: 8773
- connect_host: {get_input: controller_host}
- - name: 'image'
- accept: 13292
- connect: 9292
- connect_host: {get_input: controller_host}
- - name: 'identity'
- accept: 13000
- connect: 5000
- connect_host: {get_input: controller_host}
- - name: 'network'
- accept: 13696
- connect: 9696
- connect_host: {get_input: controller_host}
- - name: 'compute'
- accept: 13774
- connect: 8774
- connect_host: {get_input: controller_host}
- - name: 'swift-proxy'
- accept: 13080
- connect: 8080
- connect_host: {get_input: controller_host}
- - name: 'cinder'
- accept: 13776
- connect: 8776
- connect_host: {get_input: controller_host}
- - name: 'ceilometer'
- accept: 13777
- connect: 8777
- connect_host: {get_input: controller_host}
-
- ControllerSSLDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: SSLConfig}
- server: {get_resource: Controller}
- signal_transport: NO_SIGNAL
- input_values:
- controller_host: {get_attr: [Controller, networks, ctlplane, 0]}
- ssl_certificate: {get_param: SSLCertificate}
- ssl_key: {get_param: SSLKey}
- ssl_ca_certificate: {get_param: SSLCACertificate}
-
- ControllerPassthroughDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: ControllerPassthroughConfig}
- server: {get_resource: Controller}
- signal_transport: NO_SIGNAL
- input_values:
- passthrough_config: {get_param: ExtraConfig}
-
- ControllerPassthroughSpecificDeployment:
- depends_on: [ControllerPassthroughDeployment]
- type: OS::Heat::StructuredDeployment
- properties:
- config: {get_resource: ControllerPassthroughConfigSpecific}
- server: {get_resource: Controller}
- signal_transport: NO_SIGNAL
- input_values:
- passthrough_config_specific: {get_param: ControllerExtraConfig}
-
- SwiftConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- swift:
- hash: { get_input: swift_hash_suffix }
- part-power: { get_input: swift_part_power }
- mount-check: { get_input: swift_mount_check }
- min-part-hours: { get_input: swift_min_part_hours }
- replicas: {get_input: swift_replicas }
- service-password: { get_input: swift_password }
-
- SwiftStorageDeploy:
- type: OS::Heat::StructuredDeployment
- properties:
- server: {get_resource: Controller}
- config: {get_resource: SwiftConfig}
- signal_transport: NO_SIGNAL
- input_values:
- swift_hash_suffix: {get_param: SwiftHashSuffix}
- swift_mount_check: {get_param: SwiftMountCheck}
- swift_password: {get_param: SwiftPassword}
- swift_min_part_hours: {get_param: SwiftMinPartHours}
- swift_part_power: {get_param: SwiftPartPower}
- swift_replicas: { get_param: SwiftReplicas}
-
-outputs:
- ip_address:
- description: IP address of the server in the ctlplane network
- value: {get_attr: [Controller, networks, ctlplane, 0]}
- external_ip_address:
- description: IP address of the server in the external network
- value: {get_attr: [ExternalPort, ip_address]}
- internal_api_ip_address:
- description: IP address of the server in the internal_api network
- value: {get_attr: [InternalApiPort, ip_address]}
- storage_ip_address:
- description: IP address of the server in the storage network
- value: {get_attr: [StoragePort, ip_address]}
- storage_mgmt_ip_address:
- description: IP address of the server in the storage_mgmt network
- value: {get_attr: [StorageMgmtPort, ip_address]}
- tenant_ip_address:
- description: IP address of the server in the tenant network
- value: {get_attr: [TenantPort, ip_address]}
- hostname:
- description: Hostname of the server
- value: {get_attr: [Controller, name]}
- corosync_node:
- description: >
- Node object in the {ip: ..., name: ...} format that the corosync
- element expects
- value:
- ip: {get_attr: [Controller, networks, ctlplane, 0]}
- name: {get_attr: [Controller, name]}
- hosts_entry:
- description: >
- Server's IP address and hostname in the /etc/hosts format
- value:
- str_replace:
- template: IP HOST CLOUDNAME
- params:
- IP: {get_attr: [Controller, networks, ctlplane, 0]}
- HOST: {get_attr: [Controller, name]}
- CLOUDNAME: {get_param: CloudName}
- nova_server_resource:
- description: Heat resource handle for the Nova compute server
- value:
- {get_resource: Controller}
- swift_device:
- description: Swift device formatted for swift-ring-builder
- value:
- str_replace:
- template: 'r1z1-IP:%PORT%/d1'
- params:
- IP: {get_attr: [Controller, networks, ctlplane, 0]}
- swift_proxy_memcache:
- description: Swift proxy-memcache value
- value:
- str_replace:
- template: "IP:11211"
- params:
- IP: {get_attr: [Controller, networks, ctlplane, 0]}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value: "None - NO_SIGNAL"
diff --git a/os-apply-config/swift-devices-and-proxy-config.yaml b/os-apply-config/swift-devices-and-proxy-config.yaml
deleted file mode 100644
index 4f01dbea..00000000
--- a/os-apply-config/swift-devices-and-proxy-config.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Swift Devices and Proxy Config'
-
-parameters:
- controller_swift_devices:
- type: comma_delimited_list
- object_store_swift_devices:
- type: comma_delimited_list
- controller_swift_proxy_memcaches:
- type: comma_delimited_list
-
-resources:
-
- SwiftDevicesAndProxyConfigImpl:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- swift:
- devices:
- list_join:
- - ", "
- - - list_join:
- - ", "
- - {get_param: controller_swift_devices}
- - list_join:
- - ", "
- - {get_param: object_store_swift_devices}
- proxy-memcache:
- list_join:
- - ","
- - {get_param: controller_swift_proxy_memcaches}
-
-outputs:
- config_id:
- description: The ID of the SwiftDevicesAndProxyConfigImpl resource.
- value:
- {get_resource: SwiftDevicesAndProxyConfigImpl}
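The nested list_join above first joins each comma_delimited_list into a ", "-separated string and then joins the two results, so the Swift rings see one flat device list. With illustrative inputs (the real values come from the swift_device outputs of the node templates) the rendered config would be roughly:

    controller_swift_devices:   ["r1z1-192.0.2.10:%PORT%/d1"]
    object_store_swift_devices: ["r1z1-192.0.2.20:%PORT%/d1"]
    # rendered swift.devices value:
    #   "r1z1-192.0.2.10:%PORT%/d1, r1z1-192.0.2.20:%PORT%/d1"
    # proxy-memcache is joined with "," in the same way.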
diff --git a/os-apply-config/swift-storage-post.yaml b/os-apply-config/swift-storage-post.yaml
deleted file mode 100644
index 1b1c406d..00000000
--- a/os-apply-config/swift-storage-post.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Swift Storage Post Deployment'
-# NOTE: this is a noop for os-apply-config style deployments because
-# post deployment ordering is controlled by tripleo-image-elements
-
-parameters:
- servers:
- type: json
- NodeConfigIdentifiers:
- type: json
- description: Value which changes if the node configuration may need to be re-applied
-
-resources:
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ExtraConfig:
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: servers}
-
diff --git a/os-apply-config/swift-storage.yaml b/os-apply-config/swift-storage.yaml
deleted file mode 100644
index d62d7d1a..00000000
--- a/os-apply-config/swift-storage.yaml
+++ /dev/null
@@ -1,209 +0,0 @@
-heat_template_version: 2015-04-30
-description: 'Common Swift Storage Configuration'
-parameters:
- ExtraConfig:
- default: {}
- description: |
- Additional configuration to inject into the cluster. The JSON should have
- the following structure:
- {"FILEKEY":
- {"config":
- [{"section": "SECTIONNAME",
- "values":
- [{"option": "OPTIONNAME",
- "value": "VALUENAME"
- }
- ]
- }
- ]
- }
- }
- For instance:
- {"nova":
- {"config":
- [{"section": "default",
- "values":
- [{"option": "force_config_drive",
- "value": "always"
- }
- ]
- },
- {"section": "cells",
- "values":
- [{"option": "driver",
- "value": "nova.cells.rpc_driver.CellsRPCDriver"
- }
- ]
- }
- ]
- }
- }
- type: json
- ObjectStorageExtraConfig:
- default: {}
- description: |
- Role specific additional configuration to inject into the cluster.
- type: json
- Flavor:
- description: Flavor for Swift storage nodes to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- HashSuffix:
- default: unset
- description: A random string to be used as a salt when hashing to determine mappings
- in the ring.
- hidden: true
- type: string
- Image:
- default: overcloud-swift-storage
- type: string
- KeyName:
- default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
- type: string
- MountCheck:
- default: 'false'
- description: Value of mount_check in the Swift account-, container- and object-server.conf files
- type: boolean
- MinPartHours:
- type: number
- default: 1
- description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance.
- PartPower:
- default: 10
- description: Partition Power to use when building Swift rings
- type: number
- Replicas:
- type: number
- default: 3
- description: How many replicas to use in the swift rings.
- SnmpdReadonlyUserName:
- default: ro_snmp_user
- description: The user name for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- SnmpdReadonlyUserPassword:
- default: unset
- description: The user password for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- hidden: true
- UpdateIdentifier:
- default: ''
- type: string
- description: >
- Setting to a previously unused value during stack-update will trigger
- package update on all nodes
- Hostname:
- type: string
- default: '' # Defaults to Heat created hostname
-
-resources:
- SwiftConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- snmpd:
- export_MIB: UCD-SNMP-MIB
- readonly_user_name: {get_input: snmpd_readonly_user_name}
- readonly_user_password: {get_input: snmpd_readonly_user_password}
- swift:
- hash: { get_input: swift_hash_suffix }
- part-power: { get_input: swift_part_power }
- min-part-hours: { get_input: swift_min_part_hours }
- mount-check: { get_input: swift_mount_check }
- replicas: {get_input: swift_replicas }
- neutron:
- ovs:
- local_ip: { get_input: neutron_local_ip }
- SwiftStorage:
- type: OS::Nova::Server
- properties:
- image: {get_param: Image}
- flavor: {get_param: Flavor}
- key_name: {get_param: KeyName}
- networks:
- - network: ctlplane
- user_data_format: SOFTWARE_CONFIG
- user_data: {get_resource: NodeUserData}
- name: {get_param: Hostname}
-
- NodeUserData:
- type: OS::TripleO::NodeUserData
-
- InternalApiPort:
- type: OS::TripleO::SwiftStorage::Ports::InternalApiPort
- properties:
- ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
-
- StoragePort:
- type: OS::TripleO::SwiftStorage::Ports::StoragePort
- properties:
- ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
-
- StorageMgmtPort:
- type: OS::TripleO::SwiftStorage::Ports::StorageMgmtPort
- properties:
- ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
-
- NetworkConfig:
- type: OS::TripleO::ObjectStorage::Net::SoftwareConfig
- properties:
- ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
- InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
- StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
- StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
-
- NetworkDeployment:
- type: OS::TripleO::SoftwareDeployment
- properties:
- config: {get_resource: NetworkConfig}
- server: {get_resource: SwiftStorage}
-
- SwiftStorageDeploy:
- type: OS::Heat::StructuredDeployment
- properties:
- server: {get_resource: SwiftStorage}
- config: {get_resource: SwiftConfig}
- signal_transport: NO_SIGNAL
- input_values:
- neutron_local_ip: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
- snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
- snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
- swift_hash_suffix: {get_param: HashSuffix}
- swift_mount_check: {get_param: MountCheck}
- swift_min_part_hours: {get_param: MinPartHours}
- swift_part_power: {get_param: PartPower}
- swift_replicas: { get_param: Replicas}
-
-outputs:
- hosts_entry:
- value:
- str_replace:
- template: "IP HOST"
- params:
- IP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
- HOST: {get_attr: [SwiftStorage, name]}
- nova_server_resource:
- description: Heat resource handle for the swift storage server
- value:
- {get_resource: SwiftStorage}
- swift_device:
- description: Swift device formatted for swift-ring-builder
- value:
- str_replace:
- template: 'r1z1-IP:%PORT%/d1'
- params:
- IP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
- internal_api_ip_address:
- description: IP address of the server in the internal_api network
- value: {get_attr: [InternalApiPort, ip_address]}
- storage_ip_address:
- description: IP address of the server in the storage network
- value: {get_attr: [StoragePort, ip_address]}
- storage_mgmt_ip_address:
- description: IP address of the server in the storage_mgmt network
- value: {get_attr: [StorageMgmtPort, ip_address]}
- config_identifier:
- description: identifier which changes if the node configuration may need re-applying
- value: "None - NO_SIGNAL"
diff --git a/os-apply-config/vip-config.yaml b/os-apply-config/vip-config.yaml
deleted file mode 100644
index 8f984ab7..00000000
--- a/os-apply-config/vip-config.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
- Configure hieradata for service -> virtual IP mappings.
-
-resources:
- VipConfigImpl:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- # by default does nothing
-
-outputs:
- OS::stack_id:
- description: The VipConfigImpl resource.
- value: {get_resource: VipConfigImpl}
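The OS::stack_id output above is the standard Heat idiom for provider (nested-stack) resources: when a registry entry maps a type to this file, get_resource on that facade resource returns VipConfigImpl's ID rather than the nested stack's own ID, so the wrapper can be used anywhere a config ID is expected. A minimal sketch of the same idiom with hypothetical names:

    # my-config.yaml (hypothetical provider template)
    heat_template_version: 2015-04-30
    resources:
      InnerConfig:
        type: OS::Heat::StructuredConfig
        properties:
          group: os-apply-config
          config: {}
    outputs:
      OS::stack_id:
        value: {get_resource: InnerConfig}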
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index 7e65d4b1..888a3c89 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.yaml
@@ -21,7 +21,11 @@ resource_registry:
OS::TripleO::CephClusterConfig::SoftwareConfig: puppet/ceph-cluster-config.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
OS::TripleO::BootstrapNode::SoftwareConfig: puppet/bootstrap-config.yaml
+
+ # Tasks (for internal TripleO usage)
OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
+ OS::TripleO::Tasks::ControllerPrePuppet: extraconfig/tasks/noop.yaml
+ OS::TripleO::Tasks::ControllerPostPuppet: extraconfig/tasks/noop.yaml
# This creates the "heat-admin" user for all OS images by default
# To disable, replace with firstboot/userdata_default.yaml
@@ -33,6 +37,8 @@ resource_registry:
# NodeExtraConfig == All nodes configuration pre service deployment
# NodeExtraConfigPost == All nodes configuration post service deployment
OS::TripleO::NodeUserData: firstboot/userdata_default.yaml
+ OS::TripleO::NodeTLSCAData: puppet/extraconfig/tls/no-ca.yaml
+ OS::TripleO::NodeTLSData: puppet/extraconfig/tls/no-tls.yaml
OS::TripleO::ControllerExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
OS::TripleO::ComputeExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
OS::TripleO::CephStorageExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
@@ -55,39 +61,63 @@ resource_registry:
OS::TripleO::Network::StorageMgmt: network/noop.yaml
OS::TripleO::Network::Storage: network/noop.yaml
OS::TripleO::Network::Tenant: network/noop.yaml
+ OS::TripleO::Network::Management: network/noop.yaml
+ OS::TripleO::Network::Ports::NetVipMap: network/ports/net_ip_map.yaml
OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml
OS::TripleO::Network::Ports::NetIpSubnetMap: network/ports/net_ip_subnet_map.yaml
OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml
+ # Port assignments for the VIPs
+ OS::TripleO::Network::Ports::ExternalVipPort: network/ports/noop.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: network/ports/noop.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: network/ports/noop.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: network/ports/noop.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
+
# Port assignments for the controller role
OS::TripleO::Controller::Ports::ExternalPort: network/ports/noop.yaml
OS::TripleO::Controller::Ports::InternalApiPort: network/ports/noop.yaml
OS::TripleO::Controller::Ports::StoragePort: network/ports/noop.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: network/ports/noop.yaml
OS::TripleO::Controller::Ports::TenantPort: network/ports/noop.yaml
+ OS::TripleO::Controller::Ports::ManagementPort: network/ports/noop.yaml
# Port assignments for the compute role
+ OS::TripleO::Compute::Ports::ExternalPort: network/ports/noop.yaml
OS::TripleO::Compute::Ports::InternalApiPort: network/ports/noop.yaml
OS::TripleO::Compute::Ports::StoragePort: network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::StorageMgmtPort: network/ports/noop.yaml
OS::TripleO::Compute::Ports::TenantPort: network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::ManagementPort: network/ports/noop.yaml
# Port assignments for the ceph storage role
+ OS::TripleO::CephStorage::Ports::ExternalPort: network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::InternalApiPort: network/ports/noop.yaml
OS::TripleO::CephStorage::Ports::StoragePort: network/ports/noop.yaml
OS::TripleO::CephStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::TenantPort: network/ports/noop.yaml
+ OS::TripleO::CephStorage::Ports::ManagementPort: network/ports/noop.yaml
# Port assignments for the swift storage role
+ OS::TripleO::SwiftStorage::Ports::ExternalPort: network/ports/noop.yaml
OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml
OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml
OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
+ OS::TripleO::SwiftStorage::Ports::TenantPort: network/ports/noop.yaml
+ OS::TripleO::SwiftStorage::Ports::ManagementPort: network/ports/noop.yaml
# Port assignments for the block storage role
+ OS::TripleO::BlockStorage::Ports::ExternalPort: network/ports/noop.yaml
OS::TripleO::BlockStorage::Ports::InternalApiPort: network/ports/noop.yaml
OS::TripleO::BlockStorage::Ports::StoragePort: network/ports/noop.yaml
OS::TripleO::BlockStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
+ OS::TripleO::BlockStorage::Ports::TenantPort: network/ports/noop.yaml
+ OS::TripleO::BlockStorage::Ports::ManagementPort: network/ports/noop.yaml
- # Port assignments for service virtual IPs for the controller role
- OS::TripleO::Controller::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
+ # Service Endpoint Mappings
+ OS::TripleO::Endpoint: network/endpoints/endpoint.yaml
+ OS::TripleO::EndpointMap: network/endpoints/endpoint_map.yaml
# validation resources
OS::TripleO::AllNodes::Validation: all-nodes-validation.yaml
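The additions above follow the usual TripleO pattern: every abstract OS::TripleO::* type gets a noop or default implementation in this registry, and an environment file passed at deploy time swaps in a concrete one. A minimal sketch of such an override (paths illustrative; the shipped environments/ files work the same way):

    # my-overrides.yaml, passed with `-e my-overrides.yaml`
    resource_registry:
      OS::TripleO::Compute::Ports::TenantPort: network/ports/tenant.yaml
      OS::TripleO::NodeTLSData: puppet/extraconfig/tls/tls-cert-inject.yaml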
diff --git a/overcloud-resource-registry.yaml b/overcloud-resource-registry.yaml
deleted file mode 100644
index d6eb97f9..00000000
--- a/overcloud-resource-registry.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-resource_registry:
- OS::TripleO::BlockStorage: os-apply-config/cinder-storage.yaml
- OS::TripleO::BlockStorage::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::Compute: os-apply-config/compute.yaml
- OS::TripleO::Compute::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment
- OS::TripleO::Controller: os-apply-config/controller.yaml
- OS::TripleO::Controller::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::ObjectStorage: os-apply-config/swift-storage.yaml
- OS::TripleO::ObjectStorage::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::CephStorage: os-apply-config/ceph-storage.yaml
- OS::TripleO::CephStorage::Net::SoftwareConfig: net-config-noop.yaml
- OS::TripleO::ControllerPostDeployment: os-apply-config/controller-post.yaml
- OS::TripleO::ComputePostDeployment: os-apply-config/compute-post.yaml
- OS::TripleO::ObjectStoragePostDeployment: os-apply-config/swift-storage-post.yaml
- OS::TripleO::BlockStoragePostDeployment: os-apply-config/cinder-storage-post.yaml
- OS::TripleO::CephStoragePostDeployment: os-apply-config/ceph-storage-post.yaml
- OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig: os-apply-config/swift-devices-and-proxy-config.yaml
- OS::TripleO::CephClusterConfig::SoftwareConfig: os-apply-config/ceph-cluster-config.yaml
- OS::TripleO::AllNodes::SoftwareConfig: os-apply-config/all-nodes-config.yaml
- OS::TripleO::BootstrapNode::SoftwareConfig: bootstrap-config.yaml
- OS::TripleO::NodeUserData: firstboot/userdata_default.yaml
- OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
- OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
-
- # "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
- # phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
- # configuration with knowledge of all nodes in the cluster is required vs single
- # node configuration in the pre_deploy step.
- OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/default.yaml
-
- # TripleO overcloud networks
- OS::TripleO::Network: network/networks.yaml
- OS::TripleO::VipConfig: os-apply-config/vip-config.yaml
-
- OS::TripleO::Network::External: network/noop.yaml
- OS::TripleO::Network::InternalApi: network/noop.yaml
- OS::TripleO::Network::StorageMgmt: network/noop.yaml
- OS::TripleO::Network::Storage: network/noop.yaml
- OS::TripleO::Network::Tenant: network/noop.yaml
-
- OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml
- OS::TripleO::Network::Ports::NetIpSubnetMap: network/ports/net_ip_subnet_map.yaml
- OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml
-
- # Port assignments for the controller role
- OS::TripleO::Controller::Ports::ExternalPort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::StorageMgmtPort: network/ports/noop.yaml
- OS::TripleO::Controller::Ports::TenantPort: network/ports/noop.yaml
-
- # Port assignments for the compute role
- OS::TripleO::Compute::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::Compute::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::Compute::Ports::TenantPort: network/ports/noop.yaml
-
- # Port assignments for the ceph storage role
- OS::TripleO::CephStorage::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
-
- # Port assignments for the swift storage role
- OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
-
- # Port assignments for the block storage role
- OS::TripleO::BlockStorage::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::BlockStorage::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::BlockStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
-
- # Port assignments for service virtual IPs for the controller role
- OS::TripleO::Controller::Ports::RedisVipPort: network/ports/noop.yaml
-
- # validation resources
- OS::TripleO::AllNodes::Validation: os-apply-config/all-nodes-validation.yaml
diff --git a/overcloud-without-mergepy.yaml b/overcloud-without-mergepy.yaml
index 9c915c4a..23dc6464 100644..120000
--- a/overcloud-without-mergepy.yaml
+++ b/overcloud-without-mergepy.yaml
@@ -1,1452 +1 @@
-heat_template_version: 2015-04-30
-
-description: >
- Deploy an OpenStack environment, consisting of several node types (roles),
- Controller, Compute, BlockStorage, SwiftStorage and CephStorage. The Storage
- roles enable independent scaling of the storage components, but the minimal
- deployment is one Controller and one Compute node.
-
-
-# TODO(shadower): we should probably use the parameter groups to put
-# some order in here.
-parameters:
-
- # Common parameters (not specific to a role)
- AdminPassword:
- default: unset
- description: The password for the keystone admin account, used for monitoring, querying neutron etc.
- type: string
- hidden: true
- CeilometerBackend:
- default: 'mongodb'
- description: The ceilometer backend type.
- type: string
- CeilometerMeteringSecret:
- default: unset
- description: Secret shared by the ceilometer services.
- type: string
- hidden: true
- CeilometerPassword:
- default: unset
- description: The password for the ceilometer service account.
- type: string
- hidden: true
- # This has to be an UUID so for now we generate it outside the template
- CephClusterFSID:
- default: ''
- type: string
- description: The Ceph cluster FSID. Must be a UUID.
- CephMonKey:
- default: ''
- description: The Ceph monitors key. Can be created with ceph-authtool --gen-print-key.
- type: string
- hidden: true
- CephAdminKey:
- default: ''
- description: The Ceph admin client key. Can be created with ceph-authtool --gen-print-key.
- type: string
- hidden: true
- CinderEnableNfsBackend:
- default: false
- description: Whether or not to enable the NFS backend for Cinder
- type: boolean
- CephClientKey:
- default: ''
- description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring.
- type: string
- hidden: true
- CephExternalMonHost:
- default: ''
- type: string
- description: List of externally managed Ceph Mon Host IPs. Only used for external Ceph deployments.
- CinderEnableIscsiBackend:
- default: true
- description: Whether or not to enable the iSCSI backend for Cinder
- type: boolean
- CinderEnableRbdBackend:
- default: false
- description: Whether or not to enable the RBD backend for Cinder
- type: boolean
- CloudName:
- default: ''
- description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
- type: string
- ControlFixedIPs:
- default: []
- description: Should be used for arbitrary IPs.
- type: json
- Debug:
- default: ''
- description: Set to True to enable debugging on all services.
- type: string
- GlancePort:
- default: "9292"
- description: Glance port.
- type: string
- GlanceProtocol:
- default: http
- description: Protocol to use when connecting to glance, set to https for SSL.
- type: string
- HAProxySyslogAddress:
- default: /dev/log
- description: Syslog address where HAProxy will send its logs
- type: string
- HorizonAllowedHosts:
- default: '*'
- description: A list of IPs/hostnames allowed to connect to Horizon
- type: comma_delimited_list
- ImageUpdatePolicy:
- default: 'REBUILD_PRESERVE_EPHEMERAL'
- description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
- type: string
- KeyName:
- default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
- type: string
- constraints:
- - custom_constraint: nova.keypair
- NeutronExternalNetworkBridge:
- description: Name of bridge used for external network traffic.
- type: string
- default: 'br-ex'
- NeutronBridgeMappings:
- description: >
- The OVS logical->physical bridge mappings to use. See the Neutron
- documentation for details. Defaults to mapping br-ex - the external
- bridge on hosts - to a physical name 'datacentre' which can be used
- to create provider networks (and we use this for the default floating
- network) - if changing this either use different post-install network
- scripts or be sure to keep 'datacentre' as a mapping network name.
- type: string
- default: "datacentre:br-ex"
- NeutronControlPlaneID:
- default: 'ctlplane'
- type: string
- description: Neutron ID or name for ctlplane network.
- NeutronEnableTunnelling:
- type: string
- default: "True"
- NeutronFlatNetworks:
- type: string
- default: 'datacentre'
- description: >
- If set, flat networks to configure in neutron plugins. Defaults to
- 'datacentre' to permit external network creation.
- NeutronNetworkType:
- default: 'vxlan'
- description: The tenant network type for Neutron, either gre or vxlan.
- type: string
- NeutronPassword:
- default: unset
- description: The password for the neutron service account, used by neutron agents.
- type: string
- hidden: true
- NeutronPublicInterface:
- default: nic1
- description: What interface to bridge onto br-ex for network nodes.
- type: string
- NeutronPublicInterfaceTag:
- default: ''
- description: >
- VLAN tag for creating a public VLAN. The tag will be used to
- create an access port on the exterior bridge for each control plane node,
- and that port will be given the IP address returned by neutron from the
- public network. Set CONTROLEXTRA=overcloud-vlan-port.yaml when compiling
- overcloud.yaml to include the deployment of VLAN ports to the control
- plane.
- type: string
- NeutronComputeAgentMode:
- default: 'dvr'
- description: Agent mode for the neutron-l3-agent on the compute hosts
- type: string
- NeutronAgentMode:
- default: 'dvr_snat'
- description: Agent mode for the neutron-l3-agent on the controller hosts
- type: string
- NeutronDVR:
- default: 'False'
- description: Whether to configure Neutron Distributed Virtual Routers
- type: string
- NeutronMetadataProxySharedSecret:
- default: 'unset'
- description: Shared secret to prevent spoofing
- type: string
- hidden: true
- NeutronTunnelTypes:
- default: 'vxlan'
- description: |
- The tunnel types for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'gre,vxlan'
- type: string
- NeutronTunnelIdRanges:
- description: |
- Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
- of GRE tunnel IDs that are available for tenant network allocation
- default: ["1:1000", ]
- type: comma_delimited_list
- NeutronVniRanges:
- description: |
- Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges
- of VXLAN VNI IDs that are available for tenant network allocation
- default: ["1:1000", ]
- type: comma_delimited_list
- NeutronCorePlugin:
- default: 'ml2'
- description: |
- The core plugin for Neutron. The value should be the entrypoint to be loaded
- from neutron.core_plugins namespace.
- type: string
- NeutronServicePlugins:
- default: "router"
- description: |
- Comma-separated list of service plugin entrypoints to be loaded from the
- neutron.service_plugins namespace.
- type: comma_delimited_list
- NeutronTypeDrivers:
- default: "vxlan,vlan,flat,gre"
- description: |
- Comma-separated list of network type driver entrypoints to be loaded.
- type: comma_delimited_list
- NeutronMechanismDrivers:
- default: 'openvswitch'
- description: |
- The mechanism drivers for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'openvswitch,l2_population'
- type: string
- NeutronAllowL3AgentFailover:
- default: 'True'
- description: Allow automatic l3-agent failover
- type: string
- NeutronL3HA:
- default: 'False'
- description: Whether to enable l3-agent HA
- type: string
- NeutronDhcpAgentsPerNetwork:
- type: number
- default: 3
- description: The number of neutron dhcp agents to schedule per network
- NovaPassword:
- default: unset
- description: The password for the nova service account, used by nova-api.
- type: string
- hidden: true
- NtpServer:
- type: string
- default: ''
- MongoDbNoJournal:
- default: false
- description: Should MongoDB journaling be disabled
- type: boolean
- PublicVirtualFixedIPs:
- default: []
- description: >
- Control the IP allocation for the PublicVirtualInterface port. E.g.
- [{'ip_address':'1.2.3.4'}]
- type: json
- RabbitCookieSalt:
- type: string
- default: unset
- description: Salt for the rabbit cookie; change this to force the randomly generated rabbit cookie to change.
- # FIXME: 'guest' is provisioned in RabbitMQ by default, we should create a user if these are changed
- RabbitUserName:
- default: guest
- description: The username for RabbitMQ
- type: string
- RabbitPassword:
- default: guest
- description: The password for RabbitMQ
- type: string
- hidden: true
- RabbitClientUseSSL:
- default: false
- description: >
- Rabbit client subscriber parameter to specify
- an SSL connection to the RabbitMQ host.
- type: string
- RabbitClientPort:
- default: 5672
- description: Set rabbit subscriber port, change this if using SSL
- type: number
- # We need to set this as string because 'unlimited' is a valid setting
- RabbitFDLimit:
- default: 16384
- description: Configures RabbitMQ FD limit
- type: string
- SnmpdReadonlyUserName:
- default: ro_snmp_user
- description: The user name for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- SnmpdReadonlyUserPassword:
- default: unset
- description: The user password for SNMPd with readonly rights running on all Overcloud nodes
- type: string
- hidden: true
-
- # Controller-specific params
- AdminToken:
- default: unset
- description: The keystone auth secret.
- type: string
- hidden: true
- CinderLVMLoopDeviceSize:
- default: 5000
- description: The size of the loopback file used by the cinder LVM driver.
- type: number
- CinderNfsMountOptions:
- default: ''
- description: >
- Mount options for NFS mounts used by Cinder NFS backend. Effective
- when CinderEnableNfsBackend is true.
- type: string
- CinderNfsServers:
- default: ''
- description: >
- NFS servers used by Cinder NFS backend. Effective when
- CinderEnableNfsBackend is true.
- type: comma_delimited_list
- CinderPassword:
- default: unset
- description: The password for the cinder service account, used by cinder-api.
- type: string
- hidden: true
- CinderISCSIHelper:
- default: tgtadm
- description: The iSCSI helper to use with cinder.
- type: string
- ControllerCount:
- type: number
- default: 1
- constraints:
- - range: {min: 1}
- controllerExtraConfig:
- default: {}
- description: |
- Controller specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
- controllerImage:
- type: string
- default: overcloud-control
- constraints:
- - custom_constraint: glance.image
- OvercloudControlFlavor:
- description: Flavor for control nodes to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- ControlVirtualInterface:
- default: 'br-ex'
- description: Interface where virtual ip will be assigned.
- type: string
- EnableFencing:
- default: false
- description: Whether to enable fencing in Pacemaker or not.
- type: boolean
- EnableGalera:
- default: true
- description: Whether to use Galera instead of regular MariaDB.
- type: boolean
- ControllerEnableCephStorage:
- default: false
- description: Whether to deploy Ceph Storage (OSD) on the Controller
- type: boolean
- ControllerEnableSwiftStorage:
- default: true
- description: Whether to enable Swift Storage on the Controller
- type: boolean
- ExtraConfig:
- default: {}
- description: |
- Additional configuration to inject into the cluster. The format required
- may be implementation specific, e.g puppet hieradata. Any role specific
- ExtraConfig, e.g controllerExtraConfig takes precedence over ExtraConfig.
- type: json
- FencingConfig:
- default: {}
- description: |
- Pacemaker fencing configuration. The JSON should have
- the following structure:
- {
- "devices": [
- {
- "agent": "AGENT_NAME",
- "host_mac": "HOST_MAC_ADDRESS",
- "params": {"PARAM_NAME": "PARAM_VALUE"}
- }
- ]
- }
- For instance:
- {
- "devices": [
- {
- "agent": "fence_xvm",
- "host_mac": "52:54:00:aa:bb:cc",
- "params": {
- "multicast_address": "225.0.0.12",
- "port": "baremetal_0",
- "manage_fw": true,
- "manage_key_file": true,
- "key_file": "/etc/fence_xvm.key",
- "key_file_password": "abcdef"
- }
- }
- ]
- }
- type: json
- GlanceLogFile:
- description: The filepath of the file to use for logging messages from Glance.
- type: string
- default: ''
- GlanceNotifierStrategy:
- description: Strategy to use for Glance notification queue
- type: string
- default: noop
- GlancePassword:
- default: unset
- description: The password for the glance service account, used by the glance services.
- type: string
- hidden: true
- GlanceBackend:
- default: swift
- description: The short name of the Glance backend to use. Should be one
- of swift, rbd or file
- type: string
- constraints:
- - allowed_values: ['swift', 'file', 'rbd']
- HeatPassword:
- default: unset
- description: The password for the Heat service account, used by the Heat services.
- type: string
- hidden: true
- HeatStackDomainAdminPassword:
- description: Password for heat_domain_admin user.
- type: string
- default: ''
- hidden: true
- KeystoneCACertificate:
- default: ''
- description: Keystone self-signed certificate authority certificate.
- type: string
- KeystoneSigningCertificate:
- default: ''
- description: Keystone certificate for verifying token validity.
- type: string
- KeystoneSigningKey:
- default: ''
- description: Keystone key for signing tokens.
- type: string
- hidden: true
- KeystoneSSLCertificate:
- default: ''
- description: Keystone certificate used for its SSL endpoints.
- type: string
- KeystoneSSLCertificateKey:
- default: ''
- description: Private key for the Keystone SSL certificate.
- type: string
- hidden: true
- KeystoneNotificationDriver:
- description: Comma-separated list of Oslo notification drivers used by Keystone
- default: ['messaging']
- type: comma_delimited_list
- KeystoneNotificationFormat:
- description: The Keystone notification format
- default: 'basic'
- type: string
- constraints:
- - allowed_values: [ 'basic', 'cadf' ]
- MysqlInnodbBufferPoolSize:
- description: >
- Specifies the size of the buffer pool in megabytes. Setting to
- zero should be interpreted as "no value" and will defer to the
- lower level default.
- type: number
- default: 0
- MysqlMaxConnections:
- description: Configures MySQL max_connections config setting
- type: number
- default: 4096
- NeutronDnsmasqOptions:
- default: 'dhcp-option-force=26,1400'
- description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the tunnel overhead.
- type: string
- NeutronPublicInterfaceDefaultRoute:
- default: ''
- description: A custom default route for the NeutronPublicInterface.
- type: string
- NeutronPublicInterfaceIP:
- default: ''
- description: A custom IP address to put onto the NeutronPublicInterface.
- type: string
- NeutronPublicInterfaceRawDevice:
- default: ''
- description: If set, the public interface is a vlan with this device as the raw device.
- type: string
- PublicVirtualInterface:
- default: 'br-ex'
- description: >
- Specifies the interface where the public-facing virtual ip will be assigned.
- This should be int_public when a VLAN is being used.
- type: string
- SSLCertificate:
- default: ''
- description: If set, the contents of an SSL certificate .crt file for encrypting SSL endpoints.
- type: string
- hidden: true
- SSLKey:
- default: ''
- description: If set, the contents of an SSL certificate .key file for encrypting SSL endpoints.
- type: string
- hidden: true
- SSLCACertificate:
- default: ''
- description: If set, the contents of an SSL certificate authority file.
- type: string
- SwiftHashSuffix:
- default: unset
- description: A random string to be used as a salt when hashing to determine mappings in the ring.
- type: string
- hidden: true
- SwiftPassword:
- default: unset
- description: The password for the swift service account, used by the swift proxy services.
- type: string
- hidden: true
- SwiftMountCheck:
- default: 'false'
- description: Value of mount_check in the Swift account-, container- and object-server.conf files
- type: boolean
- SwiftMinPartHours:
- type: number
- default: 1
- description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance.
- SwiftPartPower:
- default: 10
- description: Partition Power to use when building Swift rings
- type: number
- SwiftReplicas:
- type: number
- default: 3
- description: How many replicas to use in the swift rings.
-
-# Compute-specific params
- CeilometerComputeAgent:
- description: Indicates whether the Compute agent is present and expects nova-compute to be configured accordingly
- type: string
- default: ''
- constraints:
- - allowed_values: ['', Present]
- ComputeCount:
- type: number
- default: 1
- HypervisorNeutronPhysicalBridge:
- default: 'br-ex'
- description: >
- An OVS bridge to create on each hypervisor. This defaults to br-ex the
- same as the control plane nodes, as we have a uniform configuration of
- the openvswitch agent. Typically should not need to be changed.
- type: string
- HypervisorNeutronPublicInterface:
- default: nic1
- description: What interface to add to the HypervisorNeutronPhysicalBridge.
- type: string
- NeutronNetworkVLANRanges:
- default: 'datacentre'
- description: >
- The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
- Neutron documentation for permitted values. Defaults to permitting any
- VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
- type: comma_delimited_list
- NovaComputeDriver:
- type: string
- default: libvirt.LibvirtDriver
- NovaComputeExtraConfig:
- default: {}
- description: |
- NovaCompute specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
- NovaComputeLibvirtType:
- default: ''
- type: string
- NovaEnableRbdBackend:
- default: false
- description: Whether or not to enable the RBD backend for Nova
- type: boolean
- NovaImage:
- type: string
- default: overcloud-compute
- constraints:
- - custom_constraint: glance.image
- OvercloudComputeFlavor:
- description: Flavor for compute nodes to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- ServiceNetMap:
- default:
- NeutronTenantNetwork: tenant
- CeilometerApiNetwork: internal_api
- MongoDbNetwork: internal_api
- CinderApiNetwork: internal_api
- CinderIscsiNetwork: storage
- GlanceApiNetwork: storage
- GlanceRegistryNetwork: internal_api
- KeystoneAdminApiNetwork: ctlplane # allows undercloud to config endpoints
- KeystonePublicApiNetwork: internal_api
- NeutronApiNetwork: internal_api
- HeatApiNetwork: internal_api
- NovaApiNetwork: internal_api
- NovaMetadataNetwork: internal_api
- NovaVncProxyNetwork: internal_api
- SwiftMgmtNetwork: storage_mgmt
- SwiftProxyNetwork: storage
- HorizonNetwork: internal_api
- MemcachedNetwork: internal_api
- RabbitMqNetwork: internal_api
- RedisNetwork: internal_api
- MysqlNetwork: internal_api
- CephClusterNetwork: storage_mgmt
- CephPublicNetwork: storage
- ControllerHostnameResolveNetwork: internal_api
- ComputeHostnameResolveNetwork: internal_api
- BlockStorageHostnameResolveNetwork: internal_api
- ObjectStorageHostnameResolveNetwork: internal_api
- CephStorageHostnameResolveNetwork: storage
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry.
- type: json
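As the description notes, ServiceNetMap is normally overridden from an environment file via parameter_defaults. Because it is a plain json parameter, the supplied value replaces the whole map rather than merging with it, so in practice the full default map is copied and only the relevant entries edited. A minimal, illustrative sketch:

    # network-environment.yaml (illustrative)
    parameter_defaults:
      ServiceNetMap:
        MysqlNetwork: internal_api
        CephPublicNetwork: storage
        # ...remaining entries copied unchanged from the template default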
-
-# Block storage specific parameters
- BlockStorageCount:
- type: number
- default: 0
- BlockStorageImage:
- default: overcloud-cinder-volume
- type: string
- OvercloudBlockStorageFlavor:
- description: Flavor for block storage nodes to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- BlockStorageExtraConfig:
- default: {}
- description: |
- BlockStorage specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
-
-# Object storage specific parameters
- ObjectStorageCount:
- type: number
- default: 0
- OvercloudSwiftStorageFlavor:
- description: Flavor for Swift storage nodes to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- SwiftStorageImage:
- default: overcloud-swift-storage
- type: string
- ObjectStorageExtraConfig:
- default: {}
- description: |
- ObjectStorage specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
-
-
-# Ceph storage specific parameters
- CephStorageCount:
- type: number
- default: 0
- CephStorageImage:
- default: overcloud-ceph-storage
- type: string
- OvercloudCephStorageFlavor:
- default: baremetal
- description: Flavor for Ceph storage nodes to request when deploying.
- type: string
- constraints:
- - custom_constraint: nova.flavor
- CephStorageExtraConfig:
- default: {}
- description: |
- CephStorage specific configuration to inject into the cluster. Same
- structure as ExtraConfig.
- type: json
-
- # Hostname format for each role
- # Note %index% is translated into the index of the node, e.g. 0/1/2 etc.
- # and %stackname% is replaced with OS::stack_name in the template below.
- # If you want to use the heat generated names, pass '' (empty string).
- ControllerHostnameFormat:
- type: string
- description: Format for Controller node hostnames
- default: '%stackname%-controller-%index%'
- ComputeHostnameFormat:
- type: string
- description: Format for Compute node hostnames
- default: '%stackname%-novacompute-%index%'
- BlockStorageHostnameFormat:
- type: string
- description: Format for BlockStorage node hostnames
- default: '%stackname%-blockstorage-%index%'
- ObjectStorageHostnameFormat:
- type: string
- description: Format for SwiftStorage node hostnames
- default: '%stackname%-objectstorage-%index%'
- CephStorageHostnameFormat:
- type: string
- description: Format for CephStorage node hostnames
- default: '%stackname%-cephstorage-%index%'
-
- # Identifiers to trigger tasks on nodes
- UpdateIdentifier:
- default: ''
- type: string
- description: >
- Setting to a previously unused value during stack-update will trigger
- package update on all nodes
-
- # If you want to remove a specific node from a resource group, you can pass
- # the node name or id as a <Group>RemovalPolicies parameter, for example:
- # ComputeRemovalPolicies: [{'resource_list': ['0']}]
- ControllerRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from ControllerResourceGroup when
- doing an update which requires removal of specific resources.
- ComputeRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from ComputeResourceGroup when
- doing an update which requires removal of specific resources.
- BlockStorageRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from BlockStorageResourceGroup when
- doing an update which requires removal of specific resources.
- ObjectStorageRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from ObjectStorageResourceGroup when
- doing an update which requires removal of specific resources.
- CephStorageRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from CephStorageResourceGroup when
- doing an update which requires removal of specific resources.
-
-
-resources:
-
- HeatAuthEncryptionKey:
- type: OS::Heat::RandomString
-
- PcsdPassword:
- type: OS::Heat::RandomString
- properties:
- length: 16
-
- HorizonSecret:
- type: OS::Heat::RandomString
- properties:
- length: 10
-
- Controller:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: ControllerCount}
- removal_policies: {get_param: ControllerRemovalPolicies}
- resource_def:
- type: OS::TripleO::Controller
- properties:
- AdminPassword: {get_param: AdminPassword}
- AdminToken: {get_param: AdminToken}
- CeilometerBackend: {get_param: CeilometerBackend}
- CeilometerMeteringSecret: {get_param: CeilometerMeteringSecret}
- CeilometerPassword: {get_param: CeilometerPassword}
- CinderLVMLoopDeviceSize: {get_param: CinderLVMLoopDeviceSize}
- CinderNfsMountOptions: {get_param: CinderNfsMountOptions}
- CinderNfsServers: {get_param: CinderNfsServers}
- CinderPassword: {get_param: CinderPassword}
- CinderISCSIHelper: {get_param: CinderISCSIHelper}
- CinderEnableNfsBackend: {get_param: CinderEnableNfsBackend}
- CinderEnableIscsiBackend: {get_param: CinderEnableIscsiBackend}
- CinderEnableRbdBackend: {get_param: CinderEnableRbdBackend}
- CloudName: {get_param: CloudName}
- ControlVirtualInterface: {get_param: ControlVirtualInterface}
- ControllerExtraConfig: {get_param: controllerExtraConfig}
- Debug: {get_param: Debug}
- EnableFencing: {get_param: EnableFencing}
- EnableGalera: {get_param: EnableGalera}
- EnableCephStorage: {get_param: ControllerEnableCephStorage}
- EnableSwiftStorage: {get_param: ControllerEnableSwiftStorage}
- ExtraConfig: {get_param: ExtraConfig}
- FencingConfig: {get_param: FencingConfig}
- Flavor: {get_param: OvercloudControlFlavor}
- GlancePort: {get_param: GlancePort}
- GlanceProtocol: {get_param: GlanceProtocol}
- GlancePassword: {get_param: GlancePassword}
- GlanceBackend: {get_param: GlanceBackend}
- GlanceNotifierStrategy: {get_param: GlanceNotifierStrategy}
- GlanceLogFile: {get_param: GlanceLogFile}
- HAProxySyslogAddress: {get_param: HAProxySyslogAddress}
- HeatPassword: {get_param: HeatPassword}
- HeatStackDomainAdminPassword: {get_param: HeatStackDomainAdminPassword}
- HeatAuthEncryptionKey: {get_resource: HeatAuthEncryptionKey}
- HorizonAllowedHosts: {get_param: HorizonAllowedHosts}
- HorizonSecret: {get_resource: HorizonSecret}
- Image: {get_param: controllerImage}
- ImageUpdatePolicy: {get_param: ImageUpdatePolicy}
- KeyName: {get_param: KeyName}
- KeystoneCACertificate: {get_param: KeystoneCACertificate}
- KeystoneSigningCertificate: {get_param: KeystoneSigningCertificate}
- KeystoneSigningKey: {get_param: KeystoneSigningKey}
- KeystoneSSLCertificate: {get_param: KeystoneSSLCertificate}
- KeystoneSSLCertificateKey: {get_param: KeystoneSSLCertificateKey}
- KeystoneNotificationDriver: {get_param: KeystoneNotificationDriver}
- KeystoneNotificationFormat: {get_param: KeystoneNotificationFormat}
- MysqlClusterUniquePart: {get_attr: [MysqlClusterUniquePart, value]}
- MysqlInnodbBufferPoolSize: {get_param: MysqlInnodbBufferPoolSize}
- MysqlMaxConnections: {get_param: MysqlMaxConnections}
- MysqlRootPassword: {get_attr: [MysqlRootPassword, value]}
- NeutronPublicInterfaceIP: {get_param: NeutronPublicInterfaceIP}
- NeutronFlatNetworks: {get_param: NeutronFlatNetworks}
- NeutronBridgeMappings: {get_param: NeutronBridgeMappings}
- NeutronExternalNetworkBridge: {get_param: NeutronExternalNetworkBridge}
- NeutronEnableTunnelling: {get_param: NeutronEnableTunnelling}
- NeutronNetworkVLANRanges: {get_param: NeutronNetworkVLANRanges}
- NeutronPublicInterface: {get_param: NeutronPublicInterface}
- NeutronPublicInterfaceDefaultRoute: {get_param: NeutronPublicInterfaceDefaultRoute}
- NeutronPublicInterfaceRawDevice: {get_param: NeutronPublicInterfaceRawDevice}
- NeutronPassword: {get_param: NeutronPassword}
- NeutronDnsmasqOptions: {get_param: NeutronDnsmasqOptions}
- NeutronDVR: {get_param: NeutronDVR}
- NeutronMetadataProxySharedSecret: {get_param: NeutronMetadataProxySharedSecret}
- NeutronAgentMode: {get_param: NeutronAgentMode}
- NeutronCorePlugin: {get_param: NeutronCorePlugin}
- NeutronServicePlugins: {get_param: NeutronServicePlugins}
- NeutronTypeDrivers: {get_param: NeutronTypeDrivers}
- NeutronMechanismDrivers: {get_param: NeutronMechanismDrivers}
- NeutronAllowL3AgentFailover: {get_param: NeutronAllowL3AgentFailover}
- NeutronL3HA: {get_param: NeutronL3HA}
- NeutronDhcpAgentsPerNetwork: {get_param: NeutronDhcpAgentsPerNetwork}
- NeutronNetworkType: {get_param: NeutronNetworkType}
- NeutronTunnelTypes: {get_param: NeutronTunnelTypes}
- NovaPassword: {get_param: NovaPassword}
- NtpServer: {get_param: NtpServer}
- MongoDbNoJournal: {get_param: MongoDbNoJournal}
- PcsdPassword: {get_resource: PcsdPassword}
- PublicVirtualInterface: {get_param: PublicVirtualInterface}
- RabbitPassword: {get_param: RabbitPassword}
- RabbitUserName: {get_param: RabbitUserName}
- RabbitCookie: {get_attr: [RabbitCookie, value]}
- RabbitClientUseSSL: {get_param: RabbitClientUseSSL}
- RabbitClientPort: {get_param: RabbitClientPort}
- RabbitFDLimit: {get_param: RabbitFDLimit}
- SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName}
- SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword}
- RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]}
- SSLCertificate: {get_param: SSLCertificate}
- SSLKey: {get_param: SSLKey}
- SSLCACertificate: {get_param: SSLCACertificate}
- SwiftHashSuffix: {get_param: SwiftHashSuffix}
- SwiftMountCheck: {get_param: SwiftMountCheck}
- SwiftMinPartHours: {get_param: SwiftMinPartHours}
- SwiftPartPower: {get_param: SwiftPartPower}
- SwiftPassword: {get_param: SwiftPassword}
- SwiftReplicas: { get_param: SwiftReplicas}
- VirtualIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} # deprecated. Use per service VIP settings instead now.
- PublicVirtualIP: {get_attr: [PublicVirtualIP, ip_address]}
- ServiceNetMap: {get_param: ServiceNetMap}
- CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
- CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
- HeatApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
- GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
- GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
- NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
- SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
- MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
- KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
- KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
- NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
- UpdateIdentifier: {get_param: UpdateIdentifier}
- Hostname:
- str_replace:
- template: {get_param: ControllerHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
-
- Compute:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: ComputeCount}
- removal_policies: {get_param: ComputeRemovalPolicies}
- resource_def:
- type: OS::TripleO::Compute
- properties:
- AdminPassword: {get_param: AdminPassword}
- CeilometerComputeAgent: {get_param: CeilometerComputeAgent}
- CeilometerMeteringSecret: {get_param: CeilometerMeteringSecret}
- CeilometerPassword: {get_param: CeilometerPassword}
- CinderEnableNfsBackend: {get_param: CinderEnableNfsBackend}
- CinderEnableRbdBackend: {get_param: CinderEnableRbdBackend}
- Debug: {get_param: Debug}
- ExtraConfig: {get_param: ExtraConfig}
- Flavor: {get_param: OvercloudComputeFlavor}
- GlanceHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
- GlancePort: {get_param: GlancePort}
- GlanceProtocol: {get_param: GlanceProtocol}
- Image: {get_param: NovaImage}
- ImageUpdatePolicy: {get_param: ImageUpdatePolicy}
- KeyName: {get_param: KeyName}
- KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
- KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
- NeutronBridgeMappings: {get_param: NeutronBridgeMappings}
- NeutronEnableTunnelling: {get_param: NeutronEnableTunnelling}
- NeutronFlatNetworks: {get_param: NeutronFlatNetworks}
- NeutronHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
- NeutronNetworkType: {get_param: NeutronNetworkType}
- NeutronTunnelTypes: {get_param: NeutronTunnelTypes}
- NeutronNetworkVLANRanges: {get_param: NeutronNetworkVLANRanges}
- NeutronPassword: {get_param: NeutronPassword}
- NeutronPhysicalBridge: {get_param: HypervisorNeutronPhysicalBridge}
- NeutronPublicInterface: {get_param: HypervisorNeutronPublicInterface}
- NeutronDVR: {get_param: NeutronDVR}
- NeutronMetadataProxySharedSecret: {get_param: NeutronMetadataProxySharedSecret}
- NeutronAgentMode: {get_param: NeutronComputeAgentMode}
- NeutronPublicInterfaceRawDevice: {get_param: NeutronPublicInterfaceRawDevice}
- NeutronCorePlugin: {get_param: NeutronCorePlugin}
- NeutronServicePlugins: {get_param: NeutronServicePlugins}
- NeutronTypeDrivers: {get_param: NeutronTypeDrivers}
- NeutronMechanismDrivers: {get_param: NeutronMechanismDrivers}
- # L3 HA and Failover is not relevant for Computes, should be removed
- NeutronAllowL3AgentFailover: {get_param: NeutronAllowL3AgentFailover}
- NeutronL3HA: {get_param: NeutronL3HA}
- NovaApiHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
- NovaComputeDriver: {get_param: NovaComputeDriver}
- NovaComputeExtraConfig: {get_param: NovaComputeExtraConfig}
- NovaComputeLibvirtType: {get_param: NovaComputeLibvirtType}
- NovaEnableRbdBackend: {get_param: NovaEnableRbdBackend}
- NovaPublicIP: {get_attr: [PublicVirtualIP, ip_address]}
- NovaPassword: {get_param: NovaPassword}
- NtpServer: {get_param: NtpServer}
- RabbitHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
- RabbitPassword: {get_param: RabbitPassword}
- RabbitUserName: {get_param: RabbitUserName}
- RabbitClientUseSSL: {get_param: RabbitClientUseSSL}
- RabbitClientPort: {get_param: RabbitClientPort}
- SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName}
- SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword}
- ServiceNetMap: {get_param: ServiceNetMap}
- UpdateIdentifier: {get_param: UpdateIdentifier}
- Hostname:
- str_replace:
- template: {get_param: ComputeHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
-
- BlockStorage:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: BlockStorageCount}
- removal_policies: {get_param: BlockStorageRemovalPolicies}
- resource_def:
- type: OS::TripleO::BlockStorage
- properties:
- Debug: {get_param: Debug}
- Image: {get_param: BlockStorageImage}
- CinderISCSIHelper: {get_param: CinderISCSIHelper}
- CinderLVMLoopDeviceSize: {get_param: CinderLVMLoopDeviceSize}
- # Purpose of the dedicated BlockStorage nodes should be to use their local LVM
- CinderEnableIscsiBackend: {get_param: CinderEnableIscsiBackend}
- CinderPassword: {get_param: CinderPassword}
- KeyName: {get_param: KeyName}
- Flavor: {get_param: OvercloudBlockStorageFlavor}
- VirtualIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- GlancePort: {get_param: GlancePort}
- GlanceProtocol: {get_param: GlanceProtocol}
- GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
- RabbitPassword: {get_param: RabbitPassword}
- RabbitUserName: {get_param: RabbitUserName}
- RabbitClientUseSSL: {get_param: RabbitClientUseSSL}
- RabbitClientPort: {get_param: RabbitClientPort}
- NtpServer: {get_param: NtpServer}
- UpdateIdentifier: {get_param: UpdateIdentifier}
- Hostname:
- str_replace:
- template: {get_param: BlockStorageHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
- ServiceNetMap: {get_param: ServiceNetMap}
- MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
- ExtraConfig: {get_param: ExtraConfig}
- BlockStorageExtraConfig: {get_param: BlockStorageExtraConfig}
-
- ObjectStorage:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: ObjectStorageCount}
- removal_policies: {get_param: ObjectStorageRemovalPolicies}
- resource_def:
- type: OS::TripleO::ObjectStorage
- properties:
- KeyName: {get_param: KeyName}
- Flavor: {get_param: OvercloudSwiftStorageFlavor}
- HashSuffix: {get_param: SwiftHashSuffix}
- MountCheck: {get_param: SwiftMountCheck}
- MinPartHours: {get_param: SwiftMinPartHours}
- PartPower: {get_param: SwiftPartPower}
- Image: {get_param: SwiftStorageImage}
- Replicas: { get_param: SwiftReplicas}
- NtpServer: {get_param: NtpServer}
- UpdateIdentifier: {get_param: UpdateIdentifier}
- ServiceNetMap: {get_param: ServiceNetMap}
- Hostname:
- str_replace:
- template: {get_param: ObjectStorageHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
- ExtraConfig: {get_param: ExtraConfig}
- ObjectStorageExtraConfig: {get_param: ObjectStorageExtraConfig}
-
- CephStorage:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: CephStorageCount}
- removal_policies: {get_param: CephStorageRemovalPolicies}
- resource_def:
- type: OS::TripleO::CephStorage
- properties:
- Image: {get_param: CephStorageImage}
- KeyName: {get_param: KeyName}
- Flavor: {get_param: OvercloudCephStorageFlavor}
- NtpServer: {get_param: NtpServer}
- ServiceNetMap: {get_param: ServiceNetMap}
- UpdateIdentifier: {get_param: UpdateIdentifier}
- Hostname:
- str_replace:
- template: {get_param: CephStorageHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
- ExtraConfig: {get_param: ExtraConfig}
- CephStorageExtraConfig: {get_param: CephStorageExtraConfig}
-
- ControllerIpListMap:
- type: OS::TripleO::Network::Ports::NetIpListMap
- properties:
- ControlPlaneIpList: {get_attr: [Controller, ip_address]}
- ExternalIpList: {get_attr: [Controller, external_ip_address]}
- InternalApiIpList: {get_attr: [Controller, internal_api_ip_address]}
- StorageIpList: {get_attr: [Controller, storage_ip_address]}
- StorageMgmtIpList: {get_attr: [Controller, storage_mgmt_ip_address]}
- TenantIpList: {get_attr: [Controller, tenant_ip_address]}
-
- allNodesConfig:
- type: OS::TripleO::AllNodes::SoftwareConfig
- properties:
- compute_hosts: {get_attr: [Compute, hosts_entry]}
- controller_hosts: {get_attr: [Controller, hosts_entry]}
- controller_ips: {get_attr: [Controller, ip_address]}
- block_storage_hosts: {get_attr: [BlockStorage, hosts_entry]}
- object_storage_hosts: {get_attr: [ObjectStorage, hosts_entry]}
- ceph_storage_hosts: {get_attr: [CephStorage, hosts_entry]}
- controller_names: {get_attr: [Controller, hostname]}
- rabbit_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
- mongo_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, MongoDbNetwork]}]}
- redis_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, RedisNetwork]}]}
- memcache_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
- mysql_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
- horizon_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
- heat_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
- swift_proxy_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
- ceilometer_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
- nova_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
- nova_metadata_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
- glance_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
- glance_registry_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
- cinder_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
- neutron_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
- keystone_public_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
- keystone_admin_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
-
- MysqlRootPassword:
- type: OS::Heat::RandomString
- properties:
- length: 10
-
- MysqlClusterUniquePart:
- type: OS::Heat::RandomString
- properties:
- length: 10
-
- RabbitCookie:
- type: OS::Heat::RandomString
- properties:
- length: 20
- salt: {get_param: RabbitCookieSalt}
-
- # creates the network architecture
- Networks:
- type: OS::TripleO::Network
-
- ControlVirtualIP:
- type: OS::Neutron::Port
- depends_on: Networks
- properties:
- name: control_virtual_ip
- network: {get_param: NeutronControlPlaneID}
- fixed_ips: {get_param: ControlFixedIPs}
- replacement_policy: AUTO
-
- RedisVirtualIP:
- depends_on: Networks
- type: OS::TripleO::Controller::Ports::RedisVipPort
- properties:
- ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
- PortName: redis_virtual_ip
- NetworkName: {get_param: [ServiceNetMap, RedisNetwork]}
-
- # The public VIP is on the External net, falls back to ctlplane
- PublicVirtualIP:
- depends_on: Networks
- type: OS::TripleO::Controller::Ports::ExternalPort
- properties:
- ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
- PortName: public_virtual_ip
- FixedIPs: {get_param: PublicVirtualFixedIPs}
-
- InternalApiVirtualIP:
- depends_on: Networks
- type: OS::TripleO::Controller::Ports::InternalApiPort
- properties:
- ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- PortName: internal_api_virtual_ip
-
- StorageVirtualIP:
- depends_on: Networks
- type: OS::TripleO::Controller::Ports::StoragePort
- properties:
- ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- PortName: storage_virtual_ip
-
- StorageMgmtVirtualIP:
- depends_on: Networks
- type: OS::TripleO::Controller::Ports::StorageMgmtPort
- properties:
- ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- PortName: storage_management_virtual_ip
-
- VipMap:
- type: OS::TripleO::Network::Ports::NetIpMap
- properties:
- ControlPlaneIp: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- ExternalIp: {get_attr: [PublicVirtualIP, ip_address]}
- InternalApiIp: {get_attr: [InternalApiVirtualIP, ip_address]}
- StorageIp: {get_attr: [StorageVirtualIP, ip_address]}
- StorageMgmtIp: {get_attr: [StorageMgmtVirtualIP, ip_address]}
- # No tenant VIP required
-
- VipConfig:
- type: OS::TripleO::VipConfig
-
- VipDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_resource: VipConfig}
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
- input_values:
- # service VIP mappings
- keystone_admin_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
- keystone_public_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
- neutron_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
- cinder_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
- glance_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
- glance_registry_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
- swift_proxy_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
- nova_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
- nova_metadata_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
- ceilometer_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
- heat_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
- horizon_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
- redis_vip: {get_attr: [RedisVirtualIP, ip_address]}
- mysql_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
- rabbit_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
- # direct configuration of Virtual IPs for each network
- control_virtual_ip: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- public_virtual_ip: {get_attr: [PublicVirtualIP, ip_address]}
- internal_api_virtual_ip: {get_attr: [InternalApiVirtualIP, ip_address]}
- storage_virtual_ip: {get_attr: [StorageVirtualIP, ip_address]}
- storage_mgmt_virtual_ip: {get_attr: [StorageMgmtVirtualIP, ip_address]}
-
- ControllerBootstrapNodeConfig:
- type: OS::TripleO::BootstrapNode::SoftwareConfig
- properties:
- bootstrap_nodeid: {get_attr: [Controller, resource.0.hostname]}
- bootstrap_nodeid_ip: {get_attr: [Controller, resource.0.ip_address]}
-
- ControllerBootstrapNodeDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_attr: [ControllerBootstrapNodeConfig, config_id]}
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
-
- ControllerSwiftDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_attr: [SwiftDevicesAndProxyConfig, config_id]}
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
-
- ObjectStorageSwiftDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_attr: [SwiftDevicesAndProxyConfig, config_id]}
- servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
-
- SwiftDevicesAndProxyConfig:
- type: OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig
- properties:
- controller_swift_devices: {get_attr: [Controller, swift_device]}
- object_store_swift_devices: {get_attr: [ObjectStorage, swift_device]}
- controller_swift_proxy_memcaches: {get_attr: [Controller, swift_proxy_memcache]}
-
- ComputeCephDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_attr: [CephClusterConfig, config_id]}
- servers: {get_attr: [Compute, attributes, nova_server_resource]}
-
- ControllerCephDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_attr: [CephClusterConfig, config_id]}
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
-
- CephStorageCephDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_attr: [CephClusterConfig, config_id]}
- servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
-
- CephClusterConfig:
- type: OS::TripleO::CephClusterConfig::SoftwareConfig
- properties:
- ceph_storage_count: {get_param: CephStorageCount}
- ceph_fsid: {get_param: CephClusterFSID}
- ceph_mon_key: {get_param: CephMonKey}
- ceph_admin_key: {get_param: CephAdminKey}
- ceph_client_key: {get_param: CephClientKey}
- ceph_external_mon_ips: {get_param: CephExternalMonHost}
- ceph_mon_names: {get_attr: [Controller, hostname]}
- ceph_mon_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
-
- ControllerClusterConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: os-apply-config
- config:
- corosync:
- nodes: {get_attr: [Controller, corosync_node]}
- horizon:
- caches:
- memcached:
- nodes: {get_attr: [Controller, hostname]}
- mysql:
- nodes: {get_attr: [Controller, corosync_node]}
- haproxy:
- nodes: {get_attr: [Controller, corosync_node]}
-
- ControllerClusterDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_resource: ControllerClusterConfig}
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
-
- ControllerAllNodesDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_attr: [allNodesConfig, config_id]}
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
-
- ComputeAllNodesDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_attr: [allNodesConfig, config_id]}
- servers: {get_attr: [Compute, attributes, nova_server_resource]}
-
- BlockStorageAllNodesDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_attr: [allNodesConfig, config_id]}
- servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
-
- ObjectStorageAllNodesDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_attr: [allNodesConfig, config_id]}
- servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
-
- CephStorageAllNodesDeployment:
- type: OS::Heat::StructuredDeployments
- properties:
- config: {get_attr: [allNodesConfig, config_id]}
- servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
-
- # All Nodes Validations
- AllNodesValidationConfig:
- type: OS::TripleO::AllNodes::Validation
- properties:
- PingTestIps:
- list_join:
- - ' '
- - - {get_attr: [Controller, resource.0.external_ip_address]}
- - {get_attr: [Controller, resource.0.internal_api_ip_address]}
- - {get_attr: [Controller, resource.0.storage_ip_address]}
- - {get_attr: [Controller, resource.0.storage_mgmt_ip_address]}
- - {get_attr: [Controller, resource.0.tenant_ip_address]}
-
- ControllerAllNodesValidationDeployment:
- type: OS::Heat::StructuredDeployments
- depends_on: ControllerAllNodesDeployment
- properties:
- config: {get_resource: AllNodesValidationConfig}
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
-
- ComputeAllNodesValidationDeployment:
- type: OS::Heat::StructuredDeployments
- depends_on: ComputeAllNodesDeployment
- properties:
- config: {get_resource: AllNodesValidationConfig}
- servers: {get_attr: [Compute, attributes, nova_server_resource]}
-
- BlockStorageAllNodesValidationDeployment:
- type: OS::Heat::StructuredDeployments
- depends_on: BlockStorageAllNodesDeployment
- properties:
- config: {get_resource: AllNodesValidationConfig}
- servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
-
- ObjectStorageAllNodesValidationDeployment:
- type: OS::Heat::StructuredDeployments
- depends_on: ObjectStorageAllNodesDeployment
- properties:
- config: {get_resource: AllNodesValidationConfig}
- servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
-
- CephStorageAllNodesValidationDeployment:
- type: OS::Heat::StructuredDeployments
- depends_on: CephStorageAllNodesDeployment
- properties:
- config: {get_resource: AllNodesValidationConfig}
- servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
-
- # Optional ExtraConfig for all nodes - all roles are passed in here, but
- # the nested template may configure each role differently (or not at all)
- AllNodesExtraConfig:
- type: OS::TripleO::AllNodesExtraConfig
- properties:
- controller_servers: {get_attr: [Controller, attributes, nova_server_resource]}
- compute_servers: {get_attr: [Compute, attributes, nova_server_resource]}
- blockstorage_servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
- objectstorage_servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
- cephstorage_servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
-
- # Nested stack deployment runs after all other controller deployments
- ControllerNodesPostDeployment:
- type: OS::TripleO::ControllerPostDeployment
- depends_on: [ControllerBootstrapNodeDeployment, ControllerAllNodesDeployment, ControllerSwiftDeployment, ControllerCephDeployment]
- properties:
- servers: {get_attr: [Controller, attributes, nova_server_resource]}
- NodeConfigIdentifiers:
- allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
- controller_config: {get_attr: [Controller, attributes, config_identifier]}
-
- ComputeNodesPostDeployment:
- type: OS::TripleO::ComputePostDeployment
- depends_on: [ComputeAllNodesDeployment, ComputeCephDeployment]
- properties:
- servers: {get_attr: [Compute, attributes, nova_server_resource]}
- NodeConfigIdentifiers:
- allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
- compute_config: {get_attr: [Compute, attributes, config_identifier]}
-
- ObjectStorageNodesPostDeployment:
- type: OS::TripleO::ObjectStoragePostDeployment
- depends_on: [ObjectStorageSwiftDeployment, ObjectStorageAllNodesDeployment]
- properties:
- servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
- NodeConfigIdentifiers:
- allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
- objectstorage_config: {get_attr: [ObjectStorage, attributes, config_identifier]}
-
- BlockStorageNodesPostDeployment:
- type: OS::TripleO::BlockStoragePostDeployment
- depends_on: [ControllerNodesPostDeployment, BlockStorageAllNodesDeployment]
- properties:
- servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
- NodeConfigIdentifiers:
- allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
- blockstorage_config: {get_attr: [BlockStorage, attributes, config_identifier]}
-
- CephStorageNodesPostDeployment:
- type: OS::TripleO::CephStoragePostDeployment
- depends_on: [ControllerNodesPostDeployment, CephStorageCephDeployment, CephStorageAllNodesDeployment]
- properties:
- servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
- NodeConfigIdentifiers:
- allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
- cephstorage_config: {get_attr: [CephStorage, attributes, config_identifier]}
-
-outputs:
- KeystoneURL:
- description: URL for the Overcloud Keystone service
- value:
- list_join:
- - ''
- - - http://
- - {get_attr: [PublicVirtualIP, ip_address]}
- - :5000/v2.0/
- KeystoneAdminVip:
- description: Keystone Admin VIP endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
- PublicVip:
- description: Controller VIP for public API endpoints
- value: {get_attr: [PublicVirtualIP, ip_address]}
- CeilometerInternalVip:
- description: VIP for Ceilometer API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
- CinderInternalVip:
- description: VIP for Cinder API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
- GlanceInternalVip:
- description: VIP for Glance API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
- HeatInternalVip:
- description: VIP for Heat API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
- KeystoneInternalVip:
- description: VIP for Keystone API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
- NeutronInternalVip:
- description: VIP for Neutron API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
- NovaInternalVip:
- description: VIP for Nova API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
- SwiftInternalVip:
- description: VIP for Swift Proxy internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
+overcloud.yaml \ No newline at end of file
diff --git a/overcloud.yaml b/overcloud.yaml
new file mode 100644
index 00000000..738dcfb6
--- /dev/null
+++ b/overcloud.yaml
@@ -0,0 +1,1538 @@
+heat_template_version: 2015-04-30
+
+description: >
+ Deploy an OpenStack environment, consisting of several node types (roles),
+ Controller, Compute, BlockStorage, SwiftStorage and CephStorage. The Storage
+ roles enable independent scaling of the storage components, but the minimal
+ deployment is one Controller and one Compute node.
+
+
+# TODO(shadower): we should probably use the parameter groups to put
+# some order in here.
+parameters:
+
+ # Common parameters (not specific to a role)
+ AdminPassword:
+ description: The password for the keystone admin account, used for monitoring, querying neutron etc.
+ type: string
+ hidden: true
+ CeilometerBackend:
+ default: 'mongodb'
+ description: The ceilometer backend type.
+ type: string
+ CeilometerMeteringSecret:
+ description: Secret shared by the ceilometer services.
+ type: string
+ hidden: true
+ CeilometerPassword:
+ description: The password for the ceilometer service account.
+ type: string
+ hidden: true
+ # This has to be a UUID, so for now we generate it outside the template
+ CephClusterFSID:
+ default: ''
+ type: string
+ description: The Ceph cluster FSID. Must be a UUID.
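+ # A hedged example of generating these values outside the template before
+ # deploying (uuidgen is an assumption; ceph-authtool --gen-print-key is
+ # taken from the key descriptions below):
+ #   CephClusterFSID: $(uuidgen)
+ #   CephMonKey: $(ceph-authtool --gen-print-key)
+ #   CephAdminKey: $(ceph-authtool --gen-print-key)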
+ CephMonKey:
+ default: ''
+ description: The Ceph monitors key. Can be created with ceph-authtool --gen-print-key.
+ type: string
+ hidden: true
+ CephAdminKey:
+ default: ''
+ description: The Ceph admin client key. Can be created with ceph-authtool --gen-print-key.
+ type: string
+ hidden: true
+ CinderEnableNfsBackend:
+ default: false
+ description: Whether or not to enable the NFS backend for Cinder
+ type: boolean
+ CephClientKey:
+ default: ''
+ description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring.
+ type: string
+ hidden: true
+ CephExternalMonHost:
+ default: ''
+ type: string
+ description: List of externally managed Ceph Mon Host IPs. Only used for external Ceph deployments.
+ CinderEnableIscsiBackend:
+ default: true
+ description: Whether or not to enable the iSCSI backend for Cinder
+ type: boolean
+ CinderEnableRbdBackend:
+ default: false
+ description: Whether or not to enable the RBD backend for Cinder
+ type: boolean
+ CloudName:
+ default: overcloud
+ description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
+ type: string
+ ControlFixedIPs:
+ default: []
+ description: Control the IP allocation for the ControlVirtualIP port. Should be used for arbitrary IPs.
+ type: json
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ HAProxySyslogAddress:
+ default: /dev/log
+ description: Syslog address where HAProxy will send its logs
+ type: string
+ HorizonAllowedHosts:
+ default: '*'
+ description: A list of IPs/hostnames allowed to connect to Horizon
+ type: comma_delimited_list
+ ImageUpdatePolicy:
+ default: 'REBUILD_PRESERVE_EPHEMERAL'
+ description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
+ type: string
+ KeyName:
+ default: default
+ description: Name of an existing Nova key pair to enable SSH access to the instances
+ type: string
+ constraints:
+ - custom_constraint: nova.keypair
+ NeutronExternalNetworkBridge:
+ description: Name of bridge used for external network traffic.
+ type: string
+ default: 'br-ex'
+ NeutronBridgeMappings:
+ description: >
+ The OVS logical->physical bridge mappings to use. See the Neutron
+ documentation for details. Defaults to mapping br-ex - the external
+ bridge on hosts - to a physical name 'datacentre' which can be used
+ to create provider networks (and we use this for the default floating
+ network) - if changing this either use different post-install network
+ scripts or be sure to keep 'datacentre' as a mapping network name.
+ type: comma_delimited_list
+ default: "datacentre:br-ex"
+ NeutronControlPlaneID:
+ default: 'ctlplane'
+ type: string
+ description: Neutron ID or name for ctlplane network.
+ NeutronEnableIsolatedMetadata:
+ default: 'False'
+ description: If True, the DHCP agent provides a metadata route to the VMs.
+ type: string
+ NeutronEnableTunnelling:
+ type: string
+ default: "True"
+ NeutronEnableL2Pop:
+ type: string
+ description: >
+ Enable/disable the L2 population feature in the Neutron agents.
+ default: "False"
+ NeutronFlatNetworks:
+ type: comma_delimited_list
+ default: 'datacentre'
+ description: >
+ If set, flat networks to configure in neutron plugins. Defaults to
+ 'datacentre' to permit external network creation.
+ NeutronNetworkType:
+ default: 'vxlan'
+ description: The tenant network type for Neutron.
+ type: comma_delimited_list
+ NeutronPassword:
+ description: The password for the neutron service account, used by neutron agents.
+ type: string
+ hidden: true
+ NeutronPublicInterface:
+ default: nic1
+ description: What interface to bridge onto br-ex for network nodes.
+ type: string
+ NeutronPublicInterfaceTag:
+ default: ''
+ description: >
+ VLAN tag for creating a public VLAN. The tag will be used to
+ create an access port on the exterior bridge for each control plane node,
+ and that port will be given the IP address returned by neutron from the
+ public network. Set CONTROLEXTRA=overcloud-vlan-port.yaml when compiling
+ overcloud.yaml to include the deployment of VLAN ports to the control
+ plane.
+ type: string
+ NeutronComputeAgentMode:
+ default: 'dvr'
+ description: Agent mode for the neutron-l3-agent on the compute hosts
+ type: string
+ NeutronAgentMode:
+ default: 'dvr_snat'
+ description: Agent mode for the neutron-l3-agent on the controller hosts
+ type: string
+ NeutronDVR:
+ default: 'False'
+ description: Whether to configure Neutron Distributed Virtual Routers
+ type: string
+ NeutronMetadataProxySharedSecret:
+ description: Shared secret to prevent spoofing
+ type: string
+ hidden: true
+ NeutronTunnelTypes:
+ default: 'vxlan'
+ description: |
+ The tunnel types for the Neutron tenant network.
+ type: comma_delimited_list
+ NeutronTunnelIdRanges:
+ description: |
+ Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
+ of GRE tunnel IDs that are available for tenant network allocation
+ default: ["1:1000", ]
+ type: comma_delimited_list
+ NeutronVniRanges:
+ description: |
+ Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges
+ of VXLAN VNI IDs that are available for tenant network allocation
+ default: ["1:1000", ]
+ type: comma_delimited_list
+ NeutronCorePlugin:
+ default: 'ml2'
+ description: |
+ The core plugin for Neutron. The value should be the entrypoint to be loaded
+ from neutron.core_plugins namespace.
+ type: string
+ NeutronServicePlugins:
+ default: "router"
+ description: |
+ Comma-separated list of service plugin entrypoints to be loaded from the
+ neutron.service_plugins namespace.
+ type: comma_delimited_list
+ NeutronTypeDrivers:
+ default: "vxlan,vlan,flat,gre"
+ description: |
+ Comma-separated list of network type driver entrypoints to be loaded.
+ type: comma_delimited_list
+ NeutronMechanismDrivers:
+ default: 'openvswitch'
+ description: |
+ The mechanism drivers for the Neutron tenant network.
+ type: comma_delimited_list
+ NeutronAllowL3AgentFailover:
+ default: 'False'
+ description: Allow automatic l3-agent failover
+ type: string
+ NeutronL3HA:
+ default: 'False'
+ description: Whether to enable l3-agent HA
+ type: string
+ NeutronDhcpAgentsPerNetwork:
+ type: number
+ default: 1
+ description: The number of neutron dhcp agents to schedule per network
+ NovaPassword:
+ description: The password for the nova service account, used by nova-api.
+ type: string
+ hidden: true
+ NtpServer:
+ default: ''
+ description: Comma-separated list of ntp servers
+ type: comma_delimited_list
+ MongoDbNoJournal:
+ default: false
+ description: Whether MongoDB journaling should be disabled
+ type: boolean
+ PublicVirtualFixedIPs:
+ default: []
+ description: >
+ Control the IP allocation for the PublicVirtualInterface port. E.g.
+ [{'ip_address':'1.2.3.4'}]
+ type: json
+ RabbitCookieSalt:
+ type: string
+ default: unset
+ description: Salt for the rabbit cookie; change this to force the randomly generated rabbit cookie to change.
+ # FIXME: 'guest' is provisioned in RabbitMQ by default, we should create a user if these are changed
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitPassword:
+ default: guest
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set the rabbit subscriber port; change this if using SSL
+ type: number
+ # We need to set this as a string because 'unlimited' is a valid setting
+ RabbitFDLimit:
+ default: 16384
+ description: Configures RabbitMQ FD limit
+ type: string
+ SnmpdReadonlyUserName:
+ default: ro_snmp_user
+ description: The user name for SNMPd with readonly rights running on all Overcloud nodes
+ type: string
+ SnmpdReadonlyUserPassword:
+ description: The user password for SNMPd with readonly rights running on all Overcloud nodes
+ type: string
+ hidden: true
+ CloudDomain:
+ default: 'localdomain'
+ type: string
+ description: >
+ The DNS domain used for the hosts. This should match the dhcp_domain
+ configured in the Undercloud neutron. Defaults to localdomain.
+ ServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API.
+ type: json
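+ # A hedged example of passing arbitrary metadata via parameter_defaults
+ # (the keys and values shown are illustrative only):
+ #   parameter_defaults:
+ #     ServerMetadata: {'owner': 'ci-team', 'purpose': 'testing'}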
+
+ # Controller-specific params
+ AdminToken:
+ description: The keystone auth secret.
+ type: string
+ hidden: true
+ CinderLVMLoopDeviceSize:
+ default: 5000
+ description: The size of the loopback file used by the cinder LVM driver.
+ type: number
+ CinderNfsMountOptions:
+ default: ''
+ description: >
+ Mount options for NFS mounts used by Cinder NFS backend. Effective
+ when CinderEnableNfsBackend is true.
+ type: string
+ CinderNfsServers:
+ default: ''
+ description: >
+ NFS servers used by Cinder NFS backend. Effective when
+ CinderEnableNfsBackend is true.
+ type: comma_delimited_list
+ CinderPassword:
+ description: The password for the cinder service account, used by cinder-api.
+ type: string
+ hidden: true
+ CinderISCSIHelper:
+ default: tgtadm
+ description: The iSCSI helper to use with cinder.
+ type: string
+ ControllerCount:
+ type: number
+ default: 1
+ constraints:
+ - range: {min: 1}
+ controllerExtraConfig:
+ default: {}
+ description: |
+ Controller specific configuration to inject into the cluster. Same
+ structure as ExtraConfig.
+ type: json
+ controllerImage:
+ type: string
+ default: overcloud-full
+ constraints:
+ - custom_constraint: glance.image
+ OvercloudControlFlavor:
+ description: Flavor for control nodes to request when deploying.
+ default: baremetal
+ type: string
+ constraints:
+ - custom_constraint: nova.flavor
+ ControlVirtualInterface:
+ default: 'br-ex'
+ description: Interface where virtual ip will be assigned.
+ type: string
+ EnableFencing:
+ default: false
+ description: Whether to enable fencing in Pacemaker or not.
+ type: boolean
+ EnableGalera:
+ default: true
+ description: Whether to use Galera instead of regular MariaDB.
+ type: boolean
+ ControllerEnableCephStorage:
+ default: false
+ description: Whether to deploy Ceph Storage (OSD) on the Controller
+ type: boolean
+ ControllerEnableSwiftStorage:
+ default: true
+ description: Whether to enable Swift Storage on the Controller
+ type: boolean
+ ControllerSchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
+ ExtraConfig:
+ default: {}
+ description: |
+ Additional configuration to inject into the cluster. The format required
+ may be implementation specific, e.g. puppet hieradata. Any role-specific
+ ExtraConfig, e.g. controllerExtraConfig, takes precedence over ExtraConfig.
+ type: json
+ FencingConfig:
+ default: {}
+ description: |
+ Pacemaker fencing configuration. The JSON should have
+ the following structure:
+ {
+ "devices": [
+ {
+ "agent": "AGENT_NAME",
+ "host_mac": "HOST_MAC_ADDRESS",
+ "params": {"PARAM_NAME": "PARAM_VALUE"}
+ }
+ ]
+ }
+ For instance:
+ {
+ "devices": [
+ {
+ "agent": "fence_xvm",
+ "host_mac": "52:54:00:aa:bb:cc",
+ "params": {
+ "multicast_address": "225.0.0.12",
+ "port": "baremetal_0",
+ "manage_fw": true,
+ "manage_key_file": true,
+ "key_file": "/etc/fence_xvm.key",
+ "key_file_password": "abcdef"
+ }
+ }
+ ]
+ }
+ type: json
+ GlanceLogFile:
+ description: The filepath of the file to use for logging messages from Glance.
+ type: string
+ default: ''
+ GlanceNotifierStrategy:
+ description: Strategy to use for Glance notification queue
+ type: string
+ default: noop
+ GlancePassword:
+ description: The password for the glance service account, used by the glance services.
+ type: string
+ hidden: true
+ GlanceBackend:
+ default: swift
+ description: The short name of the Glance backend to use. Should be one
+ of swift, rbd, or file.
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd']
+ HeatPassword:
+ description: The password for the Heat service account, used by the Heat services.
+ type: string
+ hidden: true
+ HeatStackDomainAdminPassword:
+ description: Password for heat_domain_admin user.
+ type: string
+ hidden: true
+ InstanceNameTemplate:
+ default: 'instance-%08x'
+ description: Template string to be used to generate instance names
+ type: string
+ KeystoneCACertificate:
+ default: ''
+ description: Keystone self-signed certificate authority certificate.
+ type: string
+ KeystoneSigningCertificate:
+ default: ''
+ description: Keystone certificate for verifying token validity.
+ type: string
+ KeystoneSigningKey:
+ default: ''
+ description: Keystone key for signing tokens.
+ type: string
+ hidden: true
+ KeystoneSSLCertificate:
+ default: ''
+ description: Keystone certificate for verifying token validity.
+ type: string
+ KeystoneSSLCertificateKey:
+ default: ''
+ description: Keystone key for signing tokens.
+ type: string
+ hidden: true
+ KeystoneNotificationDriver:
+ description: Comma-separated list of Oslo notification drivers used by Keystone
+ default: ['messaging']
+ type: comma_delimited_list
+ KeystoneNotificationFormat:
+ description: The Keystone notification format
+ default: 'basic'
+ type: string
+ constraints:
+ - allowed_values: [ 'basic', 'cadf' ]
+ ManageFirewall:
+ default: false
+ description: Whether to manage IPtables rules.
+ type: boolean
+ PurgeFirewallRules:
+ default: false
+ description: Whether iptables rules should be purged before setting up the new ones.
+ type: boolean
+ MysqlInnodbBufferPoolSize:
+ description: >
+ Specifies the size of the buffer pool in megabytes. Setting to
+ zero should be interpreted as "no value" and will defer to the
+ lower level default.
+ type: number
+ default: 0
+ MysqlMaxConnections:
+ description: Configures MySQL max_connections config setting
+ type: number
+ default: 4096
+ NeutronDnsmasqOptions:
+ default: 'dhcp-option-force=26,1400'
+ description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the tunnel overhead.
+ type: string
+ NeutronPublicInterfaceDefaultRoute:
+ default: ''
+ description: A custom default route for the NeutronPublicInterface.
+ type: string
+ NeutronPublicInterfaceIP:
+ default: ''
+ description: A custom IP address to put onto the NeutronPublicInterface.
+ type: string
+ NeutronPublicInterfaceRawDevice:
+ default: ''
+ description: If set, the public interface is a vlan with this device as the raw device.
+ type: string
+ PublicVirtualInterface:
+ default: 'br-ex'
+ description: >
+ Specifies the interface where the public-facing virtual ip will be assigned.
+ This should be int_public when a VLAN is being used.
+ type: string
+ SwiftHashSuffix:
+ description: A random string to be used as a salt when hashing to determine mappings in the ring.
+ type: string
+ hidden: true
+ SwiftPassword:
+ description: The password for the swift service account, used by the swift proxy services.
+ type: string
+ hidden: true
+ SwiftMountCheck:
+ default: 'false'
+ description: Value of mount_check in the Swift account-, container- and object-server.conf files
+ type: boolean
+ SwiftMinPartHours:
+ type: number
+ default: 1
+ description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance.
+ SwiftPartPower:
+ default: 10
+ description: Partition Power to use when building Swift rings
+ type: number
+ SwiftReplicas:
+ type: number
+ default: 3
+ description: How many replicas to use in the swift rings.
+
+# Compute-specific params
+ CeilometerComputeAgent:
+ description: Indicates whether the Compute agent is present and expects nova-compute to be configured accordingly
+ type: string
+ default: ''
+ constraints:
+ - allowed_values: ['', Present]
+ ComputeCount:
+ type: number
+ default: 1
+ HypervisorNeutronPhysicalBridge:
+ default: 'br-ex'
+ description: >
+ An OVS bridge to create on each hypervisor. This defaults to br-ex, the
+ same as on the control plane nodes, as we have a uniform configuration of
+ the openvswitch agent. It typically should not need to be changed.
+ type: string
+ HypervisorNeutronPublicInterface:
+ default: nic1
+ description: What interface to add to the HypervisorNeutronPhysicalBridge.
+ type: string
+ NeutronNetworkVLANRanges:
+ default: 'datacentre:1:1000'
+ description: >
+ The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
+ Neutron documentation for permitted values. Defaults to permitting any
+ VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
+ type: comma_delimited_list
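+ # A hedged example of restricting the usable VLANs on the default physical
+ # network (the 100:200 range is illustrative only):
+ #   parameter_defaults:
+ #     NeutronNetworkVLANRanges: "datacentre:100:200"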
+ NovaComputeDriver:
+ type: string
+ default: libvirt.LibvirtDriver
+ NovaComputeExtraConfig:
+ default: {}
+ description: |
+ NovaCompute specific configuration to inject into the cluster. Same
+ structure as ExtraConfig.
+ type: json
+ NovaComputeLibvirtType:
+ default: kvm
+ type: string
+ NovaComputeLibvirtVifDriver:
+ default: ''
+ description: Libvirt VIF driver configuration for the network
+ type: string
+ NovaComputeSchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
+ NovaEnableRbdBackend:
+ default: false
+ description: Whether or not to enable the RBD backend for Nova
+ type: boolean
+ NovaImage:
+ type: string
+ default: overcloud-full
+ constraints:
+ - custom_constraint: glance.image
+ NovaOVSBridge:
+ default: 'br-int'
+ description: Name of integration bridge used by Open vSwitch
+ type: string
+ NovaSecurityGroupAPI:
+ default: 'neutron'
+ description: The full class name of the security API class
+ type: string
+ OvercloudComputeFlavor:
+ description: Flavor for compute nodes to request when deploying.
+ default: baremetal
+ type: string
+ constraints:
+ - custom_constraint: nova.flavor
+ ServiceNetMap:
+ default:
+ NeutronTenantNetwork: tenant
+ CeilometerApiNetwork: internal_api
+ MongoDbNetwork: internal_api
+ CinderApiNetwork: internal_api
+ CinderIscsiNetwork: storage
+ GlanceApiNetwork: storage
+ GlanceRegistryNetwork: internal_api
+ KeystoneAdminApiNetwork: ctlplane # allows undercloud to config endpoints
+ KeystonePublicApiNetwork: internal_api
+ NeutronApiNetwork: internal_api
+ HeatApiNetwork: internal_api
+ NovaApiNetwork: internal_api
+ NovaMetadataNetwork: internal_api
+ NovaVncProxyNetwork: internal_api
+ SwiftMgmtNetwork: storage_mgmt
+ SwiftProxyNetwork: storage
+ HorizonNetwork: internal_api
+ MemcachedNetwork: internal_api
+ RabbitMqNetwork: internal_api
+ RedisNetwork: internal_api
+ MysqlNetwork: internal_api
+ CephClusterNetwork: storage_mgmt
+ CephPublicNetwork: storage
+ ControllerHostnameResolveNetwork: internal_api
+ ComputeHostnameResolveNetwork: internal_api
+ BlockStorageHostnameResolveNetwork: internal_api
+ ObjectStorageHostnameResolveNetwork: internal_api
+ CephStorageHostnameResolveNetwork: storage
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
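+ # A hedged override sketch: an environment file can move a service to a
+ # different network via parameter_defaults, e.g. MysqlNetwork: storage_mgmt.
+ # Keep in mind that overriding this json parameter replaces the whole map
+ # (there is no merging in this template version), so the remaining keys
+ # from the default above should be repeated alongside the changed one.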
+
+# Block storage specific parameters
+ BlockStorageCount:
+ type: number
+ default: 0
+ BlockStorageImage:
+ default: overcloud-full
+ type: string
+ OvercloudBlockStorageFlavor:
+ description: Flavor for block storage nodes to request when deploying.
+ default: baremetal
+ type: string
+ constraints:
+ - custom_constraint: nova.flavor
+ BlockStorageExtraConfig:
+ default: {}
+ description: |
+ BlockStorage specific configuration to inject into the cluster. Same
+ structure as ExtraConfig.
+ type: json
+ BlockStorageSchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
+
+
+# Object storage specific parameters
+ ObjectStorageCount:
+ type: number
+ default: 0
+ OvercloudSwiftStorageFlavor:
+ description: Flavor for Swift storage nodes to request when deploying.
+ default: baremetal
+ type: string
+ constraints:
+ - custom_constraint: nova.flavor
+ SwiftStorageImage:
+ default: overcloud-full
+ type: string
+ ObjectStorageExtraConfig:
+ default: {}
+ description: |
+ ObjectStorage specific configuration to inject into the cluster. Same
+ structure as ExtraConfig.
+ type: json
+ ObjectStorageSchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
+
+# Ceph storage specific parameters
+ CephStorageCount:
+ type: number
+ default: 0
+ CephStorageImage:
+ default: overcloud-full
+ type: string
+ OvercloudCephStorageFlavor:
+ default: baremetal
+ description: Flavor for Ceph storage nodes to request when deploying.
+ type: string
+ constraints:
+ - custom_constraint: nova.flavor
+ CephStorageExtraConfig:
+ default: {}
+ description: |
+ CephStorage specific configuration to inject into the cluster. Same
+ structure as ExtraConfig.
+ type: json
+ CephStorageSchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
+
+
+ # Hostname format for each role
+ # Note %index% is translated into the index of the node, e.g. 0/1/2 etc.
+ # and %stackname% is replaced with OS::stack_name in the template below.
+ # If you want to use the heat generated names, pass '' (empty string).
+ ControllerHostnameFormat:
+ type: string
+ description: Format for Controller node hostnames
+ default: '%stackname%-controller-%index%'
+ ComputeHostnameFormat:
+ type: string
+ description: Format for Compute node hostnames
+ default: '%stackname%-novacompute-%index%'
+ BlockStorageHostnameFormat:
+ type: string
+ description: Format for BlockStorage node hostnames
+ default: '%stackname%-blockstorage-%index%'
+ ObjectStorageHostnameFormat:
+ type: string
+ description: Format for SwiftStorage node hostnames
+ default: '%stackname%-objectstorage-%index%'
+ CephStorageHostnameFormat:
+ type: string
+ description: Format for CephStorage node hostnames
+ default: '%stackname%-cephstorage-%index%'
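+ # For example, with the default formats and a stack named 'overcloud', the
+ # first two controllers become overcloud-controller-0 and
+ # overcloud-controller-1; %stackname% is substituted in the str_replace
+ # calls in the resources section and %index% by the ResourceGroup.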
+
+ # Identifiers to trigger tasks on nodes
+ UpdateIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting to a previously unused value during stack-update will trigger
+ package update on all nodes
+ DeployIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting this to a unique value will re-run any deployment tasks which
+ perform configuration on a Heat stack-update.
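+ # A usage sketch (values are illustrative only): passing a previously unused
+ # value on stack-update triggers the corresponding tasks, e.g.
+ #   parameter_defaults:
+ #     UpdateIdentifier: '2015-10-01T12:00:00'
+ #     DeployIdentifier: '2015-10-01T12:00:00'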
+
+ # If you want to remove a specific node from a resource group, you can pass
+ # the node name or id as a <Group>RemovalPolicies parameter, for example:
+ # ComputeRemovalPolicies: [{'resource_list': ['0']}]
+ ControllerRemovalPolicies:
+ default: []
+ type: json
+ description: >
+ List of resources to be removed from ControllerResourceGroup when
+ doing an update which requires removal of specific resources.
+ ComputeRemovalPolicies:
+ default: []
+ type: json
+ description: >
+ List of resources to be removed from ComputeResourceGroup when
+ doing an update which requires removal of specific resources.
+ BlockStorageRemovalPolicies:
+ default: []
+ type: json
+ description: >
+ List of resources to be removed from BlockStorageResourceGroup when
+ doing an update which requires removal of specific resources.
+ ObjectStorageRemovalPolicies:
+ default: []
+ type: json
+ description: >
+ List of resources to be removed from ObjectStorageResourceGroup when
+ doing an update which requires removal of specific resources.
+ CephStorageRemovalPolicies:
+ default: []
+ type: json
+ description: >
+ List of resources to be removed from CephStorageResourceGroup when
+ doing an update which requires removal of specific resources.
+
+
+resources:
+
+ HeatAuthEncryptionKey:
+ type: OS::Heat::RandomString
+
+ PcsdPassword:
+ type: OS::Heat::RandomString
+ properties:
+ length: 16
+
+ HorizonSecret:
+ type: OS::Heat::RandomString
+ properties:
+ length: 10
+
+ EndpointMap:
+ type: OS::TripleO::EndpointMap
+ properties:
+ CloudName: {get_param: CloudName}
+ CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
+ GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
+ GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
+ HeatApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
+ KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
+ KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
+ MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
+ NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
+ NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
+ SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
+ PublicVirtualIP: {get_attr: [VipMap, net_ip_map, external]}
+
+ Controller:
+ type: OS::Heat::ResourceGroup
+ depends_on: Networks
+ properties:
+ count: {get_param: ControllerCount}
+ removal_policies: {get_param: ControllerRemovalPolicies}
+ resource_def:
+ type: OS::TripleO::Controller
+ properties:
+ AdminPassword: {get_param: AdminPassword}
+ AdminToken: {get_param: AdminToken}
+ CeilometerBackend: {get_param: CeilometerBackend}
+ CeilometerMeteringSecret: {get_param: CeilometerMeteringSecret}
+ CeilometerPassword: {get_param: CeilometerPassword}
+ CinderLVMLoopDeviceSize: {get_param: CinderLVMLoopDeviceSize}
+ CinderNfsMountOptions: {get_param: CinderNfsMountOptions}
+ CinderNfsServers: {get_param: CinderNfsServers}
+ CinderPassword: {get_param: CinderPassword}
+ CinderISCSIHelper: {get_param: CinderISCSIHelper}
+ CinderEnableNfsBackend: {get_param: CinderEnableNfsBackend}
+ CinderEnableIscsiBackend: {get_param: CinderEnableIscsiBackend}
+ CinderEnableRbdBackend: {get_param: CinderEnableRbdBackend}
+ CloudName: {get_param: CloudName}
+ CloudDomain: {get_param: CloudDomain}
+ ControlVirtualInterface: {get_param: ControlVirtualInterface}
+ ControllerExtraConfig: {get_param: controllerExtraConfig}
+ Debug: {get_param: Debug}
+ EnableFencing: {get_param: EnableFencing}
+ ManageFirewall: {get_param: ManageFirewall}
+ PurgeFirewallRules: {get_param: PurgeFirewallRules}
+ EnableGalera: {get_param: EnableGalera}
+ EnableCephStorage: {get_param: ControllerEnableCephStorage}
+ EnableSwiftStorage: {get_param: ControllerEnableSwiftStorage}
+ ExtraConfig: {get_param: ExtraConfig}
+ FencingConfig: {get_param: FencingConfig}
+ Flavor: {get_param: OvercloudControlFlavor}
+ GlancePassword: {get_param: GlancePassword}
+ GlanceBackend: {get_param: GlanceBackend}
+ GlanceNotifierStrategy: {get_param: GlanceNotifierStrategy}
+ GlanceLogFile: {get_param: GlanceLogFile}
+ HAProxySyslogAddress: {get_param: HAProxySyslogAddress}
+ HeatPassword: {get_param: HeatPassword}
+ HeatStackDomainAdminPassword: {get_param: HeatStackDomainAdminPassword}
+ HeatAuthEncryptionKey: {get_resource: HeatAuthEncryptionKey}
+ HorizonAllowedHosts: {get_param: HorizonAllowedHosts}
+ HorizonSecret: {get_resource: HorizonSecret}
+ Image: {get_param: controllerImage}
+ ImageUpdatePolicy: {get_param: ImageUpdatePolicy}
+ InstanceNameTemplate: {get_param: InstanceNameTemplate}
+ KeyName: {get_param: KeyName}
+ KeystoneCACertificate: {get_param: KeystoneCACertificate}
+ KeystoneSigningCertificate: {get_param: KeystoneSigningCertificate}
+ KeystoneSigningKey: {get_param: KeystoneSigningKey}
+ KeystoneSSLCertificate: {get_param: KeystoneSSLCertificate}
+ KeystoneSSLCertificateKey: {get_param: KeystoneSSLCertificateKey}
+ KeystoneNotificationDriver: {get_param: KeystoneNotificationDriver}
+ KeystoneNotificationFormat: {get_param: KeystoneNotificationFormat}
+ MysqlClusterUniquePart: {get_attr: [MysqlClusterUniquePart, value]}
+ MysqlInnodbBufferPoolSize: {get_param: MysqlInnodbBufferPoolSize}
+ MysqlMaxConnections: {get_param: MysqlMaxConnections}
+ MysqlRootPassword: {get_attr: [MysqlRootPassword, value]}
+ NeutronPublicInterfaceIP: {get_param: NeutronPublicInterfaceIP}
+ NeutronFlatNetworks: {get_param: NeutronFlatNetworks}
+ NeutronBridgeMappings: {get_param: NeutronBridgeMappings}
+ NeutronExternalNetworkBridge: {get_param: NeutronExternalNetworkBridge}
+ NeutronEnableIsolatedMetadata: {get_param: NeutronEnableIsolatedMetadata}
+ NeutronEnableTunnelling: {get_param: NeutronEnableTunnelling}
+ NeutronEnableL2Pop: {get_param: NeutronEnableL2Pop}
+ NeutronNetworkVLANRanges: {get_param: NeutronNetworkVLANRanges}
+ NeutronPublicInterface: {get_param: NeutronPublicInterface}
+ NeutronPublicInterfaceDefaultRoute: {get_param: NeutronPublicInterfaceDefaultRoute}
+ NeutronPublicInterfaceRawDevice: {get_param: NeutronPublicInterfaceRawDevice}
+ NeutronPassword: {get_param: NeutronPassword}
+ NeutronDnsmasqOptions: {get_param: NeutronDnsmasqOptions}
+ NeutronDVR: {get_param: NeutronDVR}
+ NeutronMetadataProxySharedSecret: {get_param: NeutronMetadataProxySharedSecret}
+ NeutronAgentMode: {get_param: NeutronAgentMode}
+ NeutronCorePlugin: {get_param: NeutronCorePlugin}
+ NeutronServicePlugins: {get_param: NeutronServicePlugins}
+ NeutronTypeDrivers: {get_param: NeutronTypeDrivers}
+ NeutronMechanismDrivers: {get_param: NeutronMechanismDrivers}
+ NeutronAllowL3AgentFailover: {get_param: NeutronAllowL3AgentFailover}
+ NeutronL3HA: {get_param: NeutronL3HA}
+ NeutronDhcpAgentsPerNetwork: {get_param: NeutronDhcpAgentsPerNetwork}
+ NeutronNetworkType: {get_param: NeutronNetworkType}
+ NeutronTunnelTypes: {get_param: NeutronTunnelTypes}
+ NovaPassword: {get_param: NovaPassword}
+ NtpServer: {get_param: NtpServer}
+ MongoDbNoJournal: {get_param: MongoDbNoJournal}
+ PcsdPassword: {get_resource: PcsdPassword}
+ PublicVirtualInterface: {get_param: PublicVirtualInterface}
+ RabbitPassword: {get_param: RabbitPassword}
+ RabbitUserName: {get_param: RabbitUserName}
+ RabbitCookie: {get_attr: [RabbitCookie, value]}
+ RabbitClientUseSSL: {get_param: RabbitClientUseSSL}
+ RabbitClientPort: {get_param: RabbitClientPort}
+ RabbitFDLimit: {get_param: RabbitFDLimit}
+ SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName}
+ SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword}
+ RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]}
+ SwiftHashSuffix: {get_param: SwiftHashSuffix}
+ SwiftMountCheck: {get_param: SwiftMountCheck}
+ SwiftMinPartHours: {get_param: SwiftMinPartHours}
+ SwiftPartPower: {get_param: SwiftPartPower}
+ SwiftPassword: {get_param: SwiftPassword}
+ SwiftReplicas: { get_param: SwiftReplicas}
+ VirtualIP: {get_attr: [VipMap, net_ip_map, ctlplane]} # Deprecated; use the per-service VIP settings instead.
+ PublicVirtualIP: {get_attr: [VipMap, net_ip_map, external]}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+ CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
+ HeatApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
+ GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
+ GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
+ NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
+ SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
+ MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
+ KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
+ KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
+ NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
+ UpdateIdentifier: {get_param: UpdateIdentifier}
+ Hostname:
+ str_replace:
+ template: {get_param: ControllerHostnameFormat}
+ params:
+ '%stackname%': {get_param: 'OS::stack_name'}
+ NodeIndex: '%index%'
+ ServerMetadata: {get_param: ServerMetadata}
+ SchedulerHints: {get_param: ControllerSchedulerHints}
+
+ Compute:
+ type: OS::Heat::ResourceGroup
+ depends_on: Networks
+ properties:
+ count: {get_param: ComputeCount}
+ removal_policies: {get_param: ComputeRemovalPolicies}
+ resource_def:
+ type: OS::TripleO::Compute
+ properties:
+ AdminPassword: {get_param: AdminPassword}
+ CeilometerComputeAgent: {get_param: CeilometerComputeAgent}
+ CeilometerMeteringSecret: {get_param: CeilometerMeteringSecret}
+ CeilometerPassword: {get_param: CeilometerPassword}
+ CinderEnableNfsBackend: {get_param: CinderEnableNfsBackend}
+ CinderEnableRbdBackend: {get_param: CinderEnableRbdBackend}
+ Debug: {get_param: Debug}
+ ExtraConfig: {get_param: ExtraConfig}
+ Flavor: {get_param: OvercloudComputeFlavor}
+ GlanceHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
+ Image: {get_param: NovaImage}
+ ImageUpdatePolicy: {get_param: ImageUpdatePolicy}
+ KeyName: {get_param: KeyName}
+ KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
+ KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
+ NeutronBridgeMappings: {get_param: NeutronBridgeMappings}
+ NeutronEnableTunnelling: {get_param: NeutronEnableTunnelling}
+ NeutronEnableL2Pop : {get_param: NeutronEnableL2Pop}
+ NeutronFlatNetworks: {get_param: NeutronFlatNetworks}
+ NeutronHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
+ NeutronNetworkType: {get_param: NeutronNetworkType}
+ NeutronTunnelTypes: {get_param: NeutronTunnelTypes}
+ NeutronNetworkVLANRanges: {get_param: NeutronNetworkVLANRanges}
+ NeutronPassword: {get_param: NeutronPassword}
+ NeutronPhysicalBridge: {get_param: HypervisorNeutronPhysicalBridge}
+ NeutronPublicInterface: {get_param: HypervisorNeutronPublicInterface}
+ NeutronDVR: {get_param: NeutronDVR}
+ NeutronMetadataProxySharedSecret: {get_param: NeutronMetadataProxySharedSecret}
+ NeutronAgentMode: {get_param: NeutronComputeAgentMode}
+ NeutronPublicInterfaceRawDevice: {get_param: NeutronPublicInterfaceRawDevice}
+ NeutronCorePlugin: {get_param: NeutronCorePlugin}
+ NeutronServicePlugins: {get_param: NeutronServicePlugins}
+ NeutronTypeDrivers: {get_param: NeutronTypeDrivers}
+ NeutronMechanismDrivers: {get_param: NeutronMechanismDrivers}
+ # L3 HA and failover are not relevant for Computes and should be removed
+ NeutronAllowL3AgentFailover: {get_param: NeutronAllowL3AgentFailover}
+ NeutronL3HA: {get_param: NeutronL3HA}
+ NovaApiHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
+ NovaComputeDriver: {get_param: NovaComputeDriver}
+ NovaComputeExtraConfig: {get_param: NovaComputeExtraConfig}
+ NovaComputeLibvirtType: {get_param: NovaComputeLibvirtType}
+ NovaComputeLibvirtVifDriver: {get_param: NovaComputeLibvirtVifDriver}
+ NovaEnableRbdBackend: {get_param: NovaEnableRbdBackend}
+ NovaPublicIP: {get_attr: [VipMap, net_ip_map, external]}
+ NovaPassword: {get_param: NovaPassword}
+ NovaOVSBridge: {get_param: NovaOVSBridge}
+ NovaSecurityGroupAPI: {get_param: NovaSecurityGroupAPI}
+ NtpServer: {get_param: NtpServer}
+ RabbitHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
+ RabbitPassword: {get_param: RabbitPassword}
+ RabbitUserName: {get_param: RabbitUserName}
+ RabbitClientUseSSL: {get_param: RabbitClientUseSSL}
+ RabbitClientPort: {get_param: RabbitClientPort}
+ SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName}
+ SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+ UpdateIdentifier: {get_param: UpdateIdentifier}
+ Hostname:
+ str_replace:
+ template: {get_param: ComputeHostnameFormat}
+ params:
+ '%stackname%': {get_param: 'OS::stack_name'}
+ CloudDomain: {get_param: CloudDomain}
+ ServerMetadata: {get_param: ServerMetadata}
+ SchedulerHints: {get_param: NovaComputeSchedulerHints}
+
+ BlockStorage:
+ type: OS::Heat::ResourceGroup
+ depends_on: Networks
+ properties:
+ count: {get_param: BlockStorageCount}
+ removal_policies: {get_param: BlockStorageRemovalPolicies}
+ resource_def:
+ type: OS::TripleO::BlockStorage
+ properties:
+ Debug: {get_param: Debug}
+ Image: {get_param: BlockStorageImage}
+ CinderISCSIHelper: {get_param: CinderISCSIHelper}
+ CinderLVMLoopDeviceSize: {get_param: CinderLVMLoopDeviceSize}
+ # Dedicated BlockStorage nodes are intended to use their local LVM storage
+ CinderEnableIscsiBackend: {get_param: CinderEnableIscsiBackend}
+ CinderPassword: {get_param: CinderPassword}
+ KeyName: {get_param: KeyName}
+ Flavor: {get_param: OvercloudBlockStorageFlavor}
+ VirtualIP: {get_attr: [VipMap, net_ip_map, ctlplane]}
+ GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
+ RabbitPassword: {get_param: RabbitPassword}
+ RabbitUserName: {get_param: RabbitUserName}
+ RabbitClientUseSSL: {get_param: RabbitClientUseSSL}
+ RabbitClientPort: {get_param: RabbitClientPort}
+ NtpServer: {get_param: NtpServer}
+ UpdateIdentifier: {get_param: UpdateIdentifier}
+ Hostname:
+ str_replace:
+ template: {get_param: BlockStorageHostnameFormat}
+ params:
+ '%stackname%': {get_param: 'OS::stack_name'}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+ MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
+ ExtraConfig: {get_param: ExtraConfig}
+ BlockStorageExtraConfig: {get_param: BlockStorageExtraConfig}
+ CloudDomain: {get_param: CloudDomain}
+ ServerMetadata: {get_param: ServerMetadata}
+ SchedulerHints: {get_param: BlockStorageSchedulerHints}
+
+ ObjectStorage:
+ type: OS::Heat::ResourceGroup
+ depends_on: Networks
+ properties:
+ count: {get_param: ObjectStorageCount}
+ removal_policies: {get_param: ObjectStorageRemovalPolicies}
+ resource_def:
+ type: OS::TripleO::ObjectStorage
+ properties:
+ KeyName: {get_param: KeyName}
+ Flavor: {get_param: OvercloudSwiftStorageFlavor}
+ HashSuffix: {get_param: SwiftHashSuffix}
+ MountCheck: {get_param: SwiftMountCheck}
+ MinPartHours: {get_param: SwiftMinPartHours}
+ PartPower: {get_param: SwiftPartPower}
+ Image: {get_param: SwiftStorageImage}
+ Replicas: { get_param: SwiftReplicas}
+ NtpServer: {get_param: NtpServer}
+ UpdateIdentifier: {get_param: UpdateIdentifier}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ Hostname:
+ str_replace:
+ template: {get_param: ObjectStorageHostnameFormat}
+ params:
+ '%stackname%': {get_param: 'OS::stack_name'}
+ ExtraConfig: {get_param: ExtraConfig}
+ ObjectStorageExtraConfig: {get_param: ObjectStorageExtraConfig}
+ CloudDomain: {get_param: CloudDomain}
+ ServerMetadata: {get_param: ServerMetadata}
+ SchedulerHints: {get_param: ObjectStorageSchedulerHints}
+
+ CephStorage:
+ type: OS::Heat::ResourceGroup
+ depends_on: Networks
+ properties:
+ count: {get_param: CephStorageCount}
+ removal_policies: {get_param: CephStorageRemovalPolicies}
+ resource_def:
+ type: OS::TripleO::CephStorage
+ properties:
+ Image: {get_param: CephStorageImage}
+ KeyName: {get_param: KeyName}
+ Flavor: {get_param: OvercloudCephStorageFlavor}
+ NtpServer: {get_param: NtpServer}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ UpdateIdentifier: {get_param: UpdateIdentifier}
+ Hostname:
+ str_replace:
+ template: {get_param: CephStorageHostnameFormat}
+ params:
+ '%stackname%': {get_param: 'OS::stack_name'}
+ ExtraConfig: {get_param: ExtraConfig}
+ CephStorageExtraConfig: {get_param: CephStorageExtraConfig}
+ CloudDomain: {get_param: CloudDomain}
+ ServerMetadata: {get_param: ServerMetadata}
+ SchedulerHints: {get_param: CephStorageSchedulerHints}
+
+ ControllerIpListMap:
+ type: OS::TripleO::Network::Ports::NetIpListMap
+ properties:
+ ControlPlaneIpList: {get_attr: [Controller, ip_address]}
+ ExternalIpList: {get_attr: [Controller, external_ip_address]}
+ InternalApiIpList: {get_attr: [Controller, internal_api_ip_address]}
+ StorageIpList: {get_attr: [Controller, storage_ip_address]}
+ StorageMgmtIpList: {get_attr: [Controller, storage_mgmt_ip_address]}
+ TenantIpList: {get_attr: [Controller, tenant_ip_address]}
+ ManagementIpList: {get_attr: [Controller, management_ip_address]}
+
+ allNodesConfig:
+ type: OS::TripleO::AllNodes::SoftwareConfig
+ properties:
+ compute_hosts: {get_attr: [Compute, hosts_entry]}
+ controller_hosts: {get_attr: [Controller, hosts_entry]}
+ controller_ips: {get_attr: [Controller, ip_address]}
+ block_storage_hosts: {get_attr: [BlockStorage, hosts_entry]}
+ object_storage_hosts: {get_attr: [ObjectStorage, hosts_entry]}
+ ceph_storage_hosts: {get_attr: [CephStorage, hosts_entry]}
+ controller_names: {get_attr: [Controller, hostname]}
+ rabbit_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
+ mongo_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, MongoDbNetwork]}]}
+ redis_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, RedisNetwork]}]}
+ memcache_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
+ mysql_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
+ horizon_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
+ heat_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
+ swift_proxy_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
+ ceilometer_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ nova_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
+ nova_metadata_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
+ glance_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
+ glance_registry_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
+ cinder_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
+ neutron_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
+ keystone_public_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
+ keystone_admin_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
+ DeployIdentifier: {get_param: DeployIdentifier}
+ UpdateIdentifier: {get_param: UpdateIdentifier}
+
+ MysqlRootPassword:
+ type: OS::Heat::RandomString
+ properties:
+ length: 10
+
+ MysqlClusterUniquePart:
+ type: OS::Heat::RandomString
+ properties:
+ length: 10
+
+ RabbitCookie:
+ type: OS::Heat::RandomString
+ properties:
+ length: 20
+ salt: {get_param: RabbitCookieSalt}
+
+ # creates the network architecture
+ Networks:
+ type: OS::TripleO::Network
+
+ ControlVirtualIP:
+ type: OS::Neutron::Port
+ depends_on: Networks
+ properties:
+ name: control_virtual_ip
+ network: {get_param: NeutronControlPlaneID}
+ fixed_ips: {get_param: ControlFixedIPs}
+ replacement_policy: AUTO
+
+ RedisVirtualIP:
+ depends_on: Networks
+ type: OS::TripleO::Network::Ports::RedisVipPort
+ properties:
+ ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
+ PortName: redis_virtual_ip
+ NetworkName: {get_param: [ServiceNetMap, RedisNetwork]}
+ ServiceName: redis
+
+ # The public VIP is on the External net, falls back to ctlplane
+ PublicVirtualIP:
+ depends_on: Networks
+ type: OS::TripleO::Network::Ports::ExternalVipPort
+ properties:
+ ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
+ PortName: public_virtual_ip
+ FixedIPs: {get_param: PublicVirtualFixedIPs}
+
+ InternalApiVirtualIP:
+ depends_on: Networks
+ type: OS::TripleO::Network::Ports::InternalApiVipPort
+ properties:
+ ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ PortName: internal_api_virtual_ip
+
+ StorageVirtualIP:
+ depends_on: Networks
+ type: OS::TripleO::Network::Ports::StorageVipPort
+ properties:
+ ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ PortName: storage_virtual_ip
+
+ StorageMgmtVirtualIP:
+ depends_on: Networks
+ type: OS::TripleO::Network::Ports::StorageMgmtVipPort
+ properties:
+ ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ PortName: storage_management_virtual_ip
+
+ VipMap:
+ type: OS::TripleO::Network::Ports::NetVipMap
+ properties:
+ ControlPlaneIp: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ ExternalIp: {get_attr: [PublicVirtualIP, ip_address]}
+ InternalApiIp: {get_attr: [InternalApiVirtualIP, ip_address]}
+ StorageIp: {get_attr: [StorageVirtualIP, ip_address]}
+ StorageMgmtIp: {get_attr: [StorageMgmtVirtualIP, ip_address]}
+ # No tenant or management VIP required
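+ # For reference (illustrative addresses): the resulting net_ip_map is a
+ # mapping such as {ctlplane: 192.0.2.10, external: 10.0.0.4,
+ # internal_api: 172.16.2.4, storage: 172.16.1.4, storage_mgmt: 172.16.3.4};
+ # when a network is not isolated the corresponding VIP port typically falls
+ # back to the ctlplane address.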
+
+ VipConfig:
+ type: OS::TripleO::VipConfig
+
+ VipDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_resource: VipConfig}
+ servers: {get_attr: [Controller, attributes, nova_server_resource]}
+ input_values:
+ # service VIP mappings
+ keystone_admin_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
+ keystone_public_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
+ neutron_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
+ cinder_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
+ glance_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
+ glance_registry_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
+ swift_proxy_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
+ nova_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
+ nova_metadata_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
+ ceilometer_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ heat_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
+ horizon_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
+ redis_vip: {get_attr: [RedisVirtualIP, ip_address]}
+ mysql_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
+ rabbit_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
+ # direct configuration of Virtual IPs for each network
+ control_virtual_ip: {get_attr: [VipMap, net_ip_map, ctlplane]}
+ public_virtual_ip: {get_attr: [VipMap, net_ip_map, external]}
+ internal_api_virtual_ip: {get_attr: [VipMap, net_ip_map, internal_api]}
+ storage_virtual_ip: {get_attr: [VipMap, net_ip_map, storage]}
+ storage_mgmt_virtual_ip: {get_attr: [VipMap, net_ip_map, storage_mgmt]}
+
+ ControllerBootstrapNodeConfig:
+ type: OS::TripleO::BootstrapNode::SoftwareConfig
+ properties:
+ bootstrap_nodeid: {get_attr: [Controller, resource.0.hostname]}
+ bootstrap_nodeid_ip: {get_attr: [Controller, resource.0.ip_address]}
+
+ ControllerBootstrapNodeDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_attr: [ControllerBootstrapNodeConfig, config_id]}
+ servers: {get_attr: [Controller, attributes, nova_server_resource]}
+
+ ControllerSwiftDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_attr: [SwiftDevicesAndProxyConfig, config_id]}
+ servers: {get_attr: [Controller, attributes, nova_server_resource]}
+
+ ObjectStorageSwiftDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_attr: [SwiftDevicesAndProxyConfig, config_id]}
+ servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
+
+ SwiftDevicesAndProxyConfig:
+ type: OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig
+ properties:
+ controller_swift_devices: {get_attr: [Controller, swift_device]}
+ object_store_swift_devices: {get_attr: [ObjectStorage, swift_device]}
+ controller_swift_proxy_memcaches: {get_attr: [Controller, swift_proxy_memcache]}
+
+ ComputeCephDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_attr: [CephClusterConfig, config_id]}
+ servers: {get_attr: [Compute, attributes, nova_server_resource]}
+
+ ControllerCephDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_attr: [CephClusterConfig, config_id]}
+ servers: {get_attr: [Controller, attributes, nova_server_resource]}
+
+ CephStorageCephDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_attr: [CephClusterConfig, config_id]}
+ servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
+
+ CephClusterConfig:
+ type: OS::TripleO::CephClusterConfig::SoftwareConfig
+ properties:
+ ceph_storage_count: {get_param: CephStorageCount}
+ ceph_fsid: {get_param: CephClusterFSID}
+ ceph_mon_key: {get_param: CephMonKey}
+ ceph_admin_key: {get_param: CephAdminKey}
+ ceph_client_key: {get_param: CephClientKey}
+ ceph_external_mon_ips: {get_param: CephExternalMonHost}
+ ceph_mon_names: {get_attr: [Controller, hostname]}
+ ceph_mon_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]}
+
+ ControllerClusterConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ corosync:
+ nodes: {get_attr: [Controller, corosync_node]}
+ horizon:
+ caches:
+ memcached:
+ nodes: {get_attr: [Controller, hostname]}
+ mysql:
+ nodes: {get_attr: [Controller, corosync_node]}
+ haproxy:
+ nodes: {get_attr: [Controller, corosync_node]}
+
+ ControllerClusterDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_resource: ControllerClusterConfig}
+ servers: {get_attr: [Controller, attributes, nova_server_resource]}
+
+ ControllerAllNodesDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_attr: [allNodesConfig, config_id]}
+ servers: {get_attr: [Controller, attributes, nova_server_resource]}
+
+ ComputeAllNodesDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_attr: [allNodesConfig, config_id]}
+ servers: {get_attr: [Compute, attributes, nova_server_resource]}
+
+ BlockStorageAllNodesDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_attr: [allNodesConfig, config_id]}
+ servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
+
+ ObjectStorageAllNodesDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_attr: [allNodesConfig, config_id]}
+ servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
+
+ CephStorageAllNodesDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ config: {get_attr: [allNodesConfig, config_id]}
+ servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
+
+ # All Nodes Validations
+ AllNodesValidationConfig:
+ type: OS::TripleO::AllNodes::Validation
+ properties:
+ PingTestIps:
+ list_join:
+ - ' '
+ - - {get_attr: [Controller, resource.0.external_ip_address]}
+ - {get_attr: [Controller, resource.0.internal_api_ip_address]}
+ - {get_attr: [Controller, resource.0.storage_ip_address]}
+ - {get_attr: [Controller, resource.0.storage_mgmt_ip_address]}
+ - {get_attr: [Controller, resource.0.tenant_ip_address]}
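+ # i.e. a single space-separated string of the first controller's
+ # per-network addresses, e.g. "10.0.0.4 172.16.2.5 172.16.1.5 172.16.3.5
+ # 172.16.0.5" (illustrative values), used as ping targets by the validation
+ # deployments below, presumably to verify basic connectivity from each node.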
+
+ ControllerAllNodesValidationDeployment:
+ type: OS::Heat::StructuredDeployments
+ depends_on: ControllerAllNodesDeployment
+ properties:
+ config: {get_resource: AllNodesValidationConfig}
+ servers: {get_attr: [Controller, attributes, nova_server_resource]}
+
+ ComputeAllNodesValidationDeployment:
+ type: OS::Heat::StructuredDeployments
+ depends_on: ComputeAllNodesDeployment
+ properties:
+ config: {get_resource: AllNodesValidationConfig}
+ servers: {get_attr: [Compute, attributes, nova_server_resource]}
+
+ BlockStorageAllNodesValidationDeployment:
+ type: OS::Heat::StructuredDeployments
+ depends_on: BlockStorageAllNodesDeployment
+ properties:
+ config: {get_resource: AllNodesValidationConfig}
+ servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
+
+ ObjectStorageAllNodesValidationDeployment:
+ type: OS::Heat::StructuredDeployments
+ depends_on: ObjectStorageAllNodesDeployment
+ properties:
+ config: {get_resource: AllNodesValidationConfig}
+ servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
+
+ CephStorageAllNodesValidationDeployment:
+ type: OS::Heat::StructuredDeployments
+ depends_on: CephStorageAllNodesDeployment
+ properties:
+ config: {get_resource: AllNodesValidationConfig}
+ servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
+
+ # Optional ExtraConfig for all nodes - all roles are passed in here, but
+ # the nested template may configure each role differently (or not at all)
+ AllNodesExtraConfig:
+ type: OS::TripleO::AllNodesExtraConfig
+ properties:
+ controller_servers: {get_attr: [Controller, attributes, nova_server_resource]}
+ compute_servers: {get_attr: [Compute, attributes, nova_server_resource]}
+ blockstorage_servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
+ objectstorage_servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
+ cephstorage_servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
+
+ # Nested stack deployment runs after all other controller deployments
+ ControllerNodesPostDeployment:
+ type: OS::TripleO::ControllerPostDeployment
+ depends_on: [ControllerBootstrapNodeDeployment, ControllerAllNodesDeployment, ControllerSwiftDeployment, ControllerCephDeployment]
+ properties:
+ servers: {get_attr: [Controller, attributes, nova_server_resource]}
+ NodeConfigIdentifiers:
+ allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
+ controller_config: {get_attr: [Controller, attributes, config_identifier]}
+ deployment_identifier: {get_param: DeployIdentifier}
+
+ ComputeNodesPostDeployment:
+ type: OS::TripleO::ComputePostDeployment
+ depends_on: [ComputeAllNodesDeployment, ComputeCephDeployment]
+ properties:
+ servers: {get_attr: [Compute, attributes, nova_server_resource]}
+ NodeConfigIdentifiers:
+ allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
+ compute_config: {get_attr: [Compute, attributes, config_identifier]}
+ deployment_identifier: {get_param: DeployIdentifier}
+
+ ObjectStorageNodesPostDeployment:
+ type: OS::TripleO::ObjectStoragePostDeployment
+ depends_on: [ObjectStorageSwiftDeployment, ObjectStorageAllNodesDeployment]
+ properties:
+ servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
+ NodeConfigIdentifiers:
+ allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
+ objectstorage_config: {get_attr: [ObjectStorage, attributes, config_identifier]}
+ deployment_identifier: {get_param: DeployIdentifier}
+
+ BlockStorageNodesPostDeployment:
+ type: OS::TripleO::BlockStoragePostDeployment
+ depends_on: [ControllerNodesPostDeployment, BlockStorageAllNodesDeployment]
+ properties:
+ servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
+ NodeConfigIdentifiers:
+ allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
+ blockstorage_config: {get_attr: [BlockStorage, attributes, config_identifier]}
+ deployment_identifier: {get_param: DeployIdentifier}
+
+ CephStorageNodesPostDeployment:
+ type: OS::TripleO::CephStoragePostDeployment
+ depends_on: [ControllerNodesPostDeployment, CephStorageCephDeployment, CephStorageAllNodesDeployment]
+ properties:
+ servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
+ NodeConfigIdentifiers:
+ allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
+ cephstorage_config: {get_attr: [CephStorage, attributes, config_identifier]}
+ deployment_identifier: {get_param: DeployIdentifier}
+
+outputs:
+ KeystoneURL:
+ description: URL for the Overcloud Keystone service
+ value: {get_attr: [EndpointMap, endpoint_map, KeystonePublic, uri]}
+ KeystoneAdminVip:
+ description: Keystone Admin VIP endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
+ PublicVip:
+ description: Controller VIP for public API endpoints
+ value: {get_attr: [VipMap, net_ip_map, external]}
+ CeilometerInternalVip:
+ description: VIP for Ceilometer API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ CinderInternalVip:
+ description: VIP for Cinder API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
+ GlanceInternalVip:
+ description: VIP for Glance API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
+ HeatInternalVip:
+ description: VIP for Heat API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
+ KeystoneInternalVip:
+ description: VIP for Keystone API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
+ NeutronInternalVip:
+ description: VIP for Neutron API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
+ NovaInternalVip:
+ description: VIP for Nova API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
+ SwiftInternalVip:
+ description: VIP for Swift Proxy internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
+ HostsEntry:
+ description: |
+ The content that should be appended to your /etc/hosts if you want to get
+ hostname-based access to the deployed nodes (useful for testing without
+ setting up a DNS).
+ value: {get_attr: [allNodesConfig, hosts_entries]}
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 2bc519bb..9dd43680 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -51,6 +51,17 @@ parameters:
keystone_admin_api_node_ips:
type: comma_delimited_list
+ DeployIdentifier:
+ type: string
+ description: >
+ Setting this to a unique value will re-run any deployment tasks which
+ perform configuration on a Heat stack-update.
+ UpdateIdentifier:
+ type: string
+ description: >
+ Setting this to a previously unused value during a stack-update will
+ trigger a package update on all nodes.
+
resources:
allNodesConfigImpl:
@@ -240,8 +251,17 @@ resources:
nova::rabbit_hosts: *rabbit_nodes_array
keystone::rabbit_hosts: *rabbit_nodes_array
+ deploy_identifier: {get_param: DeployIdentifier}
+ update_identifier: {get_param: UpdateIdentifier}
+
outputs:
config_id:
description: The ID of the allNodesConfigImpl resource.
value:
{get_resource: allNodesConfigImpl}
+ hosts_entries:
+ description: |
+ The content that should be appended to your /etc/hosts if you want to get
+ hostname-based access to the deployed nodes (useful for testing without
+ setting up a DNS).
+ value: {get_attr: [allNodesConfigImpl, config, hosts]}
diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml
index 99265493..96198c3f 100644
--- a/puppet/ceph-cluster-config.yaml
+++ b/puppet/ceph-cluster-config.yaml
@@ -13,7 +13,7 @@ parameters:
ceph_client_key:
default: ''
type: string
- description: Ceph key used to create the 'openstack' user keyring.
+ description: Ceph key used to create the client user keyring.
ceph_fsid:
default: ''
type: string
@@ -27,6 +27,18 @@ parameters:
type: comma_delimited_list
ceph_mon_ips:
type: comma_delimited_list
+ NovaRbdPoolName:
+ default: vms
+ type: string
+ CinderRbdPoolName:
+ default: volumes
+ type: string
+ GlanceRbdPoolName:
+ default: images
+ type: string
+ CephClientUserName:
+ default: openstack
+ type: string
resources:
CephClusterConfigImpl:
@@ -65,15 +77,34 @@ resources:
keyring_path: '/var/lib/ceph/bootstrap-osd/ceph.keyring',
cap_mon: 'allow profile bootstrap-osd'
},
- client.openstack: {
+ client.CLIENT_USER: {
secret: 'ADMIN_KEY',
mode: '0644',
cap_mon: 'allow r',
- cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images'
+ cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL'
}
}"
params:
+ CLIENT_USER: {get_param: CephClientUserName}
ADMIN_KEY: {get_param: ceph_admin_key}
+ NOVA_POOL: {get_param: NovaRbdPoolName}
+ CINDER_POOL: {get_param: CinderRbdPoolName}
+ GLANCE_POOL: {get_param: GlanceRbdPoolName}
+ nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
+ cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
+ glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+ nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
+ glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
+ nova::compute::rbd::rbd_keyring:
+ list_join:
+ - '.'
+ - - 'client'
+ - {get_param: CephClientUserName}
+ ceph_client_user_name: {get_param: CephClientUserName}
+ ceph_pools:
+ - {get_param: CinderRbdPoolName}
+ - {get_param: NovaRbdPoolName}
+ - {get_param: GlanceRbdPoolName}
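+ # With the default values above (CephClientUserName=openstack and the
+ # volumes/vms/images pools) the str_replace renders the same client entry as
+ # the old hard-coded client.openstack block, roughly (secret elided):
+ #   client.openstack: { secret: '<ceph_admin_key>', mode: '0644',
+ #     cap_mon: 'allow r',
+ #     cap_osd: 'allow ... rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images' }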
outputs:
config_id:
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index 75294599..ede1263b 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -16,14 +16,15 @@ parameters:
description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
type: string
KeyName:
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+ description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
default: default
constraints:
- custom_constraint: nova.keypair
NtpServer:
- type: string
default: ''
+ description: Comma-separated list of ntp servers
+ type: comma_delimited_list
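+ # e.g. NtpServer can now be given either as a YAML list or as a plain
+ # comma-separated string such as "0.pool.ntp.org,1.pool.ntp.org"
+ # (illustrative servers); Heat coerces both into a comma_delimited_list.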
EnablePackageInstall:
default: 'false'
description: Set to true to enable package installation via Puppet
@@ -53,7 +54,34 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
-
+ NetworkDeploymentActions:
+ type: comma_delimited_list
+ description: >
+ Heat actions on which to apply network configuration changes
+ default: ['CREATE']
+ SoftwareConfigTransport:
+ default: POLL_SERVER_CFN
+ description: |
+ How the server should receive the metadata required for software configuration.
+ type: string
+ constraints:
+ - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
+ CloudDomain:
+ default: ''
+ type: string
+ description: >
+ The DNS domain used for the hosts. This should match the dhcp_domain
+ configured in the Undercloud neutron. Defaults to localdomain.
+ ServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API.
+ type: json
+ SchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
resources:
CephStorage:
@@ -68,6 +96,9 @@ resources:
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
name: {get_param: Hostname}
+ software_config_transport: {get_param: SoftwareConfigTransport}
+ metadata: {get_param: ServerMetadata}
+ scheduler_hints: {get_param: SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -89,6 +120,16 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ ExternalPort:
+ type: OS::TripleO::CephStorage::Ports::ExternalPort
+ properties:
+ ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
+ InternalApiPort:
+ type: OS::TripleO::CephStorage::Ports::InternalApiPort
+ properties:
+ ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
StoragePort:
type: OS::TripleO::CephStorage::Ports::StoragePort
properties:
@@ -99,32 +140,55 @@ resources:
properties:
ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ TenantPort:
+ type: OS::TripleO::CephStorage::Ports::TenantPort
+ properties:
+ ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
+ ManagementPort:
+ type: OS::TripleO::CephStorage::Ports::ManagementPort
+ properties:
+ ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
NetworkConfig:
type: OS::TripleO::CephStorage::Net::SoftwareConfig
properties:
ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetIpMap:
type: OS::TripleO::Network::Ports::NetIpMap
properties:
ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ ExternalIp: {get_attr: [ExternalPort, ip_address]}
+ InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
StorageIp: {get_attr: [StoragePort, ip_address]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ TenantIp: {get_attr: [TenantPort, ip_address]}
+ ManagementIp: {get_attr: [ManagementPort, ip_address]}
NetIpSubnetMap:
type: OS::TripleO::Network::Ports::NetIpSubnetMap
properties:
ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
properties:
config: {get_resource: NetworkConfig}
server: {get_resource: CephStorage}
+ actions: {get_param: NetworkDeploymentActions}
CephStorageDeployment:
type: OS::Heat::StructuredDeployment
@@ -133,11 +197,7 @@ resources:
config: {get_resource: CephStorageConfig}
server: {get_resource: CephStorage}
input_values:
- ntp_servers:
- str_replace:
- template: '["server"]'
- params:
- server: {get_param: NtpServer}
+ ntp_servers: {get_param: NtpServer}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
@@ -174,6 +234,13 @@ resources:
ceph::profile::params::cluster_network: {get_input: ceph_cluster_network}
ceph::profile::params::public_network: {get_input: ceph_public_network}
+ # Resource for site-specific injection of root certificate
+ NodeTLSCAData:
+ depends_on: CephStorageDeployment
+ type: OS::TripleO::NodeTLSCAData
+ properties:
+ server: {get_resource: CephStorage}
+
# Hook for site-specific additional pre-deployment config, e.g. extra hieradata
CephStorageExtraConfigPre:
depends_on: CephStorageDeployment
@@ -184,7 +251,7 @@ resources:
# Hook for site-specific additional pre-deployment config,
# applying to all nodes, e.g. node registration/unregistration
NodeExtraConfig:
- depends_on: CephStorageExtraConfigPre
+ depends_on: [CephStorageExtraConfigPre, NodeTLSCAData]
type: OS::TripleO::NodeExtraConfig
properties:
server: {get_resource: CephStorage}
@@ -205,25 +272,39 @@ outputs:
hosts_entry:
value:
str_replace:
- template: "IP HOST.localdomain HOST"
+ template: "IP HOST.DOMAIN HOST"
params:
IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
HOST: {get_attr: [CephStorage, name]}
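+ # e.g. with CloudDomain=example.com this renders a hosts line such as
+ # "172.16.1.23 overcloud-cephstorage-0.example.com overcloud-cephstorage-0"
+ # (illustrative IP and hostname).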
nova_server_resource:
description: Heat resource handle for the ceph storage server
value:
{get_resource: CephStorage}
+ external_ip_address:
+ description: IP address of the server in the external network
+ value: {get_attr: [ExternalPort, ip_address]}
+ internal_api_ip_address:
+ description: IP address of the server in the internal_api network
+ value: {get_attr: [InternalApiPort, ip_address]}
storage_ip_address:
description: IP address of the server in the storage network
value: {get_attr: [StoragePort, ip_address]}
storage_mgmt_ip_address:
description: IP address of the server in the storage_mgmt network
value: {get_attr: [StorageMgmtPort, ip_address]}
+ tenant_ip_address:
+ description: IP address of the server in the tenant network
+ value: {get_attr: [TenantPort, ip_address]}
+ management_ip_address:
+ description: IP address of the server in the management network
+ value: {get_attr: [ManagementPort, ip_address]}
config_identifier:
description: identifier which changes if the node configuration may need re-applying
value:
list_join:
- ','
- - {get_attr: [CephStorageDeployment, deploy_stdout]}
+ - {get_attr: [NodeTLSCAData, deploy_stdout]}
- {get_attr: [CephStorageExtraConfigPre, deploy_stdout]}
- {get_param: UpdateIdentifier}
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index 6a869219..9fdd0123 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -17,7 +17,6 @@ parameters:
description: The size of the loopback file used by the cinder LVM driver.
type: number
CinderPassword:
- default: unset
description: The password for the cinder service and db account, used by cinder-api.
type: string
hidden: true
@@ -44,17 +43,9 @@ parameters:
type: string
constraints:
- custom_constraint: nova.flavor
- GlancePort:
- default: "9292"
- description: Glance port.
- type: string
- GlanceProtocol:
- default: http
- description: Protocol to use when connecting to glance, set to https for SSL.
- type: string
KeyName:
default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+ description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
RabbitPassword:
default: 'guest'
@@ -78,13 +69,13 @@ parameters:
description: The user name for SNMPd with readonly rights running on all Overcloud nodes
type: string
SnmpdReadonlyUserPassword:
- default: unset
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
NtpServer:
- type: string
default: ''
+ description: Comma-separated list of ntp servers
+ type: comma_delimited_list
EnablePackageInstall:
default: 'false'
description: Set to true to enable package installation via Puppet
@@ -103,12 +94,46 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
GlanceApiVirtualIP:
type: string
default: ''
MysqlVirtualIP:
type: string
default: ''
+ NetworkDeploymentActions:
+ type: comma_delimited_list
+ description: >
+ Heat actions on which to apply network configuration changes
+ default: ['CREATE']
+ SoftwareConfigTransport:
+ default: POLL_SERVER_CFN
+ description: |
+ How the server should receive the metadata required for software configuration.
+ type: string
+ constraints:
+ - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
+ CloudDomain:
+ default: ''
+ type: string
+ description: >
+ The DNS domain used for the hosts. This should match the dhcp_domain
+ configured in the Undercloud neutron. Defaults to localdomain.
+ ServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API.
+ type: json
+ SchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
+
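+ # Note: with the EndpointMap parameter added above, glance_api_servers
+ # further down is taken from {get_param: [EndpointMap, GlanceInternal, uri]},
+ # i.e. a ready-made URI such as http://172.16.2.10:9292 (illustrative
+ # address), instead of being assembled from the removed GlanceProtocol and
+ # GlancePort parameters.
+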
resources:
BlockStorage:
@@ -123,6 +148,9 @@ resources:
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
name: {get_param: Hostname}
+ software_config_transport: {get_param: SoftwareConfigTransport}
+ metadata: {get_param: ServerMetadata}
+ scheduler_hints: {get_param: SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -144,6 +172,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ ExternalPort:
+ type: OS::TripleO::BlockStorage::Ports::ExternalPort
+ properties:
+ ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+
InternalApiPort:
type: OS::TripleO::BlockStorage::Ports::InternalApiPort
properties:
@@ -159,27 +192,44 @@ resources:
properties:
ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ TenantPort:
+ type: OS::TripleO::BlockStorage::Ports::TenantPort
+ properties:
+ ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+
+ ManagementPort:
+ type: OS::TripleO::BlockStorage::Ports::ManagementPort
+ properties:
+ ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+
NetworkConfig:
type: OS::TripleO::BlockStorage::Net::SoftwareConfig
properties:
ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetIpMap:
type: OS::TripleO::Network::Ports::NetIpMap
properties:
ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ ExternalIp: {get_attr: [ExternalPort, ip_address]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
StorageIp: {get_attr: [StoragePort, ip_address]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ TenantIp: {get_attr: [TenantPort, ip_address]}
+ ManagementIp: {get_attr: [ManagementPort, ip_address]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
properties:
config: {get_resource: NetworkConfig}
server: {get_resource: BlockStorage}
+ actions: {get_param: NetworkDeploymentActions}
BlockStorageDeployment:
type: OS::Heat::StructuredDeployment
@@ -200,23 +250,12 @@ resources:
cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
cinder_iscsi_helper: {get_param: CinderISCSIHelper}
cinder_iscsi_ip_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]}
- glance_api_servers:
- list_join:
- - ''
- - - {get_param: GlanceProtocol}
- - '://'
- - {get_param: GlanceApiVirtualIP}
- - ':'
- - {get_param: GlancePort}
+ glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
rabbit_username: {get_param: RabbitUserName}
rabbit_password: {get_param: RabbitPassword}
rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
rabbit_client_port: {get_param: RabbitClientPort}
- ntp_servers:
- str_replace:
- template: '["server"]'
- params:
- server: {get_param: NtpServer}
+ ntp_servers: {get_param: NtpServer}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
@@ -264,10 +303,17 @@ resources:
snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
+ # Resource for site-specific injection of root certificate
+ NodeTLSCAData:
+ depends_on: BlockStorageDeployment
+ type: OS::TripleO::NodeTLSCAData
+ properties:
+ server: {get_resource: BlockStorage}
+
# Hook for site-specific additional pre-deployment config,
# applying to all nodes, e.g. node registration/unregistration
NodeExtraConfig:
- depends_on: BlockStorageDeployment
+ depends_on: NodeTLSCAData
type: OS::TripleO::NodeExtraConfig
properties:
server: {get_resource: BlockStorage}
@@ -288,14 +334,18 @@ outputs:
hosts_entry:
value:
str_replace:
- template: "IP HOST.localdomain HOST"
+ template: "IP HOST.DOMAIN HOST"
params:
IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
HOST: {get_attr: [BlockStorage, name]}
nova_server_resource:
description: Heat resource handle for the block storage server
value:
{get_resource: BlockStorage}
+ external_ip_address:
+ description: IP address of the server in the external network
+ value: {get_attr: [ExternalPort, ip_address]}
internal_api_ip_address:
description: IP address of the server in the internal_api network
value: {get_attr: [InternalApiPort, ip_address]}
@@ -305,10 +355,17 @@ outputs:
storage_mgmt_ip_address:
description: IP address of the server in the storage_mgmt network
value: {get_attr: [StorageMgmtPort, ip_address]}
+ tenant_ip_address:
+ description: IP address of the server in the tenant network
+ value: {get_attr: [TenantPort, ip_address]}
+ management_ip_address:
+ description: IP address of the server in the management network
+ value: {get_attr: [ManagementPort, ip_address]}
config_identifier:
description: identifier which changes if the node configuration may need re-applying
value:
list_join:
- ''
- - {get_attr: [BlockStorageDeployment, deploy_stdout]}
+ - {get_attr: [NodeTLSCAData, deploy_stdout]}
- {get_param: UpdateIdentifier}
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 2b635357..7269d736 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -1,11 +1,10 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
OpenStack hypervisor node configured via Puppet.
parameters:
AdminPassword:
- default: unset
description: The password for the keystone admin account, used for monitoring, querying neutron etc.
type: string
hidden: true
@@ -16,12 +15,10 @@ parameters:
constraints:
- allowed_values: ['', Present]
CeilometerMeteringSecret:
- default: unset
description: Secret shared by the ceilometer services.
type: string
hidden: true
CeilometerPassword:
- default: unset
description: The password for the ceilometer service account.
type: string
hidden: true
@@ -51,14 +48,6 @@ parameters:
GlanceHost:
type: string
default: '' # Has to be here because of the ignored empty value bug
- GlancePort:
- default: "9292"
- description: Glance port.
- type: string
- GlanceProtocol:
- default: http
- description: Protocol to use when connecting to glance, set to https for SSL.
- type: string
Image:
type: string
default: overcloud-compute
@@ -69,7 +58,7 @@ parameters:
description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
type: string
KeyName:
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+ description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
default: default
constraints:
@@ -88,13 +77,18 @@ parameters:
to create provider networks (and we use this for the default floating
network) - if changing this either use different post-install network
scripts or be sure to keep 'datacentre' as a mapping network name.
- type: string
+ type: comma_delimited_list
default: "datacentre:br-ex"
NeutronEnableTunnelling:
type: string
default: "True"
- NeutronFlatNetworks:
+ NeutronEnableL2Pop:
type: string
+ description: >
+ Enable/disable the L2 population feature in the Neutron agents.
+ default: "False"
+ NeutronFlatNetworks:
+ type: comma_delimited_list
default: 'datacentre'
description: >
If set, flat networks to configure in neutron plugins.
@@ -102,18 +96,17 @@ parameters:
type: string
default: '' # Has to be here because of the ignored empty value bug
NeutronNetworkType:
- type: string
- description: The tenant network type for Neutron, either gre or vxlan.
+ type: comma_delimited_list
+ description: The tenant network type for Neutron.
default: 'vxlan'
NeutronNetworkVLANRanges:
- default: 'datacentre'
+ default: 'datacentre:1:1000'
description: >
The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
Neutron documentation for permitted values. Defaults to permitting any
VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
type: comma_delimited_list
NeutronPassword:
- default: unset
description: The password for the neutron service account, used by neutron agents.
type: string
hidden: true
@@ -126,10 +119,9 @@ parameters:
description: A port to add to the NeutronPhysicalBridge.
type: string
NeutronTunnelTypes:
- type: string
+ type: comma_delimited_list
description: |
- The tunnel types for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'gre,vxlan'
+ The tunnel types for the Neutron tenant network.
default: 'vxlan'
NeutronTunnelIdRanges:
description: |
@@ -150,7 +142,6 @@ parameters:
default: 'False'
type: string
NeutronMetadataProxySharedSecret:
- default: 'unset'
description: Shared secret to prevent spoofing
type: string
hidden: true
@@ -174,9 +165,8 @@ parameters:
NeutronMechanismDrivers:
default: 'openvswitch'
description: |
- The mechanism drivers for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'openvswitch,l2_population'
- type: string
+ The mechanism drivers for the Neutron tenant network.
+ type: comma_delimited_list
# Not relevant for Computes, should be removed
NeutronAllowL3AgentFailover:
default: 'True'
@@ -205,22 +195,34 @@ parameters:
type: json
NovaComputeLibvirtType:
type: string
+ default: kvm
+ NovaComputeLibvirtVifDriver:
default: ''
+ description: Libvirt VIF driver configuration for the network
+ type: string
NovaEnableRbdBackend:
default: false
description: Whether to enable or not the Rbd backend for Nova
type: boolean
NovaPassword:
- default: unset
description: The password for the nova service account, used by nova-api.
type: string
hidden: true
NovaPublicIP:
type: string
default: '' # Has to be here because of the ignored empty value bug
- NtpServer:
+ NovaOVSBridge:
+ default: 'br-int'
+ description: Name of integration bridge used by Open vSwitch
+ type: string
+ NovaSecurityGroupAPI:
+ default: 'neutron'
+ description: The full class name of the security API class
type: string
+ NtpServer:
default: ''
+ description: Comma-separated list of ntp servers
+ type: comma_delimited_list
RabbitHost:
type: string
default: '' # Has to be here because of the ignored empty value bug
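
Since NtpServer is now a comma_delimited_list (and ntp_servers is passed through further down without the old str_replace wrapping), an environment file can hand it either a comma-separated string or a YAML list. A minimal sketch, assuming a hypothetical environment file and server names:

    # hypothetical environment file, e.g. environments/ntp.yaml
    parameter_defaults:
      NtpServer: 0.pool.ntp.org,1.pool.ntp.org
      # equivalent list form:
      # NtpServer: ['0.pool.ntp.org', '1.pool.ntp.org']
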
@@ -248,7 +250,6 @@ parameters:
description: The user name for SNMPd with readonly rights running on all Overcloud nodes
type: string
SnmpdReadonlyUserPassword:
- default: unset
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
@@ -261,6 +262,11 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint names -> connection data (protocol,
+ host, port, URIs). Typically set via parameter_defaults in the resource registry.
+ type: json
UpdateIdentifier:
default: ''
type: string
@@ -270,6 +276,34 @@ parameters:
Hostname:
type: string
default: '' # Defaults to Heat created hostname
+ NetworkDeploymentActions:
+ type: comma_delimited_list
+ description: >
+ Heat actions on which to apply network configuration changes
+ default: ['CREATE']
+ SoftwareConfigTransport:
+ default: POLL_SERVER_CFN
+ description: |
+ How the server should receive the metadata required for software configuration.
+ type: string
+ constraints:
+ - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
+ CloudDomain:
+ default: ''
+ type: string
+ description: >
+ The DNS domain used for the hosts. This should match the dhcp_domain
+ configured in the Undercloud neutron. Defaults to localdomain.
+ ServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API.
+ type: json
+ SchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
resources:
@@ -287,6 +321,9 @@ resources:
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
name: {get_param: Hostname}
+ software_config_transport: {get_param: SoftwareConfigTransport}
+ metadata: {get_param: ServerMetadata}
+ scheduler_hints: {get_param: SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -308,6 +345,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ ExternalPort:
+ type: OS::TripleO::Compute::Ports::ExternalPort
+ properties:
+ ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+
InternalApiPort:
type: OS::TripleO::Compute::Ports::InternalApiPort
properties:
@@ -318,32 +360,49 @@ resources:
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ StorageMgmtPort:
+ type: OS::TripleO::Compute::Ports::StorageMgmtPort
+ properties:
+ ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+
TenantPort:
type: OS::TripleO::Compute::Ports::TenantPort
properties:
ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ ManagementPort:
+ type: OS::TripleO::Compute::Ports::ManagementPort
+ properties:
+ ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+
NetIpMap:
type: OS::TripleO::Network::Ports::NetIpMap
properties:
ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ ExternalIp: {get_attr: [ExternalPort, ip_address]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
StorageIp: {get_attr: [StoragePort, ip_address]}
+ StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ ManagementIp: {get_attr: [ManagementPort, ip_address]}
NetworkConfig:
type: OS::TripleO::Compute::Net::SoftwareConfig
properties:
ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
+ StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
properties:
config: {get_resource: NetworkConfig}
server: {get_resource: NovaCompute}
+ actions: {get_param: NetworkDeploymentActions}
input_values:
bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
@@ -366,6 +425,8 @@ resources:
- '"%{::osfamily}"'
- common
- cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre
+ - nova_nuage_data # Optionally provided by ComputeExtraConfigPre
+ - midonet_data # Optionally provided by AllNodesExtraConfig
datafiles:
compute_extraconfig:
mapped_data: {get_param: NovaComputeExtraConfig}
@@ -386,12 +447,15 @@ resources:
nova::rabbit_port: {get_input: rabbit_client_port}
nova_compute_driver: {get_input: nova_compute_driver}
nova::compute::libvirt::libvirt_virt_type: {get_input: nova_compute_libvirt_type}
+ nova::compute::neutron::libvirt_vif_driver: {get_input: nova_compute_libvirt_vif_driver}
nova_api_host: {get_input: nova_api_host}
nova::compute::vncproxy_host: {get_input: nova_public_ip}
nova::compute::rbd::ephemeral_storage: {get_input: nova_enable_rbd_backend}
rbd_persistent_storage: {get_input: cinder_enable_rbd_backend}
nova_password: {get_input: nova_password}
nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address}
+ nova::network::neutron::neutron_ovs_bridge: {get_input: nova_ovs_bridge}
+ nova::network::neutron::security_group_api: {get_input: nova_security_group_api}
ceilometer::debug: {get_input: debug}
ceilometer::rabbit_userid: {get_input: rabbit_username}
ceilometer::rabbit_password: {get_input: rabbit_password}
@@ -406,24 +470,25 @@ resources:
nova::glance_api_servers: {get_input: glance_api_servers}
neutron::debug: {get_input: debug}
neutron::rabbit_password: {get_input: rabbit_password}
- neutron::rabbit_user: {get_input: rabbit_user}
+ neutron::rabbit_user: {get_input: rabbit_username}
neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
neutron::rabbit_port: {get_input: rabbit_client_port}
- neutron_flat_networks: {get_input: neutron_flat_networks}
+ neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks}
neutron_host: {get_input: neutron_host}
neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
- neutron_tenant_network_type: {get_input: neutron_tenant_network_type}
- neutron_tunnel_types: {get_input: neutron_tunnel_types}
+ neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types}
+ neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
- neutron_bridge_mappings: {get_input: neutron_bridge_mappings}
+ neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings}
neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling}
+ neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
neutron_physical_bridge: {get_input: neutron_physical_bridge}
neutron_public_interface: {get_input: neutron_public_interface}
nova::network::neutron::neutron_admin_password: {get_input: neutron_password}
- nova::network::neutron::neutron_url: {get_input: neutron_url}
+ nova::network::neutron::neutron_url: {get_input: neutron_internal_url}
nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url}
neutron_router_distributed: {get_input: neutron_router_distributed}
neutron_agent_mode: {get_input: neutron_agent_mode}
@@ -431,8 +496,9 @@ resources:
neutron::core_plugin: {get_input: neutron_core_plugin}
neutron::service_plugins: {get_input: neutron_service_plugins}
neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers}
- neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers}
+ neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers}
neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device}
+ keystone_public_api_virtual_ip: {get_input: keystone_vip}
admin_password: {get_input: admin_password}
ntp::servers: {get_input: ntp_servers}
tripleo::packages::enable_install: {get_input: enable_package_install}
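
Several intermediate hiera keys (neutron_flat_networks, neutron_bridge_mappings, neutron_mechanism_drivers, ...) are renamed above to the fully qualified puppet class parameters. Any site hieradata that overrode the old keys needs the new names; a minimal sketch using the NovaComputeExtraConfig hook shown in the datafiles mapping above, with illustrative values:

    # hypothetical parameter_defaults override; keys follow the renames above
    parameter_defaults:
      NovaComputeExtraConfig:
        neutron::plugins::ml2::flat_networks: ['datacentre']
        neutron::agents::ml2::ovs::bridge_mappings: ['datacentre:br-ex']
        neutron::plugins::ml2::mechanism_drivers: ['openvswitch']
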
@@ -449,62 +515,61 @@ resources:
debug: {get_param: Debug}
nova_compute_driver: {get_param: NovaComputeDriver}
nova_compute_libvirt_type: {get_param: NovaComputeLibvirtType}
+ nova_compute_libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
nova_public_ip: {get_param: NovaPublicIP}
nova_api_host: {get_param: NovaApiHost}
nova_password: {get_param: NovaPassword}
nova_enable_rbd_backend: {get_param: NovaEnableRbdBackend}
cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
nova_vnc_proxyclient_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaVncProxyNetwork]}]}
+ nova_ovs_bridge: {get_param: NovaOVSBridge}
+ nova_security_group_api: {get_param: NovaSecurityGroupAPI}
ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
ceilometer_password: {get_param: CeilometerPassword}
ceilometer_compute_agent: {get_param: CeilometerComputeAgent}
- ceilometer_agent_auth_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: KeystonePublicApiVirtualIP}
- - ':5000/v2.0'
+ ceilometer_agent_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
- glance_api_servers:
- list_join:
- - ''
- - - {get_param: GlanceProtocol}
- - '://'
- - {get_param: GlanceHost}
- - ':'
- - {get_param: GlancePort}
- neutron_flat_networks: {get_param: NeutronFlatNetworks}
+ glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
+ neutron_flat_networks:
+ str_replace:
+ template: NETWORKS
+ params:
+ NETWORKS: {get_param: NeutronFlatNetworks}
neutron_host: {get_param: NeutronHost}
neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]}
- neutron_tenant_network_type: {get_param: NeutronNetworkType}
- neutron_tunnel_types: {get_param: NeutronTunnelTypes}
neutron_tunnel_id_ranges:
str_replace:
- template: "['RANGES']"
+ template: RANGES
params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronTunnelIdRanges}
+ RANGES: {get_param: NeutronTunnelIdRanges}
neutron_vni_ranges:
str_replace:
- template: "['RANGES']"
+ template: RANGES
+ params:
+ RANGES: {get_param: NeutronVniRanges}
+ neutron_tenant_network_types:
+ str_replace:
+ template: TYPES
+ params:
+ TYPES: {get_param: NeutronNetworkType}
+ neutron_tunnel_types:
+ str_replace:
+ template: TYPES
params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronVniRanges}
+ TYPES: {get_param: NeutronTunnelTypes}
neutron_network_vlan_ranges:
str_replace:
- template: "['RANGES']"
+ template: RANGES
+ params:
+ RANGES: {get_param: NeutronNetworkVLANRanges}
+ neutron_bridge_mappings:
+ str_replace:
+ template: MAPPINGS
params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronNetworkVLANRanges}
- neutron_bridge_mappings: {get_param: NeutronBridgeMappings}
+ MAPPINGS: {get_param: NeutronBridgeMappings}
neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
+ neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
neutron_physical_bridge: {get_param: NeutronPhysicalBridge}
neutron_public_interface: {get_param: NeutronPublicInterface}
neutron_password: {get_param: NeutronPassword}
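
With NeutronNetworkType, NeutronTunnelTypes, NeutronBridgeMappings and friends now typed as comma_delimited_list, operators can supply real lists instead of pre-joined strings; the str_replace wrappers above serve to flatten them back into plain strings for the hiera layer. A minimal sketch of the operator-facing side, with hypothetical values:

    # hypothetical environment file
    parameter_defaults:
      NeutronNetworkType: ['vxlan', 'vlan']
      NeutronTunnelTypes: ['vxlan']
      NeutronBridgeMappings: ['datacentre:br-ex', 'tenant:br-tenant']
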
@@ -514,47 +579,39 @@ resources:
neutron_core_plugin: {get_param: NeutronCorePlugin}
neutron_service_plugins:
str_replace:
- template: "['PLUGINS']"
+ template: PLUGINS
params:
- PLUGINS:
- list_join:
- - "','"
- - {get_param: NeutronServicePlugins}
+ PLUGINS: {get_param: NeutronServicePlugins}
neutron_type_drivers:
str_replace:
- template: "['DRIVERS']"
+ template: DRIVERS
params:
- DRIVERS:
- list_join:
- - "','"
- - {get_param: NeutronTypeDrivers}
- neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers}
+ DRIVERS: {get_param: NeutronTypeDrivers}
+ neutron_mechanism_drivers:
+ str_replace:
+ template: MECHANISMS
+ params:
+ MECHANISMS: {get_param: NeutronMechanismDrivers}
neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
- neutron_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: NeutronHost}
- - ':9696'
- neutron_admin_auth_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: KeystoneAdminApiVirtualIP}
- - ':35357/v2.0'
+ neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]}
+ neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri]}
+ keystone_vip: {get_param: KeystonePublicApiVirtualIP}
admin_password: {get_param: AdminPassword}
rabbit_username: {get_param: RabbitUserName}
rabbit_password: {get_param: RabbitPassword}
rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
rabbit_client_port: {get_param: RabbitClientPort}
- ntp_servers:
- str_replace:
- template: '["server"]'
- params:
- server: {get_param: NtpServer}
+ ntp_servers: {get_param: NtpServer}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+ # Resource for site-specific injection of root certificate
+ NodeTLSCAData:
+ depends_on: NovaComputeDeployment
+ type: OS::TripleO::NodeTLSCAData
+ properties:
+ server: {get_resource: NovaCompute}
+
# Hook for site-specific additional pre-deployment config, e.g extra hieradata
ComputeExtraConfigPre:
depends_on: NovaComputeDeployment
@@ -565,7 +622,7 @@ resources:
# Hook for site-specific additional pre-deployment config,
# applying to all nodes, e.g node registration/unregistration
NodeExtraConfig:
- depends_on: ComputeExtraConfigPre
+ depends_on: [ComputeExtraConfigPre, NodeTLSCAData]
type: OS::TripleO::NodeExtraConfig
properties:
server: {get_resource: NovaCompute}
@@ -586,15 +643,24 @@ outputs:
ip_address:
description: IP address of the server in the ctlplane network
value: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ external_ip_address:
+ description: IP address of the server in the external network
+ value: {get_attr: [ExternalPort, ip_address]}
internal_api_ip_address:
description: IP address of the server in the internal_api network
value: {get_attr: [InternalApiPort, ip_address]}
storage_ip_address:
description: IP address of the server in the storage network
value: {get_attr: [StoragePort, ip_address]}
+ storage_mgmt_ip_address:
+ description: IP address of the server in the storage_mgmt network
+ value: {get_attr: [StorageMgmtPort, ip_address]}
tenant_ip_address:
description: IP address of the server in the tenant network
value: {get_attr: [TenantPort, ip_address]}
+ management_ip_address:
+ description: IP address of the server in the management network
+ value: {get_attr: [ManagementPort, ip_address]}
hostname:
description: Hostname of the server
value: {get_attr: [NovaCompute, name]}
@@ -603,9 +669,10 @@ outputs:
Server's IP address and hostname in the /etc/hosts format
value:
str_replace:
- template: "IP HOST.localdomain HOST"
+ template: "IP HOST.DOMAIN HOST"
params:
IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
HOST: {get_attr: [NovaCompute, name]}
nova_server_resource:
description: Heat resource handle for the Nova compute server
@@ -617,5 +684,6 @@ outputs:
list_join:
- ','
- - {get_attr: [NovaComputeDeployment, deploy_stdout]}
+ - {get_attr: [NodeTLSCAData, deploy_stdout]}
- {get_attr: [ComputeExtraConfigPre, deploy_stdout]}
- {get_param: UpdateIdentifier}
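
A pattern repeated throughout this file: hand-built list_join URLs (GlanceProtocol/GlanceHost/GlancePort, the Keystone and Neutron VIP joins) are replaced by lookups into the new EndpointMap json parameter, indexed by endpoint name and field (uri, uri_no_suffix, port). A rough sketch of what such a lookup resolves against, with hypothetical hosts and ports:

    # hypothetical EndpointMap fragment (normally generated elsewhere and passed in)
    parameter_defaults:
      EndpointMap:
        GlanceInternal:   {uri: 'http://192.0.2.10:9292',      uri_no_suffix: 'http://192.0.2.10:9292', port: '9292'}
        KeystoneInternal: {uri: 'http://192.0.2.10:5000/v2.0', uri_no_suffix: 'http://192.0.2.10:5000', port: '5000'}

    # so in the template:
    glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
    # resolves to 'http://192.0.2.10:9292'
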
diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml
index 941e1ac5..ed8129e7 100644
--- a/puppet/controller-post.yaml
+++ b/puppet/controller-post.yaml
@@ -17,6 +17,13 @@ parameters:
resources:
+ ControllerPrePuppet:
+ type: OS::TripleO::Tasks::ControllerPrePuppet
+ properties:
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
+
ControllerPuppetConfig:
type: OS::TripleO::ControllerConfig
@@ -26,6 +33,7 @@ resources:
# e.g all Deployment resources should have a *Deployment_StepN suffix
ControllerLoadBalancerDeployment_Step1:
type: OS::Heat::StructuredDeployments
+ depends_on: ControllerPrePuppet
properties:
servers: {get_param: servers}
config: {get_resource: ControllerPuppetConfig}
@@ -98,10 +106,18 @@ resources:
step: 5
update_identifier: {get_param: NodeConfigIdentifiers}
+ ControllerPostPuppet:
+ type: OS::TripleO::Tasks::ControllerPostPuppet
+ depends_on: ControllerOvercloudServicesDeployment_Step6
+ properties:
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
+
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
ExtraConfig:
- depends_on: ControllerOvercloudServicesDeployment_Step5
+ depends_on: ControllerPostPuppet
type: OS::TripleO::NodeExtraConfigPost
properties:
servers: {get_param: servers}
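
The new ControllerPrePuppet/ControllerPostPuppet resources bracket the existing step deployments, giving the resource registry a hook for per-site tasks (for example pacemaker maintenance handling) before the first puppet step and after the last one. A minimal sketch of how an environment might map them; the template paths are illustrative assumptions:

    # hypothetical resource_registry mapping
    resource_registry:
      OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
      OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
      # or point both at a no-op task template when nothing is needed
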
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index a903021a..a825f582 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
description: >
OpenStack controller node configured by Puppet.
@@ -10,12 +10,10 @@ parameters:
type: string
hidden: true
AdminPassword:
- default: unset
description: The password for the keystone admin account, used for monitoring, querying neutron etc.
type: string
hidden: true
AdminToken:
- default: unset
description: The keystone auth secret and db password.
type: string
hidden: true
@@ -27,18 +25,20 @@ parameters:
description: The ceilometer backend type.
type: string
CeilometerMeteringSecret:
- default: unset
description: Secret shared by the ceilometer services.
type: string
hidden: true
CeilometerPassword:
- default: unset
description: The password for the ceilometer service and db account.
type: string
hidden: true
CinderApiVirtualIP:
type: string
default: ''
+ CeilometerWorkers:
+ default: 0
+ description: Number of workers for Ceilometer service.
+ type: number
CinderEnableNfsBackend:
default: false
description: Whether to enable or not the NFS backend for Cinder
@@ -72,7 +72,6 @@ parameters:
CinderEnableNfsBackend is true.
type: comma_delimited_list
CinderPassword:
- default: unset
description: The password for the cinder service and db account, used by cinder-api.
type: string
hidden: true
@@ -81,8 +80,12 @@ parameters:
description: Contains parameters to configure Cinder backends. Typically
set via parameter_defaults in the resource registry.
type: json
+ CinderWorkers:
+ default: 0
+ description: Number of workers for Cinder service.
+ type: number
CloudName:
- default: ''
+ default: overcloud
description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
type: string
ControllerExtraConfig:
@@ -90,6 +93,15 @@ parameters:
description: |
Controller specific hiera configuration data to inject into the cluster.
type: json
+ ControllerIPs:
+ default: {}
+ description: >
+ A network mapped list of IPs to assign to Controllers in the following form:
+ {
+ "internal_api": ["a.b.c.d", "e.f.g.h"],
+ ...
+ }
+ type: json
ControlVirtualInterface:
default: 'br-ex'
description: Interface where virtual ip will be assigned.
@@ -106,6 +118,10 @@ parameters:
default: true
description: Whether to use Galera instead of regular MariaDB.
type: boolean
+ EnableLoadBalancer:
+ default: true
+ description: Whether to deploy a LoadBalancer on the Controller
+ type: boolean
EnableCephStorage:
default: false
description: Whether to deploy Ceph Storage (OSD) on the Controller
@@ -166,18 +182,9 @@ parameters:
type: string
default: ''
GlancePassword:
- default: unset
description: The password for the glance service and db account, used by the glance services.
type: string
hidden: true
- GlancePort:
- default: "9292"
- description: Glance port.
- type: string
- GlanceProtocol:
- default: http
- description: Protocol to use when connecting to glance, set to https for SSL.
- type: string
GlanceBackend:
default: swift
description: The short name of the Glance backend to use. Should be one
@@ -213,15 +220,17 @@ parameters:
default: /dev/log
description: Syslog address where HAproxy will send its log
type: string
+ GlanceWorkers:
+ default: 0
+ description: Number of workers for Glance service.
+ type: number
HeatPassword:
- default: unset
description: The password for the Heat service and db account, used by the Heat services.
type: string
hidden: true
HeatStackDomainAdminPassword:
description: Password for heat_domain_admin user.
type: string
- default: ''
hidden: true
HeatAuthEncryptionKey:
description: Auth encryption key for heat-engine
@@ -231,6 +240,10 @@ parameters:
default: '*'
description: A list of IP/Hostname allowed to connect to horizon
type: comma_delimited_list
+ HeatWorkers:
+ default: 0
+ description: Number of workers for Heat service.
+ type: number
HorizonSecret:
description: Secret key for Django
type: string
@@ -244,9 +257,13 @@ parameters:
default: 'REBUILD_PRESERVE_EPHEMERAL'
description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
type: string
+ InstanceNameTemplate:
+ default: 'instance-%08x'
+ description: Template string to be used to generate instance names
+ type: string
KeyName:
default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+ description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
constraints:
- custom_constraint: nova.keypair
@@ -286,6 +303,18 @@ parameters:
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ ManageFirewall:
+ default: false
+ description: Whether to manage iptables rules.
+ type: boolean
+ PurgeFirewallRules:
+ default: false
+ description: Whether iptables rules should be purged before setting up the new ones.
+ type: boolean
+ KeystoneWorkers:
+ default: 0
+ description: Number of workers for Keystone service.
+ type: number
MysqlClusterUniquePart:
description: A unique identifier of the MySQL cluster the controller is in.
type: string
@@ -320,12 +349,28 @@ parameters:
to create provider networks (and we use this for the default floating
network) - if changing this either use different post-install network
scripts or be sure to keep 'datacentre' as a mapping network name.
- type: string
+ type: comma_delimited_list
default: "datacentre:br-ex"
NeutronDnsmasqOptions:
default: 'dhcp-option-force=26,1400'
description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the gre tunnel overhead.
type: string
+ NeutronEnableDHCPAgent:
+ description: Knob to enable/disable DHCP Agent
+ type: boolean
+ default: true
+ NeutronEnableL3Agent:
+ description: Knob to enable/disable L3 agent
+ type: boolean
+ default: true
+ NeutronEnableMetadataAgent:
+ description: Knob to enable/disable Metadata agent
+ type: boolean
+ default: true
+ NeutronEnableOVSAgent:
+ description: Knob to enable/disable OVS Agent
+ type: boolean
+ default: true
NeutronAgentMode:
default: 'dvr_snat'
description: Agent mode for the neutron-l3-agent on the controller hosts
@@ -343,7 +388,6 @@ parameters:
description: Whether to configure Neutron Distributed Virtual Routers
type: string
NeutronMetadataProxySharedSecret:
- default: 'unset'
description: Shared secret to prevent spoofing
type: string
hidden: true
@@ -367,18 +411,26 @@ parameters:
NeutronMechanismDrivers:
default: 'openvswitch'
description: |
- The mechanism drivers for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'openvswitch,l2_population'
- type: string
+ The mechanism drivers for the Neutron tenant network.
+ type: comma_delimited_list
NeutronAllowL3AgentFailover:
default: 'True'
description: Allow automatic l3-agent failover
type: string
+ NeutronEnableIsolatedMetadata:
+ default: 'False'
+ description: If True, the DHCP agent provides a metadata route to the VM.
+ type: string
NeutronEnableTunnelling:
type: string
default: "True"
- NeutronFlatNetworks:
+ NeutronEnableL2Pop:
type: string
+ description: >
+ Enable/disable the L2 population feature in the Neutron agents.
+ default: "False"
+ NeutronFlatNetworks:
+ type: comma_delimited_list
default: 'datacentre'
description: If set, flat networks to configure in neutron plugins.
NeutronL3HA:
@@ -387,17 +439,16 @@ parameters:
type: string
NeutronNetworkType:
default: 'vxlan'
- description: The tenant network type for Neutron, either gre or vxlan.
- type: string
+ description: The tenant network type for Neutron.
+ type: comma_delimited_list
NeutronNetworkVLANRanges:
- default: 'datacentre'
+ default: 'datacentre:1:1000'
description: >
The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
Neutron documentation for permitted values. Defaults to permitting any
VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
type: comma_delimited_list
NeutronPassword:
- default: unset
description: The password for the neutron service and db account, used by neutron agents.
type: string
hidden: true
@@ -430,9 +481,8 @@ parameters:
NeutronTunnelTypes:
default: 'vxlan'
description: |
- The tunnel types for the Neutron tenant network. To specify multiple
- values, use a comma separated string, like so: 'gre,vxlan'
- type: string
+ The tunnel types for the Neutron tenant network.
+ type: comma_delimited_list
NeutronTunnelIdRanges:
description: |
Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
@@ -448,18 +498,31 @@ parameters:
NovaApiVirtualIP:
type: string
default: ''
+ NeutronWorkers:
+ default: 0
+ description: Number of workers for Neutron service.
+ type: number
+ NovaEnableDBPurge:
+ default: true
+ description: |
+ Whether to create cron job for purging soft deleted rows in Nova database.
+ type: boolean
NovaPassword:
- default: unset
description: The password for the nova service and db account, used by nova-api.
type: string
hidden: true
+ NovaWorkers:
+ default: 0
+ description: Number of workers for Nova service.
+ type: number
MongoDbNoJournal:
default: false
description: Should MongoDb journaling be disabled
type: boolean
NtpServer:
- type: string
default: ''
+ description: Comma-separated list of ntp servers
+ type: comma_delimited_list
PcsdPassword:
type: string
description: The password for the 'pcsd' user.
@@ -508,26 +571,10 @@ parameters:
description: The user name for SNMPd with readonly rights running on all Overcloud nodes
type: string
SnmpdReadonlyUserPassword:
- default: unset
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
- SSLCACertificate:
- default: ''
- description: If set, the contents of an SSL certificate authority file.
- type: string
- SSLCertificate:
- default: ''
- description: If set, the contents of an SSL certificate .crt file for encrypting SSL endpoints.
- type: string
- hidden: true
- SSLKey:
- default: ''
- description: If set, the contents of an SSL certificate .key file for encrypting SSL endpoints.
- type: string
- hidden: true
SwiftHashSuffix:
- default: unset
description: A random string to be used as a salt when hashing to determine mappings
in the ring.
hidden: true
@@ -545,7 +592,6 @@ parameters:
description: Partition Power to use when building Swift rings
type: number
SwiftPassword:
- default: unset
description: The password for the swift service account, used by the swift proxy
services.
hidden: true
@@ -557,6 +603,10 @@ parameters:
type: number
default: 3
description: How many replicas to use in the swift rings.
+ SwiftWorkers:
+ default: 0
+ description: Number of workers for Swift service.
+ type: number
VirtualIP: # DEPRECATED: use per service settings instead
type: string
default: '' # Has to be here because of the ignored empty value bug
@@ -590,6 +640,11 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint names -> connection data (protocol,
+ host, port, URIs). Typically set via parameter_defaults in the resource registry.
+ type: json
UpdateIdentifier:
default: ''
type: string
@@ -599,6 +654,37 @@ parameters:
Hostname:
type: string
default: '' # Defaults to Heat created hostname
+ NetworkDeploymentActions:
+ type: comma_delimited_list
+ description: >
+ Heat actions on which to apply network configuration changes
+ default: ['CREATE']
+ NodeIndex:
+ type: number
+ default: 0
+ SoftwareConfigTransport:
+ default: POLL_SERVER_CFN
+ description: |
+ How the server should receive the metadata required for software configuration.
+ type: string
+ constraints:
+ - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
+ CloudDomain:
+ default: ''
+ type: string
+ description: >
+ The DNS domain used for the hosts. This should match the dhcp_domain
+ configured in the Undercloud neutron. Defaults to localdomain.
+ ServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API.
+ type: json
+ SchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to nova
+ default: {}
resources:
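
The new *Workers parameters expose per-service worker counts as first-class overcloud parameters. Overriding them is a plain parameter_defaults entry; a minimal sketch with hypothetical values:

    # hypothetical environment file tuning API worker counts
    parameter_defaults:
      KeystoneWorkers: 4
      GlanceWorkers: 2
      HeatWorkers: 2
      NovaWorkers: 4
      NeutronWorkers: 4
      SwiftWorkers: 2
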
@@ -614,6 +700,9 @@ resources:
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
name: {get_param: Hostname}
+ software_config_transport: {get_param: SoftwareConfigTransport}
+ metadata: {get_param: ServerMetadata}
+ scheduler_hints: {get_param: SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -638,26 +727,41 @@ resources:
ExternalPort:
type: OS::TripleO::Controller::Ports::ExternalPort
properties:
+ IPPool: {get_param: ControllerIPs}
+ NodeIndex: {get_param: NodeIndex}
ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
InternalApiPort:
type: OS::TripleO::Controller::Ports::InternalApiPort
properties:
+ IPPool: {get_param: ControllerIPs}
+ NodeIndex: {get_param: NodeIndex}
ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
StoragePort:
type: OS::TripleO::Controller::Ports::StoragePort
properties:
+ IPPool: {get_param: ControllerIPs}
+ NodeIndex: {get_param: NodeIndex}
ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
StorageMgmtPort:
type: OS::TripleO::Controller::Ports::StorageMgmtPort
properties:
+ IPPool: {get_param: ControllerIPs}
+ NodeIndex: {get_param: NodeIndex}
ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
TenantPort:
type: OS::TripleO::Controller::Ports::TenantPort
properties:
+ IPPool: {get_param: ControllerIPs}
+ NodeIndex: {get_param: NodeIndex}
+ ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
+
+ ManagementPort:
+ type: OS::TripleO::Controller::Ports::ManagementPort
+ properties:
ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
NetIpMap:
@@ -669,6 +773,7 @@ resources:
StorageIp: {get_attr: [StoragePort, ip_address]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
TenantIp: {get_attr: [TenantPort, ip_address]}
+ ManagementIp: {get_attr: [ManagementPort, ip_address]}
NetIpSubnetMap:
type: OS::TripleO::Network::Ports::NetIpSubnetMap
@@ -679,6 +784,7 @@ resources:
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetworkConfig:
type: OS::TripleO::Controller::Net::SoftwareConfig
@@ -689,16 +795,33 @@ resources:
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
properties:
config: {get_resource: NetworkConfig}
server: {get_resource: Controller}
+ actions: {get_param: NetworkDeploymentActions}
input_values:
bridge_name: br-ex
interface_name: {get_param: NeutronPublicInterface}
+ # Resource for site-specific injection of root certificate
+ NodeTLSCAData:
+ depends_on: NetworkDeployment
+ type: OS::TripleO::NodeTLSCAData
+ properties:
+ server: {get_resource: Controller}
+
+ # Resource for site-specific passing of private keys/certificates
+ NodeTLSData:
+ depends_on: NodeTLSCAData
+ type: OS::TripleO::NodeTLSData
+ properties:
+ server: {get_resource: Controller}
+ NodeIndex: {get_param: NodeIndex}
+
ControllerDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: NetworkDeployment
@@ -707,7 +830,17 @@ resources:
server: {get_resource: Controller}
input_values:
bootstack_nodeid: {get_attr: [Controller, name]}
+ ceilometer_workers: {get_param: CeilometerWorkers}
+ cinder_workers: {get_param: CinderWorkers}
+ glance_workers: {get_param: GlanceWorkers}
+ heat_workers: {get_param: HeatWorkers}
+ keystone_workers: {get_param: KeystoneWorkers}
+ nova_workers: {get_param: NovaWorkers}
+ neutron_workers: {get_param: NeutronWorkers}
+ swift_workers: {get_param: SwiftWorkers}
neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
+ neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
+ neutron_enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
haproxy_log_address: {get_param: HAProxySyslogAddress}
heat.watch_server_url:
list_join:
@@ -727,24 +860,6 @@ resources:
- - 'http://'
- {get_param: HeatApiVirtualIP}
- ':8000/v1/waitcondition'
- heat_public_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: PublicVirtualIP}
- - ':8004/v1/%(tenant_id)s'
- heat_internal_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: HeatApiVirtualIP}
- - ':8004/v1/%(tenant_id)s'
- heat_admin_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: HeatApiVirtualIP}
- - ':8004/v1/%(tenant_id)s'
heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey}
horizon_allowed_hosts: {get_param: HorizonAllowedHosts}
horizon_secret: {get_param: HorizonSecret}
@@ -758,12 +873,9 @@ resources:
cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
cinder_nfs_servers:
str_replace:
- template: "['SERVERS']"
+ template: SERVERS
params:
- SERVERS:
- list_join:
- - "','"
- - {get_param: CinderNfsServers}
+ SERVERS: {get_param: CinderNfsServers}
cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
cinder_password: {get_param: CinderPassword}
cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
@@ -777,43 +889,7 @@ resources:
- '@'
- {get_param: MysqlVirtualIP}
- '/cinder'
- cinder_public_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: PublicVirtualIP}
- - ':8776/v1/%(tenant_id)s'
- cinder_internal_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: CinderApiVirtualIP}
- - ':8776/v1/%(tenant_id)s'
- cinder_admin_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: CinderApiVirtualIP}
- - ':8776/v1/%(tenant_id)s'
- cinder_public_url_v2:
- list_join:
- - ''
- - - 'http://'
- - {get_param: PublicVirtualIP}
- - ':8776/v2/%(tenant_id)s'
- cinder_internal_url_v2:
- list_join:
- - ''
- - - 'http://'
- - {get_param: CinderApiVirtualIP}
- - ':8776/v2/%(tenant_id)s'
- cinder_admin_url_v2:
- list_join:
- - ''
- - - 'http://'
- - {get_param: CinderApiVirtualIP}
- - ':8776/v2/%(tenant_id)s'
- glance_port: {get_param: GlancePort}
+ glance_port: {get_param: [EndpointMap, GlanceInternal, port]}
glance_password: {get_param: GlancePassword}
glance_backend: {get_param: GlanceBackend}
glance_file_pcmk_device: {get_param: GlanceFilePcmkDevice}
@@ -840,7 +916,6 @@ resources:
- '@'
- {get_param: MysqlVirtualIP}
- '/heat'
- keystone_auth_address: {list_join: ['', ['http://', {get_param: KeystonePublicApiVirtualIP} , ':5000/v2.0']]}
keystone_ca_certificate: {get_param: KeystoneCACertificate}
keystone_signing_key: {get_param: KeystoneSigningKey}
keystone_signing_certificate: {get_param: KeystoneSigningCertificate}
@@ -856,40 +931,18 @@ resources:
- '@'
- {get_param: MysqlVirtualIP}
- '/keystone'
- keystone_identity_uri:
- list_join:
- - ''
- - - 'http://'
- - {get_param: KeystoneAdminApiVirtualIP}
- - ':35357'
- keystone_auth_uri:
- list_join:
- - ''
- - - 'http://'
- - {get_param: KeystonePublicApiVirtualIP}
- - ':5000/v2.0/'
- keystone_public_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: PublicVirtualIP}
- - ':5000'
- keystone_internal_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: KeystonePublicApiVirtualIP}
- - ':5000'
- keystone_ec2_uri:
- list_join:
- - ''
- - - 'http://'
- - {get_param: KeystonePublicApiVirtualIP}
- - ':5000/v2.0/ec2tokens'
+ keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
+ keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ keystone_public_url: { get_param: [EndpointMap, KeystonePublic, uri_no_suffix] }
+ keystone_internal_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
+ keystone_ec2_uri: { get_param: [EndpointMap, KeystoneEC2, uri] }
enable_fencing: {get_param: EnableFencing}
enable_galera: {get_param: EnableGalera}
+ enable_load_balancer: {get_param: EnableLoadBalancer}
enable_ceph_storage: {get_param: EnableCephStorage}
enable_swift_storage: {get_param: EnableSwiftStorage}
+ manage_firewall: {get_param: ManageFirewall}
+ purge_firewall_rules: {get_param: PurgeFirewallRules}
mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize}
mysql_max_connections: {get_param: MysqlMaxConnections}
mysql_root_password: {get_param: MysqlRootPassword}
@@ -898,63 +951,72 @@ resources:
template: tripleo-CLUSTER
params:
CLUSTER: {get_param: MysqlClusterUniquePart}
- neutron_flat_networks: {get_param: NeutronFlatNetworks}
+ neutron_flat_networks:
+ str_replace:
+ template: NETWORKS
+ params:
+ NETWORKS: {get_param: NeutronFlatNetworks}
neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
neutron_agent_mode: {get_param: NeutronAgentMode}
neutron_router_distributed: {get_param: NeutronDVR}
neutron_core_plugin: {get_param: NeutronCorePlugin}
neutron_service_plugins:
str_replace:
- template: "['PLUGINS']"
+ template: PLUGINS
params:
- PLUGINS:
- list_join:
- - "','"
- - {get_param: NeutronServicePlugins}
+ PLUGINS: {get_param: NeutronServicePlugins}
neutron_type_drivers:
str_replace:
- template: "['DRIVERS']"
+ template: DRIVERS
+ params:
+ DRIVERS: {get_param: NeutronTypeDrivers}
+ neutron_enable_dhcp_agent: {get_param: NeutronEnableDHCPAgent}
+ neutron_enable_l3_agent: {get_param: NeutronEnableL3Agent}
+ neutron_enable_metadata_agent: {get_param: NeutronEnableMetadataAgent}
+ neutron_enable_ovs_agent: {get_param: NeutronEnableOVSAgent}
+ neutron_mechanism_drivers:
+ str_replace:
+ template: MECHANISMS
params:
- DRIVERS:
- list_join:
- - "','"
- - {get_param: NeutronTypeDrivers}
- neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers}
+ MECHANISMS: {get_param: NeutronMechanismDrivers}
neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
neutron_l3_ha: {get_param: NeutronL3HA}
neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
neutron_network_vlan_ranges:
str_replace:
- template: "['RANGES']"
+ template: RANGES
+ params:
+ RANGES: {get_param: NeutronNetworkVLANRanges}
+ neutron_bridge_mappings:
+ str_replace:
+ template: MAPPINGS
params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronNetworkVLANRanges}
- neutron_bridge_mappings: {get_param: NeutronBridgeMappings}
+ MAPPINGS: {get_param: NeutronBridgeMappings}
neutron_external_network_bridge: {get_param: NeutronExternalNetworkBridge}
neutron_public_interface: {get_param: NeutronPublicInterface}
neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
neutron_public_interface_default_route: {get_param: NeutronPublicInterfaceDefaultRoute}
neutron_public_interface_tag: {get_param: NeutronPublicInterfaceTag}
- neutron_tenant_network_type: {get_param: NeutronNetworkType}
- neutron_tunnel_types: {get_param: NeutronTunnelTypes}
neutron_tunnel_id_ranges:
str_replace:
- template: "['RANGES']"
+ template: RANGES
params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronTunnelIdRanges}
+ RANGES: {get_param: NeutronTunnelIdRanges}
neutron_vni_ranges:
str_replace:
- template: "['RANGES']"
+ template: RANGES
+ params:
+ RANGES: {get_param: NeutronVniRanges}
+ neutron_tenant_network_types:
+ str_replace:
+ template: TYPES
+ params:
+ TYPES: {get_param: NeutronNetworkType}
+ neutron_tunnel_types:
+ str_replace:
+ template: TYPES
params:
- RANGES:
- list_join:
- - "','"
- - {get_param: NeutronVniRanges}
+ TYPES: {get_param: NeutronTunnelTypes}
neutron_password: {get_param: NeutronPassword}
neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions}
neutron_dsn:
@@ -965,30 +1027,11 @@ resources:
- '@'
- {get_param: MysqlVirtualIP}
- '/ovs_neutron?charset=utf8'
- neutron_internal_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: NeutronApiVirtualIP}
- - ':9696'
- neutron_public_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: PublicVirtualIP}
- - ':9696'
- neutron_admin_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: NeutronApiVirtualIP}
- - ':9696'
- neutron_admin_auth_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: KeystoneAdminApiVirtualIP}
- - ':35357/v2.0'
+ neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] }
+ neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] }
+ neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] }
+ neutron_admin_auth_url: { get_param: [ EndpointMap, KeystoneAdmin, uri ] }
+ nova_internal_url: { get_param: [ EndpointMap, NovaInternal, uri ] }
ceilometer_backend: {get_param: CeilometerBackend}
ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
ceilometer_password: {get_param: CeilometerPassword}
@@ -1006,26 +1049,9 @@ resources:
- '@'
- {get_param: MysqlVirtualIP}
- '/ceilometer'
- ceilometer_public_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: PublicVirtualIP}
- - ':8777'
- ceilometer_internal_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: CeilometerApiVirtualIP}
- - ':8777'
- ceilometer_admin_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: CeilometerApiVirtualIP}
- - ':8777'
snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
+ nova_enable_db_purge: {get_param: NovaEnableDBPurge}
nova_password: {get_param: NovaPassword}
nova_dsn:
list_join:
@@ -1035,60 +1061,7 @@ resources:
- '@'
- {get_param: MysqlVirtualIP}
- '/nova'
- nova_public_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: PublicVirtualIP}
- - ':8774/v2/%(tenant_id)s'
- nova_internal_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: NovaApiVirtualIP}
- - ':8774/v2/%(tenant_id)s'
- nova_admin_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: NovaApiVirtualIP}
- - ':8774/v2/%(tenant_id)s'
- nova_v3_public_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: PublicVirtualIP}
- - ':8774/v3'
- nova_v3_internal_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: NovaApiVirtualIP}
- - ':8774/v3'
- nova_v3_admin_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: NovaApiVirtualIP}
- - ':8774/v3'
- nova_ec2_public_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: PublicVirtualIP}
- - ':8773/services/Cloud'
- nova_ec2_internal_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: NovaApiVirtualIP}
- - ':8773/services/Cloud'
- nova_ec2_admin_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: NovaApiVirtualIP}
- - ':8773/services/Admin'
+ instance_name_template: {get_param: InstanceNameTemplate}
fencing_config: {get_param: FencingConfig}
pcsd_password: {get_param: PcsdPassword}
rabbit_username: {get_param: RabbitUserName}
@@ -1105,11 +1078,7 @@ resources:
template: "'LIMIT'"
params:
LIMIT: {get_param: RabbitFDLimit}
- ntp_servers:
- str_replace:
- template: '["server"]'
- params:
- server: {get_param: NtpServer}
+ ntp_servers: {get_param: NtpServer}
control_virtual_interface: {get_param: ControlVirtualInterface}
public_virtual_interface: {get_param: PublicVirtualInterface}
swift_hash_suffix: {get_param: SwiftHashSuffix}
@@ -1118,42 +1087,6 @@ resources:
swift_replicas: {get_param: SwiftReplicas}
swift_min_part_hours: {get_param: SwiftMinPartHours}
swift_mount_check: {get_param: SwiftMountCheck}
- swift_public_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: PublicVirtualIP}
- - ':8080/v1/AUTH_%(tenant_id)s'
- swift_internal_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: SwiftProxyVirtualIP}
- - ':8080/v1/AUTH_%(tenant_id)s'
- swift_admin_url:
- list_join:
- - ''
- - - 'http://'
- - {get_param: SwiftProxyVirtualIP}
- - ':8080'
- swift_public_url_s3:
- list_join:
- - ''
- - - 'http://'
- - {get_param: PublicVirtualIP}
- - ':8080'
- swift_internal_url_s3:
- list_join:
- - ''
- - - 'http://'
- - {get_param: SwiftProxyVirtualIP}
- - ':8080'
- swift_admin_url_s3:
- list_join:
- - ''
- - - 'http://'
- - {get_param: SwiftProxyVirtualIP}
- - ':8080'
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
@@ -1162,39 +1095,8 @@ resources:
cinder_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
glance_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
glance_registry_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
- glance_api_servers:
- list_join:
- - ''
- - - {get_param: GlanceProtocol}
- - '://'
- - {get_param: GlanceApiVirtualIP}
- - ':'
- - {get_param: GlancePort}
+ glance_api_servers: { get_param: [EndpointMap, GlanceInternal, uri]}
glance_registry_host: {get_param: GlanceRegistryVirtualIP}
- glance_public_url:
- list_join:
- - ''
- - - {get_param: GlanceProtocol}
- - '://'
- - {get_param: PublicVirtualIP}
- - ':'
- - {get_param: GlancePort}
- glance_internal_url:
- list_join:
- - ''
- - - {get_param: GlanceProtocol}
- - '://'
- - {get_param: GlanceApiVirtualIP}
- - ':'
- - {get_param: GlancePort}
- glance_admin_url:
- list_join:
- - ''
- - - {get_param: GlanceProtocol}
- - '://'
- - {get_param: GlanceApiVirtualIP}
- - ':'
- - {get_param: GlancePort}
heat_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
keystone_public_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
keystone_admin_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
@@ -1244,6 +1146,8 @@ resources:
- neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
- neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
- cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
+ - neutron_nuage_data # Optionally provided by ControllerExtraConfigPre
+ - midonet_data # Optionally provided by AllNodesExtraConfig
datafiles:
controller_extraconfig:
mapped_data: {get_param: ControllerExtraConfig}
@@ -1268,6 +1172,7 @@ resources:
# Pacemaker
enable_fencing: {get_input: enable_fencing}
+ enable_load_balancer: {get_input: enable_load_balancer}
hacluster_pwd: {get_input: pcsd_password}
tripleo::fencing::config: {get_input: fencing_config}
@@ -1278,18 +1183,11 @@ resources:
swift::storage::all::storage_local_net_ip: {get_input: swift_management_network}
swift::swift_hash_suffix: {get_input: swift_hash_suffix}
swift::proxy::authtoken::admin_password: {get_input: swift_password}
+ swift::proxy::workers: {get_input: swift_workers}
tripleo::ringbuilder::part_power: {get_input: swift_part_power}
tripleo::ringbuilder::replicas: {get_input: swift_replicas}
tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours}
swift_mount_check: {get_input: swift_mount_check}
- swift::keystone::auth::public_url: {get_input: swift_public_url }
- swift::keystone::auth::internal_url: {get_input: swift_internal_url }
- swift::keystone::auth::admin_url: {get_input: swift_admin_url }
- swift::keystone::auth::public_url_s3: {get_input: swift_public_url_v3 }
- swift::keystone::auth::internal_url_s3: {get_input: swift_internal_url_v3 }
- swift::keystone::auth::admin_url_s3: {get_input: swift_admin_url_v3 }
- swift::keystone::auth::password: {get_input: swift_password }
- swift::keystone::auth::region: {get_input: keystone_region}
# NOTE(dprince): build_ring support is currently not wired in.
# See: https://review.openstack.org/#/c/109225/
@@ -1317,14 +1215,6 @@ resources:
cinder::glance::glance_api_servers: {get_input: glance_api_servers}
cinder_backend_config: {get_input: CinderBackendConfig}
cinder::db::mysql::password: {get_input: cinder_password}
- cinder::keystone::auth::public_url: {get_input: cinder_public_url }
- cinder::keystone::auth::internal_url: {get_input: cinder_internal_url }
- cinder::keystone::auth::admin_url: {get_input: cinder_admin_url }
- cinder::keystone::auth::public_url_v2: {get_input: cinder_public_url_v2 }
- cinder::keystone::auth::internal_url_v2: {get_input: cinder_internal_url_v2 }
- cinder::keystone::auth::admin_url_v2: {get_input: cinder_admin_url_v2 }
- cinder::keystone::auth::password: {get_input: cinder_password }
- cinder::keystone::auth::region: {get_input: keystone_region}
# Glance
glance::api::bind_port: {get_input: glance_port}
@@ -1334,6 +1224,7 @@ resources:
glance::api::registry_host: {get_input: glance_registry_host}
glance::api::keystone_password: {get_input: glance_password}
glance::api::debug: {get_input: debug}
+ glance::api::workers: {get_input: glance_workers}
glance_notifier_strategy: {get_input: glance_notifier_strategy}
glance_log_file: {get_input: glance_log_file}
@@ -1344,16 +1235,12 @@ resources:
glance::registry::auth_uri: {get_input: keystone_auth_uri}
glance::registry::identity_uri: {get_input: keystone_identity_uri}
glance::registry::debug: {get_input: debug}
- glance::backend::swift::swift_store_auth_address: {get_input: keystone_auth_address}
+ glance::backend::swift::swift_store_auth_address: {get_input: keystone_auth_uri}
+ glance::registry::workers: {get_input: glance_workers}
glance::backend::swift::swift_store_user: service:glance
glance::backend::swift::swift_store_key: {get_input: glance_password}
glance_backend: {get_input: glance_backend}
glance::db::mysql::password: {get_input: glance_password}
- glance::keystone::auth::public_url: {get_input: glance_public_url }
- glance::keystone::auth::internal_url: {get_input: glance_internal_url }
- glance::keystone::auth::admin_url: {get_input: glance_admin_url }
- glance::keystone::auth::password: {get_input: glance_password }
- glance::keystone::auth::region: {get_input: keystone_region}
glance_file_pcmk_device: {get_input: glance_file_pcmk_device}
glance_file_pcmk_fstype: {get_input: glance_file_pcmk_fstype}
glance_file_pcmk_manage: {get_input: glance_file_pcmk_manage}
@@ -1374,16 +1261,14 @@ resources:
heat::identity_uri: {get_input: keystone_identity_uri}
heat::keystone_password: {get_input: heat_password}
heat::api::bind_host: {get_input: heat_api_network}
+ heat::api::workers: {get_input: heat_workers}
heat::api_cloudwatch::bind_host: {get_input: heat_api_network}
+ heat::api_cloudwatch::workers: {get_input: heat_workers}
heat::api_cfn::bind_host: {get_input: heat_api_network}
+ heat::api_cfn::workers: {get_input: heat_workers}
heat::database_connection: {get_input: heat_dsn}
heat::debug: {get_input: debug}
heat::db::mysql::password: {get_input: heat_password}
- heat::keystone::auth::public_url: {get_input: heat_public_url }
- heat::keystone::auth::internal_url: {get_input: heat_internal_url }
- heat::keystone::auth::admin_url: {get_input: heat_admin_url }
- heat::keystone::auth::password: {get_input: heat_password }
- heat::keystone::auth::region: {get_input: keystone_region}
# Keystone
keystone::admin_token: {get_input: admin_token}
@@ -1409,6 +1294,9 @@ resources:
keystone::endpoint::internal_url: {get_input: keystone_internal_url}
keystone::endpoint::admin_url: {get_input: keystone_identity_uri}
keystone::endpoint::region: {get_input: keystone_region}
+ keystone::admin_workers: {get_input: keystone_workers}
+ keystone::public_workers: {get_input: keystone_workers}
+
# MongoDB
mongodb::server::bind_ip: {get_input: mongo_db_network}
mongodb::server::nojournal: {get_input: mongodb_no_journal}
@@ -1427,38 +1315,46 @@ resources:
# Neutron
neutron::bind_host: {get_input: neutron_api_network}
neutron::rabbit_password: {get_input: rabbit_password}
- neutron::rabbit_user: {get_input: rabbit_user}
+ neutron::rabbit_user: {get_input: rabbit_username}
neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
neutron::rabbit_port: {get_input: rabbit_client_port}
neutron::debug: {get_input: debug}
neutron::server::auth_uri: {get_input: keystone_auth_uri}
neutron::server::identity_uri: {get_input: keystone_identity_uri}
neutron::server::database_connection: {get_input: neutron_dsn}
+ neutron::server::api_workers: {get_input: neutron_workers}
neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge}
neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling}
+ neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
+ neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata}
neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
- neutron_flat_networks: {get_input: neutron_flat_networks}
+ neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks}
neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network}
+ neutron::agents::metadata::metadata_workers: {get_input: neutron_workers}
neutron_agent_mode: {get_input: neutron_agent_mode}
neutron_router_distributed: {get_input: neutron_router_distributed}
neutron::core_plugin: {get_input: neutron_core_plugin}
neutron::service_plugins: {get_input: neutron_service_plugins}
+ neutron::enable_dhcp_agent: {get_input: neutron_enable_dhcp_agent}
+ neutron::enable_l3_agent: {get_input: neutron_enable_l3_agent}
+ neutron::enable_metadata_agent: {get_input: neutron_enable_metadata_agent}
+ neutron::enable_ovs_agent: {get_input: neutron_enable_ovs_agent}
neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers}
- neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers}
+ neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers}
neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover}
neutron::server::l3_ha: {get_input: neutron_l3_ha}
neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network}
neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
- neutron_bridge_mappings: {get_input: neutron_bridge_mappings}
+ neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings}
neutron_public_interface: {get_input: neutron_public_interface}
neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device}
neutron_public_interface_default_route: {get_input: neutron_public_interface_default_route}
neutron_public_interface_tag: {get_input: neutron_public_interface_tag}
- neutron_tenant_network_type: {get_input: neutron_tenant_network_type}
- neutron_tunnel_types: {get_input: neutron_tunnel_types}
+ neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types}
+ neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
neutron::server::auth_password: {get_input: neutron_password}
neutron::agents::metadata::auth_password: {get_input: neutron_password}
neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options}
@@ -1470,6 +1366,10 @@ resources:
neutron::keystone::auth::admin_url: {get_input: neutron_admin_url }
neutron::keystone::auth::password: {get_input: neutron_password }
neutron::keystone::auth::region: {get_input: keystone_region}
+ neutron::server::notifications::nova_url: {get_input: nova_internal_url}
+ neutron::server::notifications::auth_url: {get_input: neutron_admin_auth_url}
+ neutron::server::notifications::tenant_name: 'service'
+ neutron::server::notifications::password: {get_input: nova_password}
# Ceilometer
ceilometer_backend: {get_input: ceilometer_backend}
@@ -1485,14 +1385,9 @@ resources:
ceilometer::api::keystone_auth_uri: {get_input: keystone_auth_uri}
ceilometer::api::keystone_identity_uri: {get_input: keystone_identity_uri}
ceilometer::agent::auth::auth_password: {get_input: ceilometer_password}
- ceilometer::agent::auth::auth_url: {get_input: keystone_auth_address}
+ ceilometer::agent::auth::auth_url: {get_input: keystone_auth_uri}
ceilometer::agent::central::coordination_url: {get_input: ceilometer_coordination_url}
ceilometer::db::mysql::password: {get_input: ceilometer_password}
- ceilometer::keystone::auth::public_url: {get_input: ceilometer_public_url }
- ceilometer::keystone::auth::internal_url: {get_input: ceilometer_internal_url }
- ceilometer::keystone::auth::admin_url: {get_input: ceilometer_admin_url }
- ceilometer::keystone::auth::password: {get_input: ceilometer_password }
- ceilometer::keystone::auth::region: {get_input: keystone_region}
snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
@@ -1507,25 +1402,19 @@ resources:
nova::api::api_bind_address: {get_input: nova_api_network}
nova::api::metadata_listen: {get_input: nova_metadata_network}
nova::api::admin_password: {get_input: nova_password}
+ nova::api::osapi_compute_workers: {get_input: nova_workers}
+ nova::api::ec2_workers: {get_input: nova_workers}
+ nova::api::metadata_workers: {get_input: nova_workers}
nova::database_connection: {get_input: nova_dsn}
nova::glance_api_servers: {get_input: glance_api_servers}
nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
+ nova::api::instance_name_template: {get_input: instance_name_template}
nova::network::neutron::neutron_admin_password: {get_input: neutron_password}
nova::network::neutron::neutron_url: {get_input: neutron_internal_url}
nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url}
nova::vncproxy::host: {get_input: nova_api_network}
nova::db::mysql::password: {get_input: nova_password}
- nova::keystone::auth::public_url: {get_input: nova_public_url}
- nova::keystone::auth::internal_url: {get_input: nova_internal_url}
- nova::keystone::auth::admin_url: {get_input: nova_admin_url}
- nova::keystone::auth::public_url_v3: {get_input: nova_v3_public_url}
- nova::keystone::auth::internal_url_v3: {get_input: nova_v3_internal_url}
- nova::keystone::auth::admin_url_v3: {get_input: nova_v3_admin_url}
- nova::keystone::auth::ec2_public_url: {get_input: nova_ec2_public_url}
- nova::keystone::auth::ec2_internal_url: {get_input: nova_ec2_internal_url}
- nova::keystone::auth::ec2_admin_url: {get_input: nova_ec2_admin_url}
- nova::keystone::auth::password: {get_input: nova_password }
- nova::keystone::auth::region: {get_input: keystone_region}
+ nova_enable_db_purge: {get_input: nova_enable_db_purge}
# Horizon
apache::ip: {get_input: horizon_network}
@@ -1539,9 +1428,14 @@ resources:
rabbitmq::node_ip_address: {get_input: rabbitmq_network}
rabbitmq::erlang_cookie: {get_input: rabbit_cookie}
rabbitmq::file_limit: {get_input: rabbit_fd_limit}
+ rabbitmq::default_user: {get_input: rabbit_username}
+ rabbitmq::default_pass: {get_input: rabbit_password}
# Redis
redis::bind: {get_input: redis_network}
redis_vip: {get_input: redis_vip}
+ # Firewall
+ tripleo::firewall::manage_firewall: {get_input: manage_firewall}
+ tripleo::firewall::purge_firewall_rules: {get_input: purge_firewall_rules}
# Misc
memcached::listen_ip: {get_input: memcached_network}
neutron_public_interface_ip: {get_input: neutron_public_interface_ip}
@@ -1551,6 +1445,7 @@ resources:
tripleo::loadbalancer::control_virtual_interface: {get_input: control_virtual_interface}
tripleo::loadbalancer::public_virtual_interface: {get_input: public_virtual_interface}
tripleo::loadbalancer::haproxy_log_address: {get_input: haproxy_log_address}
+ tripleo::loadbalancer::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]}
tripleo::packages::enable_install: {get_input: enable_package_install}
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
@@ -1564,7 +1459,7 @@ resources:
# Hook for site-specific additional pre-deployment config,
# applying to all nodes, e.g node registration/unregistration
NodeExtraConfig:
- depends_on: ControllerExtraConfigPre
+ depends_on: [ControllerExtraConfigPre, NodeTLSData]
type: OS::TripleO::NodeExtraConfig
properties:
server: {get_resource: Controller}
@@ -1600,6 +1495,9 @@ outputs:
tenant_ip_address:
description: IP address of the server in the tenant network
value: {get_attr: [TenantPort, ip_address]}
+ management_ip_address:
+ description: IP address of the server in the management network
+ value: {get_attr: [ManagementPort, ip_address]}
hostname:
description: Hostname of the server
value: {get_attr: [Controller, name]}
@@ -1615,9 +1513,10 @@ outputs:
Server's IP address and hostname in the /etc/hosts format
value:
str_replace:
- template: IP HOST.localdomain HOST CLOUDNAME
+ template: IP HOST.DOMAIN HOST CLOUDNAME
params:
IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
HOST: {get_attr: [Controller, name]}
CLOUDNAME: {get_param: CloudName}
nova_server_resource:
@@ -1644,5 +1543,13 @@ outputs:
list_join:
- ','
- - {get_attr: [ControllerDeployment, deploy_stdout]}
+ - {get_attr: [NodeTLSCAData, deploy_stdout]}
+ - {get_attr: [NodeTLSData, deploy_stdout]}
- {get_attr: [ControllerExtraConfigPre, deploy_stdout]}
- {get_param: UpdateIdentifier}
+ tls_key_modulus_md5:
+ description: MD5 checksum of the TLS Key Modulus
+ value: {get_attr: [NodeTLSData, key_modulus_md5]}
+ tls_cert_modulus_md5:
+ description: MD5 checksum of the TLS Certificate Modulus
+ value: {get_attr: [NodeTLSData, cert_modulus_md5]}
diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
new file mode 100644
index 00000000..26ce7138
--- /dev/null
+++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
@@ -0,0 +1,119 @@
+heat_template_version: 2015-10-15
+
+description: Configure hieradata for all MidoNet nodes
+
+parameters:
+ # Parameters passed from the parent template
+ controller_servers:
+ type: json
+ compute_servers:
+ type: json
+ blockstorage_servers:
+ type: json
+ objectstorage_servers:
+ type: json
+ cephstorage_servers:
+ type: json
+
+ EnableZookeeperOnController:
+ label: Enable Zookeeper On Controller
+ description: 'Whether to enable the Zookeeper cluster on the Controller'
+ type: boolean
+ default: false
+ EnableCassandraOnController:
+ label: Enable Cassandra On Controller
+ description: 'Whether to enable the Cassandra cluster on the Controller'
+ type: boolean
+ default: false
+ CassandraStoragePort:
+ label: Cassandra Storage Port
+ description: 'The Cassandra port for inter-node communication'
+ type: string
+ default: '7000'
+ CassandraSslStoragePort:
+ label: Cassandra SSL Storage Port
+ description: 'The SSL port for encrypted communication. Unused unless enabled in encryption_options'
+ type: string
+ default: '7001'
+ CassandraClientPort:
+ label: Cassandra Client Port
+ description: 'Native Transport Port'
+ type: string
+ default: '9042'
+ CassandraClientPortThrift:
+ label: Cassandra Client Thrift Port
+ description: 'The port for the Thrift RPC service, which is used for client connections'
+ type: string
+ default: '9160'
+ TunnelZoneName:
+ label: Name of the Tunnel Zone
+ description: 'Name of the tunnel zone used to tunnel packets'
+ type: string
+ default: 'tunnelzone_tripleo'
+ TunnelZoneType:
+ label: Type of the Tunnel
+ description: 'Type of the tunnels on the overlay. Choose between `gre` and `vxlan`'
+ type: string
+ default: 'vxlan'
+
+resources:
+
+ NetworkMidoNetConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ hiera:
+ datafiles:
+ midonet_data:
+ mapped_data:
+ enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController}
+ enable_cassandra_on_controller: {get_param: EnableCassandraOnController}
+ midonet_tunnelzone_name: {get_param: TunnelZoneName}
+ midonet_tunnelzone_type: {get_param: TunnelZoneType}
+ midonet_libvirt_qemu_data: |
+ user = "root"
+ group = "root"
+ cgroup_device_acl = [
+ "/dev/null", "/dev/full", "/dev/zero",
+ "/dev/random", "/dev/urandom",
+ "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+ "/dev/rtc","/dev/hpet", "/dev/vfio/vfio",
+ "/dev/net/tun"
+ ]
+ tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort}
+ tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort}
+ tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort}
+ tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift}
+ tripleo::loadbalancer::midonet_api: true
+ # Neutron Puppet settings required for MidoNet that are not set elsewhere
+ neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver'
+ neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver'
+ neutron::plugins::midonet::midonet_api_port: 8081
+ neutron::params::midonet_server_package: 'python-networking-midonet'
+
+ # Make sure the l3 agent does not run
+ l3_agent_service: false
+ neutron::agents::l3::manage_service: false
+ neutron::agents::l3::enabled: false
+
+
+ NetworkMidonetDeploymentControllers:
+ type: OS::Heat::StructuredDeploymentGroup
+ properties:
+ config: {get_resource: NetworkMidoNetConfig}
+ servers: {get_param: controller_servers}
+
+ NetworkMidonetDeploymentComputes:
+ type: OS::Heat::StructuredDeploymentGroup
+ properties:
+ config: {get_resource: NetworkMidoNetConfig}
+ servers: {get_param: compute_servers}
+
+outputs:
+ config_identifier:
+ value:
+ list_join:
+ - ' '
+ - - {get_attr: [NetworkMidonetDeploymentControllers, deploy_stdouts]}
+ - {get_attr: [NetworkMidonetDeploymentComputes, deploy_stdouts]}
diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml
index 62907104..7cefc24b 100644
--- a/puppet/extraconfig/ceph/ceph-external-config.yaml
+++ b/puppet/extraconfig/ceph/ceph-external-config.yaml
@@ -29,6 +29,18 @@ parameters:
type: comma_delimited_list
ceph_mon_ips:
type: comma_delimited_list
+ NovaRbdPoolName:
+ default: vms
+ type: string
+ CinderRbdPoolName:
+ default: volumes
+ type: string
+ GlanceRbdPoolName:
+ default: images
+ type: string
+ CephClientUserName:
+ default: openstack
+ type: string
resources:
CephClusterConfigImpl:
@@ -47,16 +59,34 @@ resources:
ceph::profile::params::client_keys:
str_replace:
template: "{
- client.openstack: {
+ client.CLIENT_USER: {
secret: 'CLIENT_KEY',
mode: '0644',
cap_mon: 'allow r',
- cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images'
+ cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL'
}
}"
params:
+ CLIENT_USER: {get_param: CephClientUserName}
CLIENT_KEY: {get_param: ceph_client_key}
-
+ NOVA_POOL: {get_param: NovaRbdPoolName}
+ CINDER_POOL: {get_param: CinderRbdPoolName}
+ GLANCE_POOL: {get_param: GlanceRbdPoolName}
+ nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
+ cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
+ glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+ nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
+ glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
+ nova::compute::rbd::rbd_keyring:
+ list_join:
+ - '.'
+ - - 'client'
+ - {get_param: CephClientUserName}
+ ceph_client_user_name: {get_param: CephClientUserName}
+ ceph_pools:
+ - {get_param: CinderRbdPoolName}
+ - {get_param: NovaRbdPoolName}
+ - {get_param: GlanceRbdPoolName}
outputs:
config_id:
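The new parameters default to the historical pool and user names (volumes, vms, images, openstack). A sketch of overriding them for an external Ceph cluster that uses different names, passed via parameter_defaults (the values below are illustrative):

parameter_defaults:
  CinderRbdPoolName: prod-volumes
  NovaRbdPoolName: prod-vms
  GlanceRbdPoolName: prod-images
  CephClientUserName: tripleo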
diff --git a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
new file mode 100644
index 00000000..96368e37
--- /dev/null
+++ b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
@@ -0,0 +1,92 @@
+heat_template_version: 2015-04-30
+
+description: Configure hieradata for Nuage configuration on the Compute
+
+parameters:
+ server:
+ description: ID of the compute node to apply this config to
+ type: string
+
+ NuageActiveController:
+ description: IP address of the Active Virtualized Services Controller (VSC)
+ type: string
+ NuageStandbyController:
+ description: IP address of the Standby Virtualized Services Controller (VSC)
+ type: string
+ NuageMetadataPort:
+ description: TCP Port to listen for metadata server requests
+ type: string
+ default: '9697'
+ NuageNovaMetadataPort:
+ description: TCP Port used by Nova metadata server
+ type: string
+ default: '8775'
+ NuageMetadataProxySharedSecret:
+ description: Shared secret to sign the instance-id request
+ type: string
+ NuageNovaClientVersion:
+ description: Nova client version
+ type: string
+ default: '2'
+ NuageNovaOsUsername:
+ description: Nova username in keystone_authtoken
+ type: string
+ default: 'nova'
+ NuageMetadataAgentStartWithOvs:
+ description: Set to True if nuage-metadata-agent needs to be started with nuage-openvswitch-switch
+ type: string
+ default: 'True'
+ NuageNovaApiEndpoint:
+ description: One of publicURL, internalURL, adminURL in "keystone endpoint-list"
+ type: string
+ default: 'publicURL'
+ NuageNovaRegionName:
+ description: Region name in "keystone endpoint-list"
+ type: string
+ default: 'regionOne'
+
+# Declaration of resources for the template.
+resources:
+ NovaNuageConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ hiera:
+ datafiles:
+ nova_nuage_data:
+ mapped_data:
+ nuage::vrs::active_controller: {get_input: ActiveController}
+ nuage::vrs::standby_controller: {get_input: StandbyController}
+ nuage::metadataagent::metadata_port: {get_input: MetadataPort}
+ nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort}
+ nuage::metadataagent::metadata_secret: {get_input: SharedSecret}
+ nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion}
+ nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername}
+ nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs}
+ nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType}
+ nuage::metadataagent::nova_region_name: {get_input: NovaRegionName}
+
+ NovaNuageDeployment:
+ type: OS::Heat::StructuredDeployment
+ properties:
+ config: {get_resource: NovaNuageConfig}
+ server: {get_param: server}
+ input_values:
+ ActiveController: {get_param: NuageActiveController}
+ StandbyController: {get_param: NuageStandbyController}
+ MetadataPort: {get_param: NuageMetadataPort}
+ NovaMetadataPort: {get_param: NuageNovaMetadataPort}
+ SharedSecret: {get_param: NuageMetadataProxySharedSecret}
+ NovaClientVersion: {get_param: NuageNovaClientVersion}
+ NovaOsUsername: {get_param: NuageNovaOsUsername}
+ MetadataAgentStartWithOvs: {get_param: NuageMetadataAgentStartWithOvs}
+ NovaApiEndpointType: {get_param: NuageNovaApiEndpoint}
+ NovaRegionName: {get_param: NuageNovaRegionName}
+
+# Specify output parameters that will be available
+# after the template is instantiated.
+outputs:
+ deploy_stdout:
+ description: Deployment reference, used to trigger puppet apply on changes
+ value: {get_attr: [NovaNuageDeployment, deploy_stdout]}
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
index 5985116b..6730ddf1 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
@@ -69,6 +69,9 @@ parameters:
N1000vExistingBridge:
type: boolean
default: true
+ N1000vVSMHostMgmtIntfVlan:
+ type: number
+ default: 0
#Plugin Parameters
N1000vVSMUser:
type: string
@@ -125,6 +128,7 @@ resources:
n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask}
n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip}
n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip}
+ n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_bridge_vlan}
# Cisco N1KV driver Parameters
neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip}
neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username}
@@ -159,6 +163,7 @@ resources:
n1kv_vsm_password: {get_param: N1000vVSMPassword}
n1kv_vsm_mgmt_netmask: {get_param: N1000vMgmtNetmask}
n1kv_vsm_gateway_ip: {get_param: N1000vMgmtGatewayIP}
+ n1kv_phy_bridge_vlan: {get_param: N1000vVSMHostMgmtIntfVlan}
n1kv_vsm_pacemaker_ctrl: {get_param: N1000vPacemakerControl}
n1kv_vsm_existing_br: {get_param: N1000vExistingBridge}
n1kv_vsm_username: {get_param: N1000vVSMUser}
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml
new file mode 100644
index 00000000..8378d2fc
--- /dev/null
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml
@@ -0,0 +1,90 @@
+heat_template_version: 2015-04-30
+
+description: Configure hieradata for Nuage configuration on the Controller
+
+parameters:
+ server:
+ description: ID of the controller node to apply this config to
+ type: string
+
+ # Config specific parameters, to be provided via parameter_defaults
+ NeutronNuageOSControllerIp:
+ description: IP address of the OpenStack Controller
+ type: string
+
+ NeutronNuageNetPartitionName:
+ description: Specifies the title that you will see on the VSD
+ type: string
+ default: 'default_name'
+
+ NeutronNuageVSDIp:
+ description: IP address and port of the Virtualized Services Directory (VSD)
+ type: string
+
+ NeutronNuageVSDUsername:
+ description: Username to be used to log into VSD
+ type: string
+
+ NeutronNuageVSDPassword:
+ description: Password to be used to log into VSD
+ type: string
+
+ NeutronNuageVSDOrganization:
+ description: Organization parameter required to log into VSD
+ type: string
+ default: 'organization'
+
+ NeutronNuageBaseURIVersion:
+ description: URI version to be used based on the VSD release
+ type: string
+ default: 'default_uri_version'
+
+ NeutronNuageCMSId:
+ description: Cloud Management System ID (CMS ID) to distinguish between OS instances on the same VSD
+ type: string
+
+ UseForwardedFor:
+ description: Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy.
+ type: boolean
+ default: false
+
+resources:
+ NeutronNuageConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: os-apply-config
+ config:
+ hiera:
+ datafiles:
+ neutron_nuage_data:
+ mapped_data:
+ neutron::plugins::nuage::nuage_oscontroller_ip: {get_input: NuageOSControllerIp}
+ neutron::plugins::nuage::nuage_net_partition_name: {get_input: NuageNetPartitionName}
+ neutron::plugins::nuage::nuage_vsd_ip: {get_input: NuageVSDIp}
+ neutron::plugins::nuage::nuage_vsd_username: {get_input: NuageVSDUsername}
+ neutron::plugins::nuage::nuage_vsd_password: {get_input: NuageVSDPassword}
+ neutron::plugins::nuage::nuage_vsd_organization: {get_input: NuageVSDOrganization}
+ neutron::plugins::nuage::nuage_base_uri_version: {get_input: NuageBaseURIVersion}
+ neutron::plugins::nuage::nuage_cms_id: {get_input: NuageCMSId}
+ nova::api::use_forwarded_for: {get_input: NovaUseForwardedFor}
+
+ NeutronNuageDeployment:
+ type: OS::Heat::StructuredDeployment
+ properties:
+ config: {get_resource: NeutronNuageConfig}
+ server: {get_param: server}
+ input_values:
+ NuageOSControllerIp: {get_param: NeutronNuageOSControllerIp}
+ NuageNetPartitionName: {get_param: NeutronNuageNetPartitionName}
+ NuageVSDIp: {get_param: NeutronNuageVSDIp}
+ NuageVSDUsername: {get_param: NeutronNuageVSDUsername}
+ NuageVSDPassword: {get_param: NeutronNuageVSDPassword}
+ NuageVSDOrganization: {get_param: NeutronNuageVSDOrganization}
+ NuageBaseURIVersion: {get_param: NeutronNuageBaseURIVersion}
+ NuageCMSId: {get_param: NeutronNuageCMSId}
+ NovaUseForwardedFor: {get_param: UseForwardedFor}
+
+outputs:
+ deploy_stdout:
+ description: Deployment reference, used to trigger puppet apply on changes
+ value: {get_attr: [NeutronNuageDeployment, deploy_stdout]}
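A sketch of the controller-side counterpart, assuming the template is mapped to the controller pre-deployment hook already referenced by controller.yaml (ControllerExtraConfigPre); the registry key, path, and values are illustrative, only the parameter names come from the template above:

resource_registry:
  # Assumed mapping; the real registry entry is defined outside this diff.
  OS::TripleO::ControllerExtraConfigPre: puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml

parameter_defaults:
  NeutronNuageOSControllerIp: 192.0.2.10
  NeutronNuageVSDIp: 192.0.2.20:8443
  NeutronNuageVSDUsername: 'csproot'
  NeutronNuageVSDPassword: 'changeme'
  NeutronNuageVSDOrganization: 'csp'
  NeutronNuageNetPartitionName: 'tripleo-partition'
  NeutronNuageBaseURIVersion: 'v3_2'
  NeutronNuageCMSId: 'replace-with-cms-id'
  UseForwardedFor: true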
diff --git a/puppet/extraconfig/tls/ca-inject.yaml b/puppet/extraconfig/tls/ca-inject.yaml
new file mode 100644
index 00000000..7e34f071
--- /dev/null
+++ b/puppet/extraconfig/tls/ca-inject.yaml
@@ -0,0 +1,66 @@
+heat_template_version: 2015-04-30
+
+description: >
+ This is a template which injects a trusted CA certificate (trust anchor) into the overcloud nodes.
+
+parameters:
+ # Can be overridden via parameter_defaults in the environment
+ SSLRootCertificate:
+ description: >
+ The content of a CA's SSL certificate file in PEM format.
+ This is evaluated on the client side.
+ type: string
+ SSLRootCertificatePath:
+ default: '/etc/pki/ca-trust/source/anchors/ca.crt.pem'
+ description: >
+ The filepath of the root certificate as it will be stored on the nodes.
+ Note that the path has to be one that is picked up by the update
+ trust anchors command, e.g. on RHEL it would be
+ /etc/pki/ca-trust/source/anchors/ca.crt.pem
+ type: string
+ UpdateTrustAnchorsCommand:
+ default: update-ca-trust extract
+ description: >
+ Command that will be executed to update the trust anchors.
+ type: string
+
+ # Passed in by controller.yaml
+ server:
+ description: ID of the node to apply this config to
+ type: string
+
+resources:
+ CAConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: cacert_path
+ - name: cacert_content
+ - name: update_anchor_command
+ outputs:
+ - name: root_cert_md5sum
+ config: |
+ #!/bin/sh
+ cat > ${cacert_path} << EOF
+ ${cacert_content}
+ EOF
+ chmod 0440 ${cacert_path}
+ chown root:root ${cacert_path}
+ ${update_anchor_command}
+ md5sum ${cacert_path} > ${heat_outputs_path}.root_cert_md5sum
+
+ CADeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ config: {get_resource: CAConfig}
+ server: {get_param: server}
+ input_values:
+ cacert_path: {get_param: SSLRootCertificatePath}
+ cacert_content: {get_param: SSLRootCertificate}
+ update_anchor_command: {get_param: UpdateTrustAnchorsCommand}
+
+outputs:
+ deploy_stdout:
+ description: Deployment reference
+ value: {get_attr: [CADeployment, root_cert_md5sum]}
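A minimal sketch of enabling the CA injection above, assuming the template is mapped to the NodeTLSCAData resource referenced by the controller template earlier in this diff (the registry key and path are assumptions; the certificate body is a placeholder):

resource_registry:
  # Assumed mapping; the real registry entry is defined outside this diff.
  OS::TripleO::NodeTLSCAData: puppet/extraconfig/tls/ca-inject.yaml

parameter_defaults:
  SSLRootCertificate: |
    -----BEGIN CERTIFICATE-----
    (placeholder PEM body)
    -----END CERTIFICATE-----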
diff --git a/puppet/extraconfig/tls/no-ca.yaml b/puppet/extraconfig/tls/no-ca.yaml
new file mode 100644
index 00000000..5862a85c
--- /dev/null
+++ b/puppet/extraconfig/tls/no-ca.yaml
@@ -0,0 +1,17 @@
+heat_template_version: 2015-04-30
+
+description: >
+ This is a default no-op template which can be passed to the
+ OS::Nova::Server resources. This template can be replaced with
+ a different implementation via the resource registry, such that
+ deployers may customize their configuration.
+
+parameters:
+ server: # Here for compatibility with controller.yaml
+ description: ID of the controller node to apply this config to
+ type: string
+
+outputs:
+ deploy_stdout:
+ description: Deployment reference, used to trigger puppet apply on changes
+ value: 'Root CA cert injection not enabled.'
diff --git a/puppet/extraconfig/tls/no-tls.yaml b/puppet/extraconfig/tls/no-tls.yaml
new file mode 100644
index 00000000..a2b5c569
--- /dev/null
+++ b/puppet/extraconfig/tls/no-tls.yaml
@@ -0,0 +1,34 @@
+heat_template_version: 2015-04-30
+
+description: >
+ This is a default no-op template. This defines the parameters that
+ need to be passed in order to have TLS enabled in the controller
+ nodes. This template can be replaced with a different
+ implementation via the resource registry, such that deployers
+ may customize their configuration.
+
+parameters:
+ DeployedSSLCertificatePath:
+ default: ''
+ description: >
+ The filepath of the certificate as it will be stored in the controller.
+ type: string
+ NodeIndex: # Here for compatibility with puppet/controller.yaml
+ default: 0
+ type: number
+ server: # Here for compatibility with puppet/controller.yaml
+ description: ID of the controller node to apply this config to
+ type: string
+
+outputs:
+ deploy_stdout:
+ description: Deployment reference, used to trigger puppet apply on changes
+ value: 'TLS not enabled.'
+ deployed_ssl_certificate_path:
+ value: ''
+ key_modulus_md5:
+ description: Key SSL Modulus
+ value: ''
+ cert_modulus_md5:
+ description: Certificate SSL Modulus
+ value: ''
diff --git a/puppet/extraconfig/tls/tls-cert-inject.yaml b/puppet/extraconfig/tls/tls-cert-inject.yaml
new file mode 100644
index 00000000..ce524ba9
--- /dev/null
+++ b/puppet/extraconfig/tls/tls-cert-inject.yaml
@@ -0,0 +1,93 @@
+heat_template_version: 2015-04-30
+
+description: >
+ This is a template which will build the TLS Certificates necessary
+ for the load balancer using the given parameters.
+
+parameters:
+ # Can be overridden via parameter_defaults in the environment
+ SSLCertificate:
+ description: >
+ The content of the SSL certificate (without the key) in PEM format.
+ type: string
+ SSLIntermediateCertificate:
+ default: ''
+ description: >
+ The content of an SSL intermediate CA certificate in PEM format.
+ type: string
+ SSLKey:
+ description: >
+ The content of the SSL Key in PEM format.
+ type: string
+ hidden: true
+
+ # Can be overridden by parameter_defaults if the user wants to try deploying
+ # this in a distro that doesn't support this path.
+ DeployedSSLCertificatePath:
+ default: '/etc/pki/tls/private/overcloud_endpoint.pem'
+ description: >
+ The filepath of the certificate as it will be stored in the controller.
+ type: string
+
+ # Passed in by the controller
+ NodeIndex:
+ default: 0
+ type: number
+ server:
+ description: ID of the controller node to apply this config to
+ type: string
+
+resources:
+ ControllerTLSConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: cert_path
+ - name: cert_chain_content
+ outputs:
+ - name: chain_md5sum
+ - name: cert_modulus
+ - name: key_modulus
+ config: |
+ #!/bin/sh
+ cat > ${cert_path} << EOF
+ ${cert_chain_content}
+ EOF
+ chmod 0440 ${cert_path}
+ chown root:haproxy ${cert_path}
+ md5sum ${cert_path} > ${heat_outputs_path}.chain_md5sum
+ openssl x509 -noout -modulus -in ${cert_path} \
+ | openssl md5 | cut -c 10- \
+ > ${heat_outputs_path}.cert_modulus
+ openssl rsa -noout -modulus -in ${cert_path} \
+ | openssl md5 | cut -c 10- \
+ > ${heat_outputs_path}.key_modulus
+
+ ControllerTLSDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ config: {get_resource: ControllerTLSConfig}
+ server: {get_param: server}
+ input_values:
+ cert_path: {get_param: DeployedSSLCertificatePath}
+ cert_chain_content:
+ list_join:
+ - ''
+ - - {get_param: SSLCertificate}
+ - {get_param: SSLIntermediateCertificate}
+ - {get_param: SSLKey}
+
+outputs:
+ deploy_stdout:
+ description: Deployment reference
+ value: {get_attr: [ControllerTLSDeployment, chain_md5sum]}
+ deployed_ssl_certificate_path:
+ description: The location that the TLS certificate was deployed to.
+ value: {get_param: DeployedSSLCertificatePath}
+ key_modulus_md5:
+ description: MD5 checksum of the Key SSL Modulus
+ value: {get_attr: [ControllerTLSDeployment, key_modulus]}
+ cert_modulus_md5:
+ description: MD5 checksum of the Certificate SSL Modulus
+ value: {get_attr: [ControllerTLSDeployment, cert_modulus]}
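A sketch of enabling load-balancer TLS with the template above, assuming it is mapped to the NodeTLSData resource consumed by controller.yaml (the registry key and path are assumptions; the PEM bodies are placeholders and the key would normally live in a protected environment file). The key_modulus_md5 and cert_modulus_md5 outputs can then be compared to confirm that the supplied key matches the certificate:

resource_registry:
  # Assumed mapping; the real registry entry is defined outside this diff.
  OS::TripleO::NodeTLSData: puppet/extraconfig/tls/tls-cert-inject.yaml

parameter_defaults:
  SSLCertificate: |
    -----BEGIN CERTIFICATE-----
    (placeholder PEM body)
    -----END CERTIFICATE-----
  SSLIntermediateCertificate: ''
  SSLKey: |
    -----BEGIN RSA PRIVATE KEY-----
    (placeholder PEM body)
    -----END RSA PRIVATE KEY-----
  DeployedSSLCertificatePath: /etc/pki/tls/private/overcloud_endpoint.pem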
diff --git a/puppet/hieradata/ceph.yaml b/puppet/hieradata/ceph.yaml
index 18a48622..1e480e60 100644
--- a/puppet/hieradata/ceph.yaml
+++ b/puppet/hieradata/ceph.yaml
@@ -1,17 +1,12 @@
ceph::profile::params::osd_journal_size: 1024
-ceph::profile::params::osd_pool_default_pg_num: 128
-ceph::profile::params::osd_pool_default_pgp_num: 128
+ceph::profile::params::osd_pool_default_pg_num: 32
+ceph::profile::params::osd_pool_default_pgp_num: 32
ceph::profile::params::osd_pool_default_size: 3
ceph::profile::params::osd_pool_default_min_size: 1
ceph::profile::params::osds: {/srv/data: {}}
ceph::profile::params::manage_repo: false
ceph::profile::params::authentication_type: cephx
-ceph_pools:
- - volumes
- - vms
- - images
-
ceph_classes: []
ceph_osd_selinux_permissive: true
diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml
index 030f661d..b4b51abf 100644
--- a/puppet/hieradata/common.yaml
+++ b/puppet/hieradata/common.yaml
@@ -7,10 +7,9 @@ ceilometer::agent::auth::auth_region: 'regionOne'
# changes in the tripleo-incubator keystone role setup
ceilometer::agent::auth::auth_tenant_name: 'admin'
+nova::api::admin_tenant_name: 'service'
nova::network::neutron::neutron_admin_tenant_name: 'service'
nova::network::neutron::neutron_admin_username: 'neutron'
-nova::network::neutron::vif_plugging_is_fatal: false
-nova::network::neutron::vif_plugging_timeout: 30
nova::network::neutron::dhcp_domain: ''
neutron::allow_overlapping_ips: true
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index 16aeb98c..fa8dcc81 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -10,11 +10,14 @@ nova::compute::vnc_enabled: true
nova::compute::libvirt::vncserver_listen: '0.0.0.0'
nova::compute::libvirt::migration_support: true
-nova::compute::rbd::libvirt_rbd_user: 'openstack'
-nova::compute::rbd::rbd_keyring: 'client.openstack'
-nova::compute::rbd::libvirt_images_rbd_pool: 'vms'
nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
+# Raise the default from 512MB: the current templates cannot deploy
+# overclouds with swap, and an idle compute node already uses ~1024MB of
+# RAM. 2048MB is suggested to leave headroom for other services such as
+# Open vSwitch.
+nova::compute::reserved_host_memory: 2048
+
ceilometer::agent::auth::auth_tenant_name: 'service'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
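Because this is plain hieradata, the 2048MB reservation can be tuned per deployment without editing the templates. A sketch of raising it through a compute hieradata override, assuming an ExtraConfig-style parameter is available for that purpose (the parameter name below is an assumption, not part of this change):

parameter_defaults:
  # Assumed hook; any mechanism that lands extra hieradata on compute nodes
  # would have the same effect.
  NovaComputeExtraConfig:
    nova::compute::reserved_host_memory: 4096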
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index 07bfe543..c9f3a417 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -30,7 +30,6 @@ redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
# service tenant
-nova::api::admin_tenant_name: 'service'
glance::api::keystone_tenant: 'service'
glance::registry::keystone_tenant: 'service'
neutron::server::auth_tenant: 'service'
@@ -39,13 +38,6 @@ cinder::api::keystone_tenant: 'service'
swift::proxy::authtoken::admin_tenant_name: 'service'
ceilometer::api::keystone_tenant: 'service'
heat::keystone_tenant: 'service'
-glance::keystone::auth::tenant: 'service'
-nova::keystone::auth::tenant: 'service'
-neutron::keystone::auth::tenant: 'service'
-cinder::keystone::auth::tenant: 'service'
-swift::keystone::auth::tenant: 'service'
-ceilometer::keystone::auth::tenant: 'service'
-heat::keystone::auth::tenant: 'service'
# keystone
keystone::cron::token_flush::maxdelay: 3600
@@ -67,13 +59,10 @@ swift::proxy::pipeline:
- 'proxy-server'
swift::proxy::account_autocreate: true
-swift::keystone::auth::configure_s3_endpoint: false
-swift::keystone::auth::operator_roles:
- - admin
- - swiftoperator
# glance
glance::api::pipeline: 'keystone'
+glance::api::show_image_direct_url: true
glance::registry::pipeline: 'keystone'
glance::backend::swift::swift_store_create_container_on_put: true
glance::backend::rbd::rbd_store_user: 'openstack'
@@ -88,7 +77,8 @@ nova::notify_on_state_change: 'vm_and_task_state'
nova::api::default_floating_pool: 'public'
nova::api::osapi_v3: true
nova::scheduler::filter::ram_allocation_ratio: '1.0'
-nova::keystone::auth::configure_ec2_endpoint: false
+nova::cron::archive_deleted_rows::hour: '*/12'
+nova::cron::archive_deleted_rows::destination: '/dev/null'
# ceilometer
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
@@ -138,3 +128,109 @@ tripleo::loadbalancer::heat_cfn: true
tripleo::loadbalancer::horizon: true
controller_classes: []
+# firewall
+tripleo::firewall::firewall_rules:
+ '101 mongodb_config':
+ port: 27019
+ '102 mongodb_sharding':
+ port: 27018
+ '103 mongod':
+ port: 27017
+ '104 mysql galera':
+ port:
+ - 873
+ - 3306
+ - 4444
+ - 4567
+ - 4568
+ - 9200
+ '105 ntp':
+ port: 123
+ proto: udp
+ '106 vrrp':
+ proto: vrrp
+ '107 haproxy stats':
+ port: 1993
+ '108 redis':
+ port:
+ - 6379
+ - 26379
+ '109 rabbitmq':
+ port:
+ - 5672
+ - 35672
+ '110 ceph':
+ port:
+ - 6789
+ - '6800-6810'
+ '111 keystone':
+ port:
+ - 5000
+ - 13000
+ - 35357
+ - 13357
+ '112 glance':
+ port:
+ - 9292
+ - 9191
+ - 13292
+ '113 nova':
+ port:
+ - 6080
+ - 13080
+ - 8773
+ - 3773
+ - 8774
+ - 13774
+ - 8775
+ '114 neutron server':
+ port:
+ - 9696
+ - 13696
+ '115 neutron dhcp input':
+ proto: 'udp'
+ port: 67
+ '116 neutron dhcp output':
+ proto: 'udp'
+ chain: 'OUTPUT'
+ port: 68
+ '118 neutron vxlan networks':
+ proto: 'udp'
+ port: 4789
+ '119 cinder':
+ port:
+ - 8776
+ - 13776
+ '120 iscsi initiator':
+ port: 3260
+ '121 memcached':
+ port: 11211
+ '122 swift proxy':
+ port:
+ - 8080
+ - 13808
+ '123 swift storage':
+ port:
+ - 873
+ - 6000
+ - 6001
+ - 6002
+ '124 ceilometer':
+ port:
+ - 8777
+ - 13777
+ '125 heat':
+ port:
+ - 8000
+ - 13800
+ - 8003
+ - 13003
+ - 8004
+ - 13004
+ '126 horizon':
+ port:
+ - 80
+ - 443
+ '127 snmp':
+ port: 161
+ proto: 'udp'
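The rules above are plain hashes consumed by tripleo::firewall. A sketch of how an operator might append one more rule in their own hieradata layer, assuming the hashes are merged by the deployment's hiera configuration (the rule name and port are illustrative):

# Operator-side hieradata; whether this merges with or replaces the defaults
# depends on the hiera merge behaviour in use.
tripleo::firewall::firewall_rules:
  '150 monitoring agent':
    port: 9100
    proto: 'tcp'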
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index a88ca2d9..7f8970cc 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-include tripleo::packages
+include ::tripleo::packages
+include ::tripleo::firewall
create_resources(sysctl::value, hiera('sysctl_settings'), {})
@@ -25,13 +26,13 @@ if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
exec { 'set selinux to permissive on boot':
command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
- path => ["/usr/bin", "/usr/sbin"],
+ path => ['/usr/bin', '/usr/sbin'],
}
exec { 'set selinux to permissive':
- command => "setenforce 0",
+ command => 'setenforce 0',
onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
- path => ["/usr/bin", "/usr/sbin"],
+ path => ['/usr/bin', '/usr/sbin'],
} -> Class['ceph::profile::osd']
}
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index 2150bab8..79a6abbb 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-include tripleo::packages
+include ::tripleo::packages
+include ::tripleo::firewall
create_resources(sysctl::value, hiera('sysctl_settings'), {})
@@ -24,14 +25,14 @@ if count(hiera('ntp::servers')) > 0 {
file { ['/etc/libvirt/qemu/networks/autostart/default.xml',
'/etc/libvirt/qemu/networks/default.xml']:
ensure => absent,
- before => Service['libvirt']
+ before => Service['libvirt'],
}
# in case libvirt has been already running before the Puppet run, make
# sure the default network is destroyed
exec { 'libvirt-default-net-destroy':
command => '/usr/bin/virsh net-destroy default',
- onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"',
- before => Service['libvirt'],
+ onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"',
+ before => Service['libvirt'],
}
include ::nova
@@ -49,16 +50,17 @@ if $rbd_ephemeral_storage or $rbd_persistent_storage {
include ::ceph::profile::client
$client_keys = hiera('ceph::profile::params::client_keys')
+ $client_user = join(['client.', hiera('ceph_client_user_name')])
class { '::nova::compute::rbd':
- libvirt_rbd_secret_key => $client_keys['client.openstack']['secret'],
+ libvirt_rbd_secret_key => $client_keys[$client_user]['secret'],
}
}
if hiera('cinder_enable_nfs_backend', false) {
- if ($::selinux != "false") {
+ if str2bool($::selinux) {
selboolean { 'virt_use_nfs':
- value => on,
- persistent => true,
+ value => on,
+ persistent => true,
} -> Package['nfs-utils']
}
@@ -66,23 +68,52 @@ if hiera('cinder_enable_nfs_backend', false) {
}
include ::nova::compute::libvirt
+if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+ file {'/etc/libvirt/qemu.conf':
+ ensure => present,
+ content => hiera('midonet_libvirt_qemu_data')
+ }
+}
include ::nova::network::neutron
include ::neutron
-class { 'neutron::plugins::ml2':
- flat_networks => split(hiera('neutron_flat_networks'), ','),
- tenant_network_types => [hiera('neutron_tenant_network_type')],
+# If the value of core plugin is set to 'nuage',
+# include the Nuage agent;
+# if the value of core plugin is set to 'midonet',
+# include the MidoNet agent;
+# otherwise use the default ML2/OVS agent.
+if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
+ include ::nuage::vrs
+ include ::nova::compute::neutron
+
+ class { '::nuage::metadataagent':
+ nova_os_tenant_name => hiera('nova::api::admin_tenant_name'),
+ nova_os_password => hiera('nova_password'),
+ nova_metadata_ip => hiera('nova_metadata_node_ips'),
+ nova_auth_ip => hiera('keystone_public_api_virtual_ip'),
+ }
}
+elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+
+ # TODO(devvesa) provide non-controller ips for these services
+ $zookeeper_node_ips = hiera('neutron_api_node_ips')
+ $cassandra_node_ips = hiera('neutron_api_node_ips')
-class { 'neutron::agents::ml2::ovs':
- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
- tunnel_types => split(hiera('neutron_tunnel_types'), ','),
+ class {'::tripleo::network::midonet::agent':
+ zookeeper_servers => $zookeeper_node_ips,
+ cassandra_seeds => $cassandra_node_ips
+ }
}
+else {
+
+ include ::neutron::plugins::ml2
+ include ::neutron::agents::ml2::ovs
-if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
- class { 'neutron::agents::n1kv_vem':
- n1kv_source => hiera('n1kv_vem_source', undef),
- n1kv_version => hiera('n1kv_vem_version', undef),
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+ class { '::neutron::agents::n1kv_vem':
+ n1kv_source => hiera('n1kv_vem_source', undef),
+ n1kv_version => hiera('n1kv_vem_version', undef),
+ }
}
}
@@ -97,7 +128,7 @@ snmp::snmpv3_user { $snmpd_user:
authtype => 'MD5',
authpass => hiera('snmpd_readonly_user_password'),
}
-class { 'snmp':
+class { '::snmp':
agentaddress => ['udp:161','udp6:[::1]:161'],
snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 280cc344..913bcb63 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -13,7 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-include tripleo::packages
+include ::tripleo::packages
+include ::tripleo::firewall
+
+$enable_load_balancer = hiera('enable_load_balancer', true)
if hiera('step') >= 1 {
@@ -21,9 +24,11 @@ if hiera('step') >= 1 {
$controller_node_ips = split(hiera('controller_node_ips'), ',')
- class { '::tripleo::loadbalancer' :
- controller_hosts => $controller_node_ips,
- manage_vip => true,
+ if $enable_load_balancer {
+ class { '::tripleo::loadbalancer' :
+ controller_hosts => $controller_node_ips,
+ manage_vip => true,
+ }
}
}
@@ -70,18 +75,18 @@ if hiera('step') >= 2 {
include ::tripleo::redis_notification
}
- if str2bool(hiera('enable_galera', 'true')) {
+ if str2bool(hiera('enable_galera', true)) {
$mysql_config_file = '/etc/my.cnf.d/galera.cnf'
} else {
$mysql_config_file = '/etc/my.cnf.d/server.cnf'
}
# TODO Galera
- class { 'mysql::server':
- config_file => $mysql_config_file,
- override_options => {
+ class { '::mysql::server':
+ config_file => $mysql_config_file,
+ override_options => {
'mysqld' => {
- 'bind-address' => hiera('mysql_bind_host'),
- 'max_connections' => hiera('mysql_max_connections'),
+ 'bind-address' => hiera('mysql_bind_host'),
+ 'max_connections' => hiera('mysql_max_connections'),
'open_files_limit' => '-1',
},
},
@@ -126,31 +131,31 @@ if hiera('step') >= 2 {
$enable_ceph = hiera('ceph_storage_count', 0) > 0
if $enable_ceph {
- class { 'ceph::profile::params':
- mon_initial_members => downcase(hiera('ceph_mon_initial_members'))
+ class { '::ceph::profile::params':
+ mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
}
include ::ceph::profile::mon
}
- if str2bool(hiera('enable_ceph_storage', 'false')) {
+ if str2bool(hiera('enable_ceph_storage', false)) {
if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
exec { 'set selinux to permissive on boot':
command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
- path => ["/usr/bin", "/usr/sbin"],
+ path => ['/usr/bin', '/usr/sbin'],
}
exec { 'set selinux to permissive':
- command => "setenforce 0",
+ command => 'setenforce 0',
onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
- path => ["/usr/bin", "/usr/sbin"],
+ path => ['/usr/bin', '/usr/sbin'],
} -> Class['ceph::profile::osd']
}
include ::ceph::profile::osd
}
- if str2bool(hiera('enable_external_ceph', 'false')) {
+ if str2bool(hiera('enable_external_ceph', false)) {
include ::ceph::profile::client
}
@@ -196,9 +201,9 @@ if hiera('step') >= 3 {
$glance_backend = downcase(hiera('glance_backend', 'swift'))
case $glance_backend {
- swift: { $backend_store = 'glance.store.swift.Store' }
- file: { $backend_store = 'glance.store.filesystem.Store' }
- rbd: { $backend_store = 'glance.store.rbd.Store' }
+ 'swift': { $backend_store = 'glance.store.swift.Store' }
+ 'file': { $backend_store = 'glance.store.filesystem.Store' }
+ 'rbd': { $backend_store = 'glance.store.rbd.Store' }
default: { fail('Unrecognized glance_backend parameter.') }
}
$http_store = ['glance.store.http.Store']
@@ -206,8 +211,8 @@ if hiera('step') >= 3 {
# TODO: notifications, scrubber, etc.
include ::glance
- class { 'glance::api':
- known_stores => $glance_store
+ class { '::glance::api':
+ known_stores => $glance_store,
}
include ::glance::registry
include join(['::glance::backend::', $glance_backend])
@@ -225,73 +230,136 @@ if hiera('step') >= 3 {
include ::nova::scheduler
include ::nova::scheduler::filter
- include ::neutron
- include ::neutron::server
- include ::neutron::agents::l3
- include ::neutron::agents::dhcp
- include ::neutron::agents::metadata
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
- file { '/etc/neutron/dnsmasq-neutron.conf':
- content => hiera('neutron_dnsmasq_options'),
- owner => 'neutron',
- group => 'neutron',
- notify => Service['neutron-dhcp-service'],
- require => Package['neutron'],
- }
+ # TODO(devvesa) provide non-controller ips for these services
+ $zookeeper_node_ips = hiera('neutron_api_node_ips')
+ $cassandra_node_ips = hiera('neutron_api_node_ips')
- class { 'neutron::plugins::ml2':
- flat_networks => split(hiera('neutron_flat_networks'), ','),
- tenant_network_types => [hiera('neutron_tenant_network_type')],
- mechanism_drivers => [hiera('neutron_mechanism_drivers')],
- }
- class { 'neutron::agents::ml2::ovs':
- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
- tunnel_types => split(hiera('neutron_tunnel_types'), ','),
- }
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
- include neutron::plugins::ml2::cisco::nexus1000v
+ # Run zookeeper in the controller if configured
+ if hiera('enable_zookeeper_on_controller') {
+ class {'::tripleo::cluster::zookeeper':
+ zookeeper_server_ips => $zookeeper_node_ips,
+ zookeeper_client_ip => $ipaddress,
+ zookeeper_hostnames => hiera('controller_node_names')
+ }
+ }
- class { 'neutron::agents::n1kv_vem':
- n1kv_source => hiera('n1kv_vem_source', undef),
- n1kv_version => hiera('n1kv_vem_version', undef),
+ # Run cassandra in the controller if configured
+ if hiera('enable_cassandra_on_controller') {
+ class {'::tripleo::cluster::cassandra':
+ cassandra_servers => $cassandra_node_ips,
+ cassandra_ip => $ipaddress
+ }
}
- class { 'n1k_vsm':
- n1kv_source => hiera('n1kv_vsm_source', undef),
- n1kv_version => hiera('n1kv_vsm_version', undef),
- pacemaker_control => false,
+ class {'::tripleo::network::midonet::agent':
+ zookeeper_servers => $zookeeper_node_ips,
+ cassandra_seeds => $cassandra_node_ips
}
- }
- if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') {
- include ::neutron::plugins::ml2::cisco::ucsm
- }
- if 'cisco_nexus' in hiera('neutron_mechanism_drivers') {
- include ::neutron::plugins::ml2::cisco::nexus
- include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
- }
+ class {'::tripleo::network::midonet::api':
+ zookeeper_servers => $zookeeper_node_ips,
+ vip => $ipaddress,
+ keystone_ip => $ipaddress,
+ keystone_admin_token => hiera('keystone::admin_token'),
+ bind_address => $ipaddress,
+ admin_password => hiera('admin_password')
+ }
+
+ # TODO: find a way to get an empty list from hiera
+ class {'::neutron':
+ service_plugins => []
+ }
- if hiera('neutron_enable_bigswitch_ml2', false) {
- include neutron::plugins::ml2::bigswitch::restproxy
- }
- neutron_l3_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
}
- neutron_dhcp_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
+ else {
+
+ # ML2 plugin
+ include ::neutron
}
- Service['neutron-server'] -> Service['neutron-dhcp-service']
- Service['neutron-server'] -> Service['neutron-l3']
- Service['neutron-server'] -> Service['neutron-ovs-agent-service']
- Service['neutron-server'] -> Service['neutron-metadata']
+ include ::neutron::server
+ include ::neutron::server::notifications
+
+ # If the value of core plugin is set to 'nuage',
+ # include the Nuage core plugin; it does not
+ # need the l3, dhcp and metadata agents
+ if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
+ include ::neutron::plugins::nuage
+ } else {
+ include ::neutron::agents::l3
+ include ::neutron::agents::dhcp
+ include ::neutron::agents::metadata
+
+ file { '/etc/neutron/dnsmasq-neutron.conf':
+ content => hiera('neutron_dnsmasq_options'),
+ owner => 'neutron',
+ group => 'neutron',
+ notify => Service['neutron-dhcp-service'],
+ require => Package['neutron'],
+ }
+
+ # If the value of core plugin is set to 'midonet',
+ # skip all the ML2 configuration
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+
+ class {'::neutron::plugins::midonet':
+ midonet_api_ip => $ipaddress,
+ keystone_tenant => hiera('neutron::server::auth_tenant'),
+ keystone_password => hiera('neutron::server::auth_password')
+ }
+ } else {
+
+ include ::neutron::plugins::ml2
+ include ::neutron::agents::ml2::ovs
+
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+ include ::neutron::plugins::ml2::cisco::nexus1000v
+
+ class { '::neutron::agents::n1kv_vem':
+ n1kv_source => hiera('n1kv_vem_source', undef),
+ n1kv_version => hiera('n1kv_vem_version', undef),
+ }
+
+ class { '::n1k_vsm':
+ n1kv_source => hiera('n1kv_vsm_source', undef),
+ n1kv_version => hiera('n1kv_vsm_version', undef),
+ pacemaker_control => false,
+ }
+ }
+
+ if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+ include ::neutron::plugins::ml2::cisco::ucsm
+ }
+ if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+ include ::neutron::plugins::ml2::cisco::nexus
+ include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
+ }
+
+ if hiera('neutron_enable_bigswitch_ml2', false) {
+ include ::neutron::plugins::ml2::bigswitch::restproxy
+ }
+ neutron_l3_agent_config {
+ 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
+ }
+ neutron_dhcp_agent_config {
+ 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
+ }
+ Service['neutron-server'] -> Service['neutron-ovs-agent-service']
+ }
+
+ Service['neutron-server'] -> Service['neutron-dhcp-service']
+ Service['neutron-server'] -> Service['neutron-l3']
+ Service['neutron-server'] -> Service['neutron-metadata']
+ }
include ::cinder
include ::cinder::api
include ::cinder::glance
include ::cinder::scheduler
include ::cinder::volume
- class {'cinder::setup_test_volume':
+ class { '::cinder::setup_test_volume':
size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
}
@@ -307,16 +375,14 @@ if hiera('step') >= 3 {
if $enable_ceph {
- Ceph_pool {
+ $ceph_pools = hiera('ceph_pools')
+ ceph::pool { $ceph_pools :
pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'),
pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
size => hiera('ceph::profile::params::osd_pool_default_size'),
}
- $ceph_pools = hiera('ceph_pools')
- ceph::pool { $ceph_pools : }
-
- $cinder_pool_requires = [Ceph::Pool['volumes']]
+ $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]]
} else {
$cinder_pool_requires = []
@@ -326,8 +392,8 @@ if hiera('step') >= 3 {
$cinder_rbd_backend = 'tripleo_ceph'
cinder::backend::rbd { $cinder_rbd_backend :
- rbd_pool => 'volumes',
- rbd_user => 'openstack',
+ rbd_pool => hiera('cinder_rbd_pool_name'),
+ rbd_user => hiera('ceph_client_user_name'),
rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
require => $cinder_pool_requires,
}
@@ -392,18 +458,18 @@ if hiera('step') >= 3 {
if hiera('cinder_enable_nfs_backend', false) {
$cinder_nfs_backend = 'tripleo_nfs'
- if ($::selinux != "false") {
+ if str2bool($::selinux) {
selboolean { 'virt_use_nfs':
- value => on,
- persistent => true,
+ value => on,
+ persistent => true,
} -> Package['nfs-utils']
}
package {'nfs-utils': } ->
cinder::backend::nfs { $cinder_nfs_backend :
- nfs_servers => hiera('cinder_nfs_servers'),
- nfs_mount_options => hiera('cinder_nfs_mount_options'),
- nfs_shares_config => '/etc/cinder/shares-nfs.conf',
+ nfs_servers => hiera('cinder_nfs_servers'),
+ nfs_mount_options => hiera('cinder_nfs_mount_options',''),
+ nfs_shares_config => '/etc/cinder/shares-nfs.conf',
}
}
@@ -427,9 +493,9 @@ if hiera('step') >= 3 {
include ::swift::proxy::formpost
# swift storage
- if str2bool(hiera('enable_swift_storage', 'true')) {
- class {'swift::storage::all':
- mount_check => str2bool(hiera('swift_mount_check'))
+ if str2bool(hiera('enable_swift_storage', true)) {
+ class { '::swift::storage::all':
+ mount_check => str2bool(hiera('swift_mount_check')),
}
if(!defined(File['/srv/node'])) {
file { '/srv/node':
@@ -459,11 +525,9 @@ if hiera('step') >= 3 {
include ::ceilometer::api
include ::ceilometer::agent::notification
include ::ceilometer::agent::central
- include ::ceilometer::alarm::notifier
- include ::ceilometer::alarm::evaluator
include ::ceilometer::expirer
include ::ceilometer::collector
- include ceilometer::agent::auth
+ include ::ceilometer::agent::auth
class { '::ceilometer::db' :
database_connection => $ceilometer_database_connection,
}
@@ -478,15 +542,16 @@ if hiera('step') >= 3 {
include ::heat::engine
# Horizon
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
$_profile_support = 'cisco'
} else {
$_profile_support = 'None'
}
$neutron_options = {'profile_support' => $_profile_support }
- class { 'horizon':
- cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
- neutron_options => $neutron_options,
+
+ class { '::horizon':
+ cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
+ neutron_options => $neutron_options,
}
$snmpd_user = hiera('snmpd_readonly_user_name')
@@ -494,7 +559,7 @@ if hiera('step') >= 3 {
authtype => 'MD5',
authpass => hiera('snmpd_readonly_user_password'),
}
- class { 'snmp':
+ class { '::snmp':
agentaddress => ['udp:161','udp6:[::1]:161'],
snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
@@ -504,16 +569,12 @@ if hiera('step') >= 3 {
} #END STEP 3
if hiera('step') >= 4 {
- include ::keystone::cron::token_flush
-
- include ::ceilometer::keystone::auth
- include ::cinder::keystone::auth
- include ::glance::keystone::auth
- include ::heat::keystone::auth
- include ::neutron::keystone::auth
- include ::nova::keystone::auth
- include ::swift::keystone::auth
+ $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
+ include ::keystone::cron::token_flush
+ if $nova_enable_db_purge {
+ include ::nova::cron::archive_deleted_rows
+ }
} #END STEP 4
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')])
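The deleted-rows archiving cron is now gated on the nova_enable_db_purge hiera flag, which defaults to true. A sketch of disabling it through controller hieradata, assuming an ExtraConfig-style override is available (the parameter name is an assumption, not part of this change):

parameter_defaults:
  # Assumed hook for controller hieradata overrides.
  controllerExtraConfig:
    nova_enable_db_purge: false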
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 5729ba00..e6ee85ae 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -18,7 +18,8 @@ Pcmk_resource <| |> {
try_sleep => 3,
}
-include tripleo::packages
+include ::tripleo::packages
+include ::tripleo::firewall
if $::hostname == downcase(hiera('bootstrap_nodeid')) {
$pacemaker_master = true
@@ -28,7 +29,8 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) {
$sync_db = false
}
-$enable_fencing = str2bool(hiera('enable_fencing', 'false')) and hiera('step') >= 5
+$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
+$enable_load_balancer = hiera('enable_load_balancer', true)
# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
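Net effect of the opening hunks above, with diff markers stripped (a condensed sketch, not the full manifest): the firewall profile is now always included next to the packages profile, and two booleans computed once at the top gate optional behaviour for the rest of the run.

include ::tripleo::packages
include ::tripleo::firewall

# fencing only kicks in at step 5 and only if explicitly enabled
$enable_fencing       = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
# false when an external load balancer replaces the built-in haproxy
$enable_load_balancer = hiera('enable_load_balancer', true)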
@@ -45,17 +47,19 @@ if hiera('step') >= 1 {
$controller_node_ips = split(hiera('controller_node_ips'), ',')
$controller_node_names = split(downcase(hiera('controller_node_names')), ',')
- class { '::tripleo::loadbalancer' :
- controller_hosts => $controller_node_ips,
- controller_hosts_names => $controller_node_names,
- manage_vip => false,
- mysql_clustercheck => true,
- haproxy_service_manage => false,
+ if $enable_load_balancer {
+ class { '::tripleo::loadbalancer' :
+ controller_hosts => $controller_node_ips,
+ controller_hosts_names => $controller_node_names,
+ manage_vip => false,
+ mysql_clustercheck => true,
+ haproxy_service_manage => false,
+ }
}
$pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
user { 'hacluster':
- ensure => present,
+ ensure => present,
} ->
class { '::pacemaker':
hacluster_pwd => hiera('hacluster_pwd'),
@@ -68,17 +72,17 @@ if hiera('step') >= 1 {
disable => !$enable_fencing,
}
if $enable_fencing {
- include tripleo::fencing
+ include ::tripleo::fencing
# enable stonith after all fencing devices have been created
Class['tripleo::fencing'] -> Class['pacemaker::stonith']
}
- # FIXME(gfidente): sets 90secs as default start timeout op
+ # FIXME(gfidente): sets 200secs as default start timeout op
# param; until we can use pcmk global defaults we'll still
# need to add it to every resource which redefines op params
Pacemaker::Resource::Service {
- op_params => 'start timeout=90s',
+ op_params => 'start timeout=200s stop timeout=200s',
}
# Only configure RabbitMQ in this step, don't start it yet to
@@ -93,7 +97,7 @@ if hiera('step') >= 1 {
environment_variables => hiera('rabbitmq_environment'),
} ->
file { '/var/lib/rabbitmq/.erlang.cookie':
- ensure => 'present',
+ ensure => file,
owner => 'rabbitmq',
group => 'rabbitmq',
mode => '0400',
@@ -120,7 +124,7 @@ if hiera('step') >= 1 {
}
# Galera
- if str2bool(hiera('enable_galera', 'true')) {
+ if str2bool(hiera('enable_galera', true)) {
$mysql_config_file = '/etc/my.cnf.d/galera.cnf'
} else {
$mysql_config_file = '/etc/my.cnf.d/server.cnf'
@@ -154,7 +158,7 @@ if hiera('step') >= 1 {
'wsrep_causal_reads' => '0',
'wsrep_notify_cmd' => '',
'wsrep_sst_method' => 'rsync',
- }
+ },
}
class { '::mysql::server':
@@ -178,160 +182,164 @@ if hiera('step') >= 2 {
if $pacemaker_master {
- include pacemaker::resource_defaults
-
- # FIXME: we should not have to access tripleo::loadbalancer class
- # parameters here to configure pacemaker VIPs. The configuration
- # of pacemaker VIPs could move into puppet-tripleo or we should
- # make use of less specific hiera parameters here for the settings.
- pacemaker::resource::service { 'haproxy':
- clone_params => true,
- }
+ if $enable_load_balancer {
- $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
- pacemaker::resource::ip { 'control_vip':
- ip_address => $control_vip,
- }
- pacemaker::constraint::base { 'control_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${control_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['control_vip']],
- }
- pacemaker::constraint::colocation { 'control_vip-with-haproxy':
- source => "ip-${control_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['control_vip']],
- }
+ include ::pacemaker::resource_defaults
- $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
- if $public_vip and $public_vip != $control_vip {
- pacemaker::resource::ip { 'public_vip':
- ip_address => $public_vip,
- }
- pacemaker::constraint::base { 'public_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${public_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['public_vip']],
- }
- pacemaker::constraint::colocation { 'public_vip-with-haproxy':
- source => "ip-${public_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['public_vip']],
+ # FIXME: we should not have to access tripleo::loadbalancer class
+ # parameters here to configure pacemaker VIPs. The configuration
+ # of pacemaker VIPs could move into puppet-tripleo or we should
+ # make use of less specific hiera parameters here for the settings.
+ pacemaker::resource::service { 'haproxy':
+ clone_params => true,
}
- }
- $redis_vip = hiera('redis_vip')
- if $redis_vip and $redis_vip != $control_vip {
- pacemaker::resource::ip { 'redis_vip':
- ip_address => $redis_vip,
+ $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
+ pacemaker::resource::ip { 'control_vip':
+ ip_address => $control_vip,
}
- pacemaker::constraint::base { 'redis_vip-then-haproxy':
+ pacemaker::constraint::base { 'control_vip-then-haproxy':
constraint_type => 'order',
- first_resource => "ip-${redis_vip}",
+ first_resource => "ip-${control_vip}",
second_resource => 'haproxy-clone',
first_action => 'start',
second_action => 'start',
constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['redis_vip']],
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['control_vip']],
}
- pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
- source => "ip-${redis_vip}",
+ pacemaker::constraint::colocation { 'control_vip-with-haproxy':
+ source => "ip-${control_vip}",
target => 'haproxy-clone',
score => 'INFINITY',
require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['redis_vip']],
+ Pacemaker::Resource::Ip['control_vip']],
}
- }
- $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
- if $internal_api_vip and $internal_api_vip != $control_vip {
- pacemaker::resource::ip { 'internal_api_vip':
- ip_address => $internal_api_vip,
- }
- pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${internal_api_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['internal_api_vip']],
- }
- pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
- source => "ip-${internal_api_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['internal_api_vip']],
+ $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
+ if $public_vip and $public_vip != $control_vip {
+ pacemaker::resource::ip { 'public_vip':
+ ip_address => $public_vip,
+ }
+ pacemaker::constraint::base { 'public_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${public_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['public_vip']],
+ }
+ pacemaker::constraint::colocation { 'public_vip-with-haproxy':
+ source => "ip-${public_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['public_vip']],
+ }
}
- }
- $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
- if $storage_vip and $storage_vip != $control_vip {
- pacemaker::resource::ip { 'storage_vip':
- ip_address => $storage_vip,
- }
- pacemaker::constraint::base { 'storage_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${storage_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_vip']],
- }
- pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
- source => "ip-${storage_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_vip']],
+ $redis_vip = hiera('redis_vip')
+ if $redis_vip and $redis_vip != $control_vip {
+ pacemaker::resource::ip { 'redis_vip':
+ ip_address => $redis_vip,
+ }
+ pacemaker::constraint::base { 'redis_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${redis_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['redis_vip']],
+ }
+ pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
+ source => "ip-${redis_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['redis_vip']],
+ }
}
- }
- $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
- if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
- pacemaker::resource::ip { 'storage_mgmt_vip':
- ip_address => $storage_mgmt_vip,
+ $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
+ if $internal_api_vip and $internal_api_vip != $control_vip {
+ pacemaker::resource::ip { 'internal_api_vip':
+ ip_address => $internal_api_vip,
+ }
+ pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${internal_api_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['internal_api_vip']],
+ }
+ pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
+ source => "ip-${internal_api_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['internal_api_vip']],
+ }
}
- pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${storage_mgmt_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_mgmt_vip']],
+
+ $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
+ if $storage_vip and $storage_vip != $control_vip {
+ pacemaker::resource::ip { 'storage_vip':
+ ip_address => $storage_vip,
+ }
+ pacemaker::constraint::base { 'storage_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${storage_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_vip']],
+ }
+ pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
+ source => "ip-${storage_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_vip']],
+ }
}
- pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
- source => "ip-${storage_mgmt_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_mgmt_vip']],
+
+ $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
+ if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
+ pacemaker::resource::ip { 'storage_mgmt_vip':
+ ip_address => $storage_mgmt_vip,
+ }
+ pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${storage_mgmt_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_mgmt_vip']],
+ }
+ pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
+ source => "ip-${storage_mgmt_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_mgmt_vip']],
+ }
}
+
}
pacemaker::resource::service { $::memcached::params::service_name :
- clone_params => "interleave=true",
+ clone_params => 'interleave=true',
require => Class['::memcached'],
}
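The large step-2 hunk above wraps the haproxy clone, the VIP resources and their order/colocation constraints in the new $enable_load_balancer guard, so deployments that front the cloud with an external load balancer simply skip creating those pacemaker resources. A condensed sketch of the pattern, shown for the control VIP only:

if $enable_load_balancer {
  $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
  pacemaker::resource::service { 'haproxy':
    clone_params => true,
  }
  pacemaker::resource::ip { 'control_vip':
    ip_address => $control_vip,
  }
  # keep the VIP on a node that actually runs haproxy
  pacemaker::constraint::colocation { 'control_vip-with-haproxy':
    source  => "ip-${control_vip}",
    target  => 'haproxy-clone',
    score   => 'INFINITY',
    require => [Pacemaker::Resource::Service['haproxy'],
                Pacemaker::Resource::Ip['control_vip']],
  }
}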
@@ -344,7 +352,7 @@ if hiera('step') >= 2 {
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
pacemaker::resource::service { $::mongodb::params::service_name :
- op_params => 'start timeout=120s',
+ op_params => 'start timeout=370s stop timeout=200s',
clone_params => true,
require => Class['::mongodb::server'],
}
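The operation timeouts are raised throughout this manifest: the shared resource default moves from a 90s start timeout to 200s for both start and stop, and mongodb gets an even longer 370s start timeout since it is slow to come up on first boot. A minimal sketch of the two pieces, with diff markers stripped:

# resource default inherited by every Pacemaker::Resource::Service below,
# unless a service overrides op_params itself
Pacemaker::Resource::Service {
  op_params => 'start timeout=200s stop timeout=200s',
}

pacemaker::resource::service { $::mongodb::params::service_name :
  op_params    => 'start timeout=370s stop timeout=200s',
  clone_params => true,
  require      => Class['::mongodb::server'],
}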
@@ -385,7 +393,7 @@ if hiera('step') >= 2 {
timeout => 30,
tries => 180,
try_sleep => 10,
- environment => ["AVAILABLE_WHEN_READONLY=0"],
+ environment => ['AVAILABLE_WHEN_READONLY=0'],
require => File['/etc/sysconfig/clustercheck'],
}
@@ -411,28 +419,28 @@ MYSQL_HOST=localhost\n",
# Create all the database schemas
if $sync_db {
- class { 'keystone::db::mysql':
- require => Exec['galera-ready'],
+ class { '::keystone::db::mysql':
+ require => Exec['galera-ready'],
}
- class { 'glance::db::mysql':
- require => Exec['galera-ready'],
+ class { '::glance::db::mysql':
+ require => Exec['galera-ready'],
}
- class { 'nova::db::mysql':
- require => Exec['galera-ready'],
+ class { '::nova::db::mysql':
+ require => Exec['galera-ready'],
}
- class { 'neutron::db::mysql':
- require => Exec['galera-ready'],
+ class { '::neutron::db::mysql':
+ require => Exec['galera-ready'],
}
- class { 'cinder::db::mysql':
- require => Exec['galera-ready'],
+ class { '::cinder::db::mysql':
+ require => Exec['galera-ready'],
}
- class { 'heat::db::mysql':
- require => Exec['galera-ready'],
+ class { '::heat::db::mysql':
+ require => Exec['galera-ready'],
}
if downcase(hiera('ceilometer_backend')) == 'mysql' {
- class { 'ceilometer::db::mysql':
- require => Exec['galera-ready'],
+ class { '::ceilometer::db::mysql':
+ require => Exec['galera-ready'],
}
}
}
@@ -444,31 +452,31 @@ MYSQL_HOST=localhost\n",
$enable_ceph = hiera('ceph_storage_count', 0) > 0
if $enable_ceph {
- class { 'ceph::profile::params':
- mon_initial_members => downcase(hiera('ceph_mon_initial_members'))
+ class { '::ceph::profile::params':
+ mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
}
include ::ceph::profile::mon
}
- if str2bool(hiera('enable_ceph_storage', 'false')) {
+ if str2bool(hiera('enable_ceph_storage', false)) {
if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
exec { 'set selinux to permissive on boot':
command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
- path => ["/usr/bin", "/usr/sbin"],
+ path => ['/usr/bin', '/usr/sbin'],
}
exec { 'set selinux to permissive':
- command => "setenforce 0",
+ command => 'setenforce 0',
onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
- path => ["/usr/bin", "/usr/sbin"],
+ path => ['/usr/bin', '/usr/sbin'],
} -> Class['ceph::profile::osd']
}
include ::ceph::profile::osd
}
- if str2bool(hiera('enable_external_ceph', 'false')) {
+ if str2bool(hiera('enable_external_ceph', false)) {
include ::ceph::profile::client
}
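Across both manifests the hiera() fallbacks for the enable_* flags change from quoted strings ('true'/'false') to bare booleans; str2bool() accepts either form, so a YAML boolean supplied by the operator and the in-manifest default now take the same code path. A minimal sketch of the resulting flag pattern:

# the fallback is a real boolean; str2bool still normalises 'true'/'false'
# strings that may come from older hiera data
if str2bool(hiera('enable_external_ceph', false)) {
  include ::ceph::profile::client
}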
@@ -478,9 +486,9 @@ MYSQL_HOST=localhost\n",
if hiera('step') >= 3 {
class { '::keystone':
- sync_db => $sync_db,
+ sync_db => $sync_db,
manage_service => false,
- enabled => false,
+ enabled => false,
}
#TODO: need a cleanup-keystone-tokens.sh solution here
@@ -517,35 +525,36 @@ if hiera('step') >= 3 {
$glance_backend = downcase(hiera('glance_backend', 'swift'))
case $glance_backend {
- swift: { $backend_store = 'glance.store.swift.Store' }
- file: { $backend_store = 'glance.store.filesystem.Store' }
- rbd: { $backend_store = 'glance.store.rbd.Store' }
+ 'swift': { $backend_store = 'glance.store.swift.Store' }
+ 'file': { $backend_store = 'glance.store.filesystem.Store' }
+ 'rbd': { $backend_store = 'glance.store.rbd.Store' }
default: { fail('Unrecognized glance_backend parameter.') }
}
$http_store = ['glance.store.http.Store']
$glance_store = concat($http_store, $backend_store)
if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) {
- pacemaker::resource::filesystem { "glance-fs":
+ $secontext = 'context="system_u:object_r:glance_var_lib_t:s0"'
+ pacemaker::resource::filesystem { 'glance-fs':
device => hiera('glance_file_pcmk_device'),
directory => hiera('glance_file_pcmk_directory'),
fstype => hiera('glance_file_pcmk_fstype'),
- fsoptions => hiera('glance_file_pcmk_options', ''),
+ fsoptions => join([$secontext, hiera('glance_file_pcmk_options', '')],','),
clone_params => '',
}
}
# TODO: notifications, scrubber, etc.
include ::glance
- class { 'glance::api':
- known_stores => $glance_store,
+ class { '::glance::api':
+ known_stores => $glance_store,
manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::glance::registry' :
- sync_db => $sync_db,
+ sync_db => $sync_db,
manage_service => false,
- enabled => false,
+ enabled => false,
}
include join(['::glance::backend::', $glance_backend])
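When glance uses a pacemaker-managed file backend, the mount now always carries an SELinux context option so glance can write to the shared filesystem; the context is joined in front of whatever the operator passes in glance_file_pcmk_options. A small worked example, assuming a hypothetical operator value of 'noatime':

$secontext = 'context="system_u:object_r:glance_var_lib_t:s0"'
$fsoptions = join([$secontext, 'noatime'], ',')
# => 'context="system_u:object_r:glance_var_lib_t:s0",noatime'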
@@ -556,94 +565,151 @@ if hiera('step') >= 3 {
include ::nova::config
class { '::nova::api' :
- sync_db => $sync_db,
+ sync_db => $sync_db,
manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::nova::cert' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::nova::conductor' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::nova::consoleauth' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::nova::vncproxy' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
include ::nova::scheduler::filter
class { '::nova::scheduler' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
include ::nova::network::neutron
- # Neutron class definitions
- include ::neutron
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+
+ # TODO(devvesa) provide non-controller ips for these services
+ $zookeeper_node_ips = hiera('neutron_api_node_ips')
+ $cassandra_node_ips = hiera('neutron_api_node_ips')
+
+ # Run zookeeper in the controller if configured
+ if hiera('enable_zookeeper_on_controller') {
+ class {'::tripleo::cluster::zookeeper':
+ zookeeper_server_ips => $zookeeper_node_ips,
+ zookeeper_client_ip => $ipaddress,
+ zookeeper_hostnames => hiera('controller_node_names')
+ }
+ }
+
+ # Run cassandra in the controller if configured
+ if hiera('enable_cassandra_on_controller') {
+ class {'::tripleo::cluster::cassandra':
+ cassandra_servers => $cassandra_node_ips,
+ cassandra_ip => $ipaddress
+ }
+ }
+
+ class {'::tripleo::network::midonet::agent':
+ zookeeper_servers => $zookeeper_node_ips,
+ cassandra_seeds => $cassandra_node_ips
+ }
+
+ class {'::tripleo::network::midonet::api':
+ zookeeper_servers => hiera('neutron_api_node_ips'),
+ vip => $public_vip,
+ keystone_ip => $public_vip,
+ keystone_admin_token => hiera('keystone::admin_token'),
+ bind_address => $ipaddress,
+ admin_password => hiera('admin_password')
+ }
+
+ # Configure Neutron
+ class {'::neutron':
+ service_plugins => []
+ }
+
+ }
+ else {
+ # Neutron class definitions
+ include ::neutron
+ }
+
class { '::neutron::server' :
- sync_db => $sync_db,
+ sync_db => $sync_db,
manage_service => false,
- enabled => false,
+ enabled => false,
}
- class { '::neutron::agents::dhcp' :
- manage_service => false,
- enabled => false,
+ include ::neutron::server::notifications
+ if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
+ include ::neutron::plugins::nuage
}
- class { '::neutron::agents::l3' :
- manage_service => false,
- enabled => false,
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+ class {'::neutron::plugins::midonet':
+ midonet_api_ip => $public_vip,
+ keystone_tenant => hiera('neutron::server::auth_tenant'),
+ keystone_password => hiera('neutron::server::auth_password')
+ }
}
- class { 'neutron::agents::metadata':
- manage_service => false,
- enabled => false,
+ if hiera('neutron::enable_dhcp_agent',true) {
+ class { '::neutron::agents::dhcp' :
+ manage_service => false,
+ enabled => false,
+ }
+ file { '/etc/neutron/dnsmasq-neutron.conf':
+ content => hiera('neutron_dnsmasq_options'),
+ owner => 'neutron',
+ group => 'neutron',
+ notify => Service['neutron-dhcp-service'],
+ require => Package['neutron'],
+ }
}
- file { '/etc/neutron/dnsmasq-neutron.conf':
- content => hiera('neutron_dnsmasq_options'),
- owner => 'neutron',
- group => 'neutron',
- notify => Service['neutron-dhcp-service'],
- require => Package['neutron'],
+ if hiera('neutron::enable_l3_agent',true) {
+ class { '::neutron::agents::l3' :
+ manage_service => false,
+ enabled => false,
+ }
}
- class { 'neutron::plugins::ml2':
- flat_networks => split(hiera('neutron_flat_networks'), ','),
- tenant_network_types => [hiera('neutron_tenant_network_type')],
- mechanism_drivers => [hiera('neutron_mechanism_drivers')],
+ if hiera('neutron::enable_metadata_agent',true) {
+ class { '::neutron::agents::metadata':
+ manage_service => false,
+ enabled => false,
+ }
}
- class { 'neutron::agents::ml2::ovs':
- manage_service => false,
- enabled => false,
- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
- tunnel_types => split(hiera('neutron_tunnel_types'), ','),
+ include ::neutron::plugins::ml2
+ class { '::neutron::agents::ml2::ovs':
+ manage_service => false,
+ enabled => false,
}
- if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
include ::neutron::plugins::ml2::cisco::ucsm
}
- if 'cisco_nexus' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
include ::neutron::plugins::ml2::cisco::nexus
include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
}
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
- include neutron::plugins::ml2::cisco::nexus1000v
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+ include ::neutron::plugins::ml2::cisco::nexus1000v
- class { 'neutron::agents::n1kv_vem':
- n1kv_source => hiera('n1kv_vem_source', undef),
- n1kv_version => hiera('n1kv_vem_version', undef),
+ class { '::neutron::agents::n1kv_vem':
+ n1kv_source => hiera('n1kv_vem_source', undef),
+ n1kv_version => hiera('n1kv_vem_version', undef),
}
- class { 'n1k_vsm':
- n1kv_source => hiera('n1kv_vsm_source', undef),
- n1kv_version => hiera('n1kv_vsm_version', undef),
+ class { '::n1k_vsm':
+ n1kv_source => hiera('n1kv_vsm_source', undef),
+ n1kv_version => hiera('n1kv_vsm_version', undef),
}
}
if hiera('neutron_enable_bigswitch_ml2', false) {
- include neutron::plugins::ml2::bigswitch::restproxy
+ include ::neutron::plugins::ml2::bigswitch::restproxy
}
neutron_l3_agent_config {
'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
@@ -654,20 +720,20 @@ if hiera('step') >= 3 {
include ::cinder
class { '::cinder::api':
- sync_db => $sync_db,
+ sync_db => $sync_db,
manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::cinder::scheduler' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::cinder::volume' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
include ::cinder::glance
- class {'cinder::setup_test_volume':
+ class { '::cinder::setup_test_volume':
size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
}
@@ -683,16 +749,14 @@ if hiera('step') >= 3 {
if $enable_ceph {
- Ceph_pool {
+ $ceph_pools = hiera('ceph_pools')
+ ceph::pool { $ceph_pools :
pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'),
pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
size => hiera('ceph::profile::params::osd_pool_default_size'),
}
- $ceph_pools = hiera('ceph_pools')
- ceph::pool { $ceph_pools : }
-
- $cinder_pool_requires = [Ceph::Pool['volumes']]
+ $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]]
} else {
$cinder_pool_requires = []
@@ -702,8 +766,8 @@ if hiera('step') >= 3 {
$cinder_rbd_backend = 'tripleo_ceph'
cinder::backend::rbd { $cinder_rbd_backend :
- rbd_pool => 'volumes',
- rbd_user => 'openstack',
+ rbd_pool => hiera('cinder_rbd_pool_name'),
+ rbd_user => hiera('ceph_client_user_name'),
rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
require => $cinder_pool_requires,
}
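The Ceph integration above is parameterised: pools are created with explicit pg_num/pgp_num/size taken from the ceph profile hiera values instead of a Ceph_pool resource default, and the cinder RBD backend reads its pool name and client user from hiera rather than the old hard-coded 'volumes'/'openstack'. A condensed sketch, assuming Ceph is enabled:

$ceph_pools = hiera('ceph_pools')
ceph::pool { $ceph_pools :
  pg_num  => hiera('ceph::profile::params::osd_pool_default_pg_num'),
  pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
  size    => hiera('ceph::profile::params::osd_pool_default_size'),
}

cinder::backend::rbd { 'tripleo_ceph' :
  rbd_pool        => hiera('cinder_rbd_pool_name'),
  rbd_user        => hiera('ceph_client_user_name'),
  rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
  require         => [Ceph::Pool[hiera('cinder_rbd_pool_name')]],
}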
@@ -768,18 +832,18 @@ if hiera('step') >= 3 {
if hiera('cinder_enable_nfs_backend', false) {
$cinder_nfs_backend = 'tripleo_nfs'
- if ($::selinux != "false") {
+ if str2bool($::selinux) {
selboolean { 'virt_use_nfs':
- value => on,
- persistent => true,
+ value => on,
+ persistent => true,
} -> Package['nfs-utils']
}
- package {'nfs-utils': } ->
+ package { 'nfs-utils': } ->
cinder::backend::nfs { $cinder_nfs_backend:
- nfs_servers => hiera('cinder_nfs_servers'),
- nfs_mount_options => hiera('cinder_nfs_mount_options'),
- nfs_shares_config => '/etc/cinder/shares-nfs.conf',
+ nfs_servers => hiera('cinder_nfs_servers'),
+ nfs_mount_options => hiera('cinder_nfs_mount_options',''),
+ nfs_shares_config => '/etc/cinder/shares-nfs.conf',
}
}
@@ -791,7 +855,7 @@ if hiera('step') >= 3 {
# swift proxy
class { '::swift::proxy' :
manage_service => $non_pcmk_start,
- enabled => $non_pcmk_start,
+ enabled => $non_pcmk_start,
}
include ::swift::proxy::proxy_logging
include ::swift::proxy::healthcheck
@@ -805,21 +869,21 @@ if hiera('step') >= 3 {
include ::swift::proxy::formpost
# swift storage
- if str2bool(hiera('enable_swift_storage', 'true')) {
+ if str2bool(hiera('enable_swift_storage', true)) {
class {'::swift::storage::all':
- mount_check => str2bool(hiera('swift_mount_check'))
+ mount_check => str2bool(hiera('swift_mount_check')),
}
class {'::swift::storage::account':
manage_service => $non_pcmk_start,
- enabled => $non_pcmk_start,
+ enabled => $non_pcmk_start,
}
class {'::swift::storage::container':
manage_service => $non_pcmk_start,
- enabled => $non_pcmk_start,
+ enabled => $non_pcmk_start,
}
class {'::swift::storage::object':
manage_service => $non_pcmk_start,
- enabled => $non_pcmk_start,
+ enabled => $non_pcmk_start,
}
if(!defined(File['/srv/node'])) {
file { '/srv/node':
@@ -848,34 +912,26 @@ if hiera('step') >= 3 {
include ::ceilometer::config
class { '::ceilometer::api' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::ceilometer::agent::notification' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::ceilometer::agent::central' :
manage_service => false,
- enabled => false,
- }
- class { '::ceilometer::alarm::notifier' :
- manage_service => false,
- enabled => false,
- }
- class { '::ceilometer::alarm::evaluator' :
- manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::ceilometer::collector' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
include ::ceilometer::expirer
class { '::ceilometer::db' :
database_connection => $ceilometer_database_connection,
sync_db => $sync_db,
}
- include ceilometer::agent::auth
+ include ::ceilometer::agent::auth
Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
@@ -885,19 +941,19 @@ if hiera('step') >= 3 {
}
class { '::heat::api' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::heat::api_cfn' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::heat::api_cloudwatch' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
class { '::heat::engine' :
manage_service => false,
- enabled => false,
+ enabled => false,
}
# httpd/apache and horizon
@@ -907,15 +963,15 @@ if hiera('step') >= 3 {
# service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
}
include ::apache::mod::status
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
$_profile_support = 'cisco'
} else {
$_profile_support = 'None'
}
$neutron_options = {'profile_support' => $_profile_support }
- class { 'horizon':
- cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
- neutron_options => $neutron_options,
+ class { '::horizon':
+ cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
+ neutron_options => $neutron_options,
}
$snmpd_user = hiera('snmpd_readonly_user_name')
@@ -923,7 +979,7 @@ if hiera('step') >= 3 {
authtype => 'MD5',
authpass => hiera('snmpd_readonly_user_password'),
}
- class { 'snmp':
+ class { '::snmp':
agentaddress => ['udp:161','udp6:[::1]:161'],
snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
@@ -933,31 +989,37 @@ if hiera('step') >= 3 {
} #END STEP 3
if hiera('step') >= 4 {
- include ::keystone::cron::token_flush
+ $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
+
+ include ::keystone::cron::token_flush
+ if $nova_enable_db_purge {
+ include ::nova::cron::archive_deleted_rows
+ }
if $pacemaker_master {
# Keystone
pacemaker::resource::service { $::keystone::params::service_name :
- clone_params => "interleave=true",
+ clone_params => 'interleave=true',
verify_on_create => true,
require => [File['/etc/keystone/ssl/certs/ca.pem'],
- File['/etc/keystone/ssl/private/signing_key.pem'],
- File['/etc/keystone/ssl/certs/signing_cert.pem']],
+ File['/etc/keystone/ssl/private/signing_key.pem'],
+ File['/etc/keystone/ssl/certs/signing_cert.pem']],
}
-
- pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
- constraint_type => 'order',
- first_resource => "haproxy-clone",
- second_resource => "${::keystone::params::service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ if $enable_load_balancer {
+ pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
+ constraint_type => 'order',
+ first_resource => 'haproxy-clone',
+ second_resource => "${::keystone::params::service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ }
}
pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint':
constraint_type => 'order',
- first_resource => "rabbitmq-clone",
+ first_resource => 'rabbitmq-clone',
second_resource => "${::keystone::params::service_name}-clone",
first_action => 'start',
second_action => 'start',
@@ -966,7 +1028,7 @@ if hiera('step') >= 4 {
}
pacemaker::constraint::base { 'memcached-then-keystone-constraint':
constraint_type => 'order',
- first_resource => "memcached-clone",
+ first_resource => 'memcached-clone',
second_resource => "${::keystone::params::service_name}-clone",
first_action => 'start',
second_action => 'start',
@@ -975,7 +1037,7 @@ if hiera('step') >= 4 {
}
pacemaker::constraint::base { 'galera-then-keystone-constraint':
constraint_type => 'order',
- first_resource => "galera-master",
+ first_resource => 'galera-master',
second_resource => "${::keystone::params::service_name}-clone",
first_action => 'promote',
second_action => 'start',
@@ -985,11 +1047,11 @@ if hiera('step') >= 4 {
# Cinder
pacemaker::resource::service { $::cinder::params::api_service :
- clone_params => "interleave=true",
+ clone_params => 'interleave=true',
require => Pacemaker::Resource::Service[$::keystone::params::service_name],
}
pacemaker::resource::service { $::cinder::params::scheduler_service :
- clone_params => "interleave=true",
+ clone_params => 'interleave=true',
}
pacemaker::resource::service { $::cinder::params::volume_service : }
@@ -1003,45 +1065,45 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::keystone::params::service_name]],
}
pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
- constraint_type => "order",
- first_resource => "${::cinder::params::api_service}-clone",
+ constraint_type => 'order',
+ first_resource => "${::cinder::params::api_service}-clone",
second_resource => "${::cinder::params::scheduler_service}-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
- Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
+ Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
}
pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
- source => "${::cinder::params::scheduler_service}-clone",
- target => "${::cinder::params::api_service}-clone",
- score => "INFINITY",
+ source => "${::cinder::params::scheduler_service}-clone",
+ target => "${::cinder::params::api_service}-clone",
+ score => 'INFINITY',
require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
}
pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
- constraint_type => "order",
- first_resource => "${::cinder::params::scheduler_service}-clone",
- second_resource => "${::cinder::params::volume_service}",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
- Pacemaker::Resource::Service[$::cinder::params::volume_service]],
+ constraint_type => 'order',
+ first_resource => "${::cinder::params::scheduler_service}-clone",
+ second_resource => $::cinder::params::volume_service,
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
+ Pacemaker::Resource::Service[$::cinder::params::volume_service]],
}
pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
- source => "${::cinder::params::volume_service}",
- target => "${::cinder::params::scheduler_service}-clone",
- score => "INFINITY",
+ source => $::cinder::params::volume_service,
+ target => "${::cinder::params::scheduler_service}-clone",
+ score => 'INFINITY',
require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
Pacemaker::Resource::Service[$::cinder::params::volume_service]],
}
# Glance
pacemaker::resource::service { $::glance::params::registry_service_name :
- clone_params => "interleave=true",
+ clone_params => 'interleave=true',
require => Pacemaker::Resource::Service[$::keystone::params::service_name],
}
pacemaker::resource::service { $::glance::params::api_service_name :
- clone_params => "interleave=true",
+ clone_params => 'interleave=true',
}
pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
@@ -1054,177 +1116,253 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::keystone::params::service_name]],
}
pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
- constraint_type => "order",
+ constraint_type => 'order',
first_resource => "${::glance::params::registry_service_name}-clone",
second_resource => "${::glance::params::api_service_name}-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
- Pacemaker::Resource::Service[$::glance::params::api_service_name]],
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
+ Pacemaker::Resource::Service[$::glance::params::api_service_name]],
}
pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
source => "${::glance::params::api_service_name}-clone",
target => "${::glance::params::registry_service_name}-clone",
- score => "INFINITY",
+ score => 'INFINITY',
require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
Pacemaker::Resource::Service[$::glance::params::api_service_name]],
}
- # Neutron
- # NOTE(gfidente): Neutron will try to populate the database with some data
- # as soon as neutron-server is started; to avoid races we want to make this
- # happen only on one node, before normal Pacemaker initialization
- # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
- exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } ->
- pacemaker::resource::service { $::neutron::params::server_service:
- clone_params => "interleave=true",
- require => Pacemaker::Resource::Service[$::keystone::params::service_name]
- }
- pacemaker::resource::service { $::neutron::params::l3_agent_service:
- clone_params => "interleave=true",
- }
- pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
- clone_params => "interleave=true",
- }
- pacemaker::resource::service { $::neutron::params::ovs_agent_service:
- clone_params => "interleave=true",
- }
- pacemaker::resource::service { $::neutron::params::metadata_agent_service:
- clone_params => "interleave=true",
- }
- pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
- ocf_agent_name => "neutron:OVSCleanup",
- clone_params => "interleave=true",
- }
- pacemaker::resource::ocf { 'neutron-netns-cleanup':
- ocf_agent_name => "neutron:NetnsCleanup",
- clone_params => "interleave=true",
- }
-
- # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
- pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
- constraint_type => "order",
- first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
- second_resource => "neutron-netns-cleanup-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
- Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
- }
- pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
- source => "neutron-netns-cleanup-clone",
- target => "${::neutron::params::ovs_cleanup_service}-clone",
- score => "INFINITY",
- require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
- Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
- }
- pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
- constraint_type => "order",
- first_resource => "neutron-netns-cleanup-clone",
- second_resource => "${::neutron::params::ovs_agent_service}-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
- Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
- }
- pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
- source => "${::neutron::params::ovs_agent_service}-clone",
- target => "neutron-netns-cleanup-clone",
- score => "INFINITY",
- require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
- Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
- }
-
- #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3
+ if hiera('step') == 4 {
+ # Neutron
+ # NOTE(gfidente): Neutron will try to populate the database with some data
+ # as soon as neutron-server is started; to avoid races we want to make this
+ # happen only on one node, before normal Pacemaker initialization
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
+ # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec
+ # will try to start the service while it's already started by Pacemaker
+ # It would result to a deployment failure since systemd would return 1 to Puppet
+ # and the overcloud would fail to deploy (6 would be returned).
+ # This conditional prevents from a race condition during the deployment.
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
+ exec { 'neutron-server-systemd-start-sleep' :
+ command => 'systemctl start neutron-server && /usr/bin/sleep 5',
+ path => '/usr/bin',
+ unless => '/sbin/pcs resource show neutron-server',
+ } ->
+ pacemaker::resource::service { $::neutron::params::server_service:
+ clone_params => 'interleave=true',
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name]
+ }
+ } else {
+ pacemaker::resource::service { $::neutron::params::server_service:
+ clone_params => 'interleave=true',
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name]
+ }
+ }
+ if hiera('neutron::enable_l3_agent', true) {
+ pacemaker::resource::service { $::neutron::params::l3_agent_service:
+ clone_params => 'interleave=true',
+ }
+ }
+ if hiera('neutron::enable_dhcp_agent', true) {
+ pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
+ clone_params => 'interleave=true',
+ }
+ }
+ if hiera('neutron::enable_ovs_agent', true) {
+ pacemaker::resource::service { $::neutron::params::ovs_agent_service:
+ clone_params => 'interleave=true',
+ }
+ }
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+ pacemaker::resource::service {'tomcat':
+ clone_params => 'interleave=true',
+ }
+ }
+ if hiera('neutron::enable_metadata_agent', true) {
+ pacemaker::resource::service { $::neutron::params::metadata_agent_service:
+ clone_params => 'interleave=true',
+ }
+ }
+ if hiera('neutron::enable_ovs_agent', true) {
+ pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
+ ocf_agent_name => 'neutron:OVSCleanup',
+ clone_params => 'interleave=true',
+ }
+ pacemaker::resource::ocf { 'neutron-netns-cleanup':
+ ocf_agent_name => 'neutron:NetnsCleanup',
+ clone_params => 'interleave=true',
+ }
+
+ # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
+ pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
+ second_resource => 'neutron-netns-cleanup-clone',
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
+ Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
+ }
+ pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
+ source => 'neutron-netns-cleanup-clone',
+ target => "${::neutron::params::ovs_cleanup_service}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
+ Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
+ }
+ pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
+ constraint_type => 'order',
+ first_resource => 'neutron-netns-cleanup-clone',
+ second_resource => "${::neutron::params::ovs_agent_service}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
+ Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
+ }
+ pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
+ source => "${::neutron::params::ovs_agent_service}-clone",
+ target => 'neutron-netns-cleanup-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
+ Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
+ }
+ }
+
pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
- constraint_type => "order",
- first_resource => "${::keystone::params::service_name}-clone",
+ constraint_type => 'order',
+ first_resource => "${::keystone::params::service_name}-clone",
second_resource => "${::neutron::params::server_service}-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
- Pacemaker::Resource::Service[$::neutron::params::server_service]],
- }
- pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
- constraint_type => "order",
- first_resource => "${::neutron::params::server_service}-clone",
- second_resource => "${::neutron::params::ovs_agent_service}-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
- }
- pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
- constraint_type => "order",
- first_resource => "${::neutron::params::ovs_agent_service}-clone",
- second_resource => "${::neutron::params::dhcp_agent_service}-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
- Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
-
- }
- pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
- source => "${::neutron::params::dhcp_agent_service}-clone",
- target => "${::neutron::params::ovs_agent_service}-clone",
- score => "INFINITY",
- require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
- Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
- }
- pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
- constraint_type => "order",
- first_resource => "${::neutron::params::dhcp_agent_service}-clone",
- second_resource => "${::neutron::params::l3_agent_service}-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
- Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
- }
- pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
- source => "${::neutron::params::l3_agent_service}-clone",
- target => "${::neutron::params::dhcp_agent_service}-clone",
- score => "INFINITY",
- require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
- Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
- }
- pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
- constraint_type => "order",
- first_resource => "${::neutron::params::l3_agent_service}-clone",
- second_resource => "${::neutron::params::metadata_agent_service}-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
- Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
- }
- pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
- source => "${::neutron::params::metadata_agent_service}-clone",
- target => "${::neutron::params::l3_agent_service}-clone",
- score => "INFINITY",
- require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
- Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
+ Pacemaker::Resource::Service[$::neutron::params::server_service]],
+ }
+ if hiera('neutron::enable_ovs_agent',true) {
+ pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::server_service}-clone",
+ second_resource => "${::neutron::params::ovs_agent_service}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
+ Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
+ }
+ }
+ if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) {
+ pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::ovs_agent_service}-clone",
+ second_resource => "${::neutron::params::dhcp_agent_service}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
+
+ }
+ pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
+ source => "${::neutron::params::dhcp_agent_service}-clone",
+ target => "${::neutron::params::ovs_agent_service}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
+ }
+ }
+ if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_l3_agent',true) {

+ pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::dhcp_agent_service}-clone",
+ second_resource => "${::neutron::params::l3_agent_service}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
+ }
+ pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
+ source => "${::neutron::params::l3_agent_service}-clone",
+ target => "${::neutron::params::dhcp_agent_service}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
+ }
+ }
+ if hiera('neutron::enable_l3_agent',true) and hiera('neutron::enable_metadata_agent',true) {
+ pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::l3_agent_service}-clone",
+ second_resource => "${::neutron::params::metadata_agent_service}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
+ }
+ pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
+ source => "${::neutron::params::metadata_agent_service}-clone",
+ target => "${::neutron::params::l3_agent_service}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
+ }
+ }
+ if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+ #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
+ pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::server_service}-clone",
+ second_resource => "${::neutron::params::dhcp_agent_service}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
+ Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
+ }
+ pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::dhcp_agent_service}-clone",
+ second_resource => "${::neutron::params::metadata_agent_service}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
+ }
+ pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
+ constraint_type => 'order',
+ first_resource => "${::neutron::params::metadata_agent_service}-clone",
+ second_resource => 'tomcat-clone',
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
+ Pacemaker::Resource::Service['tomcat']],
+ }
+ pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
+ source => "${::neutron::params::metadata_agent_service}-clone",
+ target => "${::neutron::params::dhcp_agent_service}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
+ Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
+ }
}
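Summing up the neutron portion of step 4 above: each agent (dhcp, l3, metadata, ovs) is only turned into a pacemaker clone, and only enters the keystone -> neutron-server -> agents ordering chain, when its neutron::enable_* hiera flag is true; with the MidoNet core plugin a tomcat clone and its own server -> dhcp -> metadata -> tomcat chain are added instead. A minimal sketch of the hiera-gated pattern, with diff markers stripped:

if hiera('neutron::enable_dhcp_agent', true) {
  pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
    clone_params => 'interleave=true',
  }
}
if hiera('neutron::enable_dhcp_agent', true) and hiera('neutron::enable_l3_agent', true) {
  # l3 agent must start after the dhcp agent on the same node
  pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
    constraint_type => 'order',
    first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
    second_resource => "${::neutron::params::l3_agent_service}-clone",
    first_action    => 'start',
    second_action   => 'start',
  }
}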
# Nova
pacemaker::resource::service { $::nova::params::api_service_name :
- clone_params => "interleave=true",
- op_params => "start timeout=90s monitor start-delay=10s",
+ clone_params => 'interleave=true',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::resource::service { $::nova::params::conductor_service_name :
- clone_params => "interleave=true",
- op_params => "start timeout=90s monitor start-delay=10s",
+ clone_params => 'interleave=true',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::resource::service { $::nova::params::consoleauth_service_name :
- clone_params => "interleave=true",
- op_params => "start timeout=90s monitor start-delay=10s",
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
+ clone_params => 'interleave=true',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
+ require => Pacemaker::Resource::Service[$::keystone::params::service_name],
}
pacemaker::resource::service { $::nova::params::vncproxy_service_name :
- clone_params => "interleave=true",
- op_params => "start timeout=90s monitor start-delay=10s",
+ clone_params => 'interleave=true',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::resource::service { $::nova::params::scheduler_service_name :
- clone_params => "interleave=true",
- op_params => "start timeout=90s monitor start-delay=10s",
+ clone_params => 'interleave=true',
+ op_params => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
}
pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
@@ -1237,66 +1375,66 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::keystone::params::service_name]],
}
pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
- constraint_type => "order",
+ constraint_type => 'order',
first_resource => "${::nova::params::consoleauth_service_name}-clone",
second_resource => "${::nova::params::vncproxy_service_name}-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
- Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
+ Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
}
pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
- source => "${::nova::params::vncproxy_service_name}-clone",
- target => "${::nova::params::consoleauth_service_name}-clone",
- score => "INFINITY",
+ source => "${::nova::params::vncproxy_service_name}-clone",
+ target => "${::nova::params::consoleauth_service_name}-clone",
+ score => 'INFINITY',
require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
}
pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
- constraint_type => "order",
+ constraint_type => 'order',
first_resource => "${::nova::params::vncproxy_service_name}-clone",
second_resource => "${::nova::params::api_service_name}-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
- Pacemaker::Resource::Service[$::nova::params::api_service_name]],
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
+ Pacemaker::Resource::Service[$::nova::params::api_service_name]],
}
pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
- source => "${::nova::params::api_service_name}-clone",
- target => "${::nova::params::vncproxy_service_name}-clone",
- score => "INFINITY",
+ source => "${::nova::params::api_service_name}-clone",
+ target => "${::nova::params::vncproxy_service_name}-clone",
+ score => 'INFINITY',
require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
Pacemaker::Resource::Service[$::nova::params::api_service_name]],
}
pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
- constraint_type => "order",
+ constraint_type => 'order',
first_resource => "${::nova::params::api_service_name}-clone",
second_resource => "${::nova::params::scheduler_service_name}-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
- Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
+ Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
}
pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
- source => "${::nova::params::scheduler_service_name}-clone",
- target => "${::nova::params::api_service_name}-clone",
- score => "INFINITY",
+ source => "${::nova::params::scheduler_service_name}-clone",
+ target => "${::nova::params::api_service_name}-clone",
+ score => 'INFINITY',
require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
}
pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
- constraint_type => "order",
+ constraint_type => 'order',
first_resource => "${::nova::params::scheduler_service_name}-clone",
second_resource => "${::nova::params::conductor_service_name}-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
- Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
+ Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
}
pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
- source => "${::nova::params::conductor_service_name}-clone",
- target => "${::nova::params::scheduler_service_name}-clone",
- score => "INFINITY",
+ source => "${::nova::params::conductor_service_name}-clone",
+ target => "${::nova::params::scheduler_service_name}-clone",
+ score => 'INFINITY',
require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
}
@@ -1313,7 +1451,7 @@ if hiera('step') >= 4 {
pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
clone_params => 'interleave=true',
require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
- Pacemaker::Resource::Service[$::mongodb::params::service_name]],
+ Pacemaker::Resource::Service[$::mongodb::params::service_name]],
}
}
}
@@ -1323,12 +1461,6 @@ if hiera('step') >= 4 {
pacemaker::resource::service { $::ceilometer::params::api_service_name :
clone_params => 'interleave=true',
}
- pacemaker::resource::service { $::ceilometer::params::alarm_evaluator_service_name :
- clone_params => 'interleave=true',
- }
- pacemaker::resource::service { $::ceilometer::params::alarm_notifier_service_name :
- clone_params => 'interleave=true',
- }
pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
clone_params => 'interleave=true',
}
@@ -1345,7 +1477,7 @@ if hiera('step') >= 4 {
}
pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
constraint_type => 'order',
- first_resource => "redis-master",
+ first_resource => 'redis-master',
second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
first_action => 'promote',
second_action => 'start',
@@ -1403,54 +1535,6 @@ if hiera('step') >= 4 {
require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
Pacemaker::Resource::Ocf['delay']],
}
- pacemaker::constraint::base { 'ceilometer-delay-then-ceilometer-alarm-evaluator-constraint':
- constraint_type => 'order',
- first_resource => 'delay-clone',
- second_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
- Pacemaker::Resource::Ocf['delay']],
- }
- pacemaker::constraint::colocation { 'ceilometer-alarm-evaluator-with-ceilometer-delay-colocation':
- source => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
- target => 'delay-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
- Pacemaker::Resource::Ocf['delay']],
- }
- pacemaker::constraint::base { 'ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint':
- constraint_type => 'order',
- first_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
- second_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
- Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
- }
- pacemaker::constraint::colocation { 'ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation':
- source => "${::ceilometer::params::alarm_notifier_service_name}-clone",
- target => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
- Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
- }
- pacemaker::constraint::base { 'ceilometer-alarm-notifier-then-ceilometer-notification-constraint':
- constraint_type => 'order',
- first_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone",
- second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
- Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
- }
- pacemaker::constraint::colocation { 'ceilometer-notification-with-ceilometer-alarm-notifier-colocation':
- source => "${::ceilometer::params::agent_notification_service_name}-clone",
- target => "${::ceilometer::params::alarm_notifier_service_name}-clone",
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
- Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
- }
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
constraint_type => 'order',
@@ -1491,8 +1575,8 @@ if hiera('step') >= 4 {
second_resource => "${::heat::params::api_cfn_service_name}-clone",
first_action => 'start',
second_action => 'start',
- require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
- Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
+ require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
+ Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
}
pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
source => "${::heat::params::api_cfn_service_name}-clone",
@@ -1507,8 +1591,8 @@ if hiera('step') >= 4 {
second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
first_action => 'start',
second_action => 'start',
- require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
- Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
+ require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
+ Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
}
pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
source => "${::heat::params::api_cloudwatch_service_name}-clone",
@@ -1523,8 +1607,8 @@ if hiera('step') >= 4 {
second_resource => "${::heat::params::engine_service_name}-clone",
first_action => 'start',
second_action => 'start',
- require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
- Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
+ require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
+ Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
}
pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
source => "${::heat::params::engine_service_name}-clone",
@@ -1545,18 +1629,18 @@ if hiera('step') >= 4 {
# Horizon
pacemaker::resource::service { $::horizon::params::http_service:
- clone_params => "interleave=true",
+ clone_params => 'interleave=true',
}
#VSM
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+ if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
pacemaker::resource::ocf { 'vsm-p' :
ocf_agent_name => 'heartbeat:VirtualDomain',
resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
require => Class['n1k_vsm'],
meta_params => 'resource-stickiness=INFINITY',
}
- if str2bool(hiera('n1k_vsm::pacemaker_control', 'true')) {
+ if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
pacemaker::resource::ocf { 'vsm-s' :
ocf_agent_name => 'heartbeat:VirtualDomain',
resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
@@ -1564,9 +1648,9 @@ if hiera('step') >= 4 {
meta_params => 'resource-stickiness=INFINITY',
}
pacemaker::constraint::colocation { 'vsm-colocation-contraint':
- source => "vsm-p",
- target => "vsm-s",
- score => "-INFINITY",
+ source => 'vsm-p',
+ target => 'vsm-s',
+ score => '-INFINITY',
require => [Pacemaker::Resource::Ocf['vsm-p'],
Pacemaker::Resource::Ocf['vsm-s']],
}
@@ -1586,27 +1670,6 @@ if hiera('step') >= 5 {
} ->
class {'::keystone::endpoint' :
require => Pacemaker::Resource::Service[$::keystone::params::service_name],
- } ->
- class { '::ceilometer::keystone::auth' :
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
- } ->
- class { '::cinder::keystone::auth' :
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
- } ->
- class { '::glance::keystone::auth' :
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
- } ->
- class { '::heat::keystone::auth' :
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
- } ->
- class { '::neutron::keystone::auth' :
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
- } ->
- class { '::nova::keystone::auth' :
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
- } ->
- class { '::swift::keystone::auth' :
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
}
}
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
index 5f4b070d..1eabddf1 100644
--- a/puppet/manifests/overcloud_object.pp
+++ b/puppet/manifests/overcloud_object.pp
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-include tripleo::packages
+include ::tripleo::packages
+include ::tripleo::firewall
create_resources(sysctl::value, hiera('sysctl_settings'), {})
@@ -22,8 +23,8 @@ if count(hiera('ntp::servers')) > 0 {
}
include ::swift
-class {'swift::storage::all':
- mount_check => str2bool(hiera('swift_mount_check'))
+class { '::swift::storage::all':
+ mount_check => str2bool(hiera('swift_mount_check')),
}
if(!defined(File['/srv/node'])) {
file { '/srv/node':
@@ -43,7 +44,7 @@ snmp::snmpv3_user { $snmpd_user:
authtype => 'MD5',
authpass => hiera('snmpd_readonly_user_password'),
}
-class { 'snmp':
+class { '::snmp':
agentaddress => ['udp:161','udp6:[::1]:161'],
snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index eaaed66e..2bdd8a9c 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-include tripleo::packages
+include ::tripleo::packages
+include ::tripleo::firewall
create_resources(sysctl::value, hiera('sysctl_settings'), {})
@@ -47,7 +48,7 @@ snmp::snmpv3_user { $snmpd_user:
authtype => 'MD5',
authpass => hiera('snmpd_readonly_user_password'),
}
-class { 'snmp':
+class { '::snmp':
agentaddress => ['udp:161','udp6:[::1]:161'],
snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp
index 1897dcd0..2d880d33 100644
--- a/puppet/manifests/ringbuilder.pp
+++ b/puppet/manifests/ringbuilder.pp
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-include tripleo::packages
+include ::tripleo::packages
define add_devices(
$swift_zones = '1'
@@ -37,44 +37,46 @@ define add_devices(
$base = regsubst($name,'^r1.*-(.*)$','\1')
$object = regsubst($base, '%PORT%', '6000')
ring_object_device { $object:
- zone => '1',
- weight => 100,
+ zone => '1',
+ weight => 100,
}
$container = regsubst($base, '%PORT%', '6001')
ring_container_device { $container:
- zone => '1',
- weight => 100,
+ zone => '1',
+ weight => 100,
}
$account = regsubst($base, '%PORT%', '6002')
ring_account_device { $account:
- zone => '1',
- weight => 100,
+ zone => '1',
+ weight => 100,
}
}
class tripleo::ringbuilder (
$swift_zones = '1',
$devices = '',
- $build_ring = 'True',
+ $build_ring = true,
$part_power,
$replicas,
$min_part_hours,
) {
- if str2bool(downcase("$build_ring")) {
+ validate_bool($build_ring)
+
+ if $build_ring {
$device_array = strip(split(rstrip($devices), ','))
# create local rings
swift::ringbuilder::create{ ['object', 'account', 'container']:
part_power => $part_power,
- replicas => $replicas,
+ replicas => min(count($device_array), $replicas),
min_part_hours => $min_part_hours,
} ->
# add all other devices
add_devices {$device_array:
- swift_zones => $swift_zones
+ swift_zones => $swift_zones,
} ->
# rebalance
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 22ec6096..b60664a1 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -7,7 +7,6 @@ parameters:
constraints:
- custom_constraint: nova.flavor
HashSuffix:
- default: unset
description: A random string to be used as a salt when hashing to determine mappings
in the ring.
hidden: true
@@ -17,7 +16,7 @@ parameters:
type: string
KeyName:
default: default
- description: Name of an existing EC2 KeyPair to enable SSH access to the instances
+ description: Name of an existing Nova key pair to enable SSH access to the instances
type: string
MountCheck:
default: 'false'
@@ -40,13 +39,13 @@ parameters:
description: The user name for SNMPd with readonly rights running on all Overcloud nodes
type: string
SnmpdReadonlyUserPassword:
- default: unset
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
NtpServer:
- type: string
default: ''
+ description: Comma-separated list of NTP servers
+ type: comma_delimited_list
EnablePackageInstall:
default: 'false'
description: Set to true to enable package installation via Puppet
@@ -76,7 +75,34 @@ parameters:
description: |
Role specific additional hiera configuration to inject into the cluster.
type: json
-
+ NetworkDeploymentActions:
+ type: comma_delimited_list
+ description: >
+ Heat stack actions on which to apply network configuration changes
+ default: ['CREATE']
+ SoftwareConfigTransport:
+ default: POLL_SERVER_CFN
+ description: |
+ How the server should receive the metadata required for software configuration.
+ type: string
+ constraints:
+ - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
+ CloudDomain:
+ default: ''
+ type: string
+ description: >
+ The DNS domain used for the hosts. This should match the dhcp_domain
+ configured in the Undercloud neutron. Defaults to localdomain.
+ ServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API.
+ type: json
+ SchedulerHints:
+ type: json
+ description: Optional scheduler hints to pass to Nova
+ default: {}
resources:
@@ -91,6 +117,9 @@ resources:
user_data_format: SOFTWARE_CONFIG
user_data: {get_resource: UserData}
name: {get_param: Hostname}
+ software_config_transport: {get_param: SoftwareConfigTransport}
+ metadata: {get_param: ServerMetadata}
+ scheduler_hints: {get_param: SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
@@ -112,6 +141,11 @@ resources:
NodeUserData:
type: OS::TripleO::NodeUserData
+ ExternalPort:
+ type: OS::TripleO::SwiftStorage::Ports::ExternalPort
+ properties:
+ ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+
InternalApiPort:
type: OS::TripleO::SwiftStorage::Ports::InternalApiPort
properties:
@@ -127,27 +161,44 @@ resources:
properties:
ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ TenantPort:
+ type: OS::TripleO::SwiftStorage::Ports::TenantPort
+ properties:
+ ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+
+ ManagementPort:
+ type: OS::TripleO::SwiftStorage::Ports::ManagementPort
+ properties:
+ ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+
NetworkConfig:
type: OS::TripleO::ObjectStorage::Net::SoftwareConfig
properties:
ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+ TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+ ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
NetIpMap:
type: OS::TripleO::Network::Ports::NetIpMap
properties:
ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ ExternalIp: {get_attr: [ExternalPort, ip_address]}
InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
StorageIp: {get_attr: [StoragePort, ip_address]}
StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+ TenantIp: {get_attr: [TenantPort, ip_address]}
+ ManagementIp: {get_attr: [ManagementPort, ip_address]}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
properties:
config: {get_resource: NetworkConfig}
server: {get_resource: SwiftStorage}
+ actions: {get_param: NetworkDeploymentActions}
SwiftStorageHieraConfig:
type: OS::Heat::StructuredConfig
@@ -207,19 +258,22 @@ resources:
swift_min_part_hours: {get_param: MinPartHours}
swift_part_power: {get_param: PartPower}
swift_replicas: { get_param: Replicas}
- ntp_servers:
- str_replace:
- template: '["server"]'
- params:
- server: {get_param: NtpServer}
+ ntp_servers: {get_param: NtpServer}
enable_package_install: {get_param: EnablePackageInstall}
enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
+ # Resource for site-specific injection of root certificate
+ NodeTLSCAData:
+ depends_on: SwiftStorageHieraDeploy
+ type: OS::TripleO::NodeTLSCAData
+ properties:
+ server: {get_resource: SwiftStorage}
+
# Hook for site-specific additional pre-deployment config,
# applying to all nodes, e.g. node registration/unregistration
NodeExtraConfig:
- depends_on: SwiftStorageHieraDeploy
+ depends_on: NodeTLSCAData
type: OS::TripleO::NodeExtraConfig
properties:
server: {get_resource: SwiftStorage}
@@ -240,9 +294,10 @@ outputs:
hosts_entry:
value:
str_replace:
- template: "IP HOST.localdomain HOST"
+ template: "IP HOST.DOMAIN HOST"
params:
IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
HOST: {get_attr: [SwiftStorage, name]}
nova_server_resource:
description: Heat resource handle for the swift storage server
@@ -255,6 +310,9 @@ outputs:
template: 'r1z1-IP:%PORT%/d1'
params:
IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
+ external_ip_address:
+ description: IP address of the server in the external network
+ value: {get_attr: [ExternalPort, ip_address]}
internal_api_ip_address:
description: IP address of the server in the internal_api network
value: {get_attr: [InternalApiPort, ip_address]}
@@ -264,10 +322,17 @@ outputs:
storage_mgmt_ip_address:
description: IP address of the server in the storage_mgmt network
value: {get_attr: [StorageMgmtPort, ip_address]}
+ tenant_ip_address:
+ description: IP address of the server in the tenant network
+ value: {get_attr: [TenantPort, ip_address]}
+ management_ip_address:
+ description: IP address of the server in the management network
+ value: {get_attr: [ManagementPort, ip_address]}
config_identifier:
description: identifier which changes if the node configuration may need re-applying
value:
list_join:
- ','
- - {get_attr: [SwiftStorageHieraDeploy, deploy_stdout]}
+ - {get_attr: [NodeTLSCAData, deploy_stdout]}
- {get_param: UpdateIdentifier}
diff --git a/setup.cfg b/setup.cfg
index 7b192c46..f7f96639 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -14,10 +14,3 @@ classifier =
Operating System :: POSIX :: Linux
Programming Language :: Other
Environment :: Console
-
-[files]
-packages =
- tripleo_heat_merge
-[entry_points]
-console_scripts =
- tripleo-heat-merge = tripleo_heat_merge.merge:main
diff --git a/test_merge.bash b/test_merge.bash
deleted file mode 100755
index de29d075..00000000
--- a/test_merge.bash
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-set -ue
-result=""
-cleanup() {
- if [ -n "$result" ] ; then
- rm -f $result
- fi
-}
-trap cleanup EXIT
-run_test() {
- local cmd=$1
- local expected=$2
- result=$(mktemp /tmp/test_merge.XXXXXX)
- fail=0
- $cmd --output $result
- if ! cmp $result $expected ; then
- diff -u $expected $result || :
- echo FAIL - $cmd result does not match expected
- fail=1
- else
- echo PASS - $cmd
- fi
- cleanup
-}
-echo
-merge_py="./tripleo_heat_merge/merge.py"
-run_test "python $merge_py examples/source.yaml" examples/source_lib_result.yaml
-run_test "python $merge_py examples/source2.yaml" examples/source2_lib_result.yaml
-run_test "python $merge_py examples/source_include_subkey.yaml" examples/source_include_subkey_result.yaml
-run_test "python $merge_py examples/launchconfig1.yaml examples/launchconfig2.yaml" examples/launchconfig_result.yaml
-run_test "python $merge_py --scale NovaCompute=3 examples/scale1.yaml" examples/scale_result.yaml
-run_test "python $merge_py --scale NovaCompute=3 examples/scale_map.yaml" examples/scale_map_result.yaml
-run_test "python $merge_py --hot examples/source_hot.yaml" examples/source_lib_result_hot.yaml
-run_test "python $merge_py --hot examples/source2_hot.yaml" examples/source2_lib_result_hot.yaml
-run_test "python $merge_py --hot examples/source_include_subkey_hot.yaml" examples/source_include_subkey_result_hot.yaml
-run_test "python $merge_py --hot examples/launchconfig1_hot.yaml examples/launchconfig2_hot.yaml" examples/launchconfig_result_hot.yaml
-run_test "python $merge_py --hot --scale NovaCompute=3 examples/scale1_hot.yaml" examples/scale_result_hot.yaml
-run_test "python $merge_py --hot --scale NovaCompute=3 examples/scale_map_hot.yaml" examples/scale_map_result_hot.yaml
-run_test "python $merge_py --hot --scale NovaCompute=5,1,2 examples/scale_map_hot.yaml" examples/scale_map_result_hot_blacklist.yaml
-run_test "python $merge_py --hot --scale NovaCompute=3, examples/scale_map_hot.yaml" examples/scale_map_result_hot.yaml
-echo
-trap - EXIT
-exit $fail
diff --git a/tripleo_heat_merge/__init__.py b/tripleo_heat_merge/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tripleo_heat_merge/__init__.py
+++ /dev/null
diff --git a/tripleo_heat_merge/merge.py b/tripleo_heat_merge/merge.py
deleted file mode 100644
index b5bec0f4..00000000
--- a/tripleo_heat_merge/merge.py
+++ /dev/null
@@ -1,436 +0,0 @@
-import os
-import sys
-import yaml
-import argparse
-
-
-class Cfn(object):
-
- base_template = {
- 'HeatTemplateFormatVersion': '2012-12-12',
- 'Description': []
- }
- get_resource = 'Ref'
- get_param = 'Ref'
- description = 'Description'
- parameters = 'Parameters'
- outputs = 'Outputs'
- resources = 'Resources'
- type = 'Type'
- properties = 'Properties'
- metadata = 'Metadata'
- depends_on = 'DependsOn'
- get_attr = 'Fn::GetAtt'
-
-
-class Hot(object):
-
- base_template = {
- 'heat_template_version': '2013-05-23',
- 'description': []
- }
- get_resource = 'get_resource'
- get_param = 'get_param'
- description = 'description'
- parameters = 'parameters'
- outputs = 'outputs'
- resources = 'resources'
- type = 'type'
- properties = 'properties'
- metadata = 'metadata'
- depends_on = 'depends_on'
- get_attr = 'get_attr'
-
-
-lang = Cfn()
-
-
-def apply_maps(template):
- """Apply Merge::Map within template.
-
- Any dict {'Merge::Map': {'Foo': 'Bar', 'Baz': 'Quux'}}
- will resolve to ['Bar', 'Quux'] - that is a dict with key
- 'Merge::Map' is replaced entirely by that dict['Merge::Map'].values().
- """
- if isinstance(template, dict):
- if 'Merge::Map' in template:
- return sorted(
- apply_maps(value) for value in template['Merge::Map'].values()
- )
- else:
- return dict((key, apply_maps(value))
- for key, value in template.items())
- elif isinstance(template, list):
- return [apply_maps(item) for item in template]
- else:
- return template
-
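The apply_maps docstring above describes the Merge::Map convention: any dict containing the key 'Merge::Map' is replaced entirely by the sorted list of that sub-dict's values. A minimal standalone sketch of that behaviour, independent of the deleted module; the 'hosts' key and the Foo/Baz entries are illustrative data only:

# Sketch of the Merge::Map resolution described in apply_maps above.
def resolve_merge_maps(node):
    if isinstance(node, dict):
        if 'Merge::Map' in node:
            # Replace the whole dict with the sorted list of its values.
            return sorted(resolve_merge_maps(v) for v in node['Merge::Map'].values())
        return {k: resolve_merge_maps(v) for k, v in node.items()}
    if isinstance(node, list):
        return [resolve_merge_maps(v) for v in node]
    return node

print(resolve_merge_maps({'hosts': {'Merge::Map': {'Foo': 'Bar', 'Baz': 'Quux'}}}))
# {'hosts': ['Bar', 'Quux']}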
-
-def apply_scaling(template, scaling, in_copies=None):
- """Apply a set of scaling operations to template.
-
- This is a single pass recursive function: for each call we process one
- dict or list and recurse to handle children containers.
-
- Values are handled via scale_value.
-
- Keys in dicts are copied per the scaling rule.
- Values are either replaced or copied depending on whether the given
- scaling rule is in in_copies.
-
- in_copies is reset to None when a dict {'Merge::Map': someobject} is
- encountered.
-
- :param scaling: A dict of prefix -> (count, blacklists).
- """
- in_copies = dict(in_copies or {})
- # Shouldn't be needed but to avoid unexpected side effects/bugs we short
- # circuit no-ops.
- if not scaling:
- return template
- if isinstance(template, dict):
- if 'Merge::Map' in template:
- in_copies = None
- new_template = {}
- for key, value in template.items():
- for prefix, copy_num, new_key in scale_value(
- key, scaling, in_copies):
- if prefix:
- # e.g. Compute0, 1, Compute1Foo
- in_copies[prefix] = prefix[:-1] + str(copy_num)
- if isinstance(value, (dict, list)):
- new_value = apply_scaling(value, scaling, in_copies)
- new_template[new_key] = new_value
- else:
- new_values = list(scale_value(value, scaling, in_copies))
- # We have nowhere to multiply a non-container value of a
- # dict, so it may be copied or unchanged but not scaled.
- assert len(new_values) == 1
- new_template[new_key] = new_values[0][2]
- if prefix:
- del in_copies[prefix]
- return new_template
- elif isinstance(template, list):
- new_template = []
- for value in template:
- if isinstance(value, (dict, list)):
- new_template.append(apply_scaling(value, scaling, in_copies))
- else:
- for _, _, new_value in scale_value(value, scaling, in_copies):
- new_template.append(new_value)
- return new_template
- else:
- raise Exception("apply_scaling called with non-container %r" % template)
-
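The apply_scaling docstring above explains that keys matching a scaled prefix are copied once per requested instance, with blacklisted copy numbers elided. A simplified standalone sketch of that top-level effect follows; it is not the deleted implementation itself, nested renaming inside the copies is omitted, and all key names are hypothetical:

# Simplified sketch of prefix scaling: copy top-level keys that start with a
# scaled prefix, skipping blacklisted copy numbers; other keys pass through.
def scale_top_level(template, scaling):
    result = {}
    for key, value in template.items():
        for prefix, (copies, blacklist) in scaling.items():
            if key.startswith(prefix):
                for n in range(copies):
                    if n not in blacklist:
                        result[prefix[:-1] + str(n) + key[len(prefix):]] = value
                break
        else:
            result[key] = value
    return result

scaled = scale_top_level(
    {'NovaCompute0Server': {'type': 'OS::Nova::Server'}, 'Controller': {}},
    {'NovaCompute0': (3, frozenset({1}))})
print(sorted(scaled))
# ['Controller', 'NovaCompute0Server', 'NovaCompute2Server']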
-
-def scale_value(value, scaling, in_copies):
- """Scale out a value.
-
- :param value: The value to scale (not a container).
- :param scaling: The scaling map (prefix-> (copies, blacklist) to use.
- :param in_copies: What containers we're currently copying.
- :return: An iterator of the new values for the value as tuples:
- (prefix, copy_num, value). E.g. Compute0, 1, Compute1Foo
- prefix and copy_num are only set when:
- - a prefix in scaling matches value
- - and that prefix is not in in_copies
- """
- if isinstance(value, (str, unicode)):
- for prefix, (copies, blacklist) in scaling.items():
- if not value.startswith(prefix):
- continue
- suffix = value[len(prefix):]
- if prefix in in_copies:
- # Adjust to the copy number we're on
- yield None, None, in_copies[prefix] + suffix
- return
- else:
- for n in range(copies):
- if n not in blacklist:
- yield prefix, n, prefix[:-1] + str(n) + suffix
- return
- yield None, None, value
- else:
- yield None, None, value
-
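One detail of the scale_value contract above is worth spelling out: when the prefix is already being copied (it appears in in_copies), a matching string is renamed to the current copy rather than multiplied again. A tiny sketch of just that rename path, using made-up names:

# Sketch of the rename-inside-a-copy behaviour from the scale_value docstring:
# inside copy 'NovaCompute1', values still carrying the original 'NovaCompute0'
# prefix are rewritten to the copy's name instead of being duplicated.
def rename_in_copy(value, prefix, copy_name):
    if isinstance(value, str) and value.startswith(prefix):
        return copy_name + value[len(prefix):]
    return value

print(rename_in_copy('NovaCompute0Config', 'NovaCompute0', 'NovaCompute1'))
# NovaCompute1Config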
-
-def parse_scaling(scaling_args):
- """Translate a list of scaling requests to a dict prefix:count."""
- scaling_args = scaling_args or []
- result = {}
- for item in scaling_args:
- key, values = item.split('=')
- values = values.split(',')
- value = int(values[0])
- blacklist = frozenset(int(v) for v in values[1:] if v)
- result[key + '0'] = value, blacklist
- return result
-
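parse_scaling turns each --scale argument (the semantics are also spelled out in the --scale help text further down) into a prefix -> (count, blacklist) entry keyed on the prefix plus '0'. A standalone sketch mirroring the deleted function:

# Sketch of the --scale argument parsing described above: 'NovaCompute=5,1,2'
# asks for 5 copies of the NovaCompute0 prefix while eliding copies 1 and 2.
def parse_scale_args(scaling_args):
    result = {}
    for item in scaling_args:
        key, values = item.split('=')
        parts = values.split(',')
        count = int(parts[0])
        blacklist = frozenset(int(v) for v in parts[1:] if v)
        result[key + '0'] = (count, blacklist)
    return result

print(parse_scale_args(['NovaCompute=5,1,2']))
# {'NovaCompute0': (5, frozenset({1, 2}))}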
-
-def _translate_role(role, master_role, slave_roles):
- if not master_role:
- return role
- if role == master_role:
- return role
- if role not in slave_roles:
- return role
- return master_role
-
-def translate_role(role, master_role, slave_roles):
- r = _translate_role(role, master_role, slave_roles)
- if not isinstance(r, basestring):
- raise Exception('%s -> %r' % (role, r))
- return r
-
-def resolve_params(item, param, value):
- if item in ({lang.get_param: param}, {lang.get_resource: param}):
- return value
- if isinstance(item, dict):
- copy_item = dict(item)
- for k, v in iter(copy_item.items()):
- item[k] = resolve_params(v, param, value)
- elif isinstance(item, list):
- copy_item = list(item)
- new_item = []
- for v in copy_item:
- new_item.append(resolve_params(v, param, value))
- item = new_item
- return item
-
-MERGABLE_TYPES = {'OS::Nova::Server':
- {'image': 'image'},
- 'AWS::EC2::Instance':
- {'image': 'ImageId'},
- 'AWS::AutoScaling::LaunchConfiguration':
- {},
- }
-INCLUDED_TEMPLATE_DIR = os.getcwd()
-
-
-def resolve_includes(template, params=None):
- new_template = {}
- if params is None:
- params = {}
- for key, value in iter(template.items()):
- if key == '__include__':
- new_params = dict(params) # do not propagate up the stack
- if not isinstance(value, dict):
- raise ValueError('__include__ must be a mapping')
- if 'path' not in value:
- raise ValueError('__include__ must have path')
- if 'params' in value:
- if not isinstance(value['params'], dict):
- raise ValueError('__include__ params must be a mapping')
- new_params.update(value['params'])
- with open(value['path']) as include_file:
- sub_template = yaml.safe_load(include_file.read())
- if 'subkey' in value:
- if ((not isinstance(value['subkey'], int)
- and not isinstance(sub_template, dict))):
- raise RuntimeError('subkey requires mapping root or'
- ' integer for list root')
- sub_template = sub_template[value['subkey']]
- for k, v in iter(new_params.items()):
- sub_template = resolve_params(sub_template, k, v)
- new_template.update(resolve_includes(sub_template))
- else:
- if isinstance(value, dict):
- new_template[key] = resolve_includes(value)
- else:
- new_template[key] = value
- return new_template
-
-def main(argv=None):
- if argv is None:
- argv = sys.argv[1:]
- parser = argparse.ArgumentParser()
- parser.add_argument('templates', nargs='+')
- parser.add_argument('--master-role', nargs='?',
- help='Translate slave_roles to this')
- parser.add_argument('--slave-roles', nargs='*',
- help='Translate all of these to master_role')
- parser.add_argument('--included-template-dir', nargs='?',
- default=INCLUDED_TEMPLATE_DIR,
- help='Path for resolving included templates')
- parser.add_argument('--output',
- help='File to write output to. - for stdout',
- default='-')
- parser.add_argument('--scale', action="append",
- help="Names to scale out. Pass Prefix=2 to cause a key Prefix0Foo to "
- "be copied to Prefix1Foo in the output, and value Prefix0Bar to be"
- "renamed to Prefix1Bar inside that copy, or copied to Prefix1Bar "
- "outside of any copy. Pass Prefix=3,1 to cause Prefix1* to be elided"
- "when scaling Prefix out. Prefix=4,1,2 will likewise elide Prefix1 and"
- "Prefix2.")
- parser.add_argument(
- '--change-image-params', action='store_true', default=False,
- help="Change parameters in templates to match resource names. This was "
- " the default at one time but it causes issues when parameter "
- " names need to remain stable.")
- parser.add_argument(
- '--hot', action='store_true', default=False,
- help="Assume source templates are in the HOT format, and generate a "
- "HOT template artifact.")
- args = parser.parse_args(argv)
- if args.hot:
- global lang
- lang = Hot()
-
- templates = args.templates
- scaling = parse_scaling(args.scale)
- merged_template = merge(templates, args.master_role, args.slave_roles,
- args.included_template_dir, scaling=scaling,
- change_image_params=args.change_image_params)
- if args.output == '-':
- out_file = sys.stdout
- else:
- out_file = file(args.output, 'wt')
- out_file.write(merged_template)
-
-
-def merge(templates, master_role=None, slave_roles=None,
- included_template_dir=INCLUDED_TEMPLATE_DIR,
- scaling=None, change_image_params=None):
- scaling = scaling or {}
- errors = []
- end_template = dict(lang.base_template)
- resource_changes=[]
- for template_path in templates:
- template = yaml.safe_load(open(template_path))
- # Resolve __include__ tags
- template = resolve_includes(template)
- end_template[lang.description].append(template.get(lang.description,
- template_path))
- new_parameters = template.get(lang.parameters, {})
- for p, pbody in sorted(new_parameters.items()):
- if p in end_template.get(lang.parameters, {}):
- if pbody != end_template[lang.parameters][p]:
- errors.append('Parameter %s from %s conflicts.' % (p,
- template_path))
- continue
- if lang.parameters not in end_template:
- end_template[lang.parameters] = {}
- end_template[lang.parameters][p] = pbody
-
- new_outputs = template.get(lang.outputs, {})
- for o, obody in sorted(new_outputs.items()):
- if o in end_template.get(lang.outputs, {}):
- if pbody != end_template[lang.outputs][p]:
- errors.append('Output %s from %s conflicts.' % (o,
- template_path))
- continue
- if lang.outputs not in end_template:
- end_template[lang.outputs] = {}
- end_template[lang.outputs][o] = obody
-
- new_resources = template.get(lang.resources, {})
- for r, rbody in sorted(new_resources.items()):
- if rbody[lang.type] in MERGABLE_TYPES:
- if change_image_params:
- if 'image' in MERGABLE_TYPES[rbody[lang.type]]:
- image_key = MERGABLE_TYPES[rbody[lang.type]]['image']
- # XXX Assuming ImageId is always a Ref
- ikey_val = end_template[lang.parameters][rbody[lang.properties][image_key][lang.get_param]]
- del end_template[lang.parameters][rbody[lang.properties][image_key][lang.get_param]]
- if 'OpenStack::Role' in rbody.get(lang.metadata, {}):
- sys.stderr.write("WARNING: OpenStack::Role is deprecated"
- " and will be removed in a later release\n");
- role = rbody.get(lang.metadata, {}).get('OpenStack::Role', r)
- role = translate_role(role, master_role, slave_roles)
- if role != r:
- resource_changes.append((r, role))
- if role in end_template.get(lang.resources, {}):
- new_metadata = rbody.get(lang.metadata, {})
- for m, mbody in iter(new_metadata.items()):
- if m in end_template[lang.resources][role].get(lang.metadata, {}):
- if m == 'OpenStack::ImageBuilder::Elements':
- end_template[lang.resources][role][lang.metadata][m].extend(mbody)
- sys.stderr.write(
- "WARNING: OpenStack::ImageBuilder::Elements"
- " is deprecated and will be removed in a"
- " later release\n");
- continue
- if mbody != end_template[lang.resources][role][lang.metadata][m]:
- errors.append('Role %s metadata key %s conflicts.' %
- (role, m))
- continue
- role_res = end_template[lang.resources][role]
- if role_res[lang.type] == 'OS::Heat::StructuredConfig':
- end_template[lang.resources][role][lang.properties]['config'][m] = mbody
- else:
- end_template[lang.resources][role][lang.metadata][m] = mbody
- continue
- if lang.resources not in end_template:
- end_template[lang.resources] = {}
- end_template[lang.resources][role] = rbody
- if change_image_params:
- if 'image' in MERGABLE_TYPES[rbody[lang.type]]:
- ikey = '%sImage' % (role)
- end_template[lang.resources][role][lang.properties][image_key] = {lang.get_param: ikey}
- end_template[lang.parameters][ikey] = ikey_val
- elif rbody[lang.type] == 'FileInclude':
- # we trust os.path.join to DTRT: if FileInclude path isn't
- # absolute, join to included_template_dir (./)
- with open(os.path.join(included_template_dir, rbody['Path'])) as rfile:
- include_content = yaml.safe_load(rfile.read())
- subkeys = rbody.get('SubKey','').split('.')
- while len(subkeys) and subkeys[0]:
- include_content = include_content[subkeys.pop(0)]
- for replace_param, replace_value in iter(rbody.get(lang.parameters,
- {}).items()):
- include_content = resolve_params(include_content,
- replace_param,
- replace_value)
- if lang.resources not in end_template:
- end_template[lang.resources] = {}
- end_template[lang.resources][r] = include_content
- else:
- if r in end_template.get(lang.resources, {}):
- if rbody != end_template[lang.resources][r]:
- errors.append('Resource %s from %s conflicts' % (r,
- template_path))
- continue
- if lang.resources not in end_template:
- end_template[lang.resources] = {}
- end_template[lang.resources][r] = rbody
-
- end_template = apply_scaling(end_template, scaling)
- end_template = apply_maps(end_template)
-
- def fix_ref(item, old, new):
- if isinstance(item, dict):
- copy_item = dict(item)
- for k, v in sorted(copy_item.items()):
- if k == lang.get_resource and v == old:
- item[k] = new
- continue
- if k == lang.depends_on and v == old:
- item[k] = new
- continue
- if k == lang.get_attr and isinstance(v, list) and v[0] == old:
- new_list = list(v)
- new_list[0] = new
- item[k] = new_list
- continue
- if k == 'AllowedResources' and isinstance(v, list) and old in v:
- while old in v:
- pos = v.index(old)
- v[pos] = new
- continue
- fix_ref(v, old, new)
- elif isinstance(item, list):
- copy_item = list(item)
- for v in item:
- fix_ref(v, old, new)
-
- for change in resource_changes:
- fix_ref(end_template, change[0], change[1])
-
- if errors:
- for e in errors:
- sys.stderr.write("ERROR: %s\n" % e)
- end_template[lang.description] = ','.join(end_template[lang.description])
- return yaml.safe_dump(end_template, default_flow_style=False)
-
-if __name__ == "__main__":
- main()