-rw-r--r--  environments/manage-firewall.yaml                                   2
-rw-r--r--  environments/updates/README.md                                      9
-rw-r--r--  environments/updates/update-from-keystone-admin-internal-api.yaml  33
-rwxr-xr-x  extraconfig/tasks/yum_update.sh                                    128
-rw-r--r--  overcloud-without-mergepy.yaml                                      10
-rw-r--r--  puppet/ceph-storage.yaml                                             7
-rw-r--r--  puppet/cinder-storage.yaml                                           6
-rw-r--r--  puppet/compute.yaml                                                  6
-rw-r--r--  puppet/controller.yaml                                              19
-rw-r--r--  puppet/hieradata/compute.yaml                                        6
-rw-r--r--  puppet/hieradata/controller.yaml                                   106
-rw-r--r--  puppet/manifests/overcloud_cephstorage.pp                            1
-rw-r--r--  puppet/manifests/overcloud_compute.pp                                1
-rw-r--r--  puppet/manifests/overcloud_controller.pp                             1
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp                   1
-rw-r--r--  puppet/manifests/overcloud_object.pp                                 1
-rw-r--r--  puppet/manifests/overcloud_volume.pp                                 1
-rw-r--r--  puppet/swift-storage.yaml                                            7
18 files changed, 265 insertions(+), 80 deletions(-)
diff --git a/environments/manage-firewall.yaml b/environments/manage-firewall.yaml
new file mode 100644
index 00000000..071f4108
--- /dev/null
+++ b/environments/manage-firewall.yaml
@@ -0,0 +1,2 @@
+parameters:
+  ManageFirewall: true
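
Usage sketch (not part of the commit): the snippet is passed as an extra environment file at deploy time, assuming the `openstack overcloud deploy` command from python-tripleoclient is available.

    # Include the environment file so ManageFirewall is true for the plan
    # (path relative to the templates checkout).
    openstack overcloud deploy --templates \
      -e environments/manage-firewall.yaml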
diff --git a/environments/updates/README.md b/environments/updates/README.md
new file mode 100644
index 00000000..8c03411d
--- /dev/null
+++ b/environments/updates/README.md
@@ -0,0 +1,9 @@
+This directory contains Heat environment file snippets which can
+be used to ensure smooth updates of the Overcloud.
+
+Contents
+--------
+
+**update-from-keystone-admin-internal-api.yaml**
+  To be used if the Keystone Admin API was originally deployed on the
+  Internal API network.
diff --git a/environments/updates/update-from-keystone-admin-internal-api.yaml b/environments/updates/update-from-keystone-admin-internal-api.yaml
new file mode 100644
index 00000000..3c71ef1b
--- /dev/null
+++ b/environments/updates/update-from-keystone-admin-internal-api.yaml
@@ -0,0 +1,33 @@
+# This environment file provides a default value for ServiceNetMap where
+# Keystone Admin API service is running on the Internal API network
+
+parameters:
+  ServiceNetMap:
+    NeutronTenantNetwork: tenant
+    CeilometerApiNetwork: internal_api
+    MongoDbNetwork: internal_api
+    CinderApiNetwork: internal_api
+    CinderIscsiNetwork: storage
+    GlanceApiNetwork: storage
+    GlanceRegistryNetwork: internal_api
+    KeystoneAdminApiNetwork: internal_api
+    KeystonePublicApiNetwork: internal_api
+    NeutronApiNetwork: internal_api
+    HeatApiNetwork: internal_api
+    NovaApiNetwork: internal_api
+    NovaMetadataNetwork: internal_api
+    NovaVncProxyNetwork: internal_api
+    SwiftMgmtNetwork: storage_mgmt
+    SwiftProxyNetwork: storage
+    HorizonNetwork: internal_api
+    MemcachedNetwork: internal_api
+    RabbitMqNetwork: internal_api
+    RedisNetwork: internal_api
+    MysqlNetwork: internal_api
+    CephClusterNetwork: storage_mgmt
+    CephPublicNetwork: storage
+    ControllerHostnameResolveNetwork: internal_api
+    ComputeHostnameResolveNetwork: internal_api
+    BlockStorageHostnameResolveNetwork: internal_api
+    ObjectStorageHostnameResolveNetwork: internal_api
+    CephStorageHostnameResolveNetwork: storage
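
Usage sketch: when updating an overcloud whose Keystone Admin API was originally deployed on the Internal API network, this snippet would be passed alongside the other environment files. A sketch, assuming the `openstack overcloud update stack` command from python-tripleoclient:

    # Interactive package update, preserving the original ServiceNetMap
    openstack overcloud update stack overcloud -i \
      --templates \
      -e environments/updates/update-from-keystone-admin-internal-api.yaml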
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index fa523e83..6ab2501c 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -22,7 +22,7 @@ mkdir -p $timestamp_dir
update_identifier=${update_identifier//[^a-zA-Z0-9-_]/}
# seconds to wait for this node to rejoin the cluster after update
-cluster_start_timeout=360
+cluster_start_timeout=600
galera_sync_timeout=360
timestamp_file="$timestamp_dir/$update_identifier"
@@ -42,109 +42,81 @@ if [[ "$list_updates" == "" ]]; then
fi
pacemaker_status=$(systemctl is-active pacemaker)
+pacemaker_dumpfile=$(mktemp)
if [[ "$pacemaker_status" == "active" ]] ; then
- echo "Checking for and adding missing constraints"
+ echo "Dumping Pacemaker config"
+ pcs cluster cib $pacemaker_dumpfile
+
+ echo "Checking for missing constraints"
if ! pcs constraint order show | grep "start openstack-nova-novncproxy-clone then start openstack-nova-api-clone"; then
- pcs constraint order start openstack-nova-novncproxy-clone then openstack-nova-api-clone
+ pcs -f $pacemaker_dumpfile constraint order start openstack-nova-novncproxy-clone then openstack-nova-api-clone
fi
if ! pcs constraint order show | grep "start rabbitmq-clone then start openstack-keystone-clone"; then
- pcs constraint order start rabbitmq-clone then openstack-keystone-clone
+ pcs -f $pacemaker_dumpfile constraint order start rabbitmq-clone then openstack-keystone-clone
fi
if ! pcs constraint order show | grep "promote galera-master then start openstack-keystone-clone"; then
- pcs constraint order promote galera-master then openstack-keystone-clone
+ pcs -f $pacemaker_dumpfile constraint order promote galera-master then openstack-keystone-clone
fi
if ! pcs constraint order show | grep "start haproxy-clone then start openstack-keystone-clone"; then
- pcs constraint order start haproxy-clone then openstack-keystone-clone
+ pcs -f $pacemaker_dumpfile constraint order start haproxy-clone then openstack-keystone-clone
fi
if ! pcs constraint order show | grep "start memcached-clone then start openstack-keystone-clone"; then
- pcs constraint order start memcached-clone then openstack-keystone-clone
+ pcs -f $pacemaker_dumpfile constraint order start memcached-clone then openstack-keystone-clone
fi
if ! pcs constraint order show | grep "promote redis-master then start openstack-ceilometer-central-clone"; then
- pcs constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false
+ pcs -f $pacemaker_dumpfile constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false
fi
if ! pcs resource defaults | grep "resource-stickiness: INFINITY"; then
- pcs resource defaults resource-stickiness=INFINITY
+ pcs -f $pacemaker_dumpfile resource defaults resource-stickiness=INFINITY
fi
echo "Setting resource start/stop timeouts"
-
- # timeouts for non-openstack services and special cases
- pcs resource update haproxy op start timeout=100s
- pcs resource update haproxy op stop timeout=100s
- # mongod start timeout is also higher, setting only stop timeout
+ SERVICES="
+haproxy
+memcached
+httpd
+neutron-dhcp-agent
+neutron-l3-agent
+neutron-metadata-agent
+neutron-openvswitch-agent
+neutron-server
+openstack-ceilometer-alarm-evaluator
+openstack-ceilometer-alarm-notifier
+openstack-ceilometer-api
+openstack-ceilometer-central
+openstack-ceilometer-collector
+openstack-ceilometer-notification
+openstack-cinder-api
+openstack-cinder-scheduler
+openstack-cinder-volume
+openstack-glance-api
+openstack-glance-registry
+openstack-heat-api
+openstack-heat-api-cfn
+openstack-heat-api-cloudwatch
+openstack-heat-engine
+openstack-keystone
+openstack-nova-api
+openstack-nova-conductor
+openstack-nova-consoleauth
+openstack-nova-novncproxy
+openstack-nova-scheduler"
+ for service in $SERVICES; do
+ pcs -f $pacemaker_dumpfile resource update $service op start timeout=100s op stop timeout=100s
+ done
+ # mongod start timeout is higher, setting only stop timeout
pcs resource update mongod op stop timeout=100s
- # rabbit start timeout is already 100s
- pcs resource update rabbitmq op stop timeout=100s
- pcs resource update memcached op start timeout=100s
- pcs resource update memcached op stop timeout=100s
- pcs resource update httpd op start timeout=100s
- pcs resource update httpd op stop timeout=100s
- # neutron-netns-cleanup stop timeout is 300s, setting only start timeout
- pcs resource update neutron-netns-cleanup op start timeout=100s
- # neutron-ovs-cleanup stop timeout is 300s, setting only start timeout
- pcs resource update neutron-ovs-cleanup op start timeout=100s
-
- # timeouts for openstack services
- pcs resource update neutron-dhcp-agent op start timeout=100s
- pcs resource update neutron-dhcp-agent op stop timeout=100s
- pcs resource update neutron-l3-agent op start timeout=100s
- pcs resource update neutron-l3-agent op stop timeout=100s
- pcs resource update neutron-metadata-agent op start timeout=100s
- pcs resource update neutron-metadata-agent op stop timeout=100s
- pcs resource update neutron-openvswitch-agent op start timeout=100s
- pcs resource update neutron-openvswitch-agent op stop timeout=100s
- pcs resource update neutron-server op start timeout=100s
- pcs resource update neutron-server op stop timeout=100s
- pcs resource update openstack-ceilometer-alarm-evaluator op start timeout=100s
- pcs resource update openstack-ceilometer-alarm-evaluator op stop timeout=100s
- pcs resource update openstack-ceilometer-alarm-notifier op start timeout=100s
- pcs resource update openstack-ceilometer-alarm-notifier op stop timeout=100s
- pcs resource update openstack-ceilometer-api op start timeout=100s
- pcs resource update openstack-ceilometer-api op stop timeout=100s
- pcs resource update openstack-ceilometer-central op start timeout=100s
- pcs resource update openstack-ceilometer-central op stop timeout=100s
- pcs resource update openstack-ceilometer-collector op start timeout=100s
- pcs resource update openstack-ceilometer-collector op stop timeout=100s
- pcs resource update openstack-ceilometer-notification op start timeout=100s
- pcs resource update openstack-ceilometer-notification op stop timeout=100s
- pcs resource update openstack-cinder-api op start timeout=100s
- pcs resource update openstack-cinder-api op stop timeout=100s
- pcs resource update openstack-cinder-scheduler op start timeout=100s
- pcs resource update openstack-cinder-scheduler op stop timeout=100s
- pcs resource update openstack-cinder-volume op start timeout=100s
- pcs resource update openstack-cinder-volume op stop timeout=100s
- pcs resource update openstack-glance-api op start timeout=100s
- pcs resource update openstack-glance-api op stop timeout=100s
- pcs resource update openstack-glance-registry op start timeout=100s
- pcs resource update openstack-glance-registry op stop timeout=100s
- pcs resource update openstack-heat-api op start timeout=100s
- pcs resource update openstack-heat-api op stop timeout=100s
- pcs resource update openstack-heat-api-cfn op start timeout=100s
- pcs resource update openstack-heat-api-cfn op stop timeout=100s
- pcs resource update openstack-heat-api-cloudwatch op start timeout=100s
- pcs resource update openstack-heat-api-cloudwatch op stop timeout=100s
- pcs resource update openstack-heat-engine op start timeout=100s
- pcs resource update openstack-heat-engine op stop timeout=100s
- pcs resource update openstack-keystone op start timeout=100s
- pcs resource update openstack-keystone op stop timeout=100s
- pcs resource update openstack-nova-api op start timeout=100s
- pcs resource update openstack-nova-api op stop timeout=100s
- pcs resource update openstack-nova-conductor op start timeout=100s
- pcs resource update openstack-nova-conductor op stop timeout=100s
- pcs resource update openstack-nova-consoleauth op start timeout=100s
- pcs resource update openstack-nova-consoleauth op stop timeout=100s
- pcs resource update openstack-nova-novncproxy op start timeout=100s
- pcs resource update openstack-nova-novncproxy op stop timeout=100s
- pcs resource update openstack-nova-scheduler op start timeout=100s
- pcs resource update openstack-nova-scheduler op stop timeout=100s
+
+ echo "Applying new Pacemaker config"
+ pcs cluster cib-push $pacemaker_dumpfile
echo "Pacemaker running, stopping cluster node and doing full package update"
node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
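
The change above replaces dozens of live `pcs` calls with offline edits against a dumped CIB, so Pacemaker applies them as one atomic transition instead of reacting to each command. A minimal sketch of the pattern, assuming `pcs` is installed and the cluster is running:

    #!/bin/bash
    # Dump the CIB, edit the file offline with -f, then push it back once.
    cib=$(mktemp)
    pcs cluster cib "$cib"
    pcs -f "$cib" resource update haproxy op start timeout=100s op stop timeout=100s
    pcs cluster cib-push "$cib"
    rm -f "$cib"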
diff --git a/overcloud-without-mergepy.yaml b/overcloud-without-mergepy.yaml
index f679c6bb..8efdc173 100644
--- a/overcloud-without-mergepy.yaml
+++ b/overcloud-without-mergepy.yaml
@@ -457,6 +457,14 @@ parameters:
     type: string
     constraints:
       - allowed_values: [ 'basic', 'cadf' ]
+  ManageFirewall:
+    default: false
+    description: Whether to manage IPtables rules.
+    type: boolean
+  PurgeFirewallRules:
+    default: false
+    description: Whether IPtables rules should be purged before setting up the new ones.
+    type: boolean
   MysqlInnodbBufferPoolSize:
     description: >
       Specifies the size of the buffer pool in megabytes. Setting to
@@ -811,6 +819,8 @@ resources:
           ControllerExtraConfig: {get_param: controllerExtraConfig}
           Debug: {get_param: Debug}
           EnableFencing: {get_param: EnableFencing}
+          ManageFirewall: {get_param: ManageFirewall}
+          PurgeFirewallRules: {get_param: PurgeFirewallRules}
           EnableGalera: {get_param: EnableGalera}
           EnableCephStorage: {get_param: ControllerEnableCephStorage}
           EnableSwiftStorage: {get_param: ControllerEnableSwiftStorage}
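
Both parameters default to false, so firewall management stays opt-in. A hypothetical environment file flipping both, written from a shell (the file name is illustrative):

    # purge-and-manage.yaml (hypothetical name): purge pre-existing rules,
    # then let Puppet manage the rule set defined in hieradata.
    cat > purge-and-manage.yaml <<'EOF'
    parameters:
      ManageFirewall: true
      PurgeFirewallRules: true
    EOF
    openstack overcloud deploy --templates -e purge-and-manage.yaml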
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index 0d968504..0bf0fde4 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -54,7 +54,11 @@ parameters:
     description: |
       Role specific additional hiera configuration to inject into the cluster.
     type: json
-
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat actions on which to apply network configuration changes
+    default: ['CREATE']
resources:
CephStorage:
@@ -126,6 +130,7 @@ resources:
     properties:
       config: {get_resource: NetworkConfig}
       server: {get_resource: CephStorage}
+      actions: {get_param: NetworkDeploymentActions}
CephStorageDeployment:
type: OS::Heat::StructuredDeployment
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index b536418d..b500e43b 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -107,6 +107,11 @@ parameters:
   MysqlVirtualIP:
     type: string
     default: ''
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat actions on which to apply network configuration changes
+    default: ['CREATE']
resources:
BlockStorage:
@@ -178,6 +183,7 @@ resources:
     properties:
       config: {get_resource: NetworkConfig}
       server: {get_resource: BlockStorage}
+      actions: {get_param: NetworkDeploymentActions}
BlockStorageDeployment:
type: OS::Heat::StructuredDeployment
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index c147a0f7..6ae39132 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -273,6 +273,11 @@ parameters:
   Hostname:
     type: string
     default: '' # Defaults to Heat created hostname
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat actions on which to apply network configuration changes
+    default: ['CREATE']
resources:
@@ -347,6 +352,7 @@ resources:
     properties:
       config: {get_resource: NetworkConfig}
       server: {get_resource: NovaCompute}
+      actions: {get_param: NetworkDeploymentActions}
       input_values:
         bridge_name: {get_param: NeutronPhysicalBridge}
         interface_name: {get_param: NeutronPublicInterface}
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index 2e8c3126..d47e013e 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -278,6 +278,14 @@ parameters:
     type: string
     default: 'regionOne'
     description: Keystone region for endpoint
+  ManageFirewall:
+    default: false
+    description: Whether to manage IPtables rules.
+    type: boolean
+  PurgeFirewallRules:
+    default: false
+    description: Whether IPtables rules should be purged before setting up the new ones.
+    type: boolean
   MysqlClusterUniquePart:
     description: A unique identifier of the MySQL cluster the controller is in.
     type: string
@@ -606,6 +614,11 @@ parameters:
   Hostname:
     type: string
     default: '' # Defaults to Heat created hostname
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat actions on which to apply network configuration changes
+    default: ['CREATE']
resources:
@@ -702,6 +715,7 @@ resources:
     properties:
       config: {get_resource: NetworkConfig}
       server: {get_resource: Controller}
+      actions: {get_param: NetworkDeploymentActions}
       input_values:
         bridge_name: br-ex
         interface_name: {get_param: NeutronPublicInterface}
@@ -819,6 +833,8 @@ resources:
         enable_galera: {get_param: EnableGalera}
         enable_ceph_storage: {get_param: EnableCephStorage}
         enable_swift_storage: {get_param: EnableSwiftStorage}
+        manage_firewall: {get_param: ManageFirewall}
+        purge_firewall_rules: {get_param: PurgeFirewallRules}
         mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize}
         mysql_max_connections: {get_param: MysqlMaxConnections}
         mysql_root_password: {get_param: MysqlRootPassword}
@@ -1274,6 +1290,9 @@ resources:
                 # Redis
                 redis::bind: {get_input: redis_network}
                 redis_vip: {get_input: redis_vip}
+                # Firewall
+                tripleo::firewall::manage_firewall: {get_input: manage_firewall}
+                tripleo::firewall::purge_firewall_rules: {get_input: purge_firewall_rules}
                 # Misc
                 memcached::listen_ip: {get_input: memcached_network}
                 neutron_public_interface_ip: {get_input: neutron_public_interface_ip}
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index 173020f8..fa8dcc81 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -12,6 +12,12 @@ nova::compute::libvirt::migration_support: true
nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
+# Changing the default from 512MB. The current templates cannot deploy
+# overclouds with swap. On an idle compute node, we see ~1024MB of RAM
+# used. 2048 is suggested to account for other possible operations, for
+# example Open vSwitch.
+nova::compute::reserved_host_memory: 2048
+
ceilometer::agent::auth::auth_tenant_name: 'service'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
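
In puppet-nova, `nova::compute::reserved_host_memory` maps to `reserved_host_memory_mb` in nova.conf. A spot-check on a deployed compute node, assuming crudini is installed there:

    # Expect "2048" after this change (the previous default was 512).
    sudo crudini --get /etc/nova/nova.conf DEFAULT reserved_host_memory_mb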
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index a4914c0e..f42ddf6c 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -127,3 +127,109 @@ tripleo::loadbalancer::heat_cfn: true
tripleo::loadbalancer::horizon: true
controller_classes: []
+# firewall
+tripleo::firewall::firewall_rules:
+  '101 mongodb_config':
+    port: 27019
+  '102 mongodb_sharding':
+    port: 27018
+  '103 mongod':
+    port: 27017
+  '104 mysql galera':
+    port:
+      - 873
+      - 3306
+      - 4444
+      - 4567
+      - 4568
+      - 9200
+  '105 ntp':
+    port: 123
+    proto: udp
+  '106 vrrp':
+    proto: vrrp
+  '107 haproxy stats':
+    port: 1993
+  '108 redis':
+    port:
+      - 6379
+      - 26379
+  '109 rabbitmq':
+    port:
+      - 5672
+      - 35672
+  '110 ceph':
+    port:
+      - 6789
+      - '6800-6810'
+  '111 keystone':
+    port:
+      - 5000
+      - 13000
+      - 35357
+      - 13357
+  '112 glance':
+    port:
+      - 9292
+      - 9191
+      - 13292
+  '113 nova':
+    port:
+      - 6080
+      - 13080
+      - 8773
+      - 3773
+      - 8774
+      - 13774
+      - 8775
+  '114 neutron server':
+    port:
+      - 9696
+      - 13696
+  '115 neutron dhcp input':
+    proto: 'udp'
+    port: 67
+  '116 neutron dhcp output':
+    proto: 'udp'
+    chain: 'OUTPUT'
+    port: 68
+  '118 neutron vxlan networks':
+    proto: 'udp'
+    port: 4789
+  '119 cinder':
+    port:
+      - 8776
+      - 13776
+  '120 iscsi initiator':
+    port: 3260
+  '121 memcached':
+    port: 11211
+  '122 swift proxy':
+    port:
+      - 8080
+      - 13808
+  '123 swift storage':
+    port:
+      - 873
+      - 6000
+      - 6001
+      - 6002
+  '124 ceilometer':
+    port:
+      - 8777
+      - 13777
+  '125 heat':
+    port:
+      - 8000
+      - 13800
+      - 8003
+      - 13003
+      - 8004
+      - 13004
+  '126 horizon':
+    port:
+      - 80
+      - 443
+  '127 snmp':
+    port: 161
+    proto: 'udp'
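
Each hash key above becomes an iptables rule tagged with a comment by puppetlabs-firewall. A spot-check on a controller after deploying with ManageFirewall: true:

    # Rule names such as "103 mongod" appear as iptables comments.
    sudo iptables -nL INPUT | grep -E '103 mongod|121 memcached'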
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
index 51f5e88d..7f8970cc 100644
--- a/puppet/manifests/overcloud_cephstorage.pp
+++ b/puppet/manifests/overcloud_cephstorage.pp
@@ -14,6 +14,7 @@
# under the License.
include ::tripleo::packages
+include ::tripleo::firewall
create_resources(sysctl::value, hiera('sysctl_settings'), {})
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index cd41cc79..58182346 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -14,6 +14,7 @@
# under the License.
include ::tripleo::packages
+include ::tripleo::firewall
create_resources(sysctl::value, hiera('sysctl_settings'), {})
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 570c43ba..f758c55a 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -14,6 +14,7 @@
# under the License.
include ::tripleo::packages
+include ::tripleo::firewall
if hiera('step') >= 1 {
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 2a3f1f92..95b7992c 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -19,6 +19,7 @@ Pcmk_resource <| |> {
}
include ::tripleo::packages
+include ::tripleo::firewall
if $::hostname == downcase(hiera('bootstrap_nodeid')) {
$pacemaker_master = true
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
index 5f0b4c82..1eabddf1 100644
--- a/puppet/manifests/overcloud_object.pp
+++ b/puppet/manifests/overcloud_object.pp
@@ -14,6 +14,7 @@
# under the License.
include ::tripleo::packages
+include ::tripleo::firewall
create_resources(sysctl::value, hiera('sysctl_settings'), {})
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
index 7f24959a..2bdd8a9c 100644
--- a/puppet/manifests/overcloud_volume.pp
+++ b/puppet/manifests/overcloud_volume.pp
@@ -14,6 +14,7 @@
# under the License.
include ::tripleo::packages
+include ::tripleo::firewall
create_resources(sysctl::value, hiera('sysctl_settings'), {})
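
With ::tripleo::firewall now included on every role, the managed rules can be listed through Puppet's resource abstraction layer. A sketch, assuming the puppetlabs-firewall module (which provides the firewall type) is installed on the node:

    # Enumerate firewall rules as Puppet sees them on any overcloud node.
    sudo puppet resource firewall | head -n 20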
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 3d9b9018..19a7c7a3 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -77,7 +77,11 @@ parameters:
     description: |
       Role specific additional hiera configuration to inject into the cluster.
     type: json
-
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat actions on which to apply network configuration changes
+    default: ['CREATE']
resources:
@@ -149,6 +153,7 @@ resources:
     properties:
       config: {get_resource: NetworkConfig}
       server: {get_resource: SwiftStorage}
+      actions: {get_param: NetworkDeploymentActions}
SwiftStorageHieraConfig:
type: OS::Heat::StructuredConfig
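
NetworkDeploymentActions defaults to ['CREATE'] in every role template above, so network configuration is applied only when a node is first created and a stack update cannot reconfigure NICs under a running node. A hypothetical override for operators who do want re-application on update (file name illustrative; parameter_defaults is used because the parameter lives in the nested role templates):

    # network-actions.yaml (hypothetical name)
    cat > network-actions.yaml <<'EOF'
    parameter_defaults:
      NetworkDeploymentActions: ['CREATE', 'UPDATE']
    EOF
    openstack overcloud deploy --templates -e network-actions.yaml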