-rw-r--r--  environments/ceph-radosgw.yaml | 5
-rw-r--r--  environments/hyperconverged-ceph.yaml | 12
-rw-r--r--  environments/tls-endpoints-public-dns.yaml | 3
-rw-r--r--  environments/tls-endpoints-public-ip.yaml | 3
-rw-r--r--  extraconfig/all_nodes/mac_hostname.j2.yaml (renamed from extraconfig/all_nodes/mac_hostname.yaml) | 52
-rw-r--r--  extraconfig/all_nodes/random_string.j2.yaml (renamed from extraconfig/all_nodes/random_string.yaml) | 14
-rw-r--r--  extraconfig/all_nodes/swap-partition.j2.yaml | 44
-rw-r--r--  extraconfig/all_nodes/swap-partition.yaml | 86
-rw-r--r--  extraconfig/all_nodes/swap.j2.yaml | 58
-rw-r--r--  extraconfig/all_nodes/swap.yaml | 104
-rwxr-xr-x  extraconfig/tasks/major_upgrade_ceph_mon.sh | 8
-rw-r--r--  extraconfig/tasks/major_upgrade_ceph_storage.sh | 2
-rwxr-xr-x  extraconfig/tasks/major_upgrade_check.sh | 104
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh | 68
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml | 22
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml (renamed from extraconfig/tasks/major_upgrade_pacemaker_init.yaml) | 78
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml | 12
-rw-r--r--  network/endpoints/endpoint_data.yaml | 15
-rw-r--r--  network/endpoints/endpoint_map.yaml | 249
-rw-r--r--  network/ports/external_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/internal_api_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/management_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/storage_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/storage_mgmt_from_pool_v6.yaml | 2
-rw-r--r--  network/ports/tenant_from_pool_v6.yaml | 2
-rw-r--r--  network/service_net_map.yaml | 1
-rw-r--r--  overcloud-resource-registry-puppet.yaml | 1
-rw-r--r--  overcloud.j2.yaml | 338
-rw-r--r--  puppet/ceph-storage.yaml | 5
-rw-r--r--  puppet/cinder-storage.yaml | 5
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml | 14
-rw-r--r--  puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml | 26
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml | 12
-rw-r--r--  puppet/post.j2.yaml | 139
-rw-r--r--  puppet/post.yaml | 644
-rw-r--r--  puppet/services/aodh-api.yaml | 6
-rw-r--r--  puppet/services/ceph-rgw.yaml | 77
-rw-r--r--  puppet/services/glance-api.yaml | 1
-rw-r--r--  puppet/services/keystone.yaml | 1
-rw-r--r--  puppet/services/manila-api.yaml | 1
-rw-r--r--  puppet/swift-storage.yaml | 5
-rw-r--r--  roles_data.yaml | 11
42 files changed, 890 insertions, 1348 deletions
diff --git a/environments/ceph-radosgw.yaml b/environments/ceph-radosgw.yaml
new file mode 100644
index 00000000..a9221a2a
--- /dev/null
+++ b/environments/ceph-radosgw.yaml
@@ -0,0 +1,5 @@
+resource_registry:
+ OS::TripleO::Services::CephRgw: ../puppet/services/ceph-rgw.yaml
+ OS::TripleO::Services::SwiftProxy: OS::Heat::None
+ OS::TripleO::Services::SwiftStorage: OS::Heat::None
+ OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
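As a usage sketch (assumed, not part of the commit): this environment is meant to be passed at deploy time, e.g. 'openstack overcloud deploy --templates -e environments/ceph-radosgw.yaml', alongside an environment that enables the Ceph services, so that CephRgw takes over object storage duties from the three Swift services mapped to OS::Heat::None above.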
diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml
new file mode 100644
index 00000000..87ebb1d7
--- /dev/null
+++ b/environments/hyperconverged-ceph.yaml
@@ -0,0 +1,12 @@
+# If using an isolated StorageMgmt network, this will have to be uncommented to
+# plug the network on the compute nodes as well.
+#resource_registry:
+# OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+
+# Should match the default list of services for the compute node plus CephOSD
+parameter_defaults:
+ ComputeServices:
+ - OS::TripleO::Services::CephOSD
+
+parameter_merge_strategies:
+ ComputeServices: merge
\ No newline at end of file
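For clarity, a sketch of the effective ComputeServices once the client applies the merge strategy above; the default compute service names are assumptions, abbreviated here, not taken from this commit:

  parameter_defaults:
    ComputeServices:
      - OS::TripleO::Services::NovaCompute            # assumed default compute service
      - OS::TripleO::Services::ComputeNeutronOvsAgent # assumed default compute service
      # ...remaining default compute services...
      - OS::TripleO::Services::CephOSD                # appended by this environment via the merge strategy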
diff --git a/environments/tls-endpoints-public-dns.yaml b/environments/tls-endpoints-public-dns.yaml
index 7c8e850c..0a0996d3 100644
--- a/environments/tls-endpoints-public-dns.yaml
+++ b/environments/tls-endpoints-public-dns.yaml
@@ -8,6 +8,9 @@ parameter_defaults:
CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+ CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
diff --git a/environments/tls-endpoints-public-ip.yaml b/environments/tls-endpoints-public-ip.yaml
index 80595c6c..5a2b8839 100644
--- a/environments/tls-endpoints-public-ip.yaml
+++ b/environments/tls-endpoints-public-ip.yaml
@@ -8,6 +8,9 @@ parameter_defaults:
CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'IP_ADDRESS'}
+ CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
CinderPublic: {protocol: 'https', port: '13776', host: 'IP_ADDRESS'}
diff --git a/extraconfig/all_nodes/mac_hostname.yaml b/extraconfig/all_nodes/mac_hostname.j2.yaml
index 7d8704e3..af6aa7f7 100644
--- a/extraconfig/all_nodes/mac_hostname.yaml
+++ b/extraconfig/all_nodes/mac_hostname.j2.yaml
@@ -9,15 +9,7 @@ description: >
# out-of-tree templates they may require additional parameters if the
# in-tree templates add a new role.
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
# Note extra parameters can be defined, then passed data via the
# environment parameter_defaults, without modifying the parent template
@@ -37,47 +29,17 @@ resources:
# FIXME(shardy): Long term it'd be better if Heat SoftwareDeployments accepted
# list instead of a map, then we could join the lists of servers into one
# deployment instead of requiring one deployment per-role.
- CollectMacDeploymentsController:
+{% for role in roles %}
+ CollectMacDeployments{{role.name}}:
type: OS::Heat::SoftwareDeployments
properties:
name: CollectMacDeploymentsController
- servers: {get_param: controller_servers}
- config: {get_resource: CollectMacConfig}
- actions: ['CREATE'] # Only do this on CREATE
-
- CollectMacDeploymentsCompute:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: CollectMacDeploymentsCompute
- servers: {get_param: compute_servers}
- config: {get_resource: CollectMacConfig}
- actions: ['CREATE'] # Only do this on CREATE
-
- CollectMacDeploymentsBlockStorage:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: CollectMacDeploymentsBlockStorage
- servers: {get_param: blockstorage_servers}
- config: {get_resource: CollectMacConfig}
- actions: ['CREATE'] # Only do this on CREATE
-
- CollectMacDeploymentsObjectStorage:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: CollectMacDeploymentsObjectStorage
- servers: {get_param: objectstorage_servers}
- config: {get_resource: CollectMacConfig}
- actions: ['CREATE'] # Only do this on CREATE
-
- CollectMacDeploymentsCephStorage:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: CollectMacDeploymentsCephStorage
- servers: {get_param: cephstorage_servers}
+ servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
+{% endfor %}
- # Now we distribute all-the-macs to all nodes
+ # Now we distribute all-the-macs to all Controller nodes
DistributeMacConfig:
type: OS::Heat::SoftwareConfig
properties:
@@ -101,7 +63,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: DistributeMacDeploymentsController
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: DistributeMacConfig}
input_values:
# FIXME(shardy): It'd be more convenient if we could join these
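As a rendering sketch, the Jinja2 loop above expands once per role in roles_data.yaml; for an assumed Compute role it produces the following (the name property stays the literal string from the template, since it is not templated in the loop):

  CollectMacDeploymentsCompute:
    type: OS::Heat::SoftwareDeployments
    properties:
      name: CollectMacDeploymentsController
      servers: {get_param: [servers, Compute]}
      config: {get_resource: CollectMacConfig}
      actions: ['CREATE'] # Only do this on CREATE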
diff --git a/extraconfig/all_nodes/random_string.yaml b/extraconfig/all_nodes/random_string.j2.yaml
index d38701e2..1c42cb85 100644
--- a/extraconfig/all_nodes/random_string.yaml
+++ b/extraconfig/all_nodes/random_string.j2.yaml
@@ -10,15 +10,7 @@ description: >
# out-of-tree templates they may require additional parameters if the
# in-tree templates add a new role.
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
# Note extra parameters can be defined, then passed data via the
# environment parameter_defaults, without modifying the parent template
@@ -42,7 +34,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: RandomDeploymentsController
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: RandomConfig}
actions: ['CREATE'] # Only do this on CREATE
input_values:
@@ -52,7 +44,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: RandomDeploymentsCompute
- servers: {get_param: compute_servers}
+ servers: {get_param: [servers, Compute]}
config: {get_resource: RandomConfig}
actions: ['CREATE'] # Only do this on CREATE
input_values:
diff --git a/extraconfig/all_nodes/swap-partition.j2.yaml b/extraconfig/all_nodes/swap-partition.j2.yaml
new file mode 100644
index 00000000..014a96a1
--- /dev/null
+++ b/extraconfig/all_nodes/swap-partition.j2.yaml
@@ -0,0 +1,44 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Extra config to add swap space to nodes.
+
+# Parameters passed from the parent template - note if you maintain
+# out-of-tree templates they may require additional parameters if the
+# in-tree templates add a new role.
+parameters:
+ servers:
+ type: json
+ swap_partition_label:
+ type: string
+ description: Swap partition label
+ default: 'swap1'
+
+
+resources:
+
+ SwapConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ set -eux
+ swap_partition=$(realpath /dev/disk/by-label/$swap_partition_label)
+ swapon $swap_partition
+ echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab
+ inputs:
+ - name: swap_partition_label
+ description: Swap partition label
+ default: 'swap1'
+
+{% for role in roles %}
+ {{role.name}}SwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ swap_partition_label: {get_param: swap_partition_label}
+ actions: ["CREATE"]
+{% endfor %}
diff --git a/extraconfig/all_nodes/swap-partition.yaml b/extraconfig/all_nodes/swap-partition.yaml
deleted file mode 100644
index e6fa9eca..00000000
--- a/extraconfig/all_nodes/swap-partition.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
- Extra config to add swap space to nodes.
-
-# Parameters passed from the parent template - note if you maintain
-# out-of-tree templates they may require additional parameters if the
-# in-tree templates add a new role.
-parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
- type: json
- swap_partition_label:
- type: string
- description: Swap partition label
- default: 'swap1'
-
-
-resources:
-
- SwapConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: |
- #!/bin/bash
- set -eux
- swap_partition=$(realpath /dev/disk/by-label/$swap_partition_label)
- swapon $swap_partition
- echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab
- inputs:
- - name: swap_partition_label
- description: Swap partition label
- default: 'swap1'
-
- ControllerSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: controller_servers}
- input_values:
- swap_partition_label: {get_param: swap_partition_label}
- actions: ["CREATE"]
-
- ComputeSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: compute_servers}
- input_values:
- swap_partition_label: {get_param: swap_partition_label}
- actions: ["CREATE"]
-
- BlockStorageSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: blockstorage_servers}
- input_values:
- swap_partition_label: {get_param: swap_partition_label}
- actions: ["CREATE"]
-
- ObjectStorageSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: objectstorage_servers}
- input_values:
- swap_partition_label: {get_param: swap_partition_label}
- actions: ["CREATE"]
-
- CephStorageSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: cephstorage_servers}
- input_values:
- swap_partition_label: {get_param: swap_partition_label}
- actions: ["CREATE"]
diff --git a/extraconfig/all_nodes/swap.j2.yaml b/extraconfig/all_nodes/swap.j2.yaml
new file mode 100644
index 00000000..97149080
--- /dev/null
+++ b/extraconfig/all_nodes/swap.j2.yaml
@@ -0,0 +1,58 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Extra config to add swap space to nodes.
+
+# Parameters passed from the parent template - note if you maintain
+# out-of-tree templates they may require additional parameters if the
+# in-tree templates add a new role.
+parameters:
+ servers:
+ type: json
+ swap_size_megabytes:
+ type: string
+ description: Amount of swap space to allocate in megabytes
+ default: '4096'
+ swap_path:
+ type: string
+ description: Full path to location of swap file
+ default: '/swap'
+
+
+resources:
+
+ SwapConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ set -eux
+ if [ ! -f $swap_path ]; then
+ dd if=/dev/zero of=$swap_path count=$swap_size_megabytes bs=1M
+ chmod 0600 $swap_path
+ mkswap $swap_path
+ swapon $swap_path
+ else
+ echo "$swap_path already exists"
+ fi
+ echo "$swap_path swap swap defaults 0 0" >> /etc/fstab
+ inputs:
+ - name: swap_size_megabytes
+ description: Amount of swap space to allocate in megabytes
+ default: '4096'
+ - name: swap_path
+ description: Full path to location of swap file
+ default: '/swap'
+
+{% for role in roles %}
+ {{role.name}}SwapDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: SwapConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ swap_size_megabytes: {get_param: swap_size_megabytes}
+ swap_path: {get_param: swap_path}
+ actions: ["CREATE"]
+{% endfor %}
diff --git a/extraconfig/all_nodes/swap.yaml b/extraconfig/all_nodes/swap.yaml
deleted file mode 100644
index 5383ffc9..00000000
--- a/extraconfig/all_nodes/swap.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
- Extra config to add swap space to nodes.
-
-# Parameters passed from the parent template - note if you maintain
-# out-of-tree templates they may require additional parameters if the
-# in-tree templates add a new role.
-parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
- type: json
- swap_size_megabytes:
- type: string
- description: Amount of swap space to allocate in megabytes
- default: '4096'
- swap_path:
- type: string
- description: Full path to location of swap file
- default: '/swap'
-
-
-resources:
-
- SwapConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: |
- #!/bin/bash
- set -eux
- if [ ! -f $swap_path ]; then
- dd if=/dev/zero of=$swap_path count=$swap_size_megabytes bs=1M
- chmod 0600 $swap_path
- mkswap $swap_path
- swapon $swap_path
- else
- echo "$swap_path already exists"
- fi
- echo "$swap_path swap swap defaults 0 0" >> /etc/fstab
- inputs:
- - name: swap_size_megabytes
- description: Amount of swap space to allocate in megabytes
- default: '4096'
- - name: swap_path
- description: Full path to location of swap file
- default: '/swap'
-
- ControllerSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: controller_servers}
- input_values:
- swap_size_megabytes: {get_param: swap_size_megabytes}
- swap_path: {get_param: swap_path}
- actions: ["CREATE"]
-
- ComputeSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: compute_servers}
- input_values:
- swap_size_megabytes: {get_param: swap_size_megabytes}
- swap_path: {get_param: swap_path}
- actions: ["CREATE"]
-
- BlockStorageSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: blockstorage_servers}
- input_values:
- swap_size_megabytes: {get_param: swap_size_megabytes}
- swap_path: {get_param: swap_path}
- actions: ["CREATE"]
-
- ObjectStorageSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: objectstorage_servers}
- input_values:
- swap_size_megabytes: {get_param: swap_size_megabytes}
- swap_path: {get_param: swap_path}
- actions: ["CREATE"]
-
- CephStorageSwapDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: SwapConfig}
- servers: {get_param: cephstorage_servers}
- input_values:
- swap_size_megabytes: {get_param: swap_size_megabytes}
- swap_path: {get_param: swap_path}
- actions: ["CREATE"]
diff --git a/extraconfig/tasks/major_upgrade_ceph_mon.sh b/extraconfig/tasks/major_upgrade_ceph_mon.sh
index b76dd7c3..21a2b5bc 100755
--- a/extraconfig/tasks/major_upgrade_ceph_mon.sh
+++ b/extraconfig/tasks/major_upgrade_ceph_mon.sh
@@ -18,13 +18,13 @@ if ! [[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then
fi
CEPH_STATUS=$(ceph health | awk '{print $1}')
-if [ ${CEPH_STATUS} = HEALTH_ERR ]; do
+if [ ${CEPH_STATUS} = HEALTH_ERR ]; then
echo ERROR: Ceph cluster status is HEALTH_ERR, cannot be upgraded
exit 1
fi
# Useful when upgrading with OSDs num < replica size
-if [ $ignore_ceph_upgrade_warnings != "true" ]; then
+if [ ${ignore_ceph_upgrade_warnings:-false} != "true" ]; then
timeout 300 bash -c "while [ ${CEPH_STATUS} != HEALTH_OK ]; do
echo WARNING: Waiting for Ceph cluster status to go HEALTH_OK;
sleep 30;
@@ -44,7 +44,7 @@ timeout 60 bash -c "while kill -0 ${MON_PID} 2> /dev/null; do
done"
# Update to Jewel
-yum -y -q update ceph-mon
+yum -y -q update ceph-mon ceph
# Restart/Exit if not on Jewel, only in that case we need the changes
UPDATED_VERSION=$(ceph --version | awk '{print $3}')
@@ -54,7 +54,7 @@ if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then
elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
# RPM could own some of these but we can't take risks on the pre-existing files
for d in /var/lib/ceph/mon /var/log/ceph /var/run/ceph /etc/ceph; do
- chown -R ceph:ceph $d
+ chown -R ceph:ceph $d || echo WARNING: chown of $d failed
done
# Replay udev events with newer rules
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
index 03a1c1c2..dc80a724 100644
--- a/extraconfig/tasks/major_upgrade_ceph_storage.sh
+++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh
@@ -63,7 +63,7 @@ if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then
elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
# RPM could own some of these but we can't take risks on the pre-existing files
for d in /var/lib/ceph/osd /var/log/ceph /var/run/ceph /etc/ceph; do
- chown -R ceph:ceph $d
+ chown -R ceph:ceph $d || echo WARNING: chown of $d failed
done
# Replay udev events with newer rules
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
new file mode 100755
index 00000000..dc7ec71a
--- /dev/null
+++ b/extraconfig/tasks/major_upgrade_check.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+set -eu
+
+check_cluster()
+{
+ if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
+ echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
+ exit 1
+ fi
+}
+
+check_pcsd()
+{
+ if pcs status 2>&1 | grep -E 'Offline'; then
+ echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
+ exit 1
+ fi
+}
+
+check_disk_for_mysql_dump()
+{
+ # Where to backup current database if mysql need to be upgraded
+ MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
+ MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
+ # Spare disk ratio for extra safety
+ MYSQL_BACKUP_SIZE_RATIO=1.2
+
+ # Shall we upgrade mysql data directory during the stack upgrade?
+ if [ "$mariadb_do_major_upgrade" = "auto" ]; then
+ ret=$(is_mysql_upgrade_needed)
+ if [ $ret = "1" ]; then
+ DO_MYSQL_UPGRADE=1
+ else
+ DO_MYSQL_UPGRADE=0
+ fi
+ echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
+ elif [ "$mariadb_do_major_upgrade" = "no" ]; then
+ DO_MYSQL_UPGRADE=0
+ else
+ DO_MYSQL_UPGRADE=1
+ fi
+
+ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+
+ if [ -d "$MYSQL_BACKUP_DIR" ]; then
+ echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
+ exit 1
+ fi
+ mkdir "$MYSQL_BACKUP_DIR"
+ if [ $? -ne 0 ]; then
+ echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
+ exit 1
+ fi
+
+ # the /root/.my.cnf is needed because we set the mysql root
+ # password from liberty onwards
+ backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
+ # While not ideal, this step allows us to calculate exactly how much space the dump
+ # will need. Our main goal here is avoiding any chance of corruption due to disk space
+ # exhaustion
+ backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
+ database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
+ free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
+
+ # we need at least space for a new mysql database + dump of the existing one,
+ # times a small factor for additional safety room
+ # note: bash doesn't do floating point math or floats in if statements,
+ # so use python to apply the ratio and cast it back to integer
+ required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
+ if [ $required_space -ge $free_space ]; then
+ echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
+ exit 1
+ fi
+ fi
+ fi
+}
+
+check_python_rpm()
+{
+ # If for some reason rpm-python are missing we want to error out early enough
+ if ! rpm -q rpm-python &> /dev/null; then
+ echo_error "ERROR: upgrade cannot start without rpm-python installed"
+ exit 1
+ fi
+}
+
+check_clean_cluster()
+{
+ if crm_mon -1 | grep -A3 Failed; then
+ echo_error "ERROR: upgrade cannot start with failed resources on the cluster. Clean them up before starting: pcs resource cleanup."
+ exit 1
+ fi
+}
+
+check_galera_root_password()
+{
+ # BZ: 1357112
+ if [ ! -e /root/.my.cnf ]; then
+ echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
+ exit 1
+ fi
+}
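As a worked example of the sizing rule in check_disk_for_mysql_dump (figures assumed for illustration): with /var/lib/mysql at 10 GiB (10737418240 bytes) and a dump measuring 2 GiB (2147483648 bytes), required_space = int((10737418240 + 2147483648) * 1.2) = 15461882265 bytes, i.e. roughly 14.4 GiB must be free on the filesystem backing /var/tmp/mysql_upgrade_osp before the upgrade may proceed.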
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
index 0b702630..e81ca086 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
@@ -4,11 +4,12 @@ set -eu
cluster_sync_timeout=1800
-if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
- echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
- exit 1
-fi
-
+check_cluster
+check_pcsd
+check_clean_cluster
+check_python_rpm
+check_galera_root_password
+check_disk_for_mysql_dump
# We want to disable fencing during the cluster --stop as it might fence
# nodes where a service fails to stop, which could be fatal during an upgrade
@@ -17,12 +18,6 @@ fi
STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
pcs property set stonith-enabled=false
-# If for some reason rpm-python are missing we want to error out early enough
-if ! rpm -q rpm-python &> /dev/null; then
- echo_error "ERROR: upgrade cannot start without rpm-python installed"
- exit 1
-fi
-
# In case the mysql package is updated, the database on disk must be
# upgraded as well. This typically needs to happen during major
# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
@@ -35,59 +30,8 @@ fi
# on mysql package versioning, but this can be overridden manually
# to support specific upgrade scenario
-# Where to backup current database if mysql need to be upgraded
-MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
-MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
-# Spare disk ratio for extra safety
-MYSQL_BACKUP_SIZE_RATIO=1.2
-
-# Shall we upgrade mysql data directory during the stack upgrade?
-if [ "$mariadb_do_major_upgrade" = "auto" ]; then
- ret=$(is_mysql_upgrade_needed)
- if [ $ret = "1" ]; then
- DO_MYSQL_UPGRADE=1
- else
- DO_MYSQL_UPGRADE=0
- fi
- echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
-elif [ "$mariadb_do_major_upgrade" = "no" ]; then
- DO_MYSQL_UPGRADE=0
-else
- DO_MYSQL_UPGRADE=1
-fi
-
if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d "$MYSQL_BACKUP_DIR" ]; then
- echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
- exit 1
- fi
- mkdir "$MYSQL_BACKUP_DIR"
- if [ $? -ne 0 ]; then
- echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
- exit 1
- fi
-
- # the /root/.my.cnf is needed because we set the mysql root
- # password from liberty onwards
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- # While not ideal, this step allows us to calculate exactly how much space the dump
- # will need. Our main goal here is avoiding any chance of corruption due to disk space
- # exhaustion
- backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
- database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
- free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
- # we need at least space for a new mysql database + dump of the existing one,
- # times a small factor for additional safety room
- # note: bash doesn't do floating point math or floats in if statements,
- # so use python to apply the ratio and cast it back to integer
- required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
- if [ $required_space -ge $free_space ]; then
- echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
- exit 1
- fi
-
mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
fi
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index 598d22d0..7244f949 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -1,16 +1,8 @@
-heat_template_version: 2014-10-16
+heat_template_version: 2016-10-14
description: 'Upgrade for Pacemaker deployments'
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
input_values:
type: json
@@ -54,9 +46,10 @@ resources:
CephMonUpgradeDeployment:
type: OS::Heat::SoftwareDeploymentGroup
properties:
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: CephMonUpgradeConfig}
input_values: {get_param: input_values}
+ update_policy:
batch_create:
max_batch_size: 1
rolling_update:
@@ -82,6 +75,7 @@ resources:
params:
MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_check.sh
- get_file: major_upgrade_pacemaker_migrations.sh
- get_file: major_upgrade_controller_pacemaker_1.sh
@@ -89,7 +83,7 @@ resources:
type: OS::Heat::SoftwareDeploymentGroup
depends_on: CephMonUpgradeDeployment
properties:
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
input_values: {get_param: input_values}
@@ -103,7 +97,7 @@ resources:
BlockStorageUpgradeDeployment:
type: OS::Heat::SoftwareDeploymentGroup
properties:
- servers: {get_param: blockstorage_servers}
+ servers: {get_param: [servers, BlockStorage]}
config: {get_resource: BlockStorageUpgradeConfig}
input_values: {get_param: input_values}
@@ -122,7 +116,7 @@ resources:
type: OS::Heat::SoftwareDeploymentGroup
depends_on: BlockStorageUpgradeDeployment
properties:
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
index 623549a0..f6aa3066 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
@@ -3,15 +3,7 @@ description: 'Upgrade for Pacemaker deployments'
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
input_values:
type: json
@@ -43,45 +35,12 @@ resources:
- "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- get_param: UpgradeInitCommand
- UpgradeInitControllerDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: controller_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- UpgradeInitComputeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: compute_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- UpgradeInitBlockStorageDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: blockstorage_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- UpgradeInitObjectStorageDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: objectstorage_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
- UpgradeInitCephStorageDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: cephstorage_servers}
- config: {get_resource: UpgradeInitConfig}
- input_values: {get_param: input_values}
-
# TODO(jistr): for Mitaka->Newton upgrades and further we can use
# map_merge with input_values instead of feeding params into scripts
# via str_replace on bash snippets
+ # FIXME(shardy) we have hard-coded per-role *ScriptConfig's here
+ # Would be better to have a common config for all roles
ComputeDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
@@ -97,35 +56,32 @@ resources:
UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- get_file: major_upgrade_compute.sh
- ComputeDeliverUpgradeScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: compute_servers}
- config: {get_resource: ComputeDeliverUpgradeScriptConfig}
- input_values: {get_param: input_values}
-
ObjectStorageDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: {get_file: major_upgrade_object_storage.sh}
- ObjectStorageDeliverUpgradeScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: objectstorage_servers}
- config: {get_resource: ObjectStorageDeliverUpgradeScriptConfig}
- input_values: {get_param: input_values}
-
CephStorageDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: {get_file: major_upgrade_ceph_storage.sh}
- CephStorageDeliverUpgradeScriptDeployment:
+{% for role in roles %}
+ UpgradeInit{{role.name}}Deployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: UpgradeInitConfig}
+ input_values: {get_param: input_values}
+
+ {% if not role.name in ['Controller', 'BlockStorage'] %}
+ {{role.name}}DeliverUpgradeScriptDeployment:
type: OS::Heat::SoftwareDeploymentGroup
properties:
- servers: {get_param: cephstorage_servers}
- config: {get_resource: CephStorageDeliverUpgradeScriptConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}DeliverUpgradeScriptConfig}
input_values: {get_param: input_values}
+ {% endif %}
+{% endfor %}
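A rendering sketch for an assumed CephStorage role: every role gets an UpgradeInit deployment, while the guard above skips the DeliverUpgradeScript deployment for Controller and BlockStorage, so CephStorage renders to:

  UpgradeInitCephStorageDeployment:
    type: OS::Heat::SoftwareDeploymentGroup
    properties:
      servers: {get_param: [servers, CephStorage]}
      config: {get_resource: UpgradeInitConfig}
      input_values: {get_param: input_values}

  CephStorageDeliverUpgradeScriptDeployment:
    type: OS::Heat::SoftwareDeploymentGroup
    properties:
      servers: {get_param: [servers, CephStorage]}
      config: {get_resource: CephStorageDeliverUpgradeScriptConfig}
      input_values: {get_param: input_values}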
diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
index 9414ac19..91406fba 100644
--- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
+++ b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
@@ -4,15 +4,7 @@ description: >
Software-config for performing aodh data migration
parameters:
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
input_values:
type: json
@@ -28,6 +20,6 @@ resources:
AodhMysqlMigrationScriptDeployment:
type: OS::Heat::SoftwareDeploymentGroup
properties:
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: AodhMysqlMigrationScriptConfig}
input_values: {get_param: input_values}
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
index 84b03c7e..fb01925b 100644
--- a/network/endpoints/endpoint_data.yaml
+++ b/network/endpoints/endpoint_data.yaml
@@ -199,6 +199,21 @@ Swift:
S3:
port: 8080
+CephRgw:
+ Internal:
+ net_param: CephRgw
+ uri_suffixes:
+ '': /swift/v1
+ Public:
+ net_param: Public
+ uri_suffixes:
+ '': /swift/v1
+ Admin:
+ net_param: CephRgw
+ uri_suffixes:
+ '': /swift/v1
+ port: 8080
+
Sahara:
Internal:
net_param: SaharaApi
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index dd29bcde..734b6431 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -25,6 +25,9 @@ parameters:
CeilometerAdmin: {protocol: http, port: '8777', host: IP_ADDRESS}
CeilometerInternal: {protocol: http, port: '8777', host: IP_ADDRESS}
CeilometerPublic: {protocol: http, port: '8777', host: IP_ADDRESS}
+ CephRgwAdmin: {protocol: http, port: '8080', host: IP_ADDRESS}
+ CephRgwInternal: {protocol: http, port: '8080', host: IP_ADDRESS}
+ CephRgwPublic: {protocol: http, port: '8080', host: IP_ADDRESS}
CinderAdmin: {protocol: http, port: '8776', host: IP_ADDRESS}
CinderInternal: {protocol: http, port: '8776', host: IP_ADDRESS}
CinderPublic: {protocol: http, port: '8776', host: IP_ADDRESS}
@@ -563,6 +566,252 @@ outputs:
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, CeilometerPublic, port]
+ CephRgwAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, CephRgwNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ port:
+ get_param: [EndpointMap, CephRgwAdmin, port]
+ protocol:
+ get_param: [EndpointMap, CephRgwAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CephRgwAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, CephRgwNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, CephRgwAdmin, port]
+ - /swift/v1
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CephRgwAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, CephRgwNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, CephRgwAdmin, port]
+ CephRgwInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, CephRgwNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ port:
+ get_param: [EndpointMap, CephRgwInternal, port]
+ protocol:
+ get_param: [EndpointMap, CephRgwInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CephRgwInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, CephRgwNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, CephRgwInternal, port]
+ - /swift/v1
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CephRgwInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, CephRgwNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, CephRgwNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, CephRgwInternal, port]
+ CephRgwPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, CephRgwPublic, port]
+ protocol:
+ get_param: [EndpointMap, CephRgwPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CephRgwPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, CephRgwPublic, port]
+ - /swift/v1
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, CephRgwPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, CephRgwPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, CephRgwPublic, port]
CinderAdmin:
host:
str_replace:
diff --git a/network/ports/external_from_pool_v6.yaml b/network/ports/external_from_pool_v6.yaml
index baa544e7..e541049d 100644
--- a/network/ports/external_from_pool_v6.yaml
+++ b/network/ports/external_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [ExternalPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: ExternalNetCidr}, 1]}
diff --git a/network/ports/internal_api_from_pool_v6.yaml b/network/ports/internal_api_from_pool_v6.yaml
index 8d0a91b6..afb144ba 100644
--- a/network/ports/internal_api_from_pool_v6.yaml
+++ b/network/ports/internal_api_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [InternalApiPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: InternalApiNetCidr}, 1]}
diff --git a/network/ports/management_from_pool_v6.yaml b/network/ports/management_from_pool_v6.yaml
index d9ac6046..4c1cc216 100644
--- a/network/ports/management_from_pool_v6.yaml
+++ b/network/ports/management_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [ManagementPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: ManagementNetCidr}, 1]}
diff --git a/network/ports/storage_from_pool_v6.yaml b/network/ports/storage_from_pool_v6.yaml
index 328f8385..18faf1bd 100644
--- a/network/ports/storage_from_pool_v6.yaml
+++ b/network/ports/storage_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [StoragePort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: StorageNetCidr}, 1]}
diff --git a/network/ports/storage_mgmt_from_pool_v6.yaml b/network/ports/storage_mgmt_from_pool_v6.yaml
index 50470c92..e1145a31 100644
--- a/network/ports/storage_mgmt_from_pool_v6.yaml
+++ b/network/ports/storage_mgmt_from_pool_v6.yaml
@@ -49,4 +49,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [StorageMgmtPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: StorageMgmtNetCidr}, 1]}
diff --git a/network/ports/tenant_from_pool_v6.yaml b/network/ports/tenant_from_pool_v6.yaml
index bbe6f736..d4f0d29c 100644
--- a/network/ports/tenant_from_pool_v6.yaml
+++ b/network/ports/tenant_from_pool_v6.yaml
@@ -48,4 +48,4 @@ outputs:
- ''
- - {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
- '/'
- - {str_split: ['/', {get_attr: [TenantPort, subnets, 0, cidr]}, 1]}
+ - {str_split: ['/', {get_param: TenantNetCidr}, 1]}
diff --git a/network/service_net_map.yaml b/network/service_net_map.yaml
index a61af1b3..6e5c2449 100644
--- a/network/service_net_map.yaml
+++ b/network/service_net_map.yaml
@@ -45,6 +45,7 @@ parameters:
MysqlNetwork: internal_api
CephClusterNetwork: storage_mgmt
CephMonNetwork: storage
+ CephRgwNetwork: storage
ControllerHostnameResolveNetwork: internal_api
ComputeHostnameResolveNetwork: internal_api
BlockStorageHostnameResolveNetwork: internal_api
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index 45c15fc1..681a2da8 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.yaml
@@ -139,6 +139,7 @@ resource_registry:
OS::TripleO::Services::Apache: puppet/services/apache.yaml
OS::TripleO::Services::CACerts: puppet/services/ca-certs.yaml
OS::TripleO::Services::CephMon: OS::Heat::None
+ OS::TripleO::Services::CephRgw: OS::Heat::None
OS::TripleO::Services::CephOSD: OS::Heat::None
OS::TripleO::Services::CephClient: OS::Heat::None
OS::TripleO::Services::CephExternal: OS::Heat::None
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index d9dcaee9..067f1517 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -99,15 +99,8 @@ parameters:
the overcloud. It's accessible via the Nova metadata API.
type: json
- # Controller-specific params
- ControllerCount:
- type: number
- default: 1
-
# Compute-specific params
- ComputeCount:
- type: number
- default: 1
+# FIXME(shardy) handle these deprecated names as they don't match compute.yaml
HypervisorNeutronPhysicalBridge:
default: 'br-ex'
description: >
@@ -122,7 +115,7 @@ parameters:
# Jinja loop for Role in role_data.yaml
{% for role in roles %}
- # Resources generated for {{role.name}} Role
+ # Parameters generated for {{role.name}} Role
{{role.name}}Services:
description: A list of service resources (configured in the Heat
resource_registry) which represent nested stacks
@@ -131,47 +124,32 @@ parameters:
{% if role.ServicesDefault %}
default: {{role.ServicesDefault}}
{% endif %}
-{% endfor %}
-
-# Block storage specific parameters
- BlockStorageCount:
- type: number
- default: 0
-
-# Object storage specific parameters
- ObjectStorageCount:
- type: number
- default: 0
-# Ceph storage specific parameters
- CephStorageCount:
+ {{role.name}}Count:
+ description: Number of {{role.name}} nodes to deploy
type: number
- default: 0
+ {% if role.CountDefault %}
+ default: {{role.CountDefault}}
+ {% endif %}
- # Hostname format for each role
- # Note %index% is translated into the index of the node, e.g 0/1/2 etc
- # and %stackname% is replaced with OS::stack_name in the template below.
- # If you want to use the heat generated names, pass '' (empty string).
- ControllerHostnameFormat:
- type: string
- description: Format for Controller node hostnames
- default: '%stackname%-controller-%index%'
- ComputeHostnameFormat:
- type: string
- description: Format for Compute node hostnames
- default: '%stackname%-novacompute-%index%'
- BlockStorageHostnameFormat:
- type: string
- description: Format for BlockStorage node hostnames
- default: '%stackname%-blockstorage-%index%'
- ObjectStorageHostnameFormat:
+ {{role.name}}HostnameFormat:
type: string
- description: Format for SwiftStorage node hostnames
- default: '%stackname%-objectstorage-%index%'
- CephStorageHostnameFormat:
- type: string
- description: Format for CephStorage node hostnames
- default: '%stackname%-cephstorage-%index%'
+ description: >
+ Format for {{role.name}} node hostnames
+ Note %index% is translated into the index of the node, e.g 0/1/2 etc
+ and %stackname% is replaced with the stack name e.g overcloud
+ {% if role.HostnameFormatDefault %}
+ default: "{{role.HostnameFormatDefault}}"
+ {% endif %}
+
+ {{role.name}}RemovalPolicies:
+ default: []
+ type: json
+ description: >
+ List of resources to be removed from {{role.name}} ResourceGroup when
+ doing an update which requires removal of specific resources.
+ Example format ComputeRemovalPolicies: [{'resource_list': ['0']}]
+{% endfor %}
# Identifiers to trigger tasks on nodes
UpdateIdentifier:
@@ -187,41 +165,6 @@ parameters:
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
- # If you want to remove a specific node from a resource group, you can pass
- # the node name or id as a <Group>RemovalPolicies parameter, for example:
- # ComputeRemovalPolicies: [{'resource_list': ['0']}]
- ControllerRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from ControllerResourceGroup when
- doing an update which requires removal of specific resources.
- ComputeRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from ComputeResourceGroup when
- doing an update which requires removal of specific resources.
- BlockStorageRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from BlockStorageResourceGroup when
- doing an update which requires removal of specific resources.
- ObjectStorageRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from ObjectStorageResourceGroup when
- doing an update which requires removal of specific resources.
- CephStorageRemovalPolicies:
- default: []
- type: json
- description: >
- List of resources to be removed from CephStorageResourceGroup when
- doing an update which requires removal of specific resources.
-
-
resources:
HeatAuthEncryptionKey:
@@ -296,158 +239,33 @@ resources:
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
ServiceHostnameList: {get_attr: [{{role.name}}, hostname]}
-{% endfor %}
-
- Controller:
+ {{role.name}}:
type: OS::Heat::ResourceGroup
depends_on: Networks
properties:
- count: {get_param: ControllerCount}
- removal_policies: {get_param: ControllerRemovalPolicies}
+ count: {get_param: {{role.name}}Count}
+ removal_policies: {get_param: {{role.name}}RemovalPolicies}
resource_def:
- type: OS::TripleO::Controller
+ type: OS::TripleO::{{role.name}}
properties:
CloudDomain: {get_param: CloudDomain}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
Hostname:
str_replace:
- template: {get_param: ControllerHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
- NodeIndex: '%index%'
- ServiceConfigSettings:
- map_merge:
- - get_attr: [ControllerServiceChain, role_data, config_settings]
- - get_attr: [ControllerServiceChain, role_data, global_config_settings]
- - get_attr: [ComputeServiceChain, role_data, global_config_settings]
- - get_attr: [BlockStorageServiceChain, role_data, global_config_settings]
- - get_attr: [ObjectStorageServiceChain, role_data, global_config_settings]
- - get_attr: [CephStorageServiceChain, role_data, global_config_settings]
- - get_attr: [ControllerServiceChain, role_data, global_config_settings]
- ServiceNames: {get_attr: [ControllerServiceChain, role_data, service_names]}
- MonitoringSubscriptions: {get_attr: [ControllerServiceChain, role_data, monitoring_subscriptions]}
-
- Compute:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: ComputeCount}
- removal_policies: {get_param: ComputeRemovalPolicies}
- resource_def:
- type: OS::TripleO::Compute
- properties:
- CloudDomain: {get_param: CloudDomain}
- NeutronPhysicalBridge: {get_param: HypervisorNeutronPhysicalBridge}
- NeutronPublicInterface: {get_param: HypervisorNeutronPublicInterface}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
- Hostname:
- str_replace:
- template: {get_param: ComputeHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
- NodeIndex: '%index%'
- ServiceConfigSettings:
- map_merge:
- - get_attr: [ComputeServiceChain, role_data, config_settings]
- - get_attr: [ControllerServiceChain, role_data, global_config_settings]
- - get_attr: [ComputeServiceChain, role_data, global_config_settings]
- - get_attr: [BlockStorageServiceChain, role_data, global_config_settings]
- - get_attr: [ObjectStorageServiceChain, role_data, global_config_settings]
- - get_attr: [CephStorageServiceChain, role_data, global_config_settings]
- ServiceNames: {get_attr: [ComputeServiceChain, role_data, service_names]}
- MonitoringSubscriptions: {get_attr: [ComputeServiceChain, role_data, monitoring_subscriptions]}
-
- BlockStorage:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: BlockStorageCount}
- removal_policies: {get_param: BlockStorageRemovalPolicies}
- resource_def:
- type: OS::TripleO::BlockStorage
- properties:
- UpdateIdentifier: {get_param: UpdateIdentifier}
- Hostname:
- str_replace:
- template: {get_param: BlockStorageHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- CloudDomain: {get_param: CloudDomain}
- ServerMetadata: {get_param: ServerMetadata}
- NodeIndex: '%index%'
- ServiceConfigSettings:
- map_merge:
- - get_attr: [BlockStorageServiceChain, role_data, config_settings]
- - get_attr: [ControllerServiceChain, role_data, global_config_settings]
- - get_attr: [ComputeServiceChain, role_data, global_config_settings]
- - get_attr: [BlockStorageServiceChain, role_data, global_config_settings]
- - get_attr: [ObjectStorageServiceChain, role_data, global_config_settings]
- - get_attr: [CephStorageServiceChain, role_data, global_config_settings]
- ServiceNames: {get_attr: [BlockStorageServiceChain, role_data, service_names]}
- MonitoringSubscriptions: {get_attr: [BlockStorageServiceChain, role_data, monitoring_subscriptions]}
-
- ObjectStorage:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: ObjectStorageCount}
- removal_policies: {get_param: ObjectStorageRemovalPolicies}
- resource_def:
- type: OS::TripleO::ObjectStorage
- properties:
- UpdateIdentifier: {get_param: UpdateIdentifier}
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- Hostname:
- str_replace:
- template: {get_param: ObjectStorageHostnameFormat}
- params:
- '%stackname%': {get_param: 'OS::stack_name'}
- CloudDomain: {get_param: CloudDomain}
- ServerMetadata: {get_param: ServerMetadata}
- NodeIndex: '%index%'
- ServiceConfigSettings:
- map_merge:
- - get_attr: [ObjectStorageServiceChain, role_data, config_settings]
- - get_attr: [ControllerServiceChain, role_data, global_config_settings]
- - get_attr: [ComputeServiceChain, role_data, global_config_settings]
- - get_attr: [BlockStorageServiceChain, role_data, global_config_settings]
- - get_attr: [ObjectStorageServiceChain, role_data, global_config_settings]
- - get_attr: [CephStorageServiceChain, role_data, global_config_settings]
- ServiceNames: {get_attr: [ObjectStorageServiceChain, role_data, service_names]}
- MonitoringSubscriptions: {get_attr: [ObjectStorageServiceChain, role_data, monitoring_subscriptions]}
-
- CephStorage:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
- properties:
- count: {get_param: CephStorageCount}
- removal_policies: {get_param: CephStorageRemovalPolicies}
- resource_def:
- type: OS::TripleO::CephStorage
- properties:
- ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
- UpdateIdentifier: {get_param: UpdateIdentifier}
- Hostname:
- str_replace:
- template: {get_param: CephStorageHostnameFormat}
+ template: {get_param: {{role.name}}HostnameFormat}
params:
'%stackname%': {get_param: 'OS::stack_name'}
- CloudDomain: {get_param: CloudDomain}
- ServerMetadata: {get_param: ServerMetadata}
NodeIndex: '%index%'
ServiceConfigSettings:
map_merge:
- - get_attr: [CephStorageServiceChain, role_data, config_settings]
- - get_attr: [ControllerServiceChain, role_data, global_config_settings]
- - get_attr: [ComputeServiceChain, role_data, global_config_settings]
- - get_attr: [BlockStorageServiceChain, role_data, global_config_settings]
- - get_attr: [ObjectStorageServiceChain, role_data, global_config_settings]
- - get_attr: [CephStorageServiceChain, role_data, global_config_settings]
- ServiceNames: {get_attr: [CephStorageServiceChain, role_data, service_names]}
- MonitoringSubscriptions: {get_attr: [CephStorageServiceChain, role_data, monitoring_subscriptions]}
+ - get_attr: [{{role.name}}ServiceChain, role_data, config_settings]
+ {% for r in roles %}
+ - get_attr: [{{r.name}}ServiceChain, role_data, global_config_settings]
+ {% endfor %}
+ ServiceNames: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChain, role_data, monitoring_subscriptions]}
+{% endfor %}
allNodesConfig:
type: OS::TripleO::AllNodes::SoftwareConfig
@@ -458,29 +276,17 @@ resources:
cloud_name_storage_mgmt: {get_param: CloudNameStorageManagement}
cloud_name_ctlplane: {get_param: CloudNameCtlplane}
hosts:
+{% for role in roles %}
- list_join:
- '\n'
- - {get_attr: [Compute, hosts_entry]}
- - list_join:
- - '\n'
- - {get_attr: [Controller, hosts_entry]}
- - list_join:
- - '\n'
- - {get_attr: [BlockStorage, hosts_entry]}
- - list_join:
- - '\n'
- - {get_attr: [ObjectStorage, hosts_entry]}
- - list_join:
- - '\n'
- - {get_attr: [CephStorage, hosts_entry]}
+ - {get_attr: [{{role.name}}, hosts_entry]}
+{% endfor %}
enabled_services:
list_join:
- ','
- - {get_attr: [ControllerServiceChain, role_data, service_names]}
- - {get_attr: [ComputeServiceChain, role_data, service_names]}
- - {get_attr: [BlockStorageServiceChain, role_data, service_names]}
- - {get_attr: [ObjectStorageServiceChain, role_data, service_names]}
- - {get_attr: [CephStorageServiceChain, role_data, service_names]}
+{% for role in roles %}
+ - {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+{% endfor %}
controller_ips: {get_attr: [Controller, ip_address]}
controller_names: {get_attr: [Controller, hostname]}
service_ips:
@@ -492,21 +298,17 @@ resources:
expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
data:
l:
- - {get_attr: [ControllerIpListMap, service_ips]}
- - {get_attr: [ComputeIpListMap, service_ips]}
- - {get_attr: [BlockStorageIpListMap, service_ips]}
- - {get_attr: [ObjectStorageIpListMap, service_ips]}
- - {get_attr: [CephStorageIpListMap, service_ips]}
+{% for role in roles %}
+ - {get_attr: [{{role.name}}IpListMap, service_ips]}
+{% endfor %}
service_node_names:
yaql:
expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
data:
l:
- - {get_attr: [ControllerIpListMap, service_hostnames]}
- - {get_attr: [ComputeIpListMap, service_hostnames]}
- - {get_attr: [BlockStorageIpListMap, service_hostnames]}
- - {get_attr: [ObjectStorageIpListMap, service_hostnames]}
- - {get_attr: [CephStorageIpListMap, service_hostnames]}
+{% for role in roles %}
+ - {get_attr: [{{role.name}}IpListMap, service_hostnames]}
+{% endfor %}
# FIXME(shardy): These require further work to move into service_ips
memcache_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]}
keystone_public_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystonePublicApiNetwork]}]}
@@ -626,11 +428,10 @@ resources:
UpdateWorkflow:
type: OS::TripleO::Tasks::UpdateWorkflow
properties:
- controller_servers: {get_attr: [Controller, attributes, nova_server_resource]}
- compute_servers: {get_attr: [Compute, attributes, nova_server_resource]}
- blockstorage_servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
- objectstorage_servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
- cephstorage_servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
+ servers:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+{% endfor %}
input_values:
deploy_identifier: {get_param: DeployIdentifier}
update_identifier: {get_param: UpdateIdentifier}
@@ -641,34 +442,26 @@ resources:
type: OS::TripleO::AllNodesExtraConfig
depends_on:
- UpdateWorkflow
- - ComputeAllNodesValidationDeployment
- - BlockStorageAllNodesValidationDeployment
- - ObjectStorageAllNodesValidationDeployment
- - CephStorageAllNodesValidationDeployment
- - ControllerAllNodesValidationDeployment
+{% for role in roles %}
+ - {{role.name}}AllNodesValidationDeployment
+{% endfor %}
properties:
- controller_servers: {get_attr: [Controller, attributes, nova_server_resource]}
- compute_servers: {get_attr: [Compute, attributes, nova_server_resource]}
- blockstorage_servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
- objectstorage_servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
- cephstorage_servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
+ servers:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+{% endfor %}
# Post deployment steps for all roles
AllNodesDeploySteps:
type: OS::TripleO::PostDeploySteps
properties:
servers:
- Controller: {get_attr: [Controller, attributes, nova_server_resource]}
- Compute: {get_attr: [Compute, attributes, nova_server_resource]}
- BlockStorage: {get_attr: [BlockStorage, attributes, nova_server_resource]}
- ObjectStorage: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
- CephStorage: {get_attr: [CephStorage, attributes, nova_server_resource]}
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+{% endfor %}
role_data:
- Controller: {get_attr: [ControllerServiceChain, role_data]}
- Compute: {get_attr: [ComputeServiceChain, role_data]}
- BlockStorage: {get_attr: [BlockStorageServiceChain, role_data]}
- ObjectStorage: {get_attr: [ObjectStorageServiceChain, role_data]}
- CephStorage: {get_attr: [CephStorageServiceChain, role_data]}
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
+{% endfor %}
outputs:
ManagedEndpoints:
@@ -689,6 +482,9 @@ outputs:
CeilometerInternalVip:
description: VIP for Ceilometer API internal endpoint
value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CeilometerApiNetwork]}]}
+ CephRgwInternalVip:
+ description: VIP for Ceph RGW internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CephRgwNetwork]}]}
CinderInternalVip:
description: VIP for Cinder API internal endpoint
value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, CinderApiNetwork]}]}
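The hand-maintained per-role resources above are replaced by a single Jinja2 loop over the roles defined in roles_data.yaml. With the five default roles the generated output is essentially identical to the blocks it removes; for example, the templated ServiceConfigSettings renders for the CephStorage role to:

      ServiceConfigSettings:
        map_merge:
          - get_attr: [CephStorageServiceChain, role_data, config_settings]
          - get_attr: [ControllerServiceChain, role_data, global_config_settings]
          - get_attr: [ComputeServiceChain, role_data, global_config_settings]
          - get_attr: [BlockStorageServiceChain, role_data, global_config_settings]
          - get_attr: [ObjectStorageServiceChain, role_data, global_config_settings]
          - get_attr: [CephStorageServiceChain, role_data, global_config_settings]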
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index 17825aaa..62748f94 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -27,6 +27,11 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
UpdateIdentifier:
default: ''
type: string
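The role templates gain an EndpointMap parameter, presumably so the parent template can pass the endpoint data uniformly to every role. As an illustration only (host and port values are hypothetical), an EndpointMap entry is a small map addressed with get_param paths such as [EndpointMap, KeystoneAdmin, uri_no_suffix]:

  KeystoneAdmin:
    protocol: http
    host: 192.0.2.10
    port: '35357'
    uri: http://192.0.2.10:35357/v2.0
    uri_no_suffix: http://192.0.2.10:35357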
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index 41d5ef8e..f5118c2c 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -48,6 +48,11 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
NetworkDeploymentActions:
type: comma_delimited_list
description: >
diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
index aa5c3c43..5dea044e 100644
--- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
@@ -4,15 +4,7 @@ description: Configure hieradata for all MidoNet nodes
parameters:
# Parameters passed from the parent template
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
EnableZookeeperOnController:
@@ -102,10 +94,10 @@ resources:
type: OS::Heat::StructuredDeploymentGroup
properties:
config: {get_resource: NetworkMidoNetConfig}
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
NetworkMidonetDeploymentComputes:
type: OS::Heat::StructuredDeploymentGroup
properties:
config: {get_resource: NetworkMidoNetConfig}
- servers: {get_param: compute_servers}
+ servers: {get_param: [servers, Compute]}
diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
index e924fc87..728c7ccc 100644
--- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
@@ -4,15 +4,7 @@ description: Configure hieradata for Network Cisco configuration
parameters:
# Parameters passed from the parent template
- controller_servers:
- type: json
- compute_servers:
- type: json
- blockstorage_servers:
- type: json
- objectstorage_servers:
- type: json
- cephstorage_servers:
+ servers:
type: json
# extra parameters passed via parameter_defaults
@@ -140,7 +132,7 @@ resources:
properties:
name: NetworkCiscoDeployment
config: {get_resource: NetworkCiscoConfig}
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
input_values:
UCSM_ip: {get_param: NetworkUCSMIp}
UCSM_username: {get_param: NetworkUCSMUsername}
@@ -187,7 +179,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: CollectMacDeploymentsController
- servers: {get_param: controller_servers}
+ servers: {get_param: [servers, Controller]}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -195,7 +187,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: CollectMacDeploymentsCompute
- servers: {get_param: compute_servers}
+ servers: {get_param: [servers, Compute]}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -203,7 +195,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: CollectMacDeploymentsBlockStorage
- servers: {get_param: blockstorage_servers}
+ servers: {get_param: [servers, BlockStorage]}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -211,7 +203,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: CollectMacDeploymentsObjectStorage
- servers: {get_param: objectstorage_servers}
+ servers: {get_param: [servers, ObjectStorage]}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -219,7 +211,7 @@ resources:
type: OS::Heat::SoftwareDeployments
properties:
name: CollectMacDeploymentsCephStorage
- servers: {get_param: cephstorage_servers}
+ servers: {get_param: [servers, CephStorage]}
config: {get_resource: CollectMacConfig}
actions: ['CREATE'] # Only do this on CREATE
@@ -294,7 +286,7 @@ resources:
type: OS::Heat::SoftwareDeployment
properties:
name: MappingToNexusDeploymentsController
- server: {get_param: [controller_servers, '0']}
+ server: {get_param: [servers, Controller, '0']}
config: {get_resource: MappingToNexusConfig}
input_values:
# FIXME(shardy): It'd be more convenient if we could join these
@@ -338,7 +330,7 @@ resources:
depends_on: MappingToNexusDeploymentsController
properties:
name: MappingToUCSMDeploymentsController
- server: {get_param: [controller_servers, '0']}
+ server: {get_param: [servers, Controller, '0']}
config: {get_resource: MappingToUCSMConfig}
input_values:
ucsm_config: {get_param: NetworkUCSMHostList}
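Both all-nodes extra-config templates now take a single servers parameter: a JSON map keyed by role name, where each value maps a node index to its Nova server ID (this is what the nova_server_resource attribute of each role's ResourceGroup provides). A sketch of the shape, with illustrative server IDs:

  servers:
    Controller:
      '0': 4f5c3a...   # Nova server UUIDs, illustrative only
      '1': 9e2d17...
    Compute:
      '0': b81f02...

{get_param: [servers, Controller]} then selects all controller servers, while {get_param: [servers, Controller, '0']} selects the first controller only.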
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
index e496553a..f5b1f0e6 100644
--- a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
@@ -32,6 +32,18 @@ resources:
contrail::vrouter::provision_vrouter::keystone_admin_tenant_name: admin
contrail::vrouter::provision_vrouter::keystone_admin_password: '"%{::admin_password}"'
+ contrail::vnc_api::vnc_api_config:
+ 'auth/AUTHN_TYPE':
+ value: keystone
+ 'auth/AUTHN_PROTOCOL':
+ value: http
+ 'auth/AUTHN_SERVER':
+ value: "%{hiera('keystone_admin_api_vip')}"
+ 'auth/AUTHN_PORT':
+ value: 35357
+ 'auth/AUTHN_URL':
+ value: '/v2.0/tokens'
+
ComputeContrailDeployment:
type: OS::Heat::StructuredDeployment
properties:
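The new contrail::vnc_api::vnc_api_config hieradata uses 'section/KEY' style keys; assuming the usual Contrail client configuration layout (file name illustrative, typically /etc/contrail/vnc_api_lib.ini), the hash above would end up rendered as:

  [auth]
  AUTHN_TYPE = keystone
  AUTHN_PROTOCOL = http
  AUTHN_SERVER = <keystone_admin_api_vip>
  AUTHN_PORT = 35357
  AUTHN_URL = /v2.0/tokens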
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
new file mode 100644
index 00000000..65c96ac2
--- /dev/null
+++ b/puppet/post.j2.yaml
@@ -0,0 +1,139 @@
+heat_template_version: 2016-10-14
+
+description: >
+ Post-deploy configuration steps via puppet for all roles,
+ as defined in ../roles_data.yaml
+
+parameters:
+ servers:
+ type: json
+ description: Mapping of Role name, e.g. Controller, to a list of servers
+
+ role_data:
+ type: json
+ description: Mapping of Role name, e.g. Controller, to the per-role data
+
+ DeployIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting this to a unique value will re-run any deployment tasks which
+ perform configuration on a Heat stack-update.
+
+resources:
+
+{% for role in roles %}
+ # Post deployment steps for all roles
+ # A single config is re-applied with an incrementing step number
+ # {{role.name}} Role steps
+ {{role.name}}ArtifactsConfig:
+ type: deploy-artifacts.yaml
+
+ {{role.name}}ArtifactsDeploy:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ArtifactsConfig}
+
+ {{role.name}}PreConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PreConfig
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Config:
+ type: OS::TripleO::{{role.name}}Config
+ properties:
+ StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
+
+ # Step through a series of configuration steps
+ {{role.name}}Deployment_Step1:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ properties:
+ name: {{role.name}}Deployment_Step1
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 1
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Deployment_Step2:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step1
+ {% endfor %}
+ properties:
+ name: {{role.name}}Deployment_Step2
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 2
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Deployment_Step3:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step2
+ {% endfor %}
+ properties:
+ name: {{role.name}}Deployment_Step3
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 3
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Deployment_Step4:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step3
+ {% endfor %}
+ properties:
+ name: {{role.name}}Deployment_Step4
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 4
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Deployment_Step5:
+ type: OS::Heat::StructuredDeploymentGroup
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step4
+ {% endfor %}
+ properties:
+ name: {{role.name}}Deployment_Step5
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: 5
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}PostConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PostConfig
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ {% endfor %}
+ properties:
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ # Note, this should come last, so use depends_on to ensure
+ # this is created after any other resources.
+ {{role.name}}ExtraConfigPost:
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}PostConfig
+ {% endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+{% endfor %}
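Rendering this template for the default roles reproduces the deleted puppet/post.yaml below: every role's Step N deployment depends on every role's Step N-1, so all roles advance through the five puppet steps in lockstep. For example, the Controller role's second step renders to:

  ControllerDeployment_Step2:
    type: OS::Heat::StructuredDeploymentGroup
    depends_on:
      - ControllerDeployment_Step1
      - ComputeDeployment_Step1
      - BlockStorageDeployment_Step1
      - ObjectStorageDeployment_Step1
      - CephStorageDeployment_Step1
    properties:
      name: ControllerDeployment_Step2
      servers: {get_param: [servers, Controller]}
      config: {get_resource: ControllerConfig}
      input_values:
        step: 2
        update_identifier: {get_param: DeployIdentifier}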
diff --git a/puppet/post.yaml b/puppet/post.yaml
deleted file mode 100644
index 8f57b34e..00000000
--- a/puppet/post.yaml
+++ /dev/null
@@ -1,644 +0,0 @@
-heat_template_version: 2016-10-14
-
-description: >
- Post-deploy configuration steps via puppet for all roles,
- Controller, Compute, BlockStorage, SwiftStorage and CephStorage.
-
-parameters:
- servers:
- type: json
- description: Mapping of Role name e.g Controller to a list of servers
-
- role_data:
- type: json
- description: Mapping of Role name e.g Controller to the per-role data
-
- DeployIdentifier:
- default: ''
- type: string
- description: >
- Setting this to a unique value will re-run any deployment tasks which
- perform configuration on a Heat stack-update.
-
-resources:
- # Post deployment steps for all roles
- # A single config is re-applied with an incrementing step number
- # Controller Role steps
- ControllerArtifactsConfig:
- type: deploy-artifacts.yaml
-
- ControllerArtifactsDeploy:
- type: OS::Heat::StructuredDeployments
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerArtifactsConfig}
-
- ControllerPreConfig:
- type: OS::TripleO::Tasks::ControllerPreConfig
- properties:
- servers: {get_param: [servers, Controller]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- ControllerConfig:
- type: OS::TripleO::ControllerConfig
- properties:
- StepConfig: {get_param: [role_data, Controller, step_config]}
-
- # Step through a series of configuration steps
- ControllerDeployment_Step1:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: [ControllerPreConfig, ControllerArtifactsDeploy]
- properties:
- name: ControllerDeployment_Step1
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerConfig}
- input_values:
- step: 1
- update_identifier: {get_param: DeployIdentifier}
-
- ControllerDeployment_Step2:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step1
- - ComputeDeployment_Step1
- - BlockStorageDeployment_Step1
- - ObjectStorageDeployment_Step1
- - CephStorageDeployment_Step1
- properties:
- name: ControllerDeployment_Step2
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerConfig}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
-
- ControllerDeployment_Step3:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step2
- - ComputeDeployment_Step2
- - BlockStorageDeployment_Step2
- - ObjectStorageDeployment_Step2
- - CephStorageDeployment_Step2
- properties:
- name: ControllerDeployment_Step3
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerConfig}
- input_values:
- step: 3
- update_identifier: {get_param: DeployIdentifier}
-
- ControllerDeployment_Step4:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step3
- - ComputeDeployment_Step3
- - BlockStorageDeployment_Step3
- - ObjectStorageDeployment_Step3
- - CephStorageDeployment_Step3
- properties:
- name: ControllerDeployment_Step4
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerConfig}
- input_values:
- step: 4
- update_identifier: {get_param: DeployIdentifier}
-
- ControllerDeployment_Step5:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step4
- - ComputeDeployment_Step4
- - BlockStorageDeployment_Step4
- - ObjectStorageDeployment_Step4
- - CephStorageDeployment_Step4
- properties:
- name: ControllerDeployment_Step5
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerConfig}
- input_values:
- step: 5
- update_identifier: {get_param: DeployIdentifier}
-
- ControllerPostConfig:
- type: OS::TripleO::Tasks::ControllerPostConfig
- depends_on:
- - ControllerDeployment_Step5
- - ComputeDeployment_Step5
- - BlockStorageDeployment_Step5
- - ObjectStorageDeployment_Step5
- - CephStorageDeployment_Step5
- properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ControllerExtraConfigPost:
- depends_on:
- - ControllerPostConfig
- - ComputePostConfig
- - BlockStoragePostConfig
- - ObjectStoragePostConfig
- - CephStoragePostConfig
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: [servers, Controller]}
-
- # Compute Role steps
- ComputeArtifactsConfig:
- type: deploy-artifacts.yaml
-
- ComputeArtifactsDeploy:
- type: OS::Heat::StructuredDeployments
- properties:
- servers: {get_param: [servers, Compute]}
- config: {get_resource: ComputeArtifactsConfig}
-
- ComputePreConfig:
- type: OS::TripleO::Tasks::ComputePreConfig
- properties:
- servers: {get_param: [servers, Compute]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- ComputeConfig:
- type: OS::TripleO::ComputeConfig
- properties:
- StepConfig: {get_param: [role_data, Compute, step_config]}
-
- # Step through a series of configuration steps
- ComputeDeployment_Step1:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: [ComputePreConfig, ComputeArtifactsDeploy]
- properties:
- name: ComputeDeployment_Step1
- servers: {get_param: [servers, Compute]}
- config: {get_resource: ComputeConfig}
- input_values:
- step: 1
- update_identifier: {get_param: DeployIdentifier}
-
- ComputeDeployment_Step2:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step1
- - ComputeDeployment_Step1
- - BlockStorageDeployment_Step1
- - ObjectStorageDeployment_Step1
- - CephStorageDeployment_Step1
- properties:
- name: ComputeDeployment_Step2
- servers: {get_param: [servers, Compute]}
- config: {get_resource: ComputeConfig}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
-
- ComputeDeployment_Step3:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step2
- - ComputeDeployment_Step2
- - BlockStorageDeployment_Step2
- - ObjectStorageDeployment_Step2
- - CephStorageDeployment_Step2
- properties:
- name: ComputeDeployment_Step3
- servers: {get_param: [servers, Compute]}
- config: {get_resource: ComputeConfig}
- input_values:
- step: 3
- update_identifier: {get_param: DeployIdentifier}
-
- ComputeDeployment_Step4:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step3
- - ComputeDeployment_Step3
- - BlockStorageDeployment_Step3
- - ObjectStorageDeployment_Step3
- - CephStorageDeployment_Step3
- properties:
- name: ComputeDeployment_Step4
- servers: {get_param: [servers, Compute]}
- config: {get_resource: ComputeConfig}
- input_values:
- step: 4
- update_identifier: {get_param: DeployIdentifier}
-
- ComputeDeployment_Step5:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step4
- - ComputeDeployment_Step4
- - BlockStorageDeployment_Step4
- - ObjectStorageDeployment_Step4
- - CephStorageDeployment_Step4
- properties:
- name: ComputeDeployment_Step5
- servers: {get_param: [servers, Compute]}
- config: {get_resource: ComputeConfig}
- input_values:
- step: 5
- update_identifier: {get_param: DeployIdentifier}
-
- ComputePostConfig:
- type: OS::TripleO::Tasks::ComputePostConfig
- depends_on:
- - ControllerDeployment_Step5
- - ComputeDeployment_Step5
- - BlockStorageDeployment_Step5
- - ObjectStorageDeployment_Step5
- - CephStorageDeployment_Step5
- properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ComputeExtraConfigPost:
- depends_on:
- - ControllerPostConfig
- - ComputePostConfig
- - BlockStoragePostConfig
- - ObjectStoragePostConfig
- - CephStoragePostConfig
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: [servers, Compute]}
-
- # BlockStorage Role steps
- BlockStorageArtifactsConfig:
- type: deploy-artifacts.yaml
-
- BlockStorageArtifactsDeploy:
- type: OS::Heat::StructuredDeployments
- properties:
- servers: {get_param: [servers, BlockStorage]}
- config: {get_resource: BlockStorageArtifactsConfig}
-
- BlockStoragePreConfig:
- type: OS::TripleO::Tasks::BlockStoragePreConfig
- properties:
- servers: {get_param: [servers, BlockStorage]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- BlockStorageConfig:
- type: OS::TripleO::BlockStorageConfig
- properties:
- StepConfig: {get_param: [role_data, BlockStorage, step_config]}
-
- # Step through a series of configuration steps
- BlockStorageDeployment_Step1:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: [BlockStoragePreConfig, BlockStorageArtifactsDeploy]
- properties:
- name: BlockStorageDeployment_Step1
- servers: {get_param: [servers, BlockStorage]}
- config: {get_resource: BlockStorageConfig}
- input_values:
- step: 1
- update_identifier: {get_param: DeployIdentifier}
-
- BlockStorageDeployment_Step2:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step1
- - ComputeDeployment_Step1
- - BlockStorageDeployment_Step1
- - ObjectStorageDeployment_Step1
- - CephStorageDeployment_Step1
- properties:
- name: BlockStorageDeployment_Step2
- servers: {get_param: [servers, BlockStorage]}
- config: {get_resource: BlockStorageConfig}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
-
- BlockStorageDeployment_Step3:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step2
- - ComputeDeployment_Step2
- - BlockStorageDeployment_Step2
- - ObjectStorageDeployment_Step2
- - CephStorageDeployment_Step2
- properties:
- name: BlockStorageDeployment_Step3
- servers: {get_param: [servers, BlockStorage]}
- config: {get_resource: BlockStorageConfig}
- input_values:
- step: 3
- update_identifier: {get_param: DeployIdentifier}
-
- BlockStorageDeployment_Step4:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step3
- - ComputeDeployment_Step3
- - BlockStorageDeployment_Step3
- - ObjectStorageDeployment_Step3
- - CephStorageDeployment_Step3
- properties:
- name: BlockStorageDeployment_Step4
- servers: {get_param: [servers, BlockStorage]}
- config: {get_resource: BlockStorageConfig}
- input_values:
- step: 4
- update_identifier: {get_param: DeployIdentifier}
-
- BlockStorageDeployment_Step5:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step4
- - ComputeDeployment_Step4
- - BlockStorageDeployment_Step4
- - ObjectStorageDeployment_Step4
- - CephStorageDeployment_Step4
- properties:
- name: BlockStorageDeployment_Step5
- servers: {get_param: [servers, BlockStorage]}
- config: {get_resource: BlockStorageConfig}
- input_values:
- step: 5
- update_identifier: {get_param: DeployIdentifier}
-
- BlockStoragePostConfig:
- type: OS::TripleO::Tasks::BlockStoragePostConfig
- depends_on:
- - ControllerDeployment_Step5
- - ComputeDeployment_Step5
- - BlockStorageDeployment_Step5
- - ObjectStorageDeployment_Step5
- - CephStorageDeployment_Step5
- properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- BlockStorageExtraConfigPost:
- depends_on:
- - ControllerPostConfig
- - ComputePostConfig
- - BlockStoragePostConfig
- - ObjectStoragePostConfig
- - CephStoragePostConfig
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: [servers, BlockStorage]}
-
- # ObjectStorage Role steps
- ObjectStorageArtifactsConfig:
- type: deploy-artifacts.yaml
-
- ObjectStorageArtifactsDeploy:
- type: OS::Heat::StructuredDeployments
- properties:
- servers: {get_param: [servers, ObjectStorage]}
- config: {get_resource: ObjectStorageArtifactsConfig}
-
- ObjectStoragePreConfig:
- type: OS::TripleO::Tasks::ObjectStoragePreConfig
- properties:
- servers: {get_param: [servers, ObjectStorage]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- ObjectStorageConfig:
- type: OS::TripleO::ObjectStorageConfig
- properties:
- StepConfig: {get_param: [role_data, ObjectStorage, step_config]}
-
- # Step through a series of configuration steps
- ObjectStorageDeployment_Step1:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: [ObjectStoragePreConfig, ObjectStorageArtifactsDeploy]
- properties:
- name: ObjectStorageDeployment_Step1
- servers: {get_param: [servers, ObjectStorage]}
- config: {get_resource: ObjectStorageConfig}
- input_values:
- step: 1
- update_identifier: {get_param: DeployIdentifier}
-
- ObjectStorageDeployment_Step2:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step1
- - ComputeDeployment_Step1
- - BlockStorageDeployment_Step1
- - ObjectStorageDeployment_Step1
- - CephStorageDeployment_Step1
- properties:
- name: ObjectStorageDeployment_Step2
- servers: {get_param: [servers, ObjectStorage]}
- config: {get_resource: ObjectStorageConfig}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
-
- ObjectStorageDeployment_Step3:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step2
- - ComputeDeployment_Step2
- - BlockStorageDeployment_Step2
- - ObjectStorageDeployment_Step2
- - CephStorageDeployment_Step2
- properties:
- name: ObjectStorageDeployment_Step3
- servers: {get_param: [servers, ObjectStorage]}
- config: {get_resource: ObjectStorageConfig}
- input_values:
- step: 3
- update_identifier: {get_param: DeployIdentifier}
-
- ObjectStorageDeployment_Step4:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step3
- - ComputeDeployment_Step3
- - BlockStorageDeployment_Step3
- - ObjectStorageDeployment_Step3
- - CephStorageDeployment_Step3
- properties:
- name: ObjectStorageDeployment_Step4
- servers: {get_param: [servers, ObjectStorage]}
- config: {get_resource: ObjectStorageConfig}
- input_values:
- step: 4
- update_identifier: {get_param: DeployIdentifier}
-
- ObjectStorageDeployment_Step5:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step4
- - ComputeDeployment_Step4
- - BlockStorageDeployment_Step4
- - ObjectStorageDeployment_Step4
- - CephStorageDeployment_Step4
- properties:
- name: ObjectStorageDeployment_Step5
- servers: {get_param: [servers, ObjectStorage]}
- config: {get_resource: ObjectStorageConfig}
- input_values:
- step: 5
- update_identifier: {get_param: DeployIdentifier}
-
- ObjectStoragePostConfig:
- type: OS::TripleO::Tasks::ObjectStoragePostConfig
- depends_on:
- - ControllerDeployment_Step5
- - ComputeDeployment_Step5
- - BlockStorageDeployment_Step5
- - ObjectStorageDeployment_Step5
- - CephStorageDeployment_Step5
- properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- ObjectStorageExtraConfigPost:
- depends_on:
- - ControllerPostConfig
- - ComputePostConfig
- - BlockStoragePostConfig
- - ObjectStoragePostConfig
- - CephStoragePostConfig
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: [servers, ObjectStorage]}
-
- # CephStorage Role steps
- CephStorageArtifactsConfig:
- type: deploy-artifacts.yaml
-
- CephStorageArtifactsDeploy:
- type: OS::Heat::StructuredDeployments
- properties:
- servers: {get_param: [servers, CephStorage]}
- config: {get_resource: CephStorageArtifactsConfig}
-
- CephStoragePreConfig:
- type: OS::TripleO::Tasks::CephStoragePreConfig
- properties:
- servers: {get_param: [servers, CephStorage]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- CephStorageConfig:
- type: OS::TripleO::CephStorageConfig
- properties:
- StepConfig: {get_param: [role_data, CephStorage, step_config]}
-
- # Step through a series of configuration steps
- CephStorageDeployment_Step1:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on: [CephStoragePreConfig, CephStorageArtifactsDeploy]
- properties:
- name: CephStorageDeployment_Step1
- servers: {get_param: [servers, CephStorage]}
- config: {get_resource: CephStorageConfig}
- input_values:
- step: 1
- update_identifier: {get_param: DeployIdentifier}
-
- CephStorageDeployment_Step2:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step1
- - ComputeDeployment_Step1
- - BlockStorageDeployment_Step1
- - ObjectStorageDeployment_Step1
- - CephStorageDeployment_Step1
- properties:
- name: CephStorageDeployment_Step2
- servers: {get_param: [servers, CephStorage]}
- config: {get_resource: CephStorageConfig}
- input_values:
- step: 2
- update_identifier: {get_param: DeployIdentifier}
-
- CephStorageDeployment_Step3:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step2
- - ComputeDeployment_Step2
- - BlockStorageDeployment_Step2
- - ObjectStorageDeployment_Step2
- - CephStorageDeployment_Step2
- properties:
- name: CephStorageDeployment_Step3
- servers: {get_param: [servers, CephStorage]}
- config: {get_resource: CephStorageConfig}
- input_values:
- step: 3
- update_identifier: {get_param: DeployIdentifier}
-
- CephStorageDeployment_Step4:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step3
- - ComputeDeployment_Step3
- - BlockStorageDeployment_Step3
- - ObjectStorageDeployment_Step3
- - CephStorageDeployment_Step3
- properties:
- name: CephStorageDeployment_Step4
- servers: {get_param: [servers, CephStorage]}
- config: {get_resource: CephStorageConfig}
- input_values:
- step: 4
- update_identifier: {get_param: DeployIdentifier}
-
- CephStorageDeployment_Step5:
- type: OS::Heat::StructuredDeploymentGroup
- depends_on:
- - ControllerDeployment_Step4
- - ComputeDeployment_Step4
- - BlockStorageDeployment_Step4
- - ObjectStorageDeployment_Step4
- - CephStorageDeployment_Step4
- properties:
- name: CephStorageDeployment_Step5
- servers: {get_param: [servers, CephStorage]}
- config: {get_resource: CephStorageConfig}
- input_values:
- step: 5
- update_identifier: {get_param: DeployIdentifier}
-
- CephStoragePostConfig:
- type: OS::TripleO::Tasks::CephStoragePostConfig
- depends_on:
- - ControllerDeployment_Step5
- - ComputeDeployment_Step5
- - BlockStorageDeployment_Step5
- - ObjectStorageDeployment_Step5
- - CephStorageDeployment_Step5
- properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- CephStorageExtraConfigPost:
- depends_on:
- - ControllerPostConfig
- - ComputePostConfig
- - BlockStoragePostConfig
- - ObjectStoragePostConfig
- - CephStoragePostConfig
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: [servers, CephStorage]}
diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml
index 65afffad..c84614ba 100644
--- a/puppet/services/aodh-api.yaml
+++ b/puppet/services/aodh-api.yaml
@@ -21,6 +21,11 @@ parameters:
MonitoringSubscriptionAodhApi:
default: 'overcloud-ceilometer-aodh-api'
type: string
+ EnableCombinationAlarms:
+ default: false
+ description: Combination alarms are deprecated in Newton, hence disabled
+ by default. To enable, set this parameter to true.
+ type: boolean
resources:
AodhBase:
@@ -62,5 +67,6 @@ outputs:
# internal_api_subnet - > IP/CIDR
aodh::api::host: {get_param: [ServiceNetMap, AodhApiNetwork]}
aodh::wsgi::apache::bind_host: {get_param: [ServiceNetMap, AodhApiNetwork]}
+ tripleo::profile::base::aodh::api::enable_combination_alarms: {get_param: EnableCombinationAlarms}
step_config: |
include tripleo::profile::base::aodh::api
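EnableCombinationAlarms is an ordinary Heat parameter, so re-enabling the deprecated alarms only requires a parameter_defaults entry in an environment file, e.g.:

  parameter_defaults:
    EnableCombinationAlarms: true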
diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml
new file mode 100644
index 00000000..6bb4f6d1
--- /dev/null
+++ b/puppet/services/ceph-rgw.yaml
@@ -0,0 +1,77 @@
+heat_template_version: 2016-04-08
+
+description: >
+ Ceph RadosGW service.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ AdminToken:
+ description: The keystone auth secret and db password.
+ type: string
+ hidden: true
+ CephRgwKey:
+ description: The cephx key for the radosgw client. Can be created
+ with ceph-authtool --gen-print-key.
+ type: string
+ hidden: true
+ SwiftPassword:
+ description: The password for the swift service account, used by the Ceph RGW services.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+
+resources:
+ CephBase:
+ type: ./ceph-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Ceph RadosGW service.
+ value:
+ service_name: ceph_rgw
+ config_settings:
+ map_merge:
+ - get_attr: [CephBase, role_data, config_settings]
+ - tripleo::profile::base::ceph::rgw::rgw_key: {get_param: CephRgwKey}
+ tripleo::profile::base::ceph::rgw::keystone_admin_token: {get_param: AdminToken}
+ tripleo::profile::base::ceph::rgw::keystone_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ ceph::profile::params::frontend_type: 'civetweb'
+ ceph_rgw_civetweb_bind_address: {get_param: [ServiceNetMap, CephRgwNetwork]}
+ ceph::profile::params::rgw_frontends:
+ list_join:
+ - ''
+ - - 'civetweb port='
+ - '%{hiera("ceph_rgw_civetweb_bind_address")}'
+ - ':'
+ - {get_param: [EndpointMap, CephRgwInternal, port]}
+ tripleo.ceph_rgw.firewall_rules:
+ '122 ceph rgw':
+ dport: {get_param: [EndpointMap, CephRgwInternal, port]}
+ ceph::rgw::keystone::auth::public_url: {get_param: [EndpointMap, CephRgwPublic, uri]}
+ ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]}
+ ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]}
+ ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
+ ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion}
+ ceph::rgw::keystone::auth::tenant: 'service'
+ step_config: |
+ include ::tripleo::profile::base::ceph::rgw
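The rgw_frontends string is assembled with list_join around a hiera interpolation, so the civetweb bind address is only resolved on each node when Puppet compiles its catalog. Assuming the CephRgwInternal endpoint uses port 8080 (the port value is illustrative; it actually comes from EndpointMap), the resulting hieradata is:

  ceph::profile::params::rgw_frontends: civetweb port=%{hiera("ceph_rgw_civetweb_bind_address")}:8080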
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index adc1b4cb..03abe79b 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -101,6 +101,7 @@ outputs:
template: "'REGISTRY_HOST'"
params:
REGISTRY_HOST: {get_param: [EndpointMap, GlanceRegistryInternal, host]}
+ glance::api::registry_client_protocol: {get_param: [EndpointMap, GlanceRegistryInternal, protocol] }
glance::api::authtoken::password: {get_param: GlancePassword}
glance::api::enable_proxy_headers_parsing: true
glance::api::debug: {get_param: Debug}
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index b321ecbe..18fc9158 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -148,7 +148,6 @@ outputs:
keystone::endpoint::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
keystone::endpoint::region: {get_param: KeystoneRegion}
keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
- keystone::public_endpoint: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]}
keystone::db::mysql::user: keystone
keystone::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
keystone::db::mysql::dbname: keystone
diff --git a/puppet/services/manila-api.yaml b/puppet/services/manila-api.yaml
index 2e43730d..1513ab31 100644
--- a/puppet/services/manila-api.yaml
+++ b/puppet/services/manila-api.yaml
@@ -66,6 +66,7 @@ outputs:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
manila::api::bind_host: {get_param: [ServiceNetMap, ManilaApiNetwork]}
+ manila::api::enable_proxy_headers_parsing: true
step_config: |
include ::tripleo::profile::base::manila::api
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index ff0012ff..9eb66314 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -27,6 +27,11 @@ parameters:
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry.
type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
Hostname:
type: string
default: '' # Defaults to Heat created hostname
diff --git a/roles_data.yaml b/roles_data.yaml
index a349ca72..03c71485 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -1,8 +1,11 @@
- name: Controller
+ CountDefault: 1
+ HostnameFormatDefault: '%stackname%-controller-%index%'
ServicesDefault:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
+ - OS::TripleO::Services::CephRgw
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderScheduler
@@ -71,6 +74,8 @@
- OS::TripleO::Services::VipHosts
- name: Compute
+ CountDefault: 1
+ HostnameFormatDefault: '%stackname%-novacompute-%index%'
ServicesDefault:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephClient
@@ -94,6 +99,8 @@
- OS::TripleO::Services::VipHosts
- name: BlockStorage
+ CountDefault: 0
+ HostnameFormatDefault: '%stackname%-blockstorage-%index%'
ServicesDefault:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CinderVolume
@@ -107,6 +114,8 @@
- OS::TripleO::Services::VipHosts
- name: ObjectStorage
+ CountDefault: 0
+ HostnameFormatDefault: '%stackname%-objectstorage-%index%'
ServicesDefault:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::Kernel
@@ -121,6 +130,8 @@
- OS::TripleO::Services::VipHosts
- name: CephStorage
+ CountDefault: 0
+ HostnameFormatDefault: '%stackname%-cephstorage-%index%'
ServicesDefault:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephOSD
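The new CountDefault and HostnameFormatDefault keys provide defaults for the per-role Count and HostnameFormat parameters consumed by overcloud.j2.yaml (see the {{role.name}}HostnameFormat str_replace earlier in this diff). A sketch of a custom role entry using them; the role name and service list are hypothetical:

  - name: Networker                        # hypothetical custom role
    CountDefault: 2
    HostnameFormatDefault: '%stackname%-networker-%index%'
    ServicesDefault:
      - OS::TripleO::Services::CACerts
      - OS::TripleO::Services::Kernel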